From c67244c27ce6b0b7e89c57f0385d5730e4249438 Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Fri, 31 May 2019 18:30:47 +0200 Subject: [PATCH] Replace MPI_Allgather by MPI_Exscan Do not store the index sets of all processors on each processor. Fill them on demand. --- include/deal.II/dofs/dof_handler.h | 40 +++- include/deal.II/dofs/number_cache.h | 23 ++ include/deal.II/hp/dof_handler.h | 40 +++- source/distributed/tria.cc | 70 ++---- source/dofs/dof_handler_policy.cc | 275 +++++------------------ source/dofs/dof_renumbering.cc | 80 +++---- source/dofs/number_cache.cc | 149 ++++++++++++ source/grid/grid_tools.cc | 24 +- source/multigrid/mg_transfer_internal.cc | 9 +- source/multigrid/mg_transfer_prebuilt.cc | 2 +- tests/sharedtria/no_cells_01.cc | 22 +- 11 files changed, 364 insertions(+), 370 deletions(-) diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h index bac0319cfa..980cf95f3a 100644 --- a/include/deal.II/dofs/dof_handler.h +++ b/include/deal.II/dofs/dof_handler.h @@ -1046,7 +1046,7 @@ public: * MPI processes but the Triangulation on which this DoFHandler builds works * only on one MPI process.) */ - const std::vector & + std::vector locally_owned_dofs_per_processor() const; /** @@ -1065,7 +1065,7 @@ public: * or that there are multiple MPI processes but the Triangulation on which * this DoFHandler builds works only on one MPI process.) */ - const std::vector & + std::vector n_locally_owned_dofs_per_processor() const; /** @@ -1079,7 +1079,7 @@ public: * MPI processes but the Triangulation on which this DoFHandler builds works * only on one MPI process.) */ - const std::vector & + std::vector locally_owned_mg_dofs_per_processor(const unsigned int level) const; /** @@ -1461,25 +1461,39 @@ DoFHandler::locally_owned_mg_dofs(const unsigned int level) const template -const std::vector & +std::vector DoFHandler::n_locally_owned_dofs_per_processor() const { - return number_cache.n_locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return number_cache.get_n_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF); } template -const std::vector & +std::vector DoFHandler::locally_owned_dofs_per_processor() const { - return number_cache.locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return number_cache.get_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF); } template -const std::vector & +std::vector DoFHandler::locally_owned_mg_dofs_per_processor( const unsigned int level) const { @@ -1489,7 +1503,15 @@ DoFHandler::locally_owned_mg_dofs_per_processor( mg_number_cache.size() == this->get_triangulation().n_global_levels(), ExcMessage( "The level dofs are not set up properly! 
Did you call distribute_mg_dofs()?")); - return mg_number_cache[level].locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return mg_number_cache[level].get_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return mg_number_cache[level].get_locally_owned_dofs_per_processor( + MPI_COMM_SELF); } diff --git a/include/deal.II/dofs/number_cache.h b/include/deal.II/dofs/number_cache.h index 2d273d9440..56e8b792f0 100644 --- a/include/deal.II/dofs/number_cache.h +++ b/include/deal.II/dofs/number_cache.h @@ -19,6 +19,7 @@ #include #include +#include #include @@ -107,6 +108,28 @@ namespace internal void clear(); + /** + * Return a representation of @p n_locally_owned_dofs_per_processor both + * in case it was set up (directly returning the array) or in case we + * need to accumulate some information over all processors. The latter + * case involves global communication and is typically expensive to set + * up because it invokes MPI_Allgather. + */ + std::vector + get_n_locally_owned_dofs_per_processor( + const MPI_Comm mpi_communicator) const; + + /** + * Return a representation of @p locally_owned_dofs_per_processor both + * in case it was set up (directly returning the array of IndexSet + * fields) or in case we need to accumulate some information over all + * processors. The latter case involves global communication and is + * typically expensive to set up because it invokes MPI_Allgather. + */ + std::vector + get_locally_owned_dofs_per_processor( + const MPI_Comm mpi_communicator) const; + /** * Total number of dofs, accumulated over all processors that may * participate on this mesh. diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h index 20a83a24dc..622cacb9e9 100644 --- a/include/deal.II/hp/dof_handler.h +++ b/include/deal.II/hp/dof_handler.h @@ -838,7 +838,7 @@ namespace hp * processes but the Triangulation on which this DoFHandler builds * works only on one MPI process.) */ - const std::vector & + std::vector locally_owned_dofs_per_processor() const; /** @@ -857,7 +857,7 @@ namespace hp * process, or that there are multiple MPI processes but the Triangulation * on which this DoFHandler builds works only on one MPI process.) */ - const std::vector & + std::vector n_locally_owned_dofs_per_processor() const; /** @@ -875,7 +875,7 @@ namespace hp * support multilevel methods yet, this function throws an exception * ExcNotImplemented() independent of its argument. 
*/ - const std::vector & + std::vector locally_owned_mg_dofs_per_processor(const unsigned int level) const; /** @@ -1487,19 +1487,33 @@ namespace hp template - const std::vector & + std::vector DoFHandler::n_locally_owned_dofs_per_processor() const { - return number_cache.n_locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return number_cache.get_n_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF); } template - const std::vector & + std::vector DoFHandler::locally_owned_dofs_per_processor() const { - return number_cache.locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return number_cache.get_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF); } @@ -1518,7 +1532,7 @@ namespace hp template - const std::vector & + std::vector DoFHandler::locally_owned_mg_dofs_per_processor( const unsigned int level) const { @@ -1526,7 +1540,15 @@ namespace hp (void)level; Assert(level < this->get_triangulation().n_global_levels(), ExcMessage("invalid level in locally_owned_mg_dofs_per_processor")); - return mg_number_cache[0].locally_owned_dofs_per_processor; + const parallel::Triangulation *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + return mg_number_cache[level].get_locally_owned_dofs_per_processor( + tr->get_communicator()); + else + return mg_number_cache[level].get_locally_owned_dofs_per_processor( + MPI_COMM_SELF); } diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index 1281b79598..f9f267bb8d 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1862,8 +1862,6 @@ namespace parallel const std::string fname_variable = std::string(filename) + "_variable.data"; - const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator); - MPI_Info info; int ierr = MPI_Info_create(&info); AssertThrowMPI(ierr); @@ -1906,36 +1904,26 @@ namespace parallel // Gather size of data in bytes we want to store from this processor. const unsigned int size_on_proc = src_data_variable.size(); - // Share information among all processors. - std::vector sizes_on_all_procs(n_procs); - ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc), - 1, - MPI_UNSIGNED, - sizes_on_all_procs.data(), - 1, - MPI_UNSIGNED, - mpi_communicator); + // Compute prefix sum + unsigned int prefix_sum = 0; + ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc), + &prefix_sum, + 1, + MPI_UNSIGNED, + MPI_SUM, + mpi_communicator); AssertThrowMPI(ierr); - // Generate accumulated sum to get an offset for writing variable - // size data. - std::partial_sum(sizes_on_all_procs.begin(), - sizes_on_all_procs.end(), - sizes_on_all_procs.begin()); - const char *data = src_data_variable.data(); // Write data consecutively into file. - ierr = MPI_File_write_at( - fh, - offset_variable + - ((myrank == 0) ? 
- 0 : - sizes_on_all_procs[myrank - 1]), // global position in file - DEAL_II_MPI_CONST_CAST(data), - src_data_variable.size(), // local buffer - MPI_CHAR, - MPI_STATUS_IGNORE); + ierr = MPI_File_write_at(fh, + offset_variable + + prefix_sum, // global position in file + DEAL_II_MPI_CONST_CAST(data), + src_data_variable.size(), // local buffer + MPI_CHAR, + MPI_STATUS_IGNORE); AssertThrowMPI(ierr); ierr = MPI_File_close(&fh); @@ -2033,8 +2021,6 @@ namespace parallel const std::string fname_variable = std::string(filename) + "_variable.data"; - const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator); - MPI_Info info; int ierr = MPI_Info_create(&info); AssertThrowMPI(ierr); @@ -2070,27 +2056,19 @@ namespace parallel dest_sizes_variable.end(), 0); - // share information among all processors - std::vector sizes_on_all_procs(n_procs); - ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc), - 1, - MPI_UNSIGNED, - sizes_on_all_procs.data(), - 1, - MPI_UNSIGNED, - mpi_communicator); + // share information among all processors by prefix sum + unsigned int prefix_sum = 0; + ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc), + &prefix_sum, + 1, + MPI_UNSIGNED, + MPI_SUM, + mpi_communicator); AssertThrowMPI(ierr); - // generate accumulated sum - std::partial_sum(sizes_on_all_procs.begin(), - sizes_on_all_procs.end(), - sizes_on_all_procs.begin()); - dest_data_variable.resize(size_on_proc); ierr = MPI_File_read_at(fh, - offset + ((myrank == 0) ? - 0 : - sizes_on_all_procs[myrank - 1]), + offset + prefix_sum, dest_data_variable.data(), dest_data_variable.size(), MPI_CHAR, diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index c8cf270131..50e6b1f64c 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -5020,9 +5020,6 @@ namespace internal &dof_handler->get_triangulation()))); Assert(triangulation != nullptr, ExcInternalError()); - const unsigned int n_cpus = - Utilities::MPI::n_mpi_processes(triangulation->get_communicator()); - const types::subdomain_id subdomain_id = triangulation->locally_owned_subdomain(); @@ -5091,25 +5088,16 @@ namespace internal // --------- Phase 4: shift indices so that each processor has a unique // range of indices - std::vector - n_locally_owned_dofs_per_processor(n_cpus); - - const int ierr = - MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs), - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - n_locally_owned_dofs_per_processor.data(), - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - triangulation->get_communicator()); + dealii::types::global_dof_index my_shift = 0; + const int ierr = + MPI_Exscan(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs), + &my_shift, + 1, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, + triangulation->get_communicator()); AssertThrowMPI(ierr); - const dealii::types::global_dof_index my_shift = - std::accumulate(n_locally_owned_dofs_per_processor.begin(), - n_locally_owned_dofs_per_processor.begin() + - subdomain_id, - static_cast(0)); - // make dof indices globally consecutive for (auto &new_index : renumbering) if (new_index != numbers::invalid_dof_index) @@ -5125,39 +5113,17 @@ namespace internal // now a little bit of housekeeping const dealii::types::global_dof_index n_global_dofs = - std::accumulate(n_locally_owned_dofs_per_processor.begin(), - n_locally_owned_dofs_per_processor.end(), - dealii::types::global_dof_index(0)); - - std::vector locally_owned_dofs_per_processor( - n_cpus, IndexSet(n_global_dofs)); - { - dealii::types::global_dof_index current_shift = 0; - for 
(unsigned int i = 0; i < n_cpus; ++i) - { - locally_owned_dofs_per_processor[i].add_range( - current_shift, - current_shift + n_locally_owned_dofs_per_processor[i]); - current_shift += n_locally_owned_dofs_per_processor[i]; - } - } - NumberCache number_cache(locally_owned_dofs_per_processor, - triangulation->locally_owned_subdomain()); - Assert(number_cache - .locally_owned_dofs_per_processor - [triangulation->locally_owned_subdomain()] - .n_elements() == number_cache.n_locally_owned_dofs, - ExcInternalError()); - Assert( - !number_cache - .locally_owned_dofs_per_processor[triangulation - ->locally_owned_subdomain()] - .n_elements() || - number_cache - .locally_owned_dofs_per_processor[triangulation - ->locally_owned_subdomain()] - .nth_index_in_set(0) == my_shift, - ExcInternalError()); + Utilities::MPI::sum(n_locally_owned_dofs, + triangulation->get_communicator()); + + NumberCache number_cache; + number_cache.n_global_dofs = n_global_dofs; + number_cache.n_locally_owned_dofs = n_locally_owned_dofs; + number_cache.locally_owned_dofs = IndexSet(n_global_dofs); + number_cache.locally_owned_dofs.add_range(my_shift, + my_shift + + n_locally_owned_dofs); + number_cache.locally_owned_dofs.compress(); // this ends the phase where we enumerate degrees of freedom on // each processor. what is missing is communicating DoF indices @@ -5305,10 +5271,6 @@ namespace internal "Triangulation if the flag construct_multigrid_hierarchy " "is set in the constructor.")); - - const unsigned int n_cpus = - Utilities::MPI::n_mpi_processes(triangulation->get_communicator()); - // loop over all levels that exist globally (across all // processors), even if the current processor does not in fact // have any cells on that level or if the local part of the @@ -5365,8 +5327,6 @@ namespace internal } } - // TODO: make this code simpler with the new constructors of - // NumberCache make indices consecutive level_number_cache.n_locally_owned_dofs = 0; for (types::global_dof_index &index : renumbering) if (index != numbers::invalid_dof_index) @@ -5374,27 +5334,20 @@ namespace internal //* 3. communicate local dofcount and shift ids to make // them unique - level_number_cache.n_locally_owned_dofs_per_processor.resize( - n_cpus); - - int ierr = MPI_Allgather( - &level_number_cache.n_locally_owned_dofs, - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - level_number_cache.n_locally_owned_dofs_per_processor.data(), - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - triangulation->get_communicator()); + dealii::types::global_dof_index my_shift = 0; + const int ierr = + MPI_Exscan(DEAL_II_MPI_CONST_CAST( + &level_number_cache.n_locally_owned_dofs), + &my_shift, + 1, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, + triangulation->get_communicator()); AssertThrowMPI(ierr); - const dealii::types::global_dof_index shift = std::accumulate( - level_number_cache.n_locally_owned_dofs_per_processor.begin(), - level_number_cache.n_locally_owned_dofs_per_processor.begin() + - triangulation->locally_owned_subdomain(), - static_cast(0)); for (types::global_dof_index &index : renumbering) if (index != numbers::invalid_dof_index) - index += shift; + index += my_shift; // now re-enumerate all dofs to this shifted and condensed // numbering form. 
we renumber some dofs as invalid, so @@ -5409,49 +5362,16 @@ namespace internal renumbering, IndexSet(0), *dof_handler, level, false); // now a little bit of housekeeping - level_number_cache.n_global_dofs = std::accumulate( - level_number_cache.n_locally_owned_dofs_per_processor.begin(), - level_number_cache.n_locally_owned_dofs_per_processor.end(), - static_cast(0)); + level_number_cache.n_global_dofs = + Utilities::MPI::sum(level_number_cache.n_locally_owned_dofs, + triangulation->get_communicator()); level_number_cache.locally_owned_dofs = IndexSet(level_number_cache.n_global_dofs); level_number_cache.locally_owned_dofs.add_range( - shift, shift + level_number_cache.n_locally_owned_dofs); + my_shift, my_shift + level_number_cache.n_locally_owned_dofs); level_number_cache.locally_owned_dofs.compress(); - // fill global_dof_indexsets - level_number_cache.locally_owned_dofs_per_processor.resize(n_cpus); - { - dealii::types::global_dof_index current_shift = 0; - for (unsigned int i = 0; i < n_cpus; ++i) - { - level_number_cache.locally_owned_dofs_per_processor[i] = - IndexSet(level_number_cache.n_global_dofs); - level_number_cache.locally_owned_dofs_per_processor[i] - .add_range(current_shift, - current_shift + - level_number_cache - .n_locally_owned_dofs_per_processor[i]); - current_shift += - level_number_cache.n_locally_owned_dofs_per_processor[i]; - } - } - Assert(level_number_cache - .locally_owned_dofs_per_processor - [triangulation->locally_owned_subdomain()] - .n_elements() == level_number_cache.n_locally_owned_dofs, - ExcInternalError()); - Assert(!level_number_cache - .locally_owned_dofs_per_processor - [triangulation->locally_owned_subdomain()] - .n_elements() || - level_number_cache - .locally_owned_dofs_per_processor - [triangulation->locally_owned_subdomain()] - .nth_index_in_set(0) == shift, - ExcInternalError()); - number_caches.emplace_back(level_number_cache); } @@ -5679,11 +5599,12 @@ namespace internal *dof_handler, /*check_validity=*/false); - // Since we have not updated the number cache yet, we can use the - // index sets contained in the DoFHandler at this stage. - return NumberCache(dof_handler->locally_owned_dofs_per_processor(), - Utilities::MPI::this_mpi_process( - triangulation->get_communicator())); + NumberCache number_cache; + number_cache.locally_owned_dofs = dof_handler->locally_owned_dofs(); + number_cache.n_global_dofs = dof_handler->n_dofs(); + number_cache.n_locally_owned_dofs = + number_cache.locally_owned_dofs.n_elements(); + return number_cache; } else { @@ -5809,103 +5730,12 @@ namespace internal triangulation->load_user_flags(user_flags); } - // the last step is to update the NumberCache, including knowing - // which processor owns which DoF index. this requires - // communication. - // - // this step is substantially more complicated than it is in - // distribute_dofs() in case the IndexSets of locally owned DoFs - // after renumbering are not contiguous any more (which we have done - // at the top of this function). for distribute_dofs() it was enough - // to exchange the starting indices for each processor and the - // global number of DoFs, but here we actually have to serialize the - // IndexSet objects and shop them across the network. 
- const unsigned int n_cpus = Utilities::MPI::n_mpi_processes( - triangulation->get_communicator()); - std::vector locally_owned_dofs_per_processor( - n_cpus, IndexSet(dof_handler->n_dofs())); - // serialize our own IndexSet - std::vector my_data; - { -# ifdef DEAL_II_WITH_ZLIB - - boost::iostreams::filtering_ostream out; - out.push( - boost::iostreams::gzip_compressor(boost::iostreams::gzip_params( - boost::iostreams::gzip::best_compression))); - out.push(boost::iostreams::back_inserter(my_data)); - - boost::archive::binary_oarchive archive(out); - - archive << my_locally_owned_new_dof_indices; - out.flush(); -# else - std::ostringstream out; - boost::archive::binary_oarchive archive(out); - archive << my_locally_owned_new_dof_indices; - const std::string &s = out.str(); - my_data.reserve(s.size()); - my_data.assign(s.begin(), s.end()); -# endif - } - - // determine maximum size of IndexSet - const unsigned int max_size = - Utilities::MPI::max(my_data.size(), - triangulation->get_communicator()); - - // as the MPI_Allgather call will be reading max_size elements, and - // as this may be past the end of my_data, we need to increase the - // size of the local buffer. This is filled with zeros. - my_data.resize(max_size); - - std::vector buffer(max_size * n_cpus); - const int ierr = MPI_Allgather(my_data.data(), - max_size, - MPI_BYTE, - buffer.data(), - max_size, - MPI_BYTE, - triangulation->get_communicator()); - AssertThrowMPI(ierr); - - for (unsigned int i = 0; i < n_cpus; ++i) - if (i == Utilities::MPI::this_mpi_process( - triangulation->get_communicator())) - locally_owned_dofs_per_processor[i] = - my_locally_owned_new_dof_indices; - else - { - // copy the data previously received into a stringstream - // object and then read the IndexSet from it - std::string decompressed_buffer; - - // first decompress the buffer - { -# ifdef DEAL_II_WITH_ZLIB - - boost::iostreams::filtering_ostream decompressing_stream; - decompressing_stream.push( - boost::iostreams::gzip_decompressor()); - decompressing_stream.push( - boost::iostreams::back_inserter(decompressed_buffer)); - - decompressing_stream.write(&buffer[i * max_size], max_size); -# else - decompressed_buffer.assign(&buffer[i * max_size], max_size); -# endif - } - - // then restore the object from the buffer - std::istringstream in(decompressed_buffer); - boost::archive::binary_iarchive archive(in); - - archive >> locally_owned_dofs_per_processor[i]; - } - - return NumberCache(locally_owned_dofs_per_processor, - Utilities::MPI::this_mpi_process( - triangulation->get_communicator())); + NumberCache number_cache; + number_cache.locally_owned_dofs = my_locally_owned_new_dof_indices; + number_cache.n_global_dofs = dof_handler->n_dofs(); + number_cache.n_locally_owned_dofs = + number_cache.locally_owned_dofs.n_elements(); + return number_cache; } #endif } @@ -5921,8 +5751,7 @@ namespace internal // we only implement the case where the multigrid numbers are // renumbered within the processor's partition, rather than the most // general case - const std::vector &index_sets = - dof_handler->locally_owned_mg_dofs_per_processor(level); + const IndexSet index_set = dof_handler->locally_owned_mg_dofs(level); constexpr int dim = DoFHandlerType::dimension; constexpr int spacedim = DoFHandlerType::space_dimension; @@ -5938,7 +5767,7 @@ namespace internal # ifdef DEBUG for (types::global_dof_index i : new_numbers) { - Assert(index_sets[my_rank].is_element(i), + Assert(index_set.is_element(i), ExcNotImplemented( "Renumberings that change the locally owned mg 
dofs " "partitioning are currently not implemented for " @@ -5956,7 +5785,7 @@ namespace internal std::vector ghosted_new_numbers( relevant_dofs.n_elements()); { - Utilities::MPI::Partitioner partitioner(index_sets[my_rank], + Utilities::MPI::Partitioner partitioner(index_set, relevant_dofs, tr->get_communicator()); std::vector temp_array( @@ -6018,8 +5847,12 @@ namespace internal Assert(false, ExcNotImplemented()); #endif - return NumberCache( - index_sets, Utilities::MPI::this_mpi_process(tr->get_communicator())); + NumberCache number_cache; + number_cache.locally_owned_dofs = index_set; + number_cache.n_global_dofs = dof_handler->n_dofs(); + number_cache.n_locally_owned_dofs = + number_cache.locally_owned_dofs.n_elements(); + return number_cache; } } // namespace Policy } // namespace DoFHandlerImplementation diff --git a/source/dofs/dof_renumbering.cc b/source/dofs/dof_renumbering.cc index 6aff386c27..777250c0b1 100644 --- a/source/dofs/dof_renumbering.cc +++ b/source/dofs/dof_renumbering.cc @@ -877,39 +877,26 @@ namespace DoFRenumbering for (unsigned int c = 0; c < n_buckets; ++c) local_dof_count[c] = component_to_dof_map[c].size(); - - // gather information from all CPUs - std::vector all_dof_counts( - fe_collection.n_components() * - Utilities::MPI::n_mpi_processes(tria->get_communicator())); - - const int ierr = MPI_Allgather(local_dof_count.data(), - n_buckets, - DEAL_II_DOF_INDEX_MPI_TYPE, - all_dof_counts.data(), - n_buckets, - DEAL_II_DOF_INDEX_MPI_TYPE, - tria->get_communicator()); + std::vector prefix_dof_count(n_buckets); + const int ierr = MPI_Exscan(local_dof_count.data(), + prefix_dof_count.data(), + n_buckets, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, + tria->get_communicator()); AssertThrowMPI(ierr); - for (unsigned int i = 0; i < n_buckets; ++i) - Assert( - all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] == - local_dof_count[i], - ExcInternalError()); + std::vector global_dof_count(n_buckets); + Utilities::MPI::sum(local_dof_count, + tria->get_communicator(), + global_dof_count); // calculate shifts - unsigned int cumulated = 0; + types::global_dof_index cumulated = 0; for (unsigned int c = 0; c < n_buckets; ++c) { - shifts[c] = cumulated; - for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain(); - ++i) - shifts[c] += all_dof_counts[c + n_buckets * i]; - for (unsigned int i = 0; - i < Utilities::MPI::n_mpi_processes(tria->get_communicator()); - ++i) - cumulated += all_dof_counts[c + n_buckets * i]; + shifts[c] = prefix_dof_count[c] + cumulated; + cumulated += global_dof_count[c]; } #else (void)tria; @@ -1193,39 +1180,26 @@ namespace DoFRenumbering for (unsigned int c = 0; c < n_buckets; ++c) local_dof_count[c] = block_to_dof_map[c].size(); - - // gather information from all CPUs - std::vector all_dof_counts( - fe_collection.n_components() * - Utilities::MPI::n_mpi_processes(tria->get_communicator())); - - const int ierr = MPI_Allgather(local_dof_count.data(), - n_buckets, - DEAL_II_DOF_INDEX_MPI_TYPE, - all_dof_counts.data(), - n_buckets, - DEAL_II_DOF_INDEX_MPI_TYPE, - tria->get_communicator()); + std::vector prefix_dof_count(n_buckets); + const int ierr = MPI_Exscan(local_dof_count.data(), + prefix_dof_count.data(), + n_buckets, + DEAL_II_DOF_INDEX_MPI_TYPE, + MPI_SUM, + tria->get_communicator()); AssertThrowMPI(ierr); - for (unsigned int i = 0; i < n_buckets; ++i) - Assert( - all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] == - local_dof_count[i], - ExcInternalError()); + std::vector global_dof_count(n_buckets); + 
Utilities::MPI::sum(local_dof_count, + tria->get_communicator(), + global_dof_count); // calculate shifts types::global_dof_index cumulated = 0; for (unsigned int c = 0; c < n_buckets; ++c) { - shifts[c] = cumulated; - for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain(); - ++i) - shifts[c] += all_dof_counts[c + n_buckets * i]; - for (unsigned int i = 0; - i < Utilities::MPI::n_mpi_processes(tria->get_communicator()); - ++i) - cumulated += all_dof_counts[c + n_buckets * i]; + shifts[c] = prefix_dof_count[c] + cumulated; + cumulated += global_dof_count[c]; } #else (void)tria; diff --git a/source/dofs/number_cache.cc b/source/dofs/number_cache.cc index 6deff5e034..5d5aa0125e 100644 --- a/source/dofs/number_cache.cc +++ b/source/dofs/number_cache.cc @@ -14,6 +14,7 @@ // --------------------------------------------------------------------- #include +#include #include @@ -80,6 +81,154 @@ namespace internal locally_owned_dofs_per_processor.clear(); } + + + std::vector + NumberCache::get_n_locally_owned_dofs_per_processor( + const MPI_Comm mpi_communicator) const + { + const unsigned int n_procs = + Utilities::MPI::job_supports_mpi() ? + Utilities::MPI::n_mpi_processes(mpi_communicator) : + 1; + if (n_global_dofs == 0) + return std::vector(); + else if (n_locally_owned_dofs_per_processor.empty() == false) + { + AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs); + return n_locally_owned_dofs_per_processor; + } + else + { + std::vector result(n_procs, + n_locally_owned_dofs); +#ifdef DEAL_II_WITH_MPI + if (n_procs > 1) + MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs), + 1, + DEAL_II_DOF_INDEX_MPI_TYPE, + result.data(), + 1, + DEAL_II_DOF_INDEX_MPI_TYPE, + mpi_communicator); +#endif + return result; + } + } + + + + std::vector + NumberCache::get_locally_owned_dofs_per_processor( + const MPI_Comm mpi_communicator) const + { + AssertDimension(locally_owned_dofs.size(), n_global_dofs); + const unsigned int n_procs = + Utilities::MPI::job_supports_mpi() ? + Utilities::MPI::n_mpi_processes(mpi_communicator) : + 1; + if (n_global_dofs == 0) + return std::vector(); + else if (locally_owned_dofs_per_processor.empty() == false) + { + AssertDimension(locally_owned_dofs_per_processor.size(), n_procs); + return locally_owned_dofs_per_processor; + } + else + { + std::vector locally_owned_dofs_per_processor( + n_procs, locally_owned_dofs); + +#ifdef DEAL_II_WITH_MPI + if (n_procs > 1) + { + // this step is substantially more complicated because indices + // might be distributed arbitrarily among the processors. Here we + // have to serialize the IndexSet objects and shop them across the + // network. 
+ std::vector my_data; + { +# ifdef DEAL_II_WITH_ZLIB + + boost::iostreams::filtering_ostream out; + out.push(boost::iostreams::gzip_compressor( + boost::iostreams::gzip_params( + boost::iostreams::gzip::best_compression))); + out.push(boost::iostreams::back_inserter(my_data)); + + boost::archive::binary_oarchive archive(out); + + archive << locally_owned_dofs; + out.flush(); +# else + std::ostringstream out; + boost::archive::binary_oarchive archive(out); + archive << locally_owned_dofs; + const std::string &s = out.str(); + my_data.reserve(s.size()); + my_data.assign(s.begin(), s.end()); +# endif + } + + // determine maximum size of IndexSet + const unsigned int max_size = + Utilities::MPI::max(my_data.size(), mpi_communicator); + + // as the MPI_Allgather call will be reading max_size elements, + // and as this may be past the end of my_data, we need to increase + // the size of the local buffer. This is filled with zeros. + my_data.resize(max_size); + + std::vector buffer(max_size * n_procs); + const int ierr = MPI_Allgather(my_data.data(), + max_size, + MPI_BYTE, + buffer.data(), + max_size, + MPI_BYTE, + mpi_communicator); + AssertThrowMPI(ierr); + + for (unsigned int i = 0; i < n_procs; ++i) + if (i == Utilities::MPI::this_mpi_process(mpi_communicator)) + locally_owned_dofs_per_processor[i] = locally_owned_dofs; + else + { + // copy the data previously received into a stringstream + // object and then read the IndexSet from it + std::string decompressed_buffer; + + // first decompress the buffer + { +# ifdef DEAL_II_WITH_ZLIB + + boost::iostreams::filtering_ostream decompressing_stream; + decompressing_stream.push( + boost::iostreams::gzip_decompressor()); + decompressing_stream.push( + boost::iostreams::back_inserter(decompressed_buffer)); + + decompressing_stream.write(&buffer[i * max_size], + max_size); +# else + decompressed_buffer.assign(&buffer[i * max_size], + max_size); +# endif + } + + // then restore the object from the buffer + std::istringstream in(decompressed_buffer); + boost::archive::binary_iarchive archive(in); + + archive >> locally_owned_dofs_per_processor[i]; + } + } +#endif + return locally_owned_dofs_per_processor; + } + } + + std::size_t NumberCache::memory_consumption() const { diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc index 7e1d8b70c8..3f28240a68 100644 --- a/source/grid/grid_tools.cc +++ b/source/grid/grid_tools.cc @@ -2347,24 +2347,14 @@ namespace GridTools // Make indices global by getting the number of vertices owned by each // processors and shifting the indices accordingly - const unsigned int n_cpu = - Utilities::MPI::n_mpi_processes(triangulation.get_communicator()); - std::vector indices(n_cpu); - int ierr = MPI_Allgather(&next_index, - 1, - DEAL_II_VERTEX_INDEX_MPI_TYPE, - indices.data(), - 1, - DEAL_II_VERTEX_INDEX_MPI_TYPE, - triangulation.get_communicator()); + types::global_dof_index shift = 0; + int ierr = MPI_Exscan(&next_index, + &shift, + 1, + DEAL_II_VERTEX_INDEX_MPI_TYPE, + MPI_SUM, + triangulation.get_communicator()); AssertThrowMPI(ierr); - Assert(indices.begin() + triangulation.locally_owned_subdomain() < - indices.end(), - ExcInternalError()); - const types::global_vertex_index shift = - std::accumulate(indices.begin(), - indices.begin() + triangulation.locally_owned_subdomain(), - types::global_vertex_index(0)); std::map::iterator global_index_it = local_to_global_vertex_index.begin(), diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc index 9a0f3b1753..34fa8a6c08 
100644 --- a/source/multigrid/mg_transfer_internal.cc +++ b/source/multigrid/mg_transfer_internal.cc @@ -187,6 +187,12 @@ namespace internal // The list of neighbors is symmetric (our neighbors have us as a // neighbor), so we can use it to send and to know how many messages // we will get. + std::vector> + locally_owned_mg_dofs_per_processor; + for (unsigned int l = 0; l < tria->n_global_levels(); ++l) + locally_owned_mg_dofs_per_processor.push_back( + mg_dof.locally_owned_mg_dofs_per_processor(l)); + const std::set &neighbors = tria->level_ghost_owners(); std::map> send_data; @@ -198,8 +204,7 @@ namespace internal std::set::iterator it; for (it = neighbors.begin(); it != neighbors.end(); ++it) { - if (mg_dof - .locally_owned_mg_dofs_per_processor(dofpair.level)[*it] + if (locally_owned_mg_dofs_per_processor[dofpair.level][*it] .is_element(dofpair.level_dof_index)) { send_data[*it].push_back(dofpair); diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc index 6f8388bfe7..0217ff46d4 100644 --- a/source/multigrid/mg_transfer_prebuilt.cc +++ b/source/multigrid/mg_transfer_prebuilt.cc @@ -283,7 +283,7 @@ MGTransferPrebuilt::build_matrices( // Compute # of locally owned MG dofs / processor for distribution const std::vector<::dealii::IndexSet> - &locally_owned_mg_dofs_per_processor = + locally_owned_mg_dofs_per_processor = mg_dof.locally_owned_mg_dofs_per_processor(level + 1); std::vector<::dealii::types::global_dof_index> n_locally_owned_mg_dofs_per_processor( diff --git a/tests/sharedtria/no_cells_01.cc b/tests/sharedtria/no_cells_01.cc index b476f4efed..6087a92294 100644 --- a/tests/sharedtria/no_cells_01.cc +++ b/tests/sharedtria/no_cells_01.cc @@ -73,7 +73,7 @@ test() << std::endl; deallog << "n_locally_owned_dofs_per_processor: "; - std::vector v = + const std::vector v = dof_handler.n_locally_owned_dofs_per_processor(); unsigned int sum = 0; for (unsigned int i = 0; i < v.size(); ++i) @@ -97,21 +97,19 @@ test() const unsigned int N = dof_handler.n_dofs(); Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError()); - Assert( - std::accumulate(dof_handler.n_locally_owned_dofs_per_processor().begin(), - dof_handler.n_locally_owned_dofs_per_processor().end(), - 0U) == N, - ExcInternalError()); + const std::vector n_owned_dofs = + dof_handler.n_locally_owned_dofs_per_processor(); + Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N, + ExcInternalError()); + const std::vector owned_dofs = + dof_handler.locally_owned_dofs_per_processor(); IndexSet all(N); - for (unsigned int i = 0; - i < dof_handler.locally_owned_dofs_per_processor().size(); - ++i) + for (unsigned int i = 0; i < owned_dofs.size(); ++i) { - IndexSet intersect = - all & dof_handler.locally_owned_dofs_per_processor()[i]; + IndexSet intersect = all & owned_dofs[i]; Assert(intersect.n_elements() == 0, ExcInternalError()); - all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]); + all.add_indices(owned_dofs[i]); } Assert(all == complete_index_set(N), ExcInternalError()); -- 2.39.5
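The core pattern of this patch, used in dof_handler_policy.cc, grid_tools.cc and distributed/tria.cc alike, is to replace an MPI_Allgather of all local counts followed by std::accumulate/std::partial_sum with a single MPI_Exscan, which hands every rank the exclusive prefix sum (its index shift, or its write offset into a shared file) without ever materializing the full per-rank array. A minimal, self-contained sketch of the pattern follows; the counts are made up for illustration and no deal.II types are used:

#include <mpi.h>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Pretend each rank owns a different number of DoFs.
  const unsigned long long n_locally_owned = 10 + 3 * rank;

  // Exclusive prefix sum over ranks: my_shift = sum of the counts on
  // ranks 0..rank-1.  MPI_Exscan leaves the receive buffer untouched on
  // rank 0, so it must be initialized to 0 explicitly.
  unsigned long long my_shift = 0;
  MPI_Exscan(&n_locally_owned, &my_shift, 1,
             MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);

  // The global number of DoFs is a plain reduction; no rank needs the
  // complete per-rank array any more.
  unsigned long long n_global = 0;
  MPI_Allreduce(&n_locally_owned, &n_global, 1,
                MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);

  std::printf("rank %d owns global indices [%llu, %llu) of %llu\n",
              rank, my_shift, my_shift + n_locally_owned, n_global);

  MPI_Finalize();
  return 0;
}

This is both cheaper (logarithmic communication and constant memory per rank instead of an array of size n_procs) and exactly the quantity the enumeration consumes: the renumbering only ever used the accumulated prefix, never the individual per-rank entries.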
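Where the full per-processor information really is needed, the patch moves the gathering into NumberCache::get_n_locally_owned_dofs_per_processor() and get_locally_owned_dofs_per_processor(), so the MPI_Allgather only runs when a caller explicitly asks for it. The sketch below is a simplified stand-in for the counts variant (plain MPI types, no deal.II, hypothetical function name), showing the shape of such an on-demand getter:

#include <mpi.h>
#include <vector>

std::vector<unsigned long long>
gather_counts_on_demand(const unsigned long long n_locally_owned,
                        const MPI_Comm           communicator)
{
  int n_procs = 1;
  MPI_Comm_size(communicator, &n_procs);

  // Every slot starts out as the local value, so the serial case
  // (a single rank) needs no communication at all.
  std::vector<unsigned long long> counts(n_procs, n_locally_owned);

  if (n_procs > 1)
    MPI_Allgather(&n_locally_owned, 1, MPI_UNSIGNED_LONG_LONG,
                  counts.data(),    1, MPI_UNSIGNED_LONG_LONG,
                  communicator);

  return counts;
}

Because these accessors now return by value and may communicate, call sites that used to index into a stored reference inside a loop -- such as the neighbor loop in mg_transfer_internal.cc -- cache the result once up front, which is what the corresponding hunk does.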
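The component-wise and block-wise renumberings in dof_renumbering.cc need a slightly richer quantity: for each bucket c, the total size of all earlier buckets on all ranks plus the size of bucket c on all earlier ranks. The patch obtains this from one MPI_Exscan and one global sum over n_buckets entries instead of an n_buckets x n_procs allgather. A free-standing sketch of that computation (assumed helper name, plain MPI_Allreduce standing in for Utilities::MPI::sum):

#include <mpi.h>
#include <vector>

std::vector<unsigned long long>
compute_bucket_shifts(const std::vector<unsigned long long> &local_count,
                      const MPI_Comm                         communicator)
{
  const int n_buckets = static_cast<int>(local_count.size());

  // Element-wise exclusive prefix sum across ranks; stays 0 on rank 0.
  std::vector<unsigned long long> prefix_count(n_buckets, 0);
  MPI_Exscan(local_count.data(), prefix_count.data(), n_buckets,
             MPI_UNSIGNED_LONG_LONG, MPI_SUM, communicator);

  // Element-wise global totals per bucket.
  std::vector<unsigned long long> global_count(n_buckets, 0);
  MPI_Allreduce(local_count.data(), global_count.data(), n_buckets,
                MPI_UNSIGNED_LONG_LONG, MPI_SUM, communicator);

  std::vector<unsigned long long> shifts(n_buckets, 0);
  unsigned long long cumulated = 0;
  for (int c = 0; c < n_buckets; ++c)
    {
      // First index of bucket c on this rank: everything in earlier
      // buckets globally, plus bucket c on earlier ranks.
      shifts[c] = prefix_count[c] + cumulated;
      cumulated += global_count[c];
    }
  return shifts;
}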
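Finally, get_locally_owned_dofs_per_processor() still has to exchange whole IndexSet objects when somebody asks for them, because after a renumbering they need not be contiguous. The patch keeps the existing scheme -- serialize each set with a boost archive (optionally zlib-compressed), pad to the maximum size, and MPI_Allgather the fixed-size byte blocks -- and merely moves it out of the renumbering path into the on-demand getter. The following sketch shows only the padding-and-gather idea with plain std::string blobs and an extra size exchange; it is an illustration of the pattern, not the deal.II implementation:

#include <mpi.h>
#include <string>
#include <vector>

std::vector<std::string>
allgather_blobs(const std::string &my_blob, const MPI_Comm communicator)
{
  int n_procs = 1;
  MPI_Comm_size(communicator, &n_procs);

  // Everybody must read the same number of bytes per rank, so first agree
  // on the largest blob size ...
  unsigned long long my_size = my_blob.size(), max_size = 0;
  MPI_Allreduce(&my_size, &max_size, 1,
                MPI_UNSIGNED_LONG_LONG, MPI_MAX, communicator);

  // ... pad the local contribution with zeros up to that size ...
  std::string padded = my_blob;
  padded.resize(max_size, '\0');

  // ... and gather all fixed-size chunks in one call.
  std::vector<char> buffer(max_size * n_procs);
  MPI_Allgather(padded.data(), static_cast<int>(max_size), MPI_CHAR,
                buffer.data(), static_cast<int>(max_size), MPI_CHAR,
                communicator);

  // Also gather the true sizes so the padding can be stripped again.
  // (The deal.II code skips this step: the boost archive reads only as
  // much as it wrote and simply ignores the trailing zeros.)
  std::vector<unsigned long long> sizes(n_procs);
  MPI_Allgather(&my_size, 1, MPI_UNSIGNED_LONG_LONG,
                sizes.data(), 1, MPI_UNSIGNED_LONG_LONG, communicator);

  std::vector<std::string> blobs(n_procs);
  for (int i = 0; i < n_procs; ++i)
    blobs[i].assign(buffer.data() + i * max_size, sizes[i]);
  return blobs;
}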