From: Peter Munch
Date: Thu, 23 Apr 2020 08:01:38 +0000 (+0200)
Subject: Remove parallel::TriangulationBase::compute_n_locally_owned_active_cells_per_processor
X-Git-Tag: v9.2.0-rc1~153^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=78bdbb1a1eacfda424d19d62819a09c51ea4145f;p=dealii.git

Remove parallel::TriangulationBase::compute_n_locally_owned_active_cells_per_processor
---

diff --git a/examples/step-55/step-55.cc b/examples/step-55/step-55.cc
index 419ad7cfd6..ebc29436fa 100644
--- a/examples/step-55/step-55.cc
+++ b/examples/step-55/step-55.cc
@@ -456,7 +456,8 @@ namespace Step55
       dof_handler, coupling, dsp, constraints, false);
     SparsityTools::distribute_sparsity_pattern(
       dsp,
-      dof_handler.compute_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.locally_owned_dofs()),
       mpi_communicator,
       locally_relevant_dofs);
     preconditioner_matrix.reinit(owned_partitioning,
diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index 589260d15c..3c23e0f1fd 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -1200,6 +1200,9 @@ namespace Utilities
     std::vector<T>
     all_gather(const MPI_Comm &comm, const T &object)
     {
+      if (job_supports_mpi() == false)
+        return {object};
+
 #  ifndef DEAL_II_WITH_MPI
       (void)comm;
       std::vector<T> v(1, object);
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index 6b6efd4b38..4b9bcb0341 100644
--- a/include/deal.II/distributed/tria_base.h
+++ b/include/deal.II/distributed/tria_base.h
@@ -114,17 +114,6 @@ namespace parallel
     copy_triangulation(
       const dealii::Triangulation<dim, spacedim> &old_tria) override;
 
-    /**
-     * Return the number of active cells owned by each of the MPI processes
-     * that contribute to this triangulation. The element of this vector
-     * indexed by locally_owned_subdomain() equals the result of
-     * n_locally_owned_active_cells().
-     *
-     * @note This function involves global communication!
-     */
-    std::vector<unsigned int>
-    compute_n_locally_owned_active_cells_per_processor() const;
-
     /**
      * Return the number of active cells in the triangulation that are locally
      * owned, i.e. that have a subdomain_id equal to
diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h
index db99c0dd48..2243ee16da 100644
--- a/include/deal.II/dofs/dof_handler.h
+++ b/include/deal.II/dofs/dof_handler.h
@@ -1014,78 +1014,17 @@ public:
   const IndexSet &
   locally_owned_mg_dofs(const unsigned int level) const;
 
-  /**
-   * Compute a vector with the locally owned DoFs of each processor.
-   *
-   * This function involves global communication via the @p MPI_Allgather
-   * function, so it must be called on all processors participating in the MPI
-   * communicator underlying the triangulation.
-   *
-   * If you are only interested in the number of elements each processor owns
-   * then compute_n_locally_owned_dofs_per_processor() is a better choice.
-   *
-   * If this is a sequential DoFHandler, then the vector has a single element
-   * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
-   * "sequential" means that either the whole program does not use MPI, or that
-   * it uses MPI but only uses a single MPI process, or that there are multiple
-   * MPI processes but the Triangulation on which this DoFHandler builds works
-   * only on one MPI process.)
- */ - std::vector - compute_locally_owned_dofs_per_processor() const; - - /** - * Compute a vector with the number of degrees of freedom each - * processor that participates in this triangulation owns locally. The sum - * of all these numbers equals the number of degrees of freedom that exist - * globally, i.e. what n_dofs() returns. - * - * This function involves global communication via the @p MPI_Allgather - * function, so it must be called on all processors participating in the MPI - * communicator underlying the triangulation. - * - * Each element of the vector returned by this function equals the number of - * elements of the corresponding sets returned by - * compute_locally_owned_dofs_per_processor(). - * - * If this is a sequential DoFHandler, then the vector has a single element - * equal to n_dofs(). (Here, "sequential" means that either the whole program - * does not use MPI, or that it uses MPI but only uses a single MPI process, - * or that there are multiple MPI processes but the Triangulation on which - * this DoFHandler builds works only on one MPI process.) - */ - std::vector - compute_n_locally_owned_dofs_per_processor() const; - - /** - * Compute a vector with the locally owned DoFs of each processor on - * the given level @p level for geometric multigrid. - * - * This function involves global communication via the @p MPI_Allgather - * function, so it must be called on all processors participating in the MPI - * communicator underlying the triangulation. - * - * If this is a sequential DoFHandler, then the vector has a single element - * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here, - * "sequential" means that either the whole program does not use MPI, or that - * it uses MPI but only uses a single MPI process, or that there are multiple - * MPI processes but the Triangulation on which this DoFHandler builds works - * only on one MPI process.) - */ - std::vector - compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const; - /** * Return a vector that stores the locally owned DoFs of each processor. * * @deprecated As of deal.II version 9.2, we do not populate a vector with * the index sets of all processors by default any more due to a possibly * large memory footprint on many processors. As a consequence, this - * function needs to call compute_locally_owned_dofs_per_processor() upon - * the first invocation, including global communication. Use - * compute_locally_owned_dofs_per_processor() instead if using up to a few - * thousands of MPI ranks or some variant involving local communication with - * more processors. + * function needs to call `Utilities::all_gather(comm, locally_owned_dofs())` + * upon the first invocation, including global communication. Use + * `Utilities::all_gather(comm, dof_handler.locally_owned_dofs())` instead if + * using up to a few thousands of MPI ranks or some variant involving local + * communication with more processors. */ DEAL_II_DEPRECATED const std::vector & locally_owned_dofs_per_processor() const; @@ -1099,11 +1038,12 @@ public: * @deprecated As of deal.II version 9.2, we do not populate a vector with * the numbers of dofs of all processors by default any more due to a * possibly large memory footprint on many processors. As a consequence, - * this function needs to call compute_n_locally_owned_dofs_per_processor() - * upon the first invocation, including global communication. 
Use - * compute_n_locally_owned_dofs_per_processor() instead if using up to a few - * thousands of MPI ranks or some variant involving local communication with - * more processors. + * this function needs to call `Utilities::all_gather(comm, + * n_locally_owned_dofs()` upon the first invocation, including global + * communication. Use `Utilities::all_gather(comm, + * dof_handler.n_locally_owned_dofs()` instead if using up to a few thousands + * of MPI ranks or some variant involving local communication with more + * processors. */ DEAL_II_DEPRECATED const std::vector & n_locally_owned_dofs_per_processor() const; @@ -1115,9 +1055,10 @@ public: * @deprecated As of deal.II version 9.2, we do not populate a vector with * the index sets of all processors by default any more due to a possibly * large memory footprint on many processors. As a consequence, this - * function needs to call compute_locally_owned_dofs_mg_per_processor() upon - * the first invocation, including global communication. Use - * compute_locally_owned_mg_dofs_per_processor() instead if using up to a few + * function needs to call `Utilities::all_gather(comm, + * locally_owned_dofs_mg())` upon the first invocation, including global + * communication. Use `Utilities::all_gather(comm, + * dof_handler.locally_owned_dofs_mg())` instead if using up to a few * thousands of MPI ranks or some variant involving local communication with * more processors. */ @@ -1508,10 +1449,20 @@ DoFHandler::n_locally_owned_dofs_per_processor() const if (number_cache.n_locally_owned_dofs_per_processor.empty() && number_cache.n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( number_cache) .n_locally_owned_dofs_per_processor = - compute_n_locally_owned_dofs_per_processor(); + number_cache.get_n_locally_owned_dofs_per_processor(comm); } return number_cache.n_locally_owned_dofs_per_processor; } @@ -1525,10 +1476,20 @@ DoFHandler::locally_owned_dofs_per_processor() const if (number_cache.locally_owned_dofs_per_processor.empty() && number_cache.n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( number_cache) .locally_owned_dofs_per_processor = - compute_locally_owned_dofs_per_processor(); + number_cache.get_locally_owned_dofs_per_processor(comm); } return number_cache.locally_owned_dofs_per_processor; } @@ -1550,73 +1511,26 @@ DoFHandler::locally_owned_mg_dofs_per_processor( if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() && mg_number_cache[level].n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( mg_number_cache[level]) .locally_owned_dofs_per_processor = - compute_locally_owned_mg_dofs_per_processor(level); + mg_number_cache[level].get_locally_owned_dofs_per_processor(comm); } return mg_number_cache[level].locally_owned_dofs_per_processor; } -template -std::vector -DoFHandler::compute_n_locally_owned_dofs_per_processor() const -{ - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return 
number_cache.get_n_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF); -} - - - -template -std::vector -DoFHandler::compute_locally_owned_dofs_per_processor() const -{ - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return number_cache.get_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF); -} - - - -template -std::vector -DoFHandler::compute_locally_owned_mg_dofs_per_processor( - const unsigned int level) const -{ - Assert(level < this->get_triangulation().n_global_levels(), - ExcMessage("The given level index exceeds the number of levels " - "present in the triangulation")); - Assert( - mg_number_cache.size() == this->get_triangulation().n_global_levels(), - ExcMessage( - "The level dofs are not set up properly! Did you call distribute_mg_dofs()?")); - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return mg_number_cache[level].get_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return mg_number_cache[level].get_locally_owned_dofs_per_processor( - MPI_COMM_SELF); -} - - - template inline const FiniteElement & DoFHandler::get_fe(const unsigned int index) const diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h index a6514f9447..4f7dad9058 100644 --- a/include/deal.II/hp/dof_handler.h +++ b/include/deal.II/hp/dof_handler.h @@ -821,60 +821,18 @@ namespace hp const IndexSet & locally_owned_dofs() const; - /** - * Compute a vector with the locally owned DoFs of each processor. - * - * This function involves global communication via the @p MPI_Allgather - * function, so it must be called on all processors participating in the MPI - * communicator underlying the triangulation. - * - * If you are only interested in the number of elements each processor owns - * then compute_n_locally_owned_dofs_per_processor() is a better choice. - * - * If this is a sequential DoFHandler, then the vector has a single element - * that equals the IndexSet representing the entire range [0,n_dofs()]. - * (Here, "sequential" means that either the whole program does not use MPI, - * or that it uses MPI but only uses a single MPI process, or that there are - * multiple MPI processes but the Triangulation on which this DoFHandler - * builds works only on one MPI process.) - */ - std::vector - compute_locally_owned_dofs_per_processor() const; - - /** - * Compute a vector with the number of degrees of freedom each - * processor that participates in this triangulation owns locally. The sum - * of all these numbers equals the number of degrees of freedom that exist - * globally, i.e. what n_dofs() returns. - * - * This function involves global communication via the @p MPI_Allgather - * function, so it must be called on all processors participating in the MPI - * communicator underlying the triangulation. - * - * Each element of the vector returned by this function equals the number of - * elements of the corresponding sets returned by - * compute_locally_owned_dofs_per_processor(). - * - * If this is a sequential DoFHandler, then the vector has a single element - * equal to n_dofs(). 
(Here, "sequential" means that either the whole - * program does not use MPI, or that it uses MPI but only uses a single MPI - * process, or that there are multiple MPI processes but the Triangulation - * on which this DoFHandler builds works only on one MPI process.) - */ - std::vector - compute_n_locally_owned_dofs_per_processor() const; - /** * Return a vector that stores the locally owned DoFs of each processor. * * @deprecated As of deal.II version 9.2, we do not populate a vector with * the index sets of all processors by default any more due to a possibly * large memory footprint on many processors. As a consequence, this - * function needs to call compute_locally_owned_dofs_per_processor() upon - * the first invocation, including global communication. Use - * compute_locally_owned_dofs_per_processor() instead if using up to a few - * thousands of MPI ranks or some variant involving local communication with - * more processors. + * function needs to call `Utilities::all_gather(comm, + * locally_owned_dofs())` upon the first invocation, including global + * communication. Use `Utilities::all_gather(comm, + * dof_handler.locally_owned_dofs())` instead if using up to a few thousands + * of MPI ranks or some variant involving local communication with more + * processors. */ DEAL_II_DEPRECATED const std::vector & locally_owned_dofs_per_processor() const; @@ -888,9 +846,10 @@ namespace hp * @deprecated As of deal.II version 9.2, we do not populate a vector with * the numbers of dofs of all processors by default any more due to a * possibly large memory footprint on many processors. As a consequence, - * this function needs to call compute_n_locally_owned_dofs_per_processor() - * upon the first invocation, including global communication. Use - * compute_n_locally_owned_dofs_per_processor() instead if using up to a few + * this function needs to call `Utilities::all_gather(comm, + * n_locally_owned_dofs()` upon the first invocation, including global + * communication. Use `Utilities::all_gather(comm, + * dof_handler.n_locally_owned_dofs()` instead if using up to a few * thousands of MPI ranks or some variant involving local communication with * more processors. */ @@ -906,24 +865,6 @@ namespace hp const IndexSet & locally_owned_mg_dofs(const unsigned int level) const; - /** - * Compute a vector with the locally owned DoFs of each processor on - * the given level @p level for geometric multigrid. - * - * This function involves global communication via the @p MPI_Allgather - * function, so it must be called on all processors participating in the MPI - * communicator underlying the triangulation. - * - * If this is a sequential DoFHandler, then the vector has a single element - * that equals the IndexSet representing the entire range [0,n_dofs()]. - * (Here, "sequential" means that either the whole program does not use MPI, - * or that it uses MPI but only uses a single MPI process, or that there are - * multiple MPI processes but the Triangulation on which this DoFHandler - * builds works only on one MPI process.) - */ - std::vector - compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const; - /** * Return a vector that stores the locally owned DoFs of each processor on * the given level @p level. @@ -931,11 +872,12 @@ namespace hp * @deprecated As of deal.II version 9.2, we do not populate a vector with * the index sets of all processors by default any more due to a possibly * large memory footprint on many processors. 
As a consequence, this - * function needs to call compute_locally_owned_dofs_mg_per_processor() upon - * the first invocation, including global communication. Use - * compute_locally_owned_mg_dofs_per_processor() instead if using up to a - * few thousands of MPI ranks or some variant involving local communication - * with more processors. + * function needs to call `Utilities::all_gather(comm, + * locally_owned_dofs_mg())` upon the first invocation, including global + * communication. Use `Utilities::all_gather(comm, + * dof_handler.locally_owned_dofs_mg())` instead if using up to a few + * thousands of MPI ranks or some variant involving local communication with + * more processors. */ DEAL_II_DEPRECATED const std::vector & locally_owned_mg_dofs_per_processor(const unsigned int level) const; @@ -1573,10 +1515,20 @@ namespace hp if (number_cache.n_locally_owned_dofs_per_processor.empty() && number_cache.n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( number_cache) .n_locally_owned_dofs_per_processor = - compute_n_locally_owned_dofs_per_processor(); + number_cache.get_n_locally_owned_dofs_per_processor(comm); } return number_cache.n_locally_owned_dofs_per_processor; } @@ -1590,48 +1542,26 @@ namespace hp if (number_cache.locally_owned_dofs_per_processor.empty() && number_cache.n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( number_cache) .locally_owned_dofs_per_processor = - compute_locally_owned_dofs_per_processor(); + number_cache.get_locally_owned_dofs_per_processor(comm); } return number_cache.locally_owned_dofs_per_processor; } - template - std::vector - DoFHandler::compute_n_locally_owned_dofs_per_processor() const - { - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return number_cache.get_n_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF); - } - - - - template - std::vector - DoFHandler::compute_locally_owned_dofs_per_processor() const - { - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return number_cache.get_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF); - } - - - template const IndexSet & DoFHandler::locally_owned_mg_dofs( @@ -1662,39 +1592,26 @@ namespace hp if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() && mg_number_cache[level].n_global_dofs > 0) { + MPI_Comm comm; + + const parallel::TriangulationBase *tr = + (dynamic_cast *>( + &this->get_triangulation())); + if (tr != nullptr) + comm = tr->get_communicator(); + else + comm = MPI_COMM_SELF; + const_cast( mg_number_cache[level]) .locally_owned_dofs_per_processor = - compute_locally_owned_mg_dofs_per_processor(level); + mg_number_cache[level].get_locally_owned_dofs_per_processor(comm); } return mg_number_cache[level].locally_owned_dofs_per_processor; } - template - std::vector - DoFHandler::compute_locally_owned_mg_dofs_per_processor( - const unsigned int level) const - { - Assert(false, ExcNotImplemented()); - 
(void)level; - Assert(level < this->get_triangulation().n_global_levels(), - ExcMessage("The given level index exceeds the number of levels " - "present in the triangulation")); - const parallel::TriangulationBase *tr = - (dynamic_cast *>( - &this->get_triangulation())); - if (tr != nullptr) - return mg_number_cache[level].get_locally_owned_dofs_per_processor( - tr->get_communicator()); - else - return mg_number_cache[level].get_locally_owned_dofs_per_processor( - MPI_COMM_SELF); - } - - - template inline const FiniteElement & DoFHandler::get_fe(const unsigned int number) const diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc index beecfd9486..1a27650ed5 100644 --- a/source/distributed/tria_base.cc +++ b/source/distributed/tria_base.cc @@ -133,35 +133,6 @@ namespace parallel return number_cache.n_global_active_cells; } - template - std::vector - TriangulationBase:: - compute_n_locally_owned_active_cells_per_processor() const - { - ; -#ifdef DEAL_II_WITH_MPI - std::vector n_locally_owned_active_cells_per_processor( - Utilities::MPI::n_mpi_processes(this->mpi_communicator), 0); - - if (this->n_levels() > 0) - { - const int ierr = - MPI_Allgather(&number_cache.n_locally_owned_active_cells, - 1, - MPI_UNSIGNED, - n_locally_owned_active_cells_per_processor.data(), - 1, - MPI_UNSIGNED, - this->mpi_communicator); - AssertThrowMPI(ierr); - } - - return n_locally_owned_active_cells_per_processor; -#else - return {number_cache.n_locally_owned_active_cells}; -#endif - } - template const MPI_Comm & TriangulationBase::get_communicator() const diff --git a/source/dofs/number_cache.cc b/source/dofs/number_cache.cc index 42e97076df..4852862236 100644 --- a/source/dofs/number_cache.cc +++ b/source/dofs/number_cache.cc @@ -87,32 +87,20 @@ namespace internal NumberCache::get_n_locally_owned_dofs_per_processor( const MPI_Comm mpi_communicator) const { - const unsigned int n_procs = - Utilities::MPI::job_supports_mpi() ? - Utilities::MPI::n_mpi_processes(mpi_communicator) : - 1; if (n_global_dofs == 0) return std::vector(); else if (n_locally_owned_dofs_per_processor.empty() == false) { - AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs); + AssertDimension(n_locally_owned_dofs_per_processor.size(), + (Utilities::MPI::job_supports_mpi() ? + Utilities::MPI::n_mpi_processes(mpi_communicator) : + 1)); return n_locally_owned_dofs_per_processor; } else { - std::vector result(n_procs, - n_locally_owned_dofs); -#ifdef DEAL_II_WITH_MPI - if (n_procs > 1) - MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs), - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - result.data(), - 1, - DEAL_II_DOF_INDEX_MPI_TYPE, - mpi_communicator); -#endif - return result; + return Utilities::MPI::all_gather(mpi_communicator, + n_locally_owned_dofs); } } @@ -123,108 +111,20 @@ namespace internal const MPI_Comm mpi_communicator) const { AssertDimension(locally_owned_dofs.size(), n_global_dofs); - const unsigned int n_procs = - Utilities::MPI::job_supports_mpi() ? - Utilities::MPI::n_mpi_processes(mpi_communicator) : - 1; if (n_global_dofs == 0) return std::vector(); else if (locally_owned_dofs_per_processor.empty() == false) { - AssertDimension(locally_owned_dofs_per_processor.size(), n_procs); + AssertDimension(locally_owned_dofs_per_processor.size(), + (Utilities::MPI::job_supports_mpi() ? 
+ Utilities::MPI::n_mpi_processes(mpi_communicator) : + 1)); return locally_owned_dofs_per_processor; } else { - std::vector locally_owned_dofs_per_processor( - n_procs, locally_owned_dofs); - -#ifdef DEAL_II_WITH_MPI - if (n_procs > 1) - { - // this step is substantially more complicated because indices - // might be distributed arbitrarily among the processors. Here we - // have to serialize the IndexSet objects and shop them across the - // network. - std::vector my_data; - { -# ifdef DEAL_II_WITH_ZLIB - - boost::iostreams::filtering_ostream out; - out.push(boost::iostreams::gzip_compressor( - boost::iostreams::gzip_params( - boost::iostreams::gzip::best_speed))); - out.push(boost::iostreams::back_inserter(my_data)); - - boost::archive::binary_oarchive archive(out); - - archive << locally_owned_dofs; - out.flush(); -# else - std::ostringstream out; - boost::archive::binary_oarchive archive(out); - archive << locally_owned_dofs; - const std::string &s = out.str(); - my_data.reserve(s.size()); - my_data.assign(s.begin(), s.end()); -# endif - } - - // determine maximum size of IndexSet - const unsigned int max_size = - Utilities::MPI::max(my_data.size(), mpi_communicator); - - // as the MPI_Allgather call will be reading max_size elements, - // and as this may be past the end of my_data, we need to increase - // the size of the local buffer. This is filled with zeros. - my_data.resize(max_size); - - std::vector buffer(max_size * n_procs); - const int ierr = MPI_Allgather(my_data.data(), - max_size, - MPI_BYTE, - buffer.data(), - max_size, - MPI_BYTE, - mpi_communicator); - AssertThrowMPI(ierr); - - for (unsigned int i = 0; i < n_procs; ++i) - if (i == Utilities::MPI::this_mpi_process(mpi_communicator)) - locally_owned_dofs_per_processor[i] = locally_owned_dofs; - else - { - // copy the data previously received into a stringstream - // object and then read the IndexSet from it - std::string decompressed_buffer; - - // first decompress the buffer - { -# ifdef DEAL_II_WITH_ZLIB - - boost::iostreams::filtering_ostream decompressing_stream; - decompressing_stream.push( - boost::iostreams::gzip_decompressor()); - decompressing_stream.push( - boost::iostreams::back_inserter(decompressed_buffer)); - - decompressing_stream.write(&buffer[i * max_size], - max_size); -# else - decompressed_buffer.assign(&buffer[i * max_size], - max_size); -# endif - } - - // then restore the object from the buffer - std::istringstream in(decompressed_buffer); - boost::archive::binary_iarchive archive(in); - - archive >> locally_owned_dofs_per_processor[i]; - } - } -#endif - return locally_owned_dofs_per_processor; + return Utilities::MPI::all_gather(mpi_communicator, + locally_owned_dofs); } } diff --git a/tests/distributed_grids/dof_handler_number_cache.cc b/tests/distributed_grids/dof_handler_number_cache.cc index 85626e9bbf..00c8c5d301 100644 --- a/tests/distributed_grids/dof_handler_number_cache.cc +++ b/tests/distributed_grids/dof_handler_number_cache.cc @@ -99,10 +99,12 @@ test() AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError()); AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError()); - AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) == std::vector(1, N), ExcInternalError()); - AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.locally_owned_dofs()) == std::vector(1, 
all), ExcInternalError()); } diff --git a/tests/distributed_grids/hp_dof_handler_number_cache.cc b/tests/distributed_grids/hp_dof_handler_number_cache.cc index c24a60f0c5..c8e2089dbb 100644 --- a/tests/distributed_grids/hp_dof_handler_number_cache.cc +++ b/tests/distributed_grids/hp_dof_handler_number_cache.cc @@ -106,10 +106,12 @@ test() AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError()); AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError()); - AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) == std::vector(1, N), ExcInternalError()); - AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.locally_owned_dofs()) == std::vector(1, all), ExcInternalError()); } diff --git a/tests/dofs/dof_handler_number_cache.cc b/tests/dofs/dof_handler_number_cache.cc index 39c14497bf..da5f2f5a3b 100644 --- a/tests/dofs/dof_handler_number_cache.cc +++ b/tests/dofs/dof_handler_number_cache.cc @@ -96,10 +96,12 @@ test() AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError()); AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError()); - AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) == std::vector(1, N), ExcInternalError()); - AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.locally_owned_dofs()) == std::vector(1, all), ExcInternalError()); } diff --git a/tests/dofs/dof_handler_number_cache_02.cc b/tests/dofs/dof_handler_number_cache_02.cc index a6c9124f87..180f651e85 100644 --- a/tests/dofs/dof_handler_number_cache_02.cc +++ b/tests/dofs/dof_handler_number_cache_02.cc @@ -95,17 +95,21 @@ test() Assert(dof_handler.n_locally_owned_dofs() == N, ExcInternalError()); Assert(dof_handler.locally_owned_dofs() == all, ExcInternalError()); - Assert(dof_handler.compute_n_locally_owned_dofs_per_processor() == + Assert(Utilities::MPI::all_gather(MPI_COMM_SELF, + dof_handler.n_locally_owned_dofs()) == std::vector(1, N), ExcInternalError()); - Assert(dof_handler.compute_locally_owned_dofs_per_processor() == + Assert(Utilities::MPI::all_gather(MPI_COMM_SELF, + dof_handler.locally_owned_dofs()) == std::vector(1, all), ExcInternalError()); dof_handler.clear(); deallog << "those should be zero: " << dof_handler.n_locally_owned_dofs() << " " - << dof_handler.compute_n_locally_owned_dofs_per_processor().size() + << Utilities::MPI::all_gather(MPI_COMM_SELF, + dof_handler.n_locally_owned_dofs()) + .size() << " " << dof_handler.n_dofs() << std::endl; } } diff --git a/tests/dofs/dof_handler_number_cache_02.output b/tests/dofs/dof_handler_number_cache_02.output index 5da9c71e96..1da0caa981 100644 --- a/tests/dofs/dof_handler_number_cache_02.output +++ b/tests/dofs/dof_handler_number_cache_02.output @@ -1,19 +1,19 @@ DEAL:1d::50 -DEAL:1d::those should be zero: 0 0 0 +DEAL:1d::those should be zero: 0 1 0 DEAL:1d::82 -DEAL:1d::those should be zero: 0 0 0 +DEAL:1d::those should be zero: 0 1 0 DEAL:1d::90 -DEAL:1d::those should be zero: 0 0 0 +DEAL:1d::those should be zero: 0 1 0 DEAL:1d::90 -DEAL:1d::those should be zero: 0 0 0 +DEAL:1d::those should be zero: 0 1 0 DEAL:2d::816 -DEAL:2d::those should be zero: 0 0 0 +DEAL:2d::those should be zero: 0 1 0 
DEAL:2d::1264 -DEAL:2d::those should be zero: 0 0 0 +DEAL:2d::those should be zero: 0 1 0 DEAL:2d::2192 -DEAL:2d::those should be zero: 0 0 0 +DEAL:2d::those should be zero: 0 1 0 DEAL:3d::13524 -DEAL:3d::those should be zero: 0 0 0 +DEAL:3d::those should be zero: 0 1 0 DEAL:3d::42768 -DEAL:3d::those should be zero: 0 0 0 +DEAL:3d::those should be zero: 0 1 0 diff --git a/tests/gla/mat_04.cc b/tests/gla/mat_04.cc index f69091af48..3b7d3d9cdf 100644 --- a/tests/gla/mat_04.cc +++ b/tests/gla/mat_04.cc @@ -85,7 +85,8 @@ test() MPI_COMM_WORLD)); SparsityTools::distribute_sparsity_pattern( sp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), MPI_COMM_WORLD, relevant); sp.compress(); diff --git a/tests/hp/dof_handler_number_cache.cc b/tests/hp/dof_handler_number_cache.cc index 07a5813c33..0e4d55235f 100644 --- a/tests/hp/dof_handler_number_cache.cc +++ b/tests/hp/dof_handler_number_cache.cc @@ -107,10 +107,12 @@ test() AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError()); AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError()); - AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) == std::vector(1, N), ExcInternalError()); - AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() == + AssertThrow(Utilities::MPI::all_gather( + MPI_COMM_SELF, dof_handler.locally_owned_dofs()) == std::vector(1, all), ExcInternalError()); } diff --git a/tests/matrix_free/dg_pbc_01.cc b/tests/matrix_free/dg_pbc_01.cc index f6ea0ed100..d3842a17cb 100644 --- a/tests/matrix_free/dg_pbc_01.cc +++ b/tests/matrix_free/dg_pbc_01.cc @@ -96,7 +96,7 @@ test() solver.solve(mf, sol, rhs, PreconditionIdentity()); const std::vector locally_owned_dofs_per_processor = - dof.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dof.locally_owned_dofs()); // gather all data at root if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) { diff --git a/tests/mpi/cell_weights_01.cc b/tests/mpi/cell_weights_01.cc index 932e9a2858..e8b52acd39 100644 --- a/tests/mpi/cell_weights_01.cc +++ b/tests/mpi/cell_weights_01.cc @@ -54,7 +54,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_01_back_and_forth_01.cc b/tests/mpi/cell_weights_01_back_and_forth_01.cc index 2686080140..305c0deddf 100644 --- a/tests/mpi/cell_weights_01_back_and_forth_01.cc +++ b/tests/mpi/cell_weights_01_back_and_forth_01.cc @@ -88,7 +88,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_01_back_and_forth_02.cc b/tests/mpi/cell_weights_01_back_and_forth_02.cc index edbffd902f..e5ab0c5127 100644 --- a/tests/mpi/cell_weights_01_back_and_forth_02.cc +++ 
b/tests/mpi/cell_weights_01_back_and_forth_02.cc @@ -73,7 +73,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_02.cc b/tests/mpi/cell_weights_02.cc index 862b0644fc..9cf48d36f5 100644 --- a/tests/mpi/cell_weights_02.cc +++ b/tests/mpi/cell_weights_02.cc @@ -60,7 +60,8 @@ test() tr.refine_global(1); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_03.cc b/tests/mpi/cell_weights_03.cc index 5a44ca85eb..c8cf1ae011 100644 --- a/tests/mpi/cell_weights_03.cc +++ b/tests/mpi/cell_weights_03.cc @@ -68,7 +68,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_04.cc b/tests/mpi/cell_weights_04.cc index fab4b09040..ee8419f345 100644 --- a/tests/mpi/cell_weights_04.cc +++ b/tests/mpi/cell_weights_04.cc @@ -62,7 +62,8 @@ test() tr.refine_global(1); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_05.cc b/tests/mpi/cell_weights_05.cc index b0c2bee4d9..95fb7f724f 100644 --- a/tests/mpi/cell_weights_05.cc +++ b/tests/mpi/cell_weights_05.cc @@ -81,7 +81,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/cell_weights_06.cc b/tests/mpi/cell_weights_06.cc index b57d4f3f10..6ecc9ee634 100644 --- a/tests/mpi/cell_weights_06.cc +++ b/tests/mpi/cell_weights_06.cc @@ -80,7 +80,8 @@ test() tr.repartition(); const auto n_locally_owned_active_cells_per_processor = - tr.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather(tr.get_communicator(), + tr.n_locally_owned_active_cells()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) for (unsigned int p = 0; p < numproc; ++p) deallog << "processor " << p << ": " diff --git a/tests/mpi/constraints_consistent_01.cc b/tests/mpi/constraints_consistent_01.cc index 2dae4d4008..668bb336c7 100644 --- a/tests/mpi/constraints_consistent_01.cc +++ b/tests/mpi/constraints_consistent_01.cc @@ -84,7 +84,8 @@ check(parallel::distributed::Triangulation 
&tria) constraints.print(deallog.get_file_stream()); deallog << "consistent? " << constraints.is_consistent_in_parallel( - dof_handler.compute_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()), locally_active_dofs, MPI_COMM_WORLD, true) diff --git a/tests/mpi/dof_handler_number_cache.cc b/tests/mpi/dof_handler_number_cache.cc index 507db4136b..df6989aca9 100644 --- a/tests/mpi/dof_handler_number_cache.cc +++ b/tests/mpi/dof_handler_number_cache.cc @@ -106,22 +106,22 @@ test() deallog << N << std::endl; Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError()); - for (unsigned int i = 0; - i < dof_handler.compute_n_locally_owned_dofs_per_processor().size(); - ++i) - AssertThrow( - dof_handler.compute_n_locally_owned_dofs_per_processor()[i] <= N, - ExcInternalError()); const std::vector n_locally_owned_dofs_per_processor = - dof_handler.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()); + for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); + ++i) + AssertThrow(n_locally_owned_dofs_per_processor[i] <= N, + ExcInternalError()); AssertThrow(std::accumulate(n_locally_owned_dofs_per_processor.begin(), n_locally_owned_dofs_per_processor.end(), 0U) == N, ExcInternalError()); const std::vector locally_owned_dofs_per_processor = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet all(N), really_all(N); // poor man's union operation for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); diff --git a/tests/mpi/hp_step-40.cc b/tests/mpi/hp_step-40.cc index 47df172800..0b4a386c40 100644 --- a/tests/mpi/hp_step-40.cc +++ b/tests/mpi/hp_step-40.cc @@ -165,8 +165,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -326,7 +328,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -339,7 +343,8 @@ namespace Step40 for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) - pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i] + pcout << Utilities::MPI::all_gather( + MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i] << '+'; pcout << std::endl; diff --git a/tests/mpi/hp_step-40_variable_01.cc b/tests/mpi/hp_step-40_variable_01.cc index ccdef1e3ee..fe592fbf3c 100644 --- a/tests/mpi/hp_step-40_variable_01.cc +++ b/tests/mpi/hp_step-40_variable_01.cc @@ -171,8 +171,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(MPI_COMM_WORLD, 
+ dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -329,7 +331,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -342,7 +346,8 @@ namespace Step40 for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) - pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i] + pcout << Utilities::MPI::all_gather( + MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i] << '+'; pcout << std::endl; diff --git a/tests/mpi/mg_02.cc b/tests/mpi/mg_02.cc index 5b11bb691e..89a4f744c6 100644 --- a/tests/mpi/mg_02.cc +++ b/tests/mpi/mg_02.cc @@ -96,7 +96,7 @@ test() const std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); deallog << "n_locally_owned_dofs_per_processor:" << std::endl; for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); ++i) deallog << n_locally_owned_dofs_per_processor[i] << std::endl; @@ -107,7 +107,8 @@ test() deallog << "level " << lvl << ":" << std::endl; const std::vector vec = - dofh.compute_locally_owned_mg_dofs_per_processor(lvl); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dofh.locally_owned_mg_dofs(lvl)); for (unsigned int i = 0; i < vec.size(); ++i) deallog << vec[i].n_elements() << std::endl; diff --git a/tests/mpi/p4est_2d_dofhandler_01.cc b/tests/mpi/p4est_2d_dofhandler_01.cc index 28ebb0fcb5..f5cd4dec1a 100644 --- a/tests/mpi/p4est_2d_dofhandler_01.cc +++ b/tests/mpi/p4est_2d_dofhandler_01.cc @@ -57,7 +57,7 @@ test() const std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 0) { deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor diff --git a/tests/mpi/p4est_2d_dofhandler_02.cc b/tests/mpi/p4est_2d_dofhandler_02.cc index 71df8c5d5b..489b8f3542 100644 --- a/tests/mpi/p4est_2d_dofhandler_02.cc +++ b/tests/mpi/p4est_2d_dofhandler_02.cc @@ -90,7 +90,7 @@ test() dofh.distribute_dofs(fe); std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 0) { deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor diff --git a/tests/mpi/p4est_2d_dofhandler_03.cc b/tests/mpi/p4est_2d_dofhandler_03.cc index a0f9e53417..9b2d7c5e39 100644 --- a/tests/mpi/p4est_2d_dofhandler_03.cc +++ b/tests/mpi/p4est_2d_dofhandler_03.cc @@ -91,7 +91,7 @@ test() dofh.distribute_dofs(fe); std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 0) { deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor diff --git a/tests/mpi/p4est_2d_dofhandler_04.cc b/tests/mpi/p4est_2d_dofhandler_04.cc index 6d202c2c02..63b7a64e50 100644 --- a/tests/mpi/p4est_2d_dofhandler_04.cc +++ b/tests/mpi/p4est_2d_dofhandler_04.cc @@ -91,7 +91,7 @@ test() dofh.distribute_dofs(fe); std::vector n_locally_owned_dofs_per_processor = 
- dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 0) { deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor diff --git a/tests/mpi/p4est_2d_renumber_02.cc b/tests/mpi/p4est_2d_renumber_02.cc index 87dacefe41..2af6c4506d 100644 --- a/tests/mpi/p4est_2d_renumber_02.cc +++ b/tests/mpi/p4est_2d_renumber_02.cc @@ -81,7 +81,7 @@ test() DoFTools::extract_locally_active_dofs(dofh, dof_set); const std::vector owned_dofs = - dofh.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs()); if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0) { dof_set.print(deallog); diff --git a/tests/mpi/p4est_data_out_01.cc b/tests/mpi/p4est_data_out_01.cc index aaa5aa3a02..62e95eafd6 100644 --- a/tests/mpi/p4est_data_out_01.cc +++ b/tests/mpi/p4est_data_out_01.cc @@ -75,7 +75,7 @@ test() data_out.build_patches(); std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 0) { for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); diff --git a/tests/mpi/p4est_get_subdomain_association.cc b/tests/mpi/p4est_get_subdomain_association.cc index 73d4485a44..9e3aa7f5c2 100644 --- a/tests/mpi/p4est_get_subdomain_association.cc +++ b/tests/mpi/p4est_get_subdomain_association.cc @@ -61,7 +61,7 @@ test() const std::vector n_locally_owned_dofs_per_processor = - dofh.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs()); if (myid == 1) { deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor diff --git a/tests/mpi/periodicity_01.cc b/tests/mpi/periodicity_01.cc index be0454f167..6fab1533c3 100644 --- a/tests/mpi/periodicity_01.cc +++ b/tests/mpi/periodicity_01.cc @@ -166,7 +166,8 @@ namespace Step40 constraints.close(); const std::vector &locally_owned_dofs = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs, @@ -186,8 +187,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } diff --git a/tests/mpi/periodicity_02.cc b/tests/mpi/periodicity_02.cc index ef7198dfa9..e98eee8f7a 100644 --- a/tests/mpi/periodicity_02.cc +++ b/tests/mpi/periodicity_02.cc @@ -394,7 +394,8 @@ namespace Step22 constraints.close(); const std::vector &locally_owned_dofs = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs, diff --git a/tests/mpi/periodicity_03.cc b/tests/mpi/periodicity_03.cc index 3898e35009..a7c0cb4240 100644 --- a/tests/mpi/periodicity_03.cc +++ b/tests/mpi/periodicity_03.cc @@ -324,7 
+324,8 @@ namespace Step22 constraints.close(); const std::vector &locally_owned_dofs = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs, diff --git a/tests/mpi/periodicity_04.cc b/tests/mpi/periodicity_04.cc index c5b32fff20..56cd78cfba 100644 --- a/tests/mpi/periodicity_04.cc +++ b/tests/mpi/periodicity_04.cc @@ -205,7 +205,8 @@ check(const unsigned int orientation, bool reverse) constraints.print(deallog.get_file_stream()); const std::vector locally_owned_dofs_vector = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs_vector, diff --git a/tests/mpi/periodicity_06.cc b/tests/mpi/periodicity_06.cc index 5782c1c907..5698c51721 100644 --- a/tests/mpi/periodicity_06.cc +++ b/tests/mpi/periodicity_06.cc @@ -188,7 +188,8 @@ test(const unsigned numRefinementLevels = 2) constraints.close(); const std::vector &locally_owned_dofs = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet locally_active_dofs; DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs, diff --git a/tests/mpi/periodicity_07.cc b/tests/mpi/periodicity_07.cc index 2a1bf187ea..638526a31e 100644 --- a/tests/mpi/periodicity_07.cc +++ b/tests/mpi/periodicity_07.cc @@ -139,7 +139,8 @@ test(const unsigned numRefinementLevels = 2) DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs); const std::vector locally_owned_dofs = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); std::map> supportPoints; DoFTools::map_dofs_to_support_points(MappingQ1(), diff --git a/tests/mpi/renumber_cuthill_mckee.cc b/tests/mpi/renumber_cuthill_mckee.cc index b4ef68f8fc..aae1916677 100644 --- a/tests/mpi/renumber_cuthill_mckee.cc +++ b/tests/mpi/renumber_cuthill_mckee.cc @@ -76,7 +76,7 @@ test() complete_renumbering.begin()); unsigned int offset = renumbering.size(); const std::vector dofs_per_proc = - dofh.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs()); for (unsigned int i = 1; i < nprocs; ++i) { if (myid == i) diff --git a/tests/mpi/renumber_cuthill_mckee_02.cc b/tests/mpi/renumber_cuthill_mckee_02.cc index a8c4c356f7..ca3068cae1 100644 --- a/tests/mpi/renumber_cuthill_mckee_02.cc +++ b/tests/mpi/renumber_cuthill_mckee_02.cc @@ -79,7 +79,7 @@ test() complete_renumbering.begin()); unsigned int offset = renumbering.size(); const std::vector locally_owned_dofs_per_processor = - dofh.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs()); for (unsigned int i = 1; i < nprocs; ++i) { if (myid == i) diff --git a/tests/mpi/step-40.cc b/tests/mpi/step-40.cc index 200f282aba..85e669cf45 100644 --- a/tests/mpi/step-40.cc +++ b/tests/mpi/step-40.cc @@ -159,8 +159,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - 
dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -311,7 +313,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -324,7 +328,8 @@ namespace Step40 for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) - pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i] + pcout << Utilities::MPI::all_gather( + MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i] << '+'; pcout << std::endl; diff --git a/tests/mpi/step-40_cuthill_mckee.cc b/tests/mpi/step-40_cuthill_mckee.cc index c3cb0e2812..245b86e496 100644 --- a/tests/mpi/step-40_cuthill_mckee.cc +++ b/tests/mpi/step-40_cuthill_mckee.cc @@ -221,8 +221,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -373,7 +375,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -386,7 +390,8 @@ namespace Step40 for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) - pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i] + pcout << Utilities::MPI::all_gather( + mpi_communicator, dof_handler.n_locally_owned_dofs())[i] << '+'; pcout << std::endl; diff --git a/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc index f4bb8844e5..8ee0de28f3 100644 --- a/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc +++ b/tests/mpi/step-40_cuthill_mckee_MPI-subset.cc @@ -222,8 +222,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -374,7 +376,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < 
Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -386,7 +390,8 @@ namespace Step40 << " "; const std::vector n_locally_owned_dofs_per_processor = - dof_handler.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) diff --git a/tests/mpi/step-40_direct_solver.cc b/tests/mpi/step-40_direct_solver.cc index d53f1b62d9..428ff3be18 100644 --- a/tests/mpi/step-40_direct_solver.cc +++ b/tests/mpi/step-40_direct_solver.cc @@ -159,8 +159,10 @@ namespace Step40 system_matrix.reinit( mpi_communicator, csp, - dof_handler.compute_n_locally_owned_dofs_per_processor(), - dof_handler.compute_n_locally_owned_dofs_per_processor(), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), + Utilities::MPI::all_gather(mpi_communicator, + dof_handler.n_locally_owned_dofs()), Utilities::MPI::this_mpi_process(mpi_communicator)); } @@ -289,7 +291,9 @@ namespace Step40 << triangulation.n_global_active_cells() << std::endl << " "; const auto n_locally_owned_active_cells_per_processor = - triangulation.compute_n_locally_owned_active_cells_per_processor(); + Utilities::MPI::all_gather( + triangulation.get_communicator(), + triangulation.n_locally_owned_active_cells()); for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) @@ -302,7 +306,8 @@ namespace Step40 for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(mpi_communicator); ++i) - pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i] + pcout << Utilities::MPI::all_gather( + mpi_communicator, dof_handler.locally_owned_dofs())[i] << '+'; pcout << std::endl; diff --git a/tests/sharedtria/dof_01.cc b/tests/sharedtria/dof_01.cc index a1a7fd2d37..4896d3838c 100644 --- a/tests/sharedtria/dof_01.cc +++ b/tests/sharedtria/dof_01.cc @@ -117,7 +117,8 @@ test() const std::vector n_locally_owned_dofs_per_processor = - dof_handler.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()); Assert(dof_handler.n_locally_owned_dofs() == n_locally_owned_dofs_per_processor[triangulation .locally_owned_subdomain()], @@ -135,7 +136,8 @@ test() ExcInternalError()); const std::vector locally_owned_dofs_per_processor = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet all(N); for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i) { diff --git a/tests/sharedtria/dof_02.cc b/tests/sharedtria/dof_02.cc index de3b9d271a..857b35be36 100644 --- a/tests/sharedtria/dof_02.cc +++ b/tests/sharedtria/dof_02.cc @@ -117,7 +117,8 @@ test() const std::vector n_locally_owned_dofs_per_processor = - dof_handler.compute_n_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.n_locally_owned_dofs()); Assert(dof_handler.n_locally_owned_dofs() == n_locally_owned_dofs_per_processor[triangulation .locally_owned_subdomain()], @@ -135,7 +136,8 @@ test() ExcInternalError()); const std::vector locally_owned_dofs_per_processor = - dof_handler.compute_locally_owned_dofs_per_processor(); + Utilities::MPI::all_gather(MPI_COMM_WORLD, + dof_handler.locally_owned_dofs()); IndexSet all(N); for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i) { diff --git a/tests/sharedtria/dof_03.cc b/tests/sharedtria/dof_03.cc index 
index 58618ed41d..6a6d60193d 100644
--- a/tests/sharedtria/dof_03.cc
+++ b/tests/sharedtria/dof_03.cc
@@ -115,7 +115,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -133,7 +134,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/dof_04.cc b/tests/sharedtria/dof_04.cc
index d74b1c795b..5600d047bf 100644
--- a/tests/sharedtria/dof_04.cc
+++ b/tests/sharedtria/dof_04.cc
@@ -117,7 +117,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -135,7 +136,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/dof_05.cc b/tests/sharedtria/dof_05.cc
index 8c92c472ec..157ef34bdf 100644
--- a/tests/sharedtria/dof_05.cc
+++ b/tests/sharedtria/dof_05.cc
@@ -57,9 +57,11 @@ compare_meshes(DoFHandler &shared_dof_handler,
   shared_dofs.print(deallog.get_file_stream());
 
   std::vector<IndexSet> shared_dofs_per_proc =
-    shared_dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               shared_dof_handler.locally_owned_dofs());
   std::vector<IndexSet> distributed_dofs_per_proc =
-    distributed_dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               distributed_dof_handler.locally_owned_dofs());
   for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
        ++i)
     Assert(shared_dofs_per_proc[i] == distributed_dofs_per_proc[i],
diff --git a/tests/sharedtria/dof_06.cc b/tests/sharedtria/dof_06.cc
index 715b4b6d07..d2d83ed723 100644
--- a/tests/sharedtria/dof_06.cc
+++ b/tests/sharedtria/dof_06.cc
@@ -73,7 +73,8 @@ test()
           << dof_handler.n_locally_owned_dofs() << std::endl;
 
   std::vector<IndexSet> shared_dofs_per_proc =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
        ++i)
     shared_dofs_per_proc[i].print(deallog.get_file_stream());
diff --git a/tests/sharedtria/hp_dof_01.cc b/tests/sharedtria/hp_dof_01.cc
index 45539023d1..805556f016 100644
--- a/tests/sharedtria/hp_dof_01.cc
+++ b/tests/sharedtria/hp_dof_01.cc
@@ -122,7 +122,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -140,7 +141,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/hp_dof_02.cc b/tests/sharedtria/hp_dof_02.cc
index 6aaee1de86..23a52ab560 100644
--- a/tests/sharedtria/hp_dof_02.cc
+++ b/tests/sharedtria/hp_dof_02.cc
@@ -123,7 +123,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -141,7 +142,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/hp_dof_03.cc b/tests/sharedtria/hp_dof_03.cc
index a410b009e5..cf23a218b8 100644
--- a/tests/sharedtria/hp_dof_03.cc
+++ b/tests/sharedtria/hp_dof_03.cc
@@ -120,7 +120,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -138,7 +139,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/hp_dof_04.cc b/tests/sharedtria/hp_dof_04.cc
index 7c56973b12..9f5cdd2192 100644
--- a/tests/sharedtria/hp_dof_04.cc
+++ b/tests/sharedtria/hp_dof_04.cc
@@ -123,7 +123,8 @@ test()
   const std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(dof_handler.n_locally_owned_dofs() ==
            n_locally_owned_dofs_per_processor[triangulation
                                                 .locally_owned_subdomain()],
@@ -141,7 +142,8 @@ test()
          ExcInternalError());
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/hp_no_cells_01.cc b/tests/sharedtria/hp_no_cells_01.cc
index a2ec552783..02b196308c 100644
--- a/tests/sharedtria/hp_no_cells_01.cc
+++ b/tests/sharedtria/hp_no_cells_01.cc
@@ -80,7 +80,8 @@ test()
   deallog << "n_locally_owned_dofs_per_processor: ";
   std::vector<types::global_dof_index> v =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   unsigned int sum = 0;
   for (unsigned int i = 0; i < v.size(); ++i)
     {
@@ -105,7 +106,8 @@ test()
   Assert(std::accumulate(v.begin(), v.end(), 0U) == N, ExcInternalError());
 
   std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
diff --git a/tests/sharedtria/mg_dof_02.cc b/tests/sharedtria/mg_dof_02.cc
index 83b6712eef..c00041c11e 100644
--- a/tests/sharedtria/mg_dof_02.cc
+++ b/tests/sharedtria/mg_dof_02.cc
@@ -52,7 +52,8 @@ write_dof_data(DoFHandler &dof_handler)
   for (unsigned int lvl = 0; lvl < n_levels; ++lvl)
     {
       std::vector<IndexSet> dof_index_per_proc =
-        dof_handler.compute_locally_owned_mg_dofs_per_processor(lvl);
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_mg_dofs(lvl));
 
       for (unsigned int i = 0; i < dof_index_per_proc.size(); ++i)
         dof_index_per_proc[i].print(deallog);
diff --git a/tests/sharedtria/no_cells_01.cc b/tests/sharedtria/no_cells_01.cc
index 661c37e96e..6c950547dc 100644
--- a/tests/sharedtria/no_cells_01.cc
+++ b/tests/sharedtria/no_cells_01.cc
@@ -74,7 +74,8 @@ test()
   deallog << "n_locally_owned_dofs_per_processor: ";
   const std::vector<types::global_dof_index> v =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   unsigned int sum = 0;
   for (unsigned int i = 0; i < v.size(); ++i)
     {
@@ -86,10 +87,13 @@ test()
   dof_handler.locally_owned_dofs().write(deallog.get_file_stream());
   deallog << std::endl;
 
-  Assert(dof_handler.n_locally_owned_dofs() ==
-           dof_handler.compute_n_locally_owned_dofs_per_processor()
-             [triangulation.locally_owned_subdomain()],
-         ExcInternalError());
+  Assert(
+    dof_handler.n_locally_owned_dofs() ==
+      Utilities::MPI::all_gather(
+        MPI_COMM_WORLD,
+        dof_handler
+          .n_locally_owned_dofs())[triangulation.locally_owned_subdomain()],
+    ExcInternalError());
   Assert(dof_handler.n_locally_owned_dofs() ==
            dof_handler.locally_owned_dofs().n_elements(),
          ExcInternalError());
@@ -98,12 +102,14 @@ test()
   Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
 
   const std::vector<types::global_dof_index> n_owned_dofs =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N,
          ExcInternalError());
 
   const std::vector<IndexSet> owned_dofs =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < owned_dofs.size(); ++i)
     {
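
The replacement pattern applied in the hunks above can be exercised in a standalone program as well. The following sketch is not part of the patch; it assumes a deal.II build with MPI (and p4est for parallel::distributed::Triangulation), and the mesh size, refinement level, and FE_Q degree are arbitrary choices for illustration. It shows how the per-rank data formerly returned by the compute_*_per_processor() functions can be gathered explicitly with Utilities::MPI::all_gather.

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // Set up a small distributed mesh and a DoFHandler on it.
  parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(triangulation);
  triangulation.refine_global(3);

  DoFHandler<2> dof_handler(triangulation);
  const FE_Q<2> fe(1);
  dof_handler.distribute_dofs(fe);

  // Per-rank locally owned active cell counts, gathered explicitly.
  const std::vector<unsigned int> cells_per_rank =
    Utilities::MPI::all_gather(triangulation.get_communicator(),
                               triangulation.n_locally_owned_active_cells());

  // Per-rank numbers of locally owned degrees of freedom.
  const std::vector<types::global_dof_index> dofs_per_rank =
    Utilities::MPI::all_gather(MPI_COMM_WORLD,
                               dof_handler.n_locally_owned_dofs());

  // Per-rank locally owned index sets.
  const std::vector<IndexSet> owned_dofs_per_rank =
    Utilities::MPI::all_gather(MPI_COMM_WORLD,
                               dof_handler.locally_owned_dofs());

  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    for (unsigned int i = 0; i < dofs_per_rank.size(); ++i)
      std::cout << "rank " << i << ": " << cells_per_rank[i] << " cells, "
                << dofs_per_rank[i] << " dofs, "
                << owned_dofs_per_rank[i].n_elements() << " owned indices"
                << std::endl;
}

Writing the all_gather call at each use site makes the global communication, and its cost on many ranks, visible where it happens, which is the point of the change in the hunks above.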