From: Wolfgang Bangerth
Date: Thu, 27 Oct 2016 17:37:29 +0000 (-0500)
Subject: Use subdomain_id consistently for ghost owners.
X-Git-Tag: v8.5.0-rc1~533^2~2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3ec1cce185cc51468b0090e6ca3fce1d2744abeb;p=dealii.git

Use subdomain_id consistently for ghost owners.
---

diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index f395fc8840..047cce0a98 100644
--- a/include/deal.II/distributed/tria_base.h
+++ b/include/deal.II/distributed/tria_base.h
@@ -137,7 +137,8 @@ namespace parallel
      * @note: If @p i is contained in the list of processor @p j, then @p j
      * will also be contained in the list of processor @p i.
      */
-    const std::set<unsigned int> &ghost_owners () const;
+    const std::set<types::subdomain_id> &
+    ghost_owners () const;
 
     /**
      * Return a set of MPI ranks of the processors that have at least one
@@ -148,7 +149,8 @@ namespace parallel
      * @note: If @p i is contained in the list of processor @p j, then @p j
      * will also be contained in the list of processor @p i.
      */
-    const std::set<unsigned int> &level_ghost_owners () const;
+    const std::set<types::subdomain_id> &
+    level_ghost_owners () const;
 
   protected:
     /**
@@ -179,28 +181,28 @@ namespace parallel
        * This vector stores the number of locally owned active cells per MPI
        * rank.
        */
-      std::vector<unsigned int> n_locally_owned_active_cells;
+      std::vector<unsigned int>     n_locally_owned_active_cells;
       /**
        * The total number of active cells (sum of @p
        * n_locally_owned_active_cells).
        */
-      types::global_dof_index n_global_active_cells;
+      types::global_dof_index       n_global_active_cells;
       /**
        * The global number of levels computed as the maximum number of levels
        * taken over all MPI ranks, so n_levels()<=n_global_levels =
        * max(n_levels() on proc i).
        */
-      unsigned int n_global_levels;
+      unsigned int                  n_global_levels;
       /**
        * A set containing the subdomain_id (MPI rank) of the owners of the
       * ghost cells on this processor.
       */
-      std::set<unsigned int> ghost_owners;
+      std::set<types::subdomain_id> ghost_owners;
       /**
        * A set containing the MPI ranks of the owners of the level ghost cells
        * on this processor (for all levels).
       */
-      std::set<unsigned int> level_ghost_owners;
+      std::set<types::subdomain_id> level_ghost_owners;
 
       NumberCache();
     };
@@ -211,8 +213,6 @@ namespace parallel
      * Update the number_cache variable after mesh creation or refinement.
      */
     virtual void update_number_cache ();
-
-
   };
 
 } // namespace parallel
diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc
index fc064e922b..ea0b8025cc 100644
--- a/source/distributed/shared_tria.cc
+++ b/source/distributed/shared_tria.cc
@@ -93,7 +93,7 @@ namespace parallel
     }
 
     template <int dim, int spacedim>
-    const std::vector<unsigned int> &
+    const std::vector<types::subdomain_id> &
     Triangulation<dim,spacedim>::get_true_subdomain_ids_of_cells() const
     {
       return true_subdomain_ids_of_cells;
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 2712756b45..f277ad2cce 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -516,10 +516,10 @@ namespace
 
   template <int dim, int spacedim>
   void
-  match_quadrant (const dealii::Triangulation<dim,spacedim> *tria,
-                  unsigned int dealii_index,
+  match_quadrant (const dealii::Triangulation<dim,spacedim>      *tria,
+                  unsigned int                                    dealii_index,
                   typename internal::p4est::types<dim>::quadrant &ghost_quadrant,
-                  unsigned int ghost_owner)
+                  types::subdomain_id                             ghost_owner)
   {
     int i, child_id;
     int l = ghost_quadrant.level;
@@ -937,7 +937,7 @@ namespace
 
     void build_lists (const typename Triangulation<dim,spacedim>::cell_iterator &cell,
                       const typename internal::p4est::types<dim>::quadrant &p4est_cell,
-                      const unsigned int myid);
+                      const types::subdomain_id myid);
   };
 
 
@@ -1935,21 +1935,27 @@ namespace parallel
         int dummy = 0;
         unsigned int req_counter = 0;
 
-        for (std::set<unsigned int>::iterator it = this->number_cache.level_ghost_owners.begin();
+        for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
              it != this->number_cache.level_ghost_owners.end();
              ++it, ++req_counter)
           {
-            MPI_Isend(&dummy, 1, MPI_INT,
+            Assert (typeid(types::subdomain_id)
+                    == typeid(unsigned int),
+                    ExcNotImplemented());
+            MPI_Isend(&dummy, 1, MPI_UNSIGNED,
                       *it, 9001, this->mpi_communicator,
                       &requests[req_counter]);
           }
 
-        for (std::set<unsigned int>::iterator it = this->number_cache.level_ghost_owners.begin();
+        for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
              it != this->number_cache.level_ghost_owners.end();
              ++it)
           {
-            int dummy;
-            MPI_Recv(&dummy, 1, MPI_INT,
+            Assert (typeid(types::subdomain_id)
+                    == typeid(unsigned int),
+                    ExcNotImplemented());
+            unsigned int dummy;
+            MPI_Recv(&dummy, 1, MPI_UNSIGNED,
                       *it, 9001, this->mpi_communicator,
                       MPI_STATUS_IGNORE);
           }
@@ -1961,7 +1967,8 @@ namespace parallel
         }
 #endif
 
-        Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator), ExcInternalError());
+        Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator),
+               ExcInternalError());
       }
 
     }
@@ -2629,7 +2636,7 @@
       // every ghostquadrant, find corresponding deal coarsecell and
       // recurse.
       typename dealii::internal::p4est::types<dim>::quadrant *quadr;
-      unsigned int ghost_owner=0;
+      types::subdomain_id ghost_owner=0;
       typename dealii::internal::p4est::types<dim>::topidx ghost_tree=0;
 
      for (unsigned int g_idx=0; g_idx<parallel_ghost->ghosts.elem_count; ++g_idx)
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 15020f7b11..af05030c33 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -230,7 +230,7 @@ namespace parallel
   }
 
   template <int dim, int spacedim>
-  const std::set<unsigned int> &
+  const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
   ghost_owners () const
   {
@@ -238,7 +238,7 @@ namespace parallel
   }
 
   template <int dim, int spacedim>
-  const std::set<unsigned int> &
+  const std::set<types::subdomain_id> &
   Triangulation<dim,spacedim>::
   level_ghost_owners () const
   {
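
For context, a minimal usage sketch of the interface this patch touches; it is not part of the commit. It iterates over the ghost owners of a distributed triangulation using the types::subdomain_id element type that the patch settles on. The mesh setup, the 2D template argument, the refinement depth, and the output statement are illustrative assumptions only, and the sketch presumes a deal.II build of roughly this era with MPI and p4est enabled.

#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>
#include <set>

int main (int argc, char **argv)
{
  // Initialize MPI for the lifetime of the program.
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  // A small distributed mesh, refined enough that most ranks see ghost cells.
  dealii::parallel::distributed::Triangulation<2> triangulation (MPI_COMM_WORLD);
  dealii::GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (4);

  // After this patch, ghost_owners() returns a std::set<types::subdomain_id>
  // rather than a std::set<unsigned int>, so the local variables spell out
  // that type. The @note in the header guarantees symmetry: if rank i lists
  // rank j here, then rank j also lists rank i.
  const std::set<dealii::types::subdomain_id> &owners = triangulation.ghost_owners ();

  for (std::set<dealii::types::subdomain_id>::const_iterator it = owners.begin ();
       it != owners.end (); ++it)
    std::cout << "Rank " << triangulation.locally_owned_subdomain ()
              << " has ghost cells owned by rank " << *it << std::endl;

  return 0;
}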