From ee678b7bc6128d536da1fb2cd4b16bc748bc0dc3 Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Sun, 28 Feb 2021 10:39:45 +0100
Subject: [PATCH] Add Triangulation/DoFHandler::get_communicator()

---
 doc/news/changes/major/20210228Munch          |  6 +++
 include/deal.II/distributed/tria_base.h       |  6 +--
 include/deal.II/dofs/dof_handler.h            | 18 +++++++++
 include/deal.II/grid/tria.h                   |  7 ++++
 .../mg_transfer_global_coarsening.templates.h | 37 +++++++------------
 source/distributed/tria_base.cc               |  6 +--
 source/grid/tria.cc                           |  8 ++++
 7 files changed, 58 insertions(+), 30 deletions(-)
 create mode 100644 doc/news/changes/major/20210228Munch

diff --git a/doc/news/changes/major/20210228Munch b/doc/news/changes/major/20210228Munch
new file mode 100644
index 0000000000..dc73da8ca1
--- /dev/null
+++ b/doc/news/changes/major/20210228Munch
@@ -0,0 +1,6 @@
+New: The communicator of an arbitrary (not just parallel) Triangulation class can now be
+queried via Triangulation::get_communicator() or DoFHandler::get_communicator(). In
+the case of serial Triangulations and DoFHandlers set up with serial Triangulations,
+MPI_COMM_SELF is returned.
+
+(Peter Munch, 2021/02/28)
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index 19bcf9f8dc..e11f58764f 100644
--- a/include/deal.II/distributed/tria_base.h
+++ b/include/deal.II/distributed/tria_base.h
@@ -96,8 +96,8 @@ namespace parallel
     /**
      * Return MPI communicator used by this triangulation.
      */
-    virtual const MPI_Comm &
-    get_communicator() const;
+    virtual MPI_Comm
+    get_communicator() const override;
 
     /**
      * Return if multilevel hierarchy is supported and has been constructed.
@@ -308,7 +308,7 @@ namespace parallel
      * communicator for this class, which is a duplicate of the one passed to
      * the constructor.
      */
-    MPI_Comm mpi_communicator;
+    const MPI_Comm mpi_communicator;
 
     /**
      * The subdomain id to be used for the current processor. This is the MPI
diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h
index c74286fd7f..99601c45c7 100644
--- a/include/deal.II/dofs/dof_handler.h
+++ b/include/deal.II/dofs/dof_handler.h
@@ -1267,6 +1267,12 @@ public:
   const Triangulation<dim, spacedim> &
   get_triangulation() const;
 
+  /**
+   * Return the MPI communicator used by the underlying triangulation.
+   */
+  MPI_Comm
+  get_communicator() const;
+
   /**
    * Whenever serialization with a parallel::distributed::Triangulation as the
    * underlying triangulation is considered, we also need to consider storing
@@ -2019,6 +2025,18 @@ DoFHandler<dim, spacedim>::get_triangulation() const
 
 
 
+template <int dim, int spacedim>
+inline MPI_Comm
+DoFHandler<dim, spacedim>::get_communicator() const
+{
+  Assert(tria != nullptr,
+         ExcMessage("This DoFHandler object has not been associated "
+                    "with a triangulation."));
+  return tria->get_communicator();
+}
+
+
+
 template <int dim, int spacedim>
 inline const BlockInfo &
 DoFHandler<dim, spacedim>::block_info() const
diff --git a/include/deal.II/grid/tria.h b/include/deal.II/grid/tria.h
index c8e4eb1166..a840993ab3 100644
--- a/include/deal.II/grid/tria.h
+++ b/include/deal.II/grid/tria.h
@@ -1614,6 +1614,13 @@ public:
   virtual void
   clear();
 
+  /**
+   * Return the MPI communicator used by this triangulation. In the case of
+   * a serial Triangulation object, MPI_COMM_SELF is returned.
+   */
+  virtual MPI_Comm
+  get_communicator() const;
+
   /**
    * Set the mesh smoothing to @p mesh_smoothing. This overrides the
    * MeshSmoothing given to the constructor. It is allowed to call this
diff --git a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
index 933f550cbc..43e9856705 100644
--- a/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
+++ b/include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
@@ -238,19 +238,6 @@ namespace internal
 {
   namespace
   {
-    template <typename MeshType>
-    MPI_Comm
-    get_mpi_comm(const MeshType &mesh)
-    {
-      const auto *tria_parallel = dynamic_cast<
-        const dealii::parallel::TriangulationBase<MeshType::dimension,
-                                                  MeshType::space_dimension> *>(
-        &(mesh.get_triangulation()));
-
-      return tria_parallel != nullptr ? tria_parallel->get_communicator() :
-                                        MPI_COMM_SELF;
-    }
-
     template <int dim>
     unsigned int
     compute_shift_within_children(const unsigned int child,
@@ -516,7 +503,8 @@ namespace internal
     FineDoFHandlerView(const MeshType &mesh_fine, const MeshType &mesh_coarse)
       : mesh_fine(mesh_fine)
       , mesh_coarse(mesh_coarse)
-      , communicator(get_mpi_comm(mesh_fine) /*TODO: fix for different comms*/)
+      , communicator(
+          mesh_fine.get_communicator() /*TODO: fix for different comms*/)
       , cell_id_translator(n_coarse_cells(mesh_fine),
                            n_global_levels(mesh_fine))
     {
@@ -873,7 +861,7 @@ namespace internal
             n_coarse_cells =
               std::max(n_coarse_cells, cell->id().get_coarse_cell_id());
 
-      return Utilities::MPI::max(n_coarse_cells, get_mpi_comm(mesh)) + 1;
+      return Utilities::MPI::max(n_coarse_cells, mesh.get_communicator()) + 1;
     }
 
     static unsigned int
@@ -973,9 +961,10 @@ namespace internal
             std::max(max_active_fe_indices[1], cell->active_fe_index());
         }
 
-      const auto comm = get_mpi_comm(dof_handler_fine);
+      const auto comm = dof_handler_fine.get_communicator();
 
-      Assert(comm == get_mpi_comm(dof_handler_coarse), ExcNotImplemented());
+      Assert(comm == dof_handler_coarse.get_communicator(),
+             ExcNotImplemented());
 
       ArrayView<unsigned int> temp_min(min_active_fe_indices);
       ArrayView<unsigned int> temp_max(max_active_fe_indices);
@@ -1002,10 +991,10 @@ namespace internal
     {
       // ... for fine mesh
       {
-        transfer.partitioner_fine.reset(
-          new Utilities::MPI::Partitioner(view.locally_owned_dofs(),
-                                          view.locally_relevant_dofs(),
-                                          get_mpi_comm(dof_handler_fine)));
+        transfer.partitioner_fine.reset(new Utilities::MPI::Partitioner(
+          view.locally_owned_dofs(),
+          view.locally_relevant_dofs(),
+          dof_handler_fine.get_communicator()));
         transfer.vec_fine.reinit(transfer.partitioner_fine);
       }
 
@@ -1018,7 +1007,7 @@ namespace internal
         transfer.partitioner_coarse.reset(new Utilities::MPI::Partitioner(
           dof_handler_coarse.locally_owned_dofs(),
           locally_relevant_dofs,
-          get_mpi_comm(dof_handler_coarse)));
+          dof_handler_coarse.get_communicator()));
         transfer.vec_coarse.reinit(transfer.partitioner_coarse);
       }
     }
@@ -1324,7 +1313,7 @@ namespace internal
         std::make_shared<Utilities::MPI::Partitioner>(
           dof_handler_fine.locally_owned_dofs(),
           locally_relevant_dofs,
-          get_mpi_comm(dof_handler_fine));
+          dof_handler_fine.get_communicator());
       transfer.vec_fine.reinit(transfer.partitioner_fine);
       touch_count_.reinit(partitioner_fine_);
 
@@ -1487,7 +1476,7 @@ namespace internal
 
       transfer.constraint_coarse.copy_from(constraint_coarse);
 
-      const auto comm = get_mpi_comm(dof_handler_coarse);
+      const auto comm = dof_handler_coarse.get_communicator();
       {
         IndexSet locally_relevant_dofs;
         DoFTools::extract_locally_relevant_dofs(dof_handler_fine,
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index a27a626cee..b379bb8ffa 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -90,7 +90,7 @@ namespace parallel
   {
     std::size_t mem =
       this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
-      MemoryConsumption::memory_consumption(mpi_communicator) +
+      MemoryConsumption::memory_consumption(this->mpi_communicator) +
       MemoryConsumption::memory_consumption(my_subdomain) +
       MemoryConsumption::memory_consumption(
         number_cache.n_global_active_cells) +
@@ -135,7 +135,7 @@ namespace parallel
   }
 
   template <int dim, int spacedim>
-  const MPI_Comm &
+  MPI_Comm
   TriangulationBase<dim, spacedim>::get_communicator() const
   {
     return mpi_communicator;
@@ -170,7 +170,7 @@ namespace parallel
           number_cache.ghost_owners.insert(cell->subdomain_id());
 
     Assert(number_cache.ghost_owners.size() <
-             Utilities::MPI::n_mpi_processes(mpi_communicator),
+             Utilities::MPI::n_mpi_processes(this->mpi_communicator),
           ExcInternalError());
   }
 
diff --git a/source/grid/tria.cc b/source/grid/tria.cc
index 1fd8e850c1..7884f80b19 100644
--- a/source/grid/tria.cc
+++ b/source/grid/tria.cc
@@ -10190,6 +10190,14 @@ Triangulation<dim, spacedim>::clear()
 }
 
 
+template <int dim, int spacedim>
+MPI_Comm
+Triangulation<dim, spacedim>::get_communicator() const
+{
+  return MPI_COMM_SELF;
+}
+
+
 
 template <int dim, int spacedim>
 void
-- 
2.39.5
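
Note (not part of the patch): the helper removed from mg_transfer_global_coarsening.templates.h had to dynamic_cast to parallel::TriangulationBase in order to choose between the mesh's communicator and MPI_COMM_SELF. With Triangulation::get_communicator() and DoFHandler::get_communicator() that branch lives in the library, and callers can query any mesh uniformly. The following is a minimal usage sketch, assuming a deal.II build with MPI and p4est; the mesh setup and the reduction via Utilities::MPI::sum are illustrative only and do not appear in the patch.

#include <deal.II/base/mpi.h>

#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // A serial triangulation: get_communicator() returns MPI_COMM_SELF.
  Triangulation<2> serial_tria;
  GridGenerator::hyper_cube(serial_tria);

  // A distributed triangulation: get_communicator() returns the
  // communicator the triangulation was constructed with.
  parallel::distributed::Triangulation<2> parallel_tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(parallel_tria);
  parallel_tria.refine_global(2);

  DoFHandler<2> dof_handler(parallel_tria);

  // The same call works for both objects; no cast to
  // parallel::TriangulationBase is needed.
  const MPI_Comm comm_serial   = serial_tria.get_communicator();
  const MPI_Comm comm_parallel = dof_handler.get_communicator();

  // Example use: a reduction over whatever communicator the mesh at hand
  // provides; for a serial mesh this degenerates to a sum over
  // MPI_COMM_SELF.
  const unsigned int n_local  = parallel_tria.n_locally_owned_active_cells();
  const unsigned int n_global = Utilities::MPI::sum(n_local, comm_parallel);

  (void)comm_serial;
  (void)n_global;
}

Because the serial case now reports MPI_COMM_SELF instead of requiring a cast, generic code such as the global-coarsening transfer above can treat serial and parallel meshes identically, which is exactly what the removal of get_mpi_comm() relies on.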