From: David Wells
Date: Sat, 7 Nov 2020 17:42:48 +0000 (-0500)
Subject: Merge pull request #11109 from tjhei/mpi_comm_ref
X-Git-Tag: v9.3.0-rc1~925
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=899b683bb8aaae55e025a55b35a17b24afd035db;p=dealii.git

Merge pull request #11109 from tjhei/mpi_comm_ref

pass MPI_Comm by const ref
---

899b683bb8aaae55e025a55b35a17b24afd035db
diff --cc source/distributed/tria_base.cc
index 97d475ddaf,e22a435e63..5306b37e9e
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@@ -372,182 -342,9 +372,182 @@@ namespace parallel
 +  template <int dim, int spacedim>
 +  void
 +  TriangulationBase<dim, spacedim>::reset_global_cell_indices()
 +  {
 +#ifndef DEAL_II_WITH_MPI
 +    Assert(false, ExcNeedsMPI());
 +#else
 +
 +    // currently only implemented for distributed triangulations
 +    if (dynamic_cast<parallel::DistributedTriangulationBase<dim, spacedim>
 +          *>(this) == nullptr)
 +      return;
 +
 +    // 1) determine number of active locally-owned cells
 +    const types::global_cell_index n_locally_owned_cells =
 +      this->n_locally_owned_active_cells();
 +
 +    // 2) determine the offset of each process
 +    types::global_cell_index cell_index = 0;
 +
 +    MPI_Exscan(&n_locally_owned_cells,
 +               &cell_index,
 +               1,
 +               Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
 +               MPI_SUM,
 +               this->mpi_communicator);
 +
 +    // 3) give global indices to locally-owned cells and mark all other cells as
 +    // invalid
 +    for (const auto &cell : this->active_cell_iterators())
 +      if (cell->is_locally_owned())
 +        cell->set_global_active_cell_index(cell_index++);
 +      else
 +        cell->set_global_active_cell_index(numbers::invalid_dof_index);
 +
 +    // 4) determine the global indices of ghost cells
 +    GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
 +      *this,
 +      [](const auto &cell) { return cell->global_active_cell_index(); },
 +      [](const auto &cell, const auto &id) {
 +        cell->set_global_active_cell_index(id);
 +      });
 +
 +    // 5) set up new partitioner
 +    IndexSet is_local(this->n_global_active_cells());
 +    IndexSet is_ghost(this->n_global_active_cells());
 +
 +    for (const auto &cell : this->active_cell_iterators())
 +      if (!cell->is_artificial())
 +        {
 +          const auto index = cell->global_active_cell_index();
 +
 +          if (index == numbers::invalid_dof_index)
 +            continue;
 +
 +          if (cell->is_locally_owned())
 +            is_local.add_index(index);
 +          else
 +            is_ghost.add_index(index);
 +        }
 +
 +    number_cache.active_cell_index_partitioner =
 +      Utilities::MPI::Partitioner(is_local, is_ghost, this->mpi_communicator);
 +
 +    // 6) proceed with multigrid levels if requested
 +    if (this->is_multilevel_hierarchy_constructed() == true)
 +      {
 +        // 1) determine number of locally-owned cells on levels
 +        std::vector<types::global_cell_index> n_locally_owned_cells(
 +          this->n_global_levels(), 0);
 +
 +        for (auto cell : this->cell_iterators())
 +          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
 +            n_locally_owned_cells[cell->level()]++;
 +
 +        // 2) determine the offset of each process
 +        std::vector<types::global_cell_index> cell_index(
 +          this->n_global_levels(), 0);
 +
 +        MPI_Exscan(n_locally_owned_cells.data(),
 +                   cell_index.data(),
 +                   this->n_global_levels(),
 +                   Utilities::MPI::internal::mpi_type_id(
 +                     n_locally_owned_cells.data()),
 +                   MPI_SUM,
 +                   this->mpi_communicator);
 +
 +        // 3) determine global number of "active" cells on each level
 +        std::vector<types::global_cell_index> n_cells_level(
 +          this->n_global_levels(), 0);
 +
 +        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
 +          n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
 +
 +        MPI_Bcast(n_cells_level.data(),
 +                  this->n_global_levels(),
 +                  Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
 +                  this->n_subdomains - 1,
 +                  this->mpi_communicator);
 +
 +        // 4) give global indices to locally-owned cells on level and mark
 +        // all other cells as invalid
 +        for (auto cell : this->cell_iterators())
 +          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
 +            cell->set_global_level_cell_index(cell_index[cell->level()]++);
 +          else
 +            cell->set_global_level_cell_index(numbers::invalid_dof_index);
 +
 +        // 5) update the numbers of ghost level cells
 +        GridTools::exchange_cell_data_to_level_ghosts<
 +          types::global_cell_index,
 +          dealii::Triangulation<dim, spacedim>>(
 +          *this,
 +          [](const auto &cell) { return cell->global_level_cell_index(); },
 +          [](const auto &cell, const auto &id) {
 +            return cell->set_global_level_cell_index(id);
 +          });
 +
 +        number_cache.level_cell_index_partitioners.resize(
 +          this->n_global_levels());
 +
 +        // 6) set up cell partitioners for each level
 +        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
 +          {
 +            IndexSet is_local(n_cells_level[l]);
 +            IndexSet is_ghost(n_cells_level[l]);
 +
 +            for (const auto &cell : this->cell_iterators_on_level(l))
 +              if (cell->level_subdomain_id() !=
 +                  dealii::numbers::artificial_subdomain_id)
 +                {
 +                  const auto index = cell->global_level_cell_index();
 +
 +                  if (index == numbers::invalid_dof_index)
 +                    continue;
 +
 +                  if (cell->level_subdomain_id() ==
 +                      this->locally_owned_subdomain())
 +                    is_local.add_index(index);
 +                  else
 +                    is_ghost.add_index(index);
 +                }
 +
 +            number_cache.level_cell_index_partitioners[l] =
 +              Utilities::MPI::Partitioner(is_local,
 +                                          is_ghost,
 +                                          this->mpi_communicator);
 +          }
 +      }
 +
 +#endif
 +  }
 +
 +
 +
 +  template <int dim, int spacedim>
 +  const Utilities::MPI::Partitioner &
 +  TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
 +  {
 +    return number_cache.active_cell_index_partitioner;
 +  }
 +
 +  template <int dim, int spacedim>
 +  const Utilities::MPI::Partitioner &
 +  TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
 +    const unsigned int level) const
 +  {
 +    Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
 +    AssertIndexRange(level, this->n_global_levels());
 +
 +    return number_cache.level_cell_index_partitioners[level];
 +  }
 +
 +  template <int dim, int spacedim>
 +  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
-     MPI_Comm mpi_communicator,
+     const MPI_Comm &mpi_communicator,
      const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
        smooth_grid,
      const bool check_for_distorted_cells)
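
A note for readers of this diff: the heart of step 2) above is an exclusive
scan, in which each rank contributes its locally-owned cell count and receives
the sum of the counts on all lower ranks, i.e. the first global index it owns.
The standalone C++ sketch below reproduces that pattern outside deal.II under
stated assumptions: the helper name first_global_index is hypothetical, the
per-rank counts are made up, and MPI_UNSIGNED_LONG_LONG stands in for the
library's Utilities::MPI::internal::mpi_type_id() lookup. The communicator is
passed by const reference, mirroring the signature change this merge brings in.

#include <mpi.h>

#include <cstdio>

// Hypothetical helper (not a deal.II function): returns the first global
// index owned by this rank, given its locally-owned cell count.
unsigned long long
first_global_index(const MPI_Comm &comm, const unsigned long long n_local)
{
  // MPI_Exscan defines no output on rank 0, so pre-initialize the result;
  // the library code above relies on the same zero-initialization.
  unsigned long long offset = 0;
  MPI_Exscan(&n_local, &offset, 1, MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm);
  return offset;
}

int
main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Made-up workload: pretend this rank owns (rank + 1) cells.
  const unsigned long long n_local = rank + 1;
  const unsigned long long offset  = first_global_index(MPI_COMM_WORLD, n_local);

  std::printf("rank %d owns global indices [%llu, %llu)\n",
              rank,
              offset,
              offset + n_local);

  MPI_Finalize();
  return 0;
}

On four ranks this prints, one line per rank in some order, the half-open
ranges [0, 1), [1, 3), [3, 6), and [6, 10). In the library code the resulting
offset seeds cell_index, which step 3) then hands out consecutively to the
locally-owned active cells.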