From 1b309f9a5200c8ddbebf46fa3f6b23fdccae834e Mon Sep 17 00:00:00 2001
From: Wolfgang Bangerth
Date: Tue, 30 Jan 2024 16:57:45 -0700
Subject: [PATCH] More work in tria_description.h/cc.

No functional changes, just better documentation and a bit better error
checking.
---
 include/deal.II/grid/tria_description.h |  14 ++-
 source/grid/tria_description.cc         | 132 +++++++++++++++++-------
 2 files changed, 107 insertions(+), 39 deletions(-)

diff --git a/include/deal.II/grid/tria_description.h b/include/deal.II/grid/tria_description.h
index 5beec1f5f3..54518f5f7e 100644
--- a/include/deal.II/grid/tria_description.h
+++ b/include/deal.II/grid/tria_description.h
@@ -592,7 +592,14 @@ namespace TriangulationDescription
    * that may be partitioned differently than the desired partitioning.
    *
    * If the setup of multigrid levels is requested by the @p settings argument,
-   * they are partitioned according to a first-child policy.
+   * they are partitioned according to a first-child policy. In other words,
+   * a process owns a (non-active) cell if it owns its first child. If that
+   * first child is not active itself, the policy is applied recursively.
+   *
+   * The partitioning of cells is determined based on the elements of the
+   * `partition` vector. While that vector stores elements of type `double`,
+   * the actual values must be integers and lie within the range of process
+   * ranks of the relevant communicator.
    *
    * @note The communicator is extracted from the vector @p partition.
    *
@@ -615,7 +622,10 @@ namespace TriangulationDescription
 
   /**
    * Similar to the above function but allowing the user to prescribe the
-   * partitioning of the multigrid levels.
+   * partitioning of the multigrid levels. As with the other function,
+   * while both the global and level-wise partition vectors store elements
+   * of type `double`, the actual values must be integers that correspond
+   * to valid process ranks within the relevant communicator.
    */
   template <int dim, int spacedim>
   Description<dim, spacedim>
diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc
index 7f94c64d59..5da8f51d3e 100644
--- a/source/grid/tria_description.cc
+++ b/source/grid/tria_description.cc
@@ -109,35 +109,47 @@ namespace TriangulationDescription
        */
       void
       collect(
-        const std::vector<unsigned int> &relevant_processes,
+        const std::vector<unsigned int> &future_owners_of_locally_owned_cells,
         const std::vector<DescriptionTemp<dim, spacedim>> &description_temp,
         const MPI_Comm comm,
         const bool vertices_have_unique_ids)
       {
+        // Use the some-to-some version of the consensus algorithm framework
+        // whereby we send requests to other processes that then deal with
+        // them but do not send anything back.
+        //
+        // Note that the input (description_temp) *may* contain an entry for
+        // the current process. As documented, the consensus algorithm will
+        // simply copy that into the output queue, i.e., it will call
+        // process_request() on it as well, and the data will simply come
+        // back out on the local process.
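+        //
+        // (Illustrative sketch, not part of this patch: the selector
+        // overload used below takes the list of target ranks, a callback
+        // that builds the payload for one target, and a callback that
+        // consumes a payload on the receiving side. Schematically:
+        // @code
+        //   std::vector<unsigned int> targets = ...; // ranks we talk to
+        //   Utilities::MPI::ConsensusAlgorithms::selector<PayloadType>(
+        //     targets,
+        //     [&](const unsigned int rank) { return payload_for(rank); },
+        //     [&](const unsigned int, const PayloadType &payload) {
+        //       consume(payload); // no answer is sent back
+        //     },
+        //     comm);
+        // @endcode
+        // Here, PayloadType, payload_for(), and consume() are placeholder
+        // names; below, the payload is a DescriptionTemp object.)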
        const auto create_request = [&](const unsigned int other_rank) {
-         const auto ptr = std::find(relevant_processes.begin(),
-                                    relevant_processes.end(),
-                                    other_rank);
+         const auto ptr =
+           std::find(future_owners_of_locally_owned_cells.begin(),
+                     future_owners_of_locally_owned_cells.end(),
+                     other_rank);
 
-         Assert(ptr != relevant_processes.end(), ExcInternalError());
+         Assert(ptr != future_owners_of_locally_owned_cells.end(),
+                ExcInternalError());
 
          const auto other_rank_index =
-           std::distance(relevant_processes.begin(), ptr);
+           std::distance(future_owners_of_locally_owned_cells.begin(), ptr);
 
          return description_temp[other_rank_index];
        };
 
        const auto process_request = [&](const unsigned int,
-                                         const DescriptionTemp<dim, spacedim> &request) {
-         this->merge(request, vertices_have_unique_ids);
-       };
+                                         const DescriptionTemp<dim, spacedim> &request) -> void {
+         this->merge(request, vertices_have_unique_ids);
+       };
 
        dealii::Utilities::MPI::ConsensusAlgorithms::selector<
-         DescriptionTemp<dim, spacedim>>(relevant_processes,
-                                         create_request,
-                                         process_request,
-                                         comm);
+         DescriptionTemp<dim, spacedim>>(
+         future_owners_of_locally_owned_cells,
+         create_request,
+         process_request,
+         comm);
      }
 
      /**
@@ -1013,27 +1025,66 @@ namespace TriangulationDescription
            settings_in);
       }
 
+      // Update the partitioner's ghost elements because we will later also
+      // want to ask about the future owners of ghost cells.
       partition.update_ghost_values();
-
       for (const auto &partition : partitions_mg)
         partition.update_ghost_values();
 
-      // 1) determine processes owning locally owned cells
-      const std::vector<unsigned int> relevant_processes = [&]() {
-        std::set<unsigned int> relevant_processes;
-
-        for (unsigned int i = 0; i < partition.locally_owned_size(); ++i)
-          relevant_processes.insert(
-            static_cast<unsigned int>(partition.local_element(i)));
-
-        for (const auto &partition : partitions_mg)
-          for (unsigned int i = 0; i < partition.locally_owned_size(); ++i)
-            relevant_processes.insert(
-              static_cast<unsigned int>(partition.local_element(i)));
-
-        return std::vector<unsigned int>(relevant_processes.begin(),
-                                         relevant_processes.end());
-      }();
+      // 1) Determine the process ids that appear on locally owned cells.
+      //    Create a sorted vector by first creating a std::set and then
+      //    copying the result. (Note that only locally *owned* cells
+      //    contribute to the output because we only loop over the locally
+      //    *owned* entries of the partitioning vector, even though
+      //    'partition.local_element(i)' could also return locally relevant
+      //    elements if 'i' were to exceed the number of locally owned
+      //    elements.)
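+      //
+      //    (Illustrative sketch, not part of this patch: for a ghosted
+      //    LinearAlgebra::distributed::Vector<double> 'v', the locally
+      //    stored elements are laid out with owned entries first and ghost
+      //    entries second, i.e.,
+      //    @code
+      //      for (unsigned int i = 0; i < v.locally_owned_size(); ++i)
+      //        do_something(v.local_element(i)); // locally owned entries
+      //      // indices i >= v.locally_owned_size() address ghost entries
+      //    @endcode
+      //    where do_something() is a placeholder. This layout is why the
+      //    loops below deliberately stop at locally_owned_size().)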
+      const std::vector<unsigned int> future_owners_of_locally_owned_cells =
+        [&partition, &partitions_mg]() {
+          std::set<unsigned int> relevant_process_set;
+
+          const unsigned int n_mpi_ranks =
+            dealii::Utilities::MPI::n_mpi_processes(
+              partition.get_mpi_communicator());
+          (void)n_mpi_ranks;
+
+          for (unsigned int i = 0; i < partition.locally_owned_size(); ++i)
+            {
+              Assert(static_cast<unsigned int>(partition.local_element(i)) ==
+                       partition.local_element(i),
+                     ExcMessage(
+                       "The elements of a partition vector must be integers."));
+              Assert(
+                partition.local_element(i) < n_mpi_ranks,
+                ExcMessage(
+                  "The elements of a partition vector must be between zero "
+                  "and the number of processes in the communicator "
+                  "to be used for partitioning the triangulation."));
+              relevant_process_set.insert(
+                static_cast<unsigned int>(partition.local_element(i)));
+            }
+
+          for (const auto &partition : partitions_mg)
+            for (unsigned int i = 0; i < partition.locally_owned_size(); ++i)
+              {
+                Assert(
+                  static_cast<unsigned int>(partition.local_element(i)) ==
+                    partition.local_element(i),
+                  ExcMessage(
+                    "The elements of a partition vector must be integers."));
+                Assert(
+                  partition.local_element(i) < n_mpi_ranks,
+                  ExcMessage(
+                    "The elements of a partition vector must be between zero "
+                    "and the number of processes in the communicator "
+                    "to be used for partitioning the triangulation."));
+                relevant_process_set.insert(
+                  static_cast<unsigned int>(partition.local_element(i)));
+              }
+
+          return std::vector<unsigned int>(relevant_process_set.begin(),
+                                           relevant_process_set.end());
+        }();
 
       const bool construct_multigrid = (partitions_mg.size() > 0);
 
@@ -1044,7 +1095,11 @@ namespace TriangulationDescription
              construct_multigrid_hierarchy) :
            settings_in);
 
-      const auto subdomain_id_function =
+
+      // Set up a function that returns the future owner rank for a given
+      // cell, and similarly one for the level owner. These functions work
+      // for locally owned and ghost cells.
+      const auto cell_to_future_owner =
         [&partition](const auto &cell) -> types::subdomain_id {
         if ((cell->is_active() && (cell->is_artificial() == false)))
           return static_cast<types::subdomain_id>(
             partition[cell->global_active_cell_index()]);
@@ -1053,7 +1108,7 @@ namespace TriangulationDescription
         return numbers::artificial_subdomain_id;
       };
 
-      const auto level_subdomain_id_function =
+      const auto mg_cell_to_future_owner =
         [&construct_multigrid,
          &partitions_mg](const auto &cell) -> types::subdomain_id {
         if (construct_multigrid && (cell->is_artificial_on_level() == false))
           return static_cast<types::subdomain_id>(
             partitions_mg[cell->level()][cell->global_level_cell_index()]);
         return numbers::artificial_subdomain_id;
       };
 
@@ -1063,10 +1118,13 @@ namespace TriangulationDescription
-      // create a description (locally owned cell and a layer of ghost cells
-      // and all their parents)
+      // Create a description (locally owned cells and a layer of ghost cells
+      // and all their parents). We first create a description in the
+      // 'temporary' format (using class DescriptionTemp), which we will
+      // later convert to its final form.
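+      //
+      // (Illustrative sketch, not part of this patch: the remaining steps
+      // conceptually amount to
+      // @code
+      //   for (const unsigned int rank : future_owners_of_locally_owned_cells)
+      //     descriptions_per_rank.emplace_back(
+      //       build_piece_for(rank)); // owned cells headed to 'rank', plus
+      //                               // their ghost layer and parents
+      //   description_merged.collect(...); // exchange pieces, merge locally
+      // @endcode
+      // where build_piece_for() is a placeholder for the call to
+      // create_description_for_rank() below.)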
       std::vector<DescriptionTemp<dim, spacedim>> descriptions_per_rank;
-      descriptions_per_rank.reserve(relevant_processes.size());
+      descriptions_per_rank.reserve(
+        future_owners_of_locally_owned_cells.size());
 
       std::map<unsigned int, std::vector<unsigned int>>
         coinciding_vertex_groups;
       std::map<unsigned int, unsigned int> vertex_to_coinciding_vertex_group;
       GridTools::collect_coinciding_vertices(tria,
                                              coinciding_vertex_groups,
                                              vertex_to_coinciding_vertex_group);
 
-      for (const auto rank : relevant_processes)
+      for (const auto rank : future_owners_of_locally_owned_cells)
         descriptions_per_rank.emplace_back(
           create_description_for_rank<DescriptionTemp<dim, spacedim>>(
             tria,
-            subdomain_id_function,
-            level_subdomain_id_function,
+            cell_to_future_owner,
+            mg_cell_to_future_owner,
             coinciding_vertex_groups,
             vertex_to_coinciding_vertex_group,
             tria.get_communicator(),
             rank,
             settings));
 
-      // collect description from all processes that used to own locally-owned
+      // Collect description from all processes that used to own locally-owned
       // active cells of this process in a single description
       DescriptionTemp<dim, spacedim> description_merged;
       description_merged.collect(
-        relevant_processes,
+        future_owners_of_locally_owned_cells,
         descriptions_per_rank,
         partition.get_mpi_communicator(),
         dynamic_cast<
-- 
2.39.5
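
Usage sketch (not part of the patch): the overload documented above would
typically be combined with a fully distributed triangulation along the
following lines; 'tria', 'partition', 'comm', and 'dim' stand in for
objects from the surrounding application code:

    #include <deal.II/distributed/fully_distributed_tria.h>
    #include <deal.II/grid/tria_description.h>

    // 'partition' is a ghosted LinearAlgebra::distributed::Vector<double>
    // with one entry per active cell of 'tria' holding the (integral)
    // rank of that cell's future owner.
    const auto description =
      TriangulationDescription::Utilities::
        create_description_from_triangulation(tria, partition);

    // Build the repartitioned triangulation from the description.
    parallel::fullydistributed::Triangulation<dim> new_tria(comm);
    new_tria.create_triangulation(description);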