From: Wolfgang Bangerth
Date: Mon, 18 Dec 2023 23:41:55 +0000 (-0700)
Subject: Some cleanups to create_description_from_triangulation().
X-Git-Tag: relicensing~230^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ce1dc71d988c53b224a4bdadd97ee1fc0e9ea267;p=dealii.git

Some cleanups to create_description_from_triangulation().

Specifically, (i) group assertions at the top of the function, (ii) do not
insist on p::d::Triangulation as a parallel triangulation but accept
p::s::Triangulation as well by checking for parallel::TriangulationBase as
the common base class.
---

diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc
index cc8258aeaa..cc86a67111 100644
--- a/source/grid/tria_description.cc
+++ b/source/grid/tria_description.cc
@@ -741,42 +741,47 @@ namespace TriangulationDescription
       const TriangulationDescription::Settings settings,
       const unsigned int                        my_rank_in)
     {
-      if (const auto tria_pdt = dynamic_cast<
-            const parallel::distributed::Triangulation<dim, spacedim> *>(&tria))
-        Assert(comm == tria_pdt->get_communicator(),
-               ExcMessage("MPI communicators do not match."));
-
-      // First, figure out for what rank we are supposed to build the
-      // TriangulationDescription::Description object
-      unsigned int my_rank = my_rank_in;
-      Assert(my_rank == numbers::invalid_unsigned_int ||
-               my_rank < dealii::Utilities::MPI::n_mpi_processes(comm),
-             ExcMessage("Rank has to be smaller than available processes."));
-
-      if (auto tria_pdt = dynamic_cast<
-            const parallel::distributed::Triangulation<dim, spacedim> *>(&tria))
+      if (const auto ptria =
+            dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+              &tria))
         {
-          Assert(my_rank == numbers::invalid_unsigned_int ||
-                   my_rank == dealii::Utilities::MPI::this_mpi_process(comm),
+          Assert(comm == ptria->get_communicator(),
+                 ExcMessage("MPI communicators do not match."));
+          Assert(my_rank_in == numbers::invalid_unsigned_int ||
+                   my_rank_in == dealii::Utilities::MPI::this_mpi_process(comm),
                  ExcMessage(
-                   "For creation from a parallel::distributed::Triangulation, "
-                   "my_rank has to equal global rank."));
-
-          my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
-        }
-      else if (auto tria_serial =
-                 dynamic_cast<const dealii::Triangulation<dim, spacedim> *>(
-                   &tria))
-        {
-          if (my_rank == numbers::invalid_unsigned_int)
-            my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
+                   "For creation from a parallel::Triangulation, "
+                   "my_rank has to equal the rank of the current process "
+                   "in the given communicator."));
         }
-      else
+
+      // If we are dealing with a sequential triangulation, then someone
+      // will have needed to set the subdomain_ids by hand. Make sure that
+      // all ids we see are less than the number of processes we are
+      // supposed to split the triangulation into.
+      if (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+            &tria) == nullptr)
         {
-          Assert(false,
-                 ExcMessage("This type of triangulation is not supported!"));
+#if DEBUG
+          const unsigned int n_mpi_processes =
+            dealii::Utilities::MPI::n_mpi_processes(comm);
+          for (const auto &cell : tria.active_cell_iterators())
+            Assert(cell->subdomain_id() < n_mpi_processes,
+                   ExcMessage("You can't have a cell with subdomain_id of " +
+                              std::to_string(cell->subdomain_id()) +
+                              " when splitting the triangulation using an MPI "
+                              "communicator with only " +
+                              std::to_string(n_mpi_processes) + " processes."));
+#endif
         }
 
+      // First, figure out for what rank we are supposed to build the
+      // TriangulationDescription::Description object
+      const unsigned int my_rank =
+        (my_rank_in == numbers::invalid_unsigned_int ?
+           dealii::Utilities::MPI::this_mpi_process(comm) :
+           my_rank_in);
+
       const auto subdomain_id_function = [](const auto &cell) {
         return cell->subdomain_id();
       };
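
For reference, the code path guarded by the new assertion is the one in which a
plain (sequential) Triangulation with hand-assigned subdomain_ids is converted
into a Description and then consumed by a parallel::fullydistributed::Triangulation.
The following is only a minimal sketch of that usage, not part of this patch; it
assumes a standard MPI-enabled deal.II build, and the dimension, refinement depth,
and round-robin assignment are arbitrary illustrative choices (one would more
commonly call GridTools::partition_triangulation()):

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/fully_distributed_tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_description.h>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm comm = MPI_COMM_WORLD;

  // Every process builds the same sequential mesh; this is a plain
  // Triangulation, not a parallel::TriangulationBase.
  Triangulation<2> serial_tria;
  GridGenerator::hyper_cube(serial_tria);
  serial_tria.refine_global(3);

  // Assign subdomain_ids by hand. Each id must be smaller than the number
  // of processes in 'comm', which is exactly the condition that the new
  // assertion in create_description_from_triangulation() checks.
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
  unsigned int       count   = 0;
  for (const auto &cell : serial_tria.active_cell_iterators())
    cell->set_subdomain_id(count++ % n_ranks);

  // Build the description for the current rank and use it to set up a
  // fully distributed triangulation.
  const auto description =
    TriangulationDescription::Utilities::create_description_from_triangulation(
      serial_tria, comm);

  parallel::fullydistributed::Triangulation<2> fd_tria(comm);
  fd_tria.create_triangulation(description);

  return 0;
}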