--- /dev/null
+New: The function GridTools::get_subdomain_association() determines
+the owning process of any valid cell of a Triangulation, where each
+cell is identified by its globally unique CellId.
+<br>
+(Marc Fehling, 2021/01/07)
void
partition_multigrid_levels(Triangulation<dim, spacedim> &triangulation);
+ /**
+ * This function allows one to ask for the owning subdomain of cells
+ * identified by CellId objects, which do not have to exist on the current
+ * process. The returned vector contains one subdomain id for each entry of
+ * @p cell_ids, in the same order.
+ *
+ * @note This function has not been implemented yet for
+ * parallel::fullydistributed::Triangulation.
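+ *
+ * A minimal usage sketch (here, @p triangulation and the contents of
+ * @p cell_ids are assumed to be set up elsewhere):
+ * @code
+ * std::vector<CellId> cell_ids;
+ * // ... fill cell_ids, e.g., with ids received from other processes ...
+ *
+ * const std::vector<types::subdomain_id> owners =
+ *   GridTools::get_subdomain_association(triangulation, cell_ids);
+ * // owners[i] is the subdomain id of the process that owns the cell
+ * // identified by cell_ids[i]
+ * @endcode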
+ */
+ template <int dim, int spacedim>
+ std::vector<types::subdomain_id>
+ get_subdomain_association(const Triangulation<dim, spacedim> &triangulation,
+ const std::vector<CellId> & cell_ids);
+
/**
* For each active cell, return in the output array to which subdomain (as
* given by the <tt>cell->subdomain_id()</tt> function) it belongs. The
const Triangulation<dim, spacedim> &triangulation,
const types::subdomain_id subdomain);
-
/**
* For a triangulation, return a mask that represents which of its vertices
* are "owned" by the current process in the same way as we talk about
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/thread_management.h>
+#include <deal.II/distributed/fully_distributed_tria.h>
+#include <deal.II/distributed/p4est_wrappers.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria.h>
}
+
+ template <int dim, int spacedim>
+ std::vector<types::subdomain_id>
+ get_subdomain_association(const Triangulation<dim, spacedim> &triangulation,
+ const std::vector<CellId> & cell_ids)
+ {
+ std::vector<types::subdomain_id> subdomain_ids;
+ subdomain_ids.reserve(cell_ids.size());
+
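+ // this query has not been implemented for
+ // parallel::fullydistributed::Triangulation yet: in contrast to the
+ // p4est case below, the global partitioning of active cells is not
+ // stored on every process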
+ if (dynamic_cast<
+ const parallel::fullydistributed::Triangulation<dim, spacedim> *>(
+ &triangulation) != nullptr)
+ {
+ Assert(false, ExcNotImplemented());
+ }
+ else if (const parallel::distributed::Triangulation<dim, spacedim>
+ *parallel_tria = dynamic_cast<
+ const parallel::distributed::Triangulation<dim, spacedim> *>(
+ &triangulation))
+ {
+#ifndef DEAL_II_WITH_P4EST
+ Assert(
+ false,
+ ExcMessage(
+ "You are attempting to use a functionality that is only available "
+ "if deal.II was configured to use p4est, but cmake did not find a "
+ "valid p4est library."));
+#else
+ // for parallel distributed triangulations, we will ask the p4est oracle
+ // about the global partitioning of active cells since this information
+ // is stored on every process
+ for (const auto &cell_id : cell_ids)
+ {
+ // find descendant from coarse quadrant
+ typename dealii::internal::p4est::types<dim>::quadrant p4est_cell,
+ p4est_children[GeometryInfo<dim>::max_children_per_cell];
+
+ dealii::internal::p4est::init_coarse_quadrant<dim>(p4est_cell);
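+ // a CellId encodes the path from its coarse cell to the cell it
+ // identifies as a sequence of child indices; replaying this path on
+ // p4est quadrants yields the corresponding descendant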
+ for (const auto &child_index : cell_id.get_child_indices())
+ {
+ dealii::internal::p4est::init_quadrant_children<dim>(
+ p4est_cell, p4est_children);
+ p4est_cell =
+ p4est_children[static_cast<unsigned int>(child_index)];
+ }
+
+ // find owning process, i.e., the subdomain id
+ const int owner =
+ dealii::internal::p4est::functions<dim>::comm_find_owner(
+ const_cast<typename dealii::internal::p4est::types<dim>::forest
+ *>(parallel_tria->get_p4est()),
+ cell_id.get_coarse_cell_id(),
+ &p4est_cell,
+ Utilities::MPI::this_mpi_process(
+ parallel_tria->get_communicator()));
+
+ Assert(owner >= 0, ExcMessage("p4est should know the owner."));
+
+ subdomain_ids.push_back(owner);
+ }
+#endif
+ }
+ else if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
+ dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
+ *>(&triangulation))
+ {
+ // for parallel shared triangulations, we need to access true subdomain
+ // ids which are also valid for artificial cells
+ const std::vector<types::subdomain_id> &true_subdomain_ids_of_cells =
+ shared_tria->get_true_subdomain_ids_of_cells();
+
+ for (const auto &cell_id : cell_ids)
+ {
+ const unsigned int active_cell_index =
+ shared_tria->create_cell_iterator(cell_id)->active_cell_index();
+ subdomain_ids.push_back(
+ true_subdomain_ids_of_cells[active_cell_index]);
+ }
+ }
+ else
+ {
+ // all remaining cases are handled like a serial triangulation, for
+ // which the subdomain information is directly available
+ for (const auto &cell_id : cell_ids)
+ {
+ subdomain_ids.push_back(
+ triangulation.create_cell_iterator(cell_id)->subdomain_id());
+ }
+ }
+
+ return subdomain_ids;
+ }
+
+
+
template <int dim, int spacedim>
void
get_subdomain_association(const Triangulation<dim, spacedim> &triangulation,
partition_multigrid_levels(
Triangulation<deal_II_dimension, deal_II_space_dimension> &);
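+  // the implementation of get_subdomain_association() accesses internal
+  // p4est data structures, which are only available for dim >= 2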
+# if deal_II_dimension >= 2
+ template std::vector<types::subdomain_id>
+ get_subdomain_association(
+ const Triangulation<deal_II_dimension, deal_II_space_dimension> &,
+ const std::vector<CellId> &);
+# endif
+
template void
get_subdomain_association(
const Triangulation<deal_II_dimension, deal_II_space_dimension> &,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// check GridTools::get_subdomain_association
+
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/tria_base.h>
+
+#include <deal.II/grid/cell_id.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+
+#include <memory>
+#include <vector>
+
+#include "../tests.h"
+
+#include "../test_grids.h"
+
+
+enum Type
+{
+ Shared,
+ Distributed
+};
+
+
+template <int dim>
+std::unique_ptr<parallel::TriangulationBase<dim>>
+create_triangulation(Type type)
+{
+ if (type == Type::Shared)
+ return std::make_unique<parallel::shared::Triangulation<dim>>(
+ MPI_COMM_WORLD, Triangulation<dim>::none, true);
+ else if (type == Type::Distributed)
+ return std::make_unique<parallel::distributed::Triangulation<dim>>(
+ MPI_COMM_WORLD);
+ else
+ return nullptr;
+}
+
+
+template <int dim>
+void
+test(parallel::TriangulationBase<dim> &tria)
+{
+ // ----- setup -----
+ tria.clear();
+ TestGrids::hyper_line(tria, 4);
+
+ // refine the first cell
+ for (const auto &cell : tria.active_cell_iterators())
+ if (cell->id().to_string() == "0_0:")
+ cell->set_refine_flag();
+ tria.execute_coarsening_and_refinement();
+
+ // ----- test -----
+ // gather the CellId objects of all active cells from all processes
+ std::vector<CellId> global_cell_ids;
+ global_cell_ids.reserve(tria.n_global_active_cells());
+ {
+ std::vector<CellId> local_cell_ids;
+ local_cell_ids.reserve(tria.n_active_cells());
+ for (const auto &cell : tria.active_cell_iterators())
+ if (cell->is_locally_owned())
+ local_cell_ids.push_back(cell->id());
+
+ std::vector<std::vector<CellId>> cell_ids_per_processor =
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, local_cell_ids);
+
+ for (const auto &cell_ids : cell_ids_per_processor)
+ global_cell_ids.insert(global_cell_ids.end(),
+ cell_ids.cbegin(),
+ cell_ids.cend());
+ }
+
+ // determine the owning subdomain of every CellId
+ std::vector<types::subdomain_id> subdomain_ids =
+ GridTools::get_subdomain_association(tria, global_cell_ids);
+
+ // ----- verify results -----
+ AssertDimension(tria.n_global_active_cells(), global_cell_ids.size());
+ AssertDimension(tria.n_global_active_cells(), subdomain_ids.size());
+
+ // check that each process actually owns the cells that have been
+ // assigned to it
+ for (unsigned int i = 0; i < tria.n_global_active_cells(); ++i)
+ if (tria.locally_owned_subdomain() == subdomain_ids[i])
+ {
+ const auto cell = tria.create_cell_iterator(global_cell_ids[i]);
+ Assert(cell->is_locally_owned(), ExcInternalError());
+ }
+
+ // check if all processors have the same list of subdomain ids
+ {
+ std::vector<std::vector<types::subdomain_id>> subdomain_ids_per_processor =
+ Utilities::MPI::gather(MPI_COMM_WORLD, subdomain_ids);
+
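+ // Utilities::MPI::gather collects the contributions of all ranks on
+ // root rank 0, so only this rank performs the comparison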
+ if (tria.locally_owned_subdomain() == 0)
+ for (unsigned int i = 1; i < subdomain_ids_per_processor.size(); ++i)
+ Assert(subdomain_ids_per_processor[0] == subdomain_ids_per_processor[i],
+ ExcInternalError());
+ }
+
+ deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ for (const auto &type : {Type::Shared, Type::Distributed})
+ {
+ deallog.push("2d");
+ test(*create_triangulation<2>(type));
+ deallog.pop();
+ deallog.push("3d");
+ test(*create_triangulation<3>(type));
+ deallog.pop();
+ }
+}
--- /dev/null
+DEAL:0:2d::OK
+DEAL:0:3d::OK
+DEAL:0:2d::OK
+DEAL:0:3d::OK
+
+DEAL:1:2d::OK
+DEAL:1:3d::OK
+DEAL:1:2d::OK
+DEAL:1:3d::OK
+
+DEAL:2:2d::OK
+DEAL:2:3d::OK
+DEAL:2:2d::OK
+DEAL:2:3d::OK
+
+DEAL:3:2d::OK
+DEAL:3:3d::OK
+DEAL:3:2d::OK
+DEAL:3:3d::OK
+