const Triangulation<dim, spacedim> & tria,
const LinearAlgebra::distributed::Vector<double> &partition)
{
+#ifdef DEAL_II_WITH_MPI
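+ // a process that is not part of the communicator of the
+ // triangulation must not own any entries of the partition vector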
+ if (tria.get_communicator() == MPI_COMM_NULL)
+ AssertDimension(partition.local_size(), 0);
+#endif
+
// 1) determine processes owning locally owned cells
const std::vector<unsigned int> relevant_processes = [&]() {
std::set<unsigned int> relevant_processes;
});
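+ // run the consensus algorithm on the communicator of the partition
+ // vector, which is valid on all processes; the communicator of the
+ // triangulation may be MPI_COMM_NULL on some of them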
dealii::Utilities::MPI::ConsensusAlgorithms::Selector<char, char>(
- process, tria.get_communicator())
+ process, partition.get_mpi_communicator())
.run();
// remove redundant entries
description_merged.reduce();
// convert to actual description
- return description_merged.convert(tria.get_communicator(),
+ return description_merged.convert(partition.get_mpi_communicator(),
tria.get_mesh_smoothing());
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test that
+// TriangulationDescription::Utilities::create_description_from_triangulation()
+// also works for a parallel::distributed::Triangulation set up on a
+// sub-communicator.
+
+#include <deal.II/base/mpi_consensus_algorithms.h>
+
+#include <deal.II/distributed/fully_distributed_tria.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria_description.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <deal.II/numerics/data_out.h>
+
+#include "./tests.h"
+
+using namespace dealii;
+
+MPI_Comm
+create_sub_comm(const MPI_Comm &comm, const unsigned int size)
+{
+ const auto rank = Utilities::MPI::this_mpi_process(comm);
+
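+ // the first 'size' ranks obtain color 1 and form the sub-communicator,
+ // all other ranks obtain color 0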
+ int color = rank < size;
+
+ MPI_Comm sub_comm;
+ MPI_Comm_split(comm, color, rank, &sub_comm);
+
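+ // ranks outside the sub-communicator free their (color-0) communicator
+ // and return MPI_COMM_NULL instead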
+ if (rank < size)
+ return sub_comm;
+ else
+ {
+ MPI_Comm_free(&sub_comm);
+ return MPI_COMM_NULL;
+ }
+}
+
+
+
+template <int dim, int spacedim>
+LinearAlgebra::distributed::Vector<double>
+partition_distributed_triangulation(const Triangulation<dim, spacedim> &tria_in,
+ const MPI_Comm & comm)
+{
+ const auto comm_tria = tria_in.get_communicator();
+
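+ // determine the global number of active cells on all processes of comm;
+ // processes outside the communicator of the triangulation contribute zero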
+ const auto n_global_active_cells = Utilities::MPI::max(
+ comm_tria == MPI_COMM_NULL ? 0 : tria_in.n_global_active_cells(), comm);
+
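+ // processes that are not part of the communicator of the triangulation
+ // return a partition vector without any locally owned entries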
+ if (comm_tria == MPI_COMM_NULL)
+ {
+ LinearAlgebra::distributed::Vector<double> partition{
+ IndexSet(n_global_active_cells), IndexSet(n_global_active_cells), comm};
+ partition.update_ghost_values();
+ return partition;
+ }
+
+ const auto tria =
+ dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(&tria_in);
+
+ Assert(tria, ExcNotImplemented());
+
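+ // set up a ghosted partition vector based on the global active-cell
+ // index partitioner of the distributed triangulation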
+ LinearAlgebra::distributed::Vector<double> partition(
+ tria->global_active_cell_index_partitioner().lock()->locally_owned_range(),
+ tria->global_active_cell_index_partitioner().lock()->ghost_indices(),
+ comm);
+
+ const unsigned int n_partitions = Utilities::MPI::n_mpi_processes(comm);
+
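+ // assign each locally owned cell to a partition by splitting the global
+ // active-cell index range into n_partitions chunks of (roughly) equal size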
+ for (const auto &cell : tria_in.active_cell_iterators())
+ if (cell->is_locally_owned())
+ partition[cell->global_active_cell_index()] =
+ cell->global_active_cell_index() * n_partitions /
+ tria_in.n_global_active_cells();
+
+ partition.update_ghost_values();
+
+ return partition;
+}
+
+
+template <int dim>
+void
+test(const MPI_Comm comm, const unsigned int n_partitions)
+{
+ auto sub_comm = create_sub_comm(comm, n_partitions);
+
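+ // create the source triangulation only on the ranks that are part of the
+ // sub-communicator; on all other ranks it remains empty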
+ parallel::distributed::Triangulation<dim> tria(sub_comm);
+
+ if (sub_comm != MPI_COMM_NULL)
+ {
+ GridGenerator::subdivided_hyper_cube(tria, 4);
+ tria.refine_global(3);
+ }
+
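+ // compute the new partitioning as a vector defined on the full
+ // communicator, also on ranks on which the triangulation is empty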
+ const auto partition_new = partition_distributed_triangulation(tria, comm);
+ partition_new.update_ghost_values();
+
+ // repartition triangulation
+ const auto construction_data =
+ TriangulationDescription::Utilities::create_description_from_triangulation(
+ tria, partition_new);
+
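+ // create the fully distributed triangulation on the full communicator
+ // from the description computed above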
+ parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
+ tria_pft.create_triangulation(construction_data);
+
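+ // distribute degrees of freedom to verify that the repartitioned
+ // triangulation is usable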
+ FE_Q<dim> fe(2);
+ DoFHandler<dim> dof_handler(tria_pft);
+ dof_handler.distribute_dofs(fe);
+
+ // print statistics
+ print_statistics(tria_pft);
+ print_statistics(dof_handler);
+
+ if (sub_comm != MPI_COMM_NULL)
+ MPI_Comm_free(&sub_comm);
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
+ MPILogInitAll all;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+
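+ // test with all processes being part of the sub-communicator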
+ deallog.push("all");
+ test<2>(comm, Utilities::MPI::n_mpi_processes(comm));
+ deallog.pop();
+
+ // test that we can eliminate processes
+ deallog.push("reduced");
+ test<2>(comm,
+ std::max<unsigned int>(1, Utilities::MPI::n_mpi_processes(comm) / 2));
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:all::n_levels: 4
+DEAL:0:all::n_cells: 469
+DEAL:0:all::n_active_cells: 354
+DEAL:0:all::
+DEAL:0:all::n_dofs: 4225
+DEAL:0:all::n_locally_owned_dofs: 1089
+DEAL:0:all::
+DEAL:0:reduced::n_levels: 4
+DEAL:0:reduced::n_cells: 456
+DEAL:0:reduced::n_active_cells: 344
+DEAL:0:reduced::
+DEAL:0:reduced::n_dofs: 4225
+DEAL:0:reduced::n_locally_owned_dofs: 1105
+DEAL:0:reduced::
+
+DEAL:1:all::n_levels: 4
+DEAL:1:all::n_cells: 469
+DEAL:1:all::n_active_cells: 354
+DEAL:1:all::
+DEAL:1:all::n_dofs: 4225
+DEAL:1:all::n_locally_owned_dofs: 1056
+DEAL:1:all::
+DEAL:1:reduced::n_levels: 4
+DEAL:1:reduced::n_cells: 572
+DEAL:1:reduced::n_active_cells: 432
+DEAL:1:reduced::
+DEAL:1:reduced::n_dofs: 4225
+DEAL:1:reduced::n_locally_owned_dofs: 1040
+DEAL:1:reduced::
+
+
+DEAL:2:all::n_levels: 4
+DEAL:2:all::n_cells: 469
+DEAL:2:all::n_active_cells: 354
+DEAL:2:all::
+DEAL:2:all::n_dofs: 4225
+DEAL:2:all::n_locally_owned_dofs: 1056
+DEAL:2:all::
+DEAL:2:reduced::n_levels: 4
+DEAL:2:reduced::n_cells: 572
+DEAL:2:reduced::n_active_cells: 432
+DEAL:2:reduced::
+DEAL:2:reduced::n_dofs: 4225
+DEAL:2:reduced::n_locally_owned_dofs: 1040
+DEAL:2:reduced::
+
+
+DEAL:3:all::n_levels: 4
+DEAL:3:all::n_cells: 469
+DEAL:3:all::n_active_cells: 354
+DEAL:3:all::
+DEAL:3:all::n_dofs: 4225
+DEAL:3:all::n_locally_owned_dofs: 1024
+DEAL:3:all::
+DEAL:3:reduced::n_levels: 4
+DEAL:3:reduced::n_cells: 456
+DEAL:3:reduced::n_active_cells: 344
+DEAL:3:reduced::
+DEAL:3:reduced::n_dofs: 4225
+DEAL:3:reduced::n_locally_owned_dofs: 1040
+DEAL:3:reduced::
+