--- /dev/null
+Changed: The default partitioner for parallel::shared::Triangulation
+is now Zoltan if available, with fallbacks to METIS and then to
+z-order partitioning.
+<br>
+(Daniel Arndt, 2017/11/28)
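For users who relied on the previous METIS default, here is a minimal sketch (assuming deal.II is configured with METIS support; dim = 2 is chosen only for illustration) of requesting METIS explicitly through the constructor's settings argument:

    #include <deal.II/distributed/shared_tria.h>

    using namespace dealii;

    // Request METIS explicitly instead of the new partition_auto default.
    parallel::shared::Triangulation<2> tria(
      MPI_COMM_WORLD,
      Triangulation<2>::none,
      /*allow_artificial_cells=*/false,
      parallel::shared::Triangulation<2>::partition_metis);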
* Configuration flags for distributed Triangulations to be set in the
* constructor. Settings can be combined using bitwise OR.
*
- * The constructor requires that exactly one of <code>partition_metis</code>,
- * <code>partition_zorder</code>, and <code>partition_custom_signal</code> be set.
- * If no setting is given to the constructor, it will set <code>partition_metis</code>
- * by default.
+ * The constructor requires that exactly one of
+ * <code>partition_auto</code>, <code>partition_metis</code>,
+ * <code>partition_zorder</code>, <code>partition_zoltan</code>, and
+ * <code>partition_custom_signal</code> is set. If
+ * <code>partition_auto</code> is chosen, the triangulation uses
+ * <code>partition_zoltan</code> (if available), then
+ * <code>partition_metis</code> (if available), and finally
+ * <code>partition_zorder</code>.
*/
enum Settings
{
/**
- * Use METIS partitioner to partition active cells. This is the
- * default partioning method.
+     * Choose the partitioner depending on the enabled dependencies:
+     * if Zoltan is available, partition_zoltan is used; otherwise,
+     * if METIS is available, partition_metis is chosen; otherwise
+     * partition_zorder is used as the partitioning strategy.
+ */
+ partition_auto = 0x0,
+
+ /**
+ * Use METIS partitioner to partition active cells.
*/
     partition_metis = 0x1,

     /**
      * Partition active cells using a z-order (space-filling curve)
      * scheme.
      */
     partition_zorder = 0x2,
+
+ /**
+ * Use Zoltan to partition active cells.
+ */
+ partition_zoltan = 0x3,
+
/**
* Partition cells using a custom, user defined function. This is
* accomplished by connecting the post_refinement signal to the
                   const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid =
(dealii::Triangulation<dim,spacedim>::none),
const bool allow_artificial_cells = false,
- const Settings settings = partition_metis);
+ const Settings settings = partition_auto);
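With partition_auto as the new default, a call that omits the settings argument resolves the partitioner from the enabled dependencies; a minimal usage sketch:

    // Equivalent to passing partition_auto: uses Zoltan if available,
    // then METIS, then z-order partitioning.
    dealii::parallel::shared::Triangulation<2> tria(MPI_COMM_WORLD);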
/**
* Destructor.
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria.h>
settings (settings),
allow_artificial_cells(allow_artificial_cells)
{
- Assert((settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_metis ||
- (settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_zorder ||
- (settings & (partition_metis | partition_zorder | partition_custom_signal)) == partition_custom_signal,
- ExcMessage ("Settings must contain exactly one type of active cell partitioning scheme."))
+ const auto partition_settings
+ = (partition_zoltan | partition_metis |
+ partition_zorder | partition_custom_signal) & settings;
+ Assert(partition_settings == partition_auto ||
+ partition_settings == partition_metis ||
+ partition_settings == partition_zoltan ||
+ partition_settings == partition_zorder ||
+ partition_settings == partition_custom_signal,
+         ExcMessage ("Settings must contain exactly one type of active cell partitioning scheme."));
if (settings & construct_multigrid_hierarchy)
Assert(allow_artificial_cells,
"agree on the number of active cells."))
#endif
+ auto partition_settings
+ = (partition_zoltan | partition_metis |
+ partition_zorder | partition_custom_signal) & settings;
+ if (partition_settings == partition_auto)
+#ifdef DEAL_II_WITH_ZOLTAN
+ partition_settings = partition_zoltan;
+#elif defined DEAL_II_WITH_METIS
+ partition_settings = partition_metis;
+#else
+ partition_settings = partition_zorder;
+#endif
- if (settings & partition_metis)
+ if (partition_settings == partition_zoltan)
+ {
+#ifndef DEAL_II_WITH_ZOLTAN
+ AssertThrow (false,
+ ExcMessage("Choosing 'partition_zoltan' requires the library "
+ "to be compiled with support for Zoltan! "
+ "Instead, you might use 'partition_auto' to select "
+ "a partitioning algorithm that is supported "
+ "by your current configuration."));
+#else
+ GridTools::partition_triangulation (this->n_subdomains, *this,
+ SparsityTools::Partitioner::zoltan);
+#endif
+ }
+ else if (partition_settings == partition_metis)
{
- dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+#ifndef DEAL_II_WITH_METIS
+ AssertThrow (false,
+ ExcMessage("Choosing 'partition_metis' requires the library "
+ "to be compiled with support for METIS! "
+ "Instead, you might use 'partition_auto' to select "
+ "a partitioning algorithm that is supported "
+ "by your current configuration."));
+#else
+ GridTools::partition_triangulation (this->n_subdomains, *this,
+ SparsityTools::Partitioner::metis);
+#endif
}
- else if (settings & partition_zorder)
+ else if (partition_settings == partition_zorder)
{
- dealii::GridTools::partition_triangulation_zorder (this->n_subdomains, *this);
+ GridTools::partition_triangulation_zorder (this->n_subdomains, *this);
}
- else if (settings & partition_custom_signal)
+ else if (partition_settings == partition_custom_signal)
{
// User partitions mesh manually
}
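For the partition_custom_signal branch above, a sketch of user-driven partitioning: connect a slot to the triangulation's post_refinement signal that assigns subdomain ids. The round-robin assignment below is purely illustrative, not a scheme the library provides:

    using namespace dealii;

    parallel::shared::Triangulation<2> tria(
      MPI_COMM_WORLD,
      Triangulation<2>::none,
      /*allow_artificial_cells=*/false,
      parallel::shared::Triangulation<2>::partition_custom_signal);

    // Connect before creating or refining the mesh so the slot runs
    // every time the triangulation changes.
    tria.signals.post_refinement.connect(
      [&tria]()
      {
        const unsigned int n_ranks =
          Utilities::MPI::n_mpi_processes(tria.get_communicator());
        unsigned int i = 0;
        for (const auto &cell : tria.active_cell_iterators())
          cell->set_subdomain_id((i++) % n_ranks);
      });

Any subsequent mesh creation (e.g. via GridGenerator) or call to refine_global() then triggers the slot.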
<< " n_active_cells: " << tr1.n_active_cells() << "\n"
<< std::endl;
- parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
tr2.copy_triangulation(tr1);
deallog
if (myid == 0)
deallog << "hyper_cube" << std::endl;
- parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
GridGenerator::hyper_cube(tr);
tr.refine_global(1);
typename Triangulation<dim>::active_cell_iterator
void test()
{
parallel::shared::Triangulation<dim>
- triangulation (MPI_COMM_WORLD,
- ::Triangulation<dim>::none,
- false,
- parallel::shared::Triangulation<dim>::partition_zorder);
+ triangulation (MPI_COMM_WORLD);
GridGenerator::hyper_cube(triangulation);
triangulation.refine_global (3);
--- /dev/null
+
+DEAL:0:1d::n_dofs: 9
+DEAL:0:1d::n_locally_owned_dofs: 3
+DEAL:0:2d::n_dofs: 81
+DEAL:0:2d::n_locally_owned_dofs: 31
+DEAL:0:3d::n_dofs: 729
+DEAL:0:3d::n_locally_owned_dofs: 297
+
+DEAL:1:1d::n_dofs: 9
+DEAL:1:1d::n_locally_owned_dofs: 4
+DEAL:1:2d::n_dofs: 81
+DEAL:1:2d::n_locally_owned_dofs: 30
+DEAL:1:3d::n_dofs: 729
+DEAL:1:3d::n_locally_owned_dofs: 240
+
+
+DEAL:2:1d::n_dofs: 9
+DEAL:2:1d::n_locally_owned_dofs: 2
+DEAL:2:2d::n_dofs: 81
+DEAL:2:2d::n_locally_owned_dofs: 20
+DEAL:2:3d::n_dofs: 729
+DEAL:2:3d::n_locally_owned_dofs: 192
+
--- /dev/null
+
+DEAL:0:1d::n_dofs: 9
+DEAL:0:1d::n_locally_owned_dofs: 3
+DEAL:0:2d::n_dofs: 81
+DEAL:0:2d::n_locally_owned_dofs: 31
+DEAL:0:3d::n_dofs: 729
+DEAL:0:3d::n_locally_owned_dofs: 297
+
+DEAL:1:1d::n_dofs: 9
+DEAL:1:1d::n_locally_owned_dofs: 4
+DEAL:1:2d::n_dofs: 81
+DEAL:1:2d::n_locally_owned_dofs: 30
+DEAL:1:3d::n_dofs: 729
+DEAL:1:3d::n_locally_owned_dofs: 240
+
+
+DEAL:2:1d::n_dofs: 9
+DEAL:2:1d::n_locally_owned_dofs: 2
+DEAL:2:2d::n_dofs: 81
+DEAL:2:2d::n_locally_owned_dofs: 20
+DEAL:2:3d::n_dofs: 729
+DEAL:2:3d::n_locally_owned_dofs: 192
+
<< " n_active_cells: " << tr1.n_active_cells() << "\n"
<< std::endl;
- parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
tr2.copy_triangulation(tr1);
deallog
<< " n_global_active_cells: " << tr2.n_global_active_cells() << "\n"
<< std::endl;
- parallel::shared::Triangulation<dim> tr3(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim> tr3(MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
tr3.copy_triangulation(tr2);
deallog
void test()
{
parallel::shared::Triangulation<dim>
- triangulation (MPI_COMM_WORLD);
+ triangulation (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
FESystem<dim> fe (FE_Q<dim>(3),2,
FE_DGQ<dim>(1),1);
void test()
{
parallel::shared::Triangulation<dim>
- triangulation (MPI_COMM_WORLD);
+ triangulation (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
FESystem<dim> fe (FE_Q<dim>(3),2,
FE_DGQ<dim>(1),1);
{
parallel::shared::Triangulation<dim>
triangulation (MPI_COMM_WORLD,
- Triangulation<dim>::none,
- /*artificial*/true);
+ ::Triangulation<dim>::none,
+ /*artificial*/true,
+ parallel::shared::Triangulation<dim>::partition_metis);
FESystem<dim> fe (FE_Q<dim>(3),2,
FE_DGQ<dim>(1),1);
{
parallel::shared::Triangulation<dim>
triangulation (MPI_COMM_WORLD,
- Triangulation<dim>::none,
- /*artificial*/true);
+ ::Triangulation<dim>::none,
+ /*artificial*/true,
+ parallel::shared::Triangulation<dim>::partition_zorder);
FESystem<dim> fe (FE_Q<dim>(3),2,
FE_DGQ<dim>(1),1);
// ---------------------------------------------------------------------
-// distibute dofs on a shared triangulation. Tests the change
+// distribute dofs on a shared triangulation. Tests the change
// from coin_flip to smallest proc index method of distribution
// along partition interface
void test()
{
parallel::shared::Triangulation<dim>
- triangulation (MPI_COMM_WORLD);
+ triangulation (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_zorder);
hp::FECollection<dim> fe;
fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
void test()
{
parallel::shared::Triangulation<dim>
- triangulation (MPI_COMM_WORLD);
+ triangulation (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_zorder);
hp::FECollection<dim> fe;
fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
{
parallel::shared::Triangulation<dim>
triangulation (MPI_COMM_WORLD,
- Triangulation<dim>::none,
- /*artificial*/true);
+ ::Triangulation<dim>::none,
+ /*artificial*/true,
+ parallel::shared::Triangulation<dim>::partition_zorder);
hp::FECollection<dim> fe;
fe.push_back(FESystem<dim> (FE_Q<dim>(3),2,
{
parallel::shared::Triangulation<dim>
triangulation (MPI_COMM_WORLD,
- Triangulation<dim>::none,
- /*artificial*/true);
+ ::Triangulation<dim>::none,
+ /*artificial*/true,
+ parallel::shared::Triangulation<dim>::partition_zorder);
hp::FECollection<dim> fe;
fe.push_back (FESystem<dim> (FE_Q<dim>(3),2,
template <int dim>
void test()
{
- parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim>
+ tr (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
AssertThrow( tr.with_artificial_cells() == false,
ExcInternalError());
template <int dim>
void test()
{
- parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD,
- Triangulation<dim>::none,
- /*artificial*/true);
+ parallel::shared::Triangulation<dim>
+ tr (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ /*artificial*/true,
+ parallel::shared::Triangulation<dim>::partition_metis);
AssertThrow( tr.with_artificial_cells() == true,
ExcInternalError());
tr1.save(oa, 0);
}
- parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ parallel::shared::Triangulation<dim>
+ tr2 (MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ false,
+ parallel::shared::Triangulation<dim>::partition_metis);
{
std::istringstream iss(oss.str());
boost::archive::text_iarchive ia(iss, boost::archive::no_header);