--- /dev/null
+New: Each cell is assigned a globally unique active cell index and (if requested)
+a level cell index. These indices are integers enumerated contiguously within
+each subdomain of the mesh.
+Users can query locally-owned and ghost cells for their indices via
+CellAccessor::global_active_cell_index() or
+CellAccessor::global_level_cell_index(). These indices are managed
+automatically by the Triangulation classes.
+Furthermore, triangulations deriving from parallel::TriangulationBase provide partitioners
+for these indices, which can be used to set up ghosted vectors with one entry per
+cell.
+<br>
+(Peter Munch, 2020/06/12)
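
For orientation, here is a minimal sketch of how the new active-cell
partitioner could be used to set up such a ghosted cell-data vector. It
assumes an MPI-enabled deal.II build with p4est; the 2d mesh and the use of
the cell diameter as per-cell data are illustrative only. Since this patch
returns the partitioner by const reference, the sketch copies it into the
std::shared_ptr expected by LinearAlgebra::distributed::Vector:

```cpp
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/lac/la_parallel_vector.h>

#include <memory>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  // One vector entry per active cell, ghosted according to the
  // triangulation's active cell index partitioner.
  const auto partitioner = std::make_shared<const Utilities::MPI::Partitioner>(
    tria.global_active_cell_index_partitioner());
  LinearAlgebra::distributed::Vector<double> cell_data(partitioner);

  // Write per-cell data on locally-owned cells (the diameter is just a
  // stand-in for any per-cell quantity) ...
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      cell_data[cell->global_active_cell_index()] = cell->diameter();

  // ... and make those values readable on ghost cells of other processes.
  cell_data.update_ghost_values();
}
```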
#include <deal.II/base/config.h>
#include <deal.II/base/mpi.h>
+#include <deal.II/base/partitioner.h>
#include <deal.II/base/smartpointer.h>
#include <deal.II/base/subscriptor.h>
#include <deal.II/base/template_constraints.h>
const std::set<types::subdomain_id> &
level_ghost_owners() const;
+ /**
+ * Return partitioner for the global indices of the cells on the active
+ * level of the triangulation.
+ */
+ const Utilities::MPI::Partitioner &
+ global_active_cell_index_partitioner() const;
+
+ /**
+   * Return partitioner for the global indices of the cells on the given
+   * @p level of the triangulation.
+ */
+ const Utilities::MPI::Partitioner &
+ global_level_cell_index_partitioner(const unsigned int level) const;
+
/**
* Return a map that, for each vertex, lists all the processors whose
* subdomains are adjacent to that vertex.
*/
std::set<types::subdomain_id> level_ghost_owners;
+ /**
+ * Partitioner for the global active cell indices.
+ */
+ Utilities::MPI::Partitioner active_cell_index_partitioner;
+
+ /**
+ * Partitioner for the global level cell indices for each level.
+ */
+ std::vector<Utilities::MPI::Partitioner> level_cell_index_partitioners;
+
NumberCache();
};
*/
virtual void
update_number_cache();
+
+ /**
+ * Reset global active cell indices and global level cell indices.
+ */
+ void
+ reset_global_cell_indices();
};
/**
void
reset_active_cell_indices();
+ /**
+   * Reset global active cell indices and global level cell indices.
+ */
+ void
+ reset_global_cell_indices();
+
/**
* Refine all cells on all levels which were previously flagged for
* refinement.
template <typename Accessor>
class TriaActiveIterator;
+namespace parallel
+{
+ template <int dim, int spacedim>
+ class TriangulationBase;
+}
+
template <int dim, int spacedim>
class Manifold;
#endif
* @}
*/
+ /**
+ * Return global active cell index for an active cell.
+ */
+ types::global_cell_index
+ global_active_cell_index() const;
+
+ /**
+ * Return global level cell index for a level cell.
+ */
+ types::global_cell_index
+ global_level_cell_index() const;
+
/**
* @name Dealing with codim 1 cell orientation
*/
* refinement.
*/
void
- set_active_cell_index(const unsigned int active_cell_index);
+ set_active_cell_index(const unsigned int active_cell_index) const;
+
+ /**
+ * Set global active cell index for a cell.
+ */
+ void
+ set_global_active_cell_index(const types::global_cell_index index) const;
+
+ /**
+ * Set global level cell index for a level cell.
+ */
+ void
+ set_global_level_cell_index(const types::global_cell_index index) const;
/**
* Set the parent of a cell.
template <int, int>
friend class Triangulation;
+ template <int, int>
+ friend class parallel::TriangulationBase;
+
friend struct dealii::internal::TriangulationImplementation::Implementation;
};
*/
std::vector<unsigned int> active_cell_indices;
+ /**
+ * Global cell index of each active cell.
+ */
+ std::vector<types::global_cell_index> global_active_cell_indices;
+
+ /**
+ * Global cell index of each cell on the given level.
+ */
+ std::vector<types::global_cell_index> global_level_cell_indices;
+
/**
* Levels and indices of the neighbors of the cells. Convention is, that
* the neighbors of the cell with index @p i are stored in the fields
#include <deal.II/base/logstream.h>
#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>
#include <deal.II/distributed/tria_base.h>
Utilities::MPI::n_mpi_processes(this->mpi_communicator),
ExcInternalError());
}
+
+  // reset global cell indices
+ this->reset_global_cell_indices();
}
#else
+ template <int dim, int spacedim>
+ void
+ TriangulationBase<dim, spacedim>::reset_global_cell_indices()
+ {
+#ifndef DEAL_II_WITH_MPI
+ Assert(false, ExcNeedsMPI());
+#else
+
+ // currently only implemented for distributed triangulations
+ if (dynamic_cast<const parallel::DistributedTriangulationBase<dim, spacedim>
+ *>(this) == nullptr)
+ return;
+
+ // 1) determine number of active locally-owned cells
+ const types::global_cell_index n_locally_owned_cells =
+ this->n_locally_owned_active_cells();
+
+ // 2) determine the offset of each process
+ types::global_cell_index cell_index = 0;
+
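+    // MPI_Exscan computes an exclusive prefix sum over the ranks: with
+    // local counts {4, 4, 4, 4} on ranks 0..3, the offsets become
+    // {0, 4, 8, 12}. The result on rank 0 is undefined by the MPI
+    // standard, which is why cell_index is pre-initialized to 0 above.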
+ MPI_Exscan(&n_locally_owned_cells,
+ &cell_index,
+ 1,
+ Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
+ MPI_SUM,
+ this->mpi_communicator);
+
+ // 3) give global indices to locally-owned cells and mark all other cells as
+ // invalid
+ for (const auto &cell : this->active_cell_iterators())
+ if (cell->is_locally_owned())
+ cell->set_global_active_cell_index(cell_index++);
+ else
+ cell->set_global_active_cell_index(numbers::invalid_dof_index);
+
+ // 4) determine the global indices of ghost cells
+ GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
+ *this,
+ [](const auto &cell) { return cell->global_active_cell_index(); },
+ [](const auto &cell, const auto &id) {
+ cell->set_global_active_cell_index(id);
+ });
+
+ // 5) set up new partitioner
+ IndexSet is_local(this->n_global_active_cells());
+ IndexSet is_ghost(this->n_global_active_cells());
+
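+    // is_local receives the indices of the cells owned by this process,
+    // is_ghost the indices of its ghost cells; together they define the
+    // ghost-exchange pattern of the partitioner.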
+ for (const auto &cell : this->active_cell_iterators())
+ if (!cell->is_artificial())
+ {
+ const auto index = cell->global_active_cell_index();
+
+ if (index == numbers::invalid_dof_index)
+ continue;
+
+ if (cell->is_locally_owned())
+ is_local.add_index(index);
+ else
+ is_ghost.add_index(index);
+ }
+
+ number_cache.active_cell_index_partitioner =
+ Utilities::MPI::Partitioner(is_local, is_ghost, this->mpi_communicator);
+
+ // 6) proceed with multigrid levels if requested
+ if (this->is_multilevel_hierarchy_constructed() == true)
+ {
+ // 1) determine number of locally-owned cells on levels
+ std::vector<types::global_cell_index> n_locally_owned_cells(
+ this->n_global_levels(), 0);
+
+      for (const auto &cell : this->cell_iterators())
+ if (cell->level_subdomain_id() == this->locally_owned_subdomain())
+ n_locally_owned_cells[cell->level()]++;
+
+ // 2) determine the offset of each process
+ std::vector<types::global_cell_index> cell_index(
+ this->n_global_levels(), 0);
+
+ MPI_Exscan(n_locally_owned_cells.data(),
+ cell_index.data(),
+ this->n_global_levels(),
+ Utilities::MPI::internal::mpi_type_id(
+ n_locally_owned_cells.data()),
+ MPI_SUM,
+ this->mpi_communicator);
+
+      // 3) determine the global number of cells on each level
+ std::vector<types::global_cell_index> n_cells_level(
+ this->n_global_levels(), 0);
+
+ for (unsigned int l = 0; l < this->n_global_levels(); ++l)
+ n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
+
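+      // the inclusive prefix sum computed above is complete only on the
+      // last rank, so broadcast the global number of cells per level
+      // from there to all other processes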
+ MPI_Bcast(n_cells_level.data(),
+ this->n_global_levels(),
+ Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
+ this->n_subdomains - 1,
+ this->mpi_communicator);
+
+      // 4) give global indices to locally-owned cells on each level and
+      //    mark all other cells as invalid
+      for (const auto &cell : this->cell_iterators())
+ if (cell->level_subdomain_id() == this->locally_owned_subdomain())
+ cell->set_global_level_cell_index(cell_index[cell->level()]++);
+ else
+ cell->set_global_level_cell_index(numbers::invalid_dof_index);
+
+      // 5) determine the global indices of ghost cells on each level
+ GridTools::exchange_cell_data_to_level_ghosts<
+ types::global_cell_index,
+ dealii::Triangulation<dim, spacedim>>(
+ *this,
+ [](const auto &cell) { return cell->global_level_cell_index(); },
+        [](const auto &cell, const auto &id) {
+          cell->set_global_level_cell_index(id);
+        });
+
+ number_cache.level_cell_index_partitioners.resize(
+ this->n_global_levels());
+
+ // 6) set up cell partitioners for each level
+ for (unsigned int l = 0; l < this->n_global_levels(); ++l)
+ {
+ IndexSet is_local(n_cells_level[l]);
+ IndexSet is_ghost(n_cells_level[l]);
+
+ for (const auto &cell : this->cell_iterators_on_level(l))
+ if (cell->level_subdomain_id() !=
+ dealii::numbers::artificial_subdomain_id)
+ {
+ const auto index = cell->global_level_cell_index();
+
+ if (index == numbers::invalid_dof_index)
+ continue;
+
+ if (cell->level_subdomain_id() ==
+ this->locally_owned_subdomain())
+ is_local.add_index(index);
+ else
+ is_ghost.add_index(index);
+ }
+
+ number_cache.level_cell_index_partitioners[l] =
+ Utilities::MPI::Partitioner(is_local,
+ is_ghost,
+ this->mpi_communicator);
+ }
+ }
+
+#endif
+ }
+
+
+
+ template <int dim, int spacedim>
+ const Utilities::MPI::Partitioner &
+ TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner() const
+ {
+ return number_cache.active_cell_index_partitioner;
+ }
+
+ template <int dim, int spacedim>
+ const Utilities::MPI::Partitioner &
+ TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
+ const unsigned int level) const
+ {
+ Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
+ AssertIndexRange(level, this->n_global_levels());
+
+ return number_cache.level_cell_index_partitioners[level];
+ }
+
template <int dim, int spacedim>
DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
MPI_Comm mpi_communicator,
total_cells - tria_level.level_subdomain_ids.size(),
0);
+      // fill the new entries with an invalid index; the actual values are
+      // assigned in reset_global_cell_indices()
+      tria_level.global_active_cell_indices.reserve(total_cells);
+      tria_level.global_active_cell_indices.insert(
+        tria_level.global_active_cell_indices.end(),
+        total_cells - tria_level.global_active_cell_indices.size(),
+        numbers::invalid_dof_index);
+
+      tria_level.global_level_cell_indices.reserve(total_cells);
+      tria_level.global_level_cell_indices.insert(
+        tria_level.global_level_cell_indices.end(),
+        total_cells - tria_level.global_level_cell_indices.size(),
+        numbers::invalid_dof_index);
+
if (dimension < space_dimension)
{
tria_level.direction_flags.reserve(total_cells);
if (orientation_needed)
level.face_orientations.assign(size * faces_per_cell, -1);
+
+      level.global_active_cell_indices.assign(size,
+                                              numbers::invalid_dof_index);
+      level.global_level_cell_indices.assign(size,
+                                             numbers::invalid_dof_index);
}
internal::TriangulationImplementation::Implementation::compute_number_cache(
*this, levels.size(), number_cache);
reset_active_cell_indices();
+ reset_global_cell_indices();
// now verify that there are indeed no distorted cells. as per the
// documentation of this class, we first collect all distorted cells
update_neighbors(*this);
reset_active_cell_indices();
+  reset_global_cell_indices();
+
// Inform all listeners about end of refinement.
signals.post_refinement();
}
+
+template <int dim, int spacedim>
+void
+Triangulation<dim, spacedim>::reset_global_cell_indices()
+{
+  // in the serial case the global indices simply coincide with the local
+  // enumeration of the active cells and of the cells on each level
+  {
+    types::global_cell_index cell_index = 0;
+    for (const auto &cell : active_cell_iterators())
+      cell->set_global_active_cell_index(cell_index++);
+  }
+
+ for (unsigned int l = 0; l < levels.size(); ++l)
+ {
+ types::global_cell_index cell_index = 0;
+ for (const auto &cell : cell_iterators_on_level(l))
+ cell->set_global_level_cell_index(cell_index++);
+ }
+}
+
+
+
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::update_periodic_face_map()
-template <int dim, int spacedim>
-void
-CellAccessor<dim, spacedim>::set_active_cell_index(
- const unsigned int active_cell_index)
-{
- // set the active cell index. allow setting it also for non-active (and
- // unused) cells to allow resetting the index after refinement
- this->tria->levels[this->present_level]
- ->active_cell_indices[this->present_index] = active_cell_index;
-}
-
-
-
template <int dim, int spacedim>
void
CellAccessor<dim, spacedim>::set_parent(const unsigned int parent_index)
+template <int dim, int spacedim>
+void
+CellAccessor<dim, spacedim>::set_active_cell_index(
+ const unsigned int active_cell_index) const
+{
+ this->tria->levels[this->present_level]
+ ->active_cell_indices[this->present_index] = active_cell_index;
+}
+
+
+
+template <int dim, int spacedim>
+void
+CellAccessor<dim, spacedim>::set_global_active_cell_index(
+ const types::global_cell_index index) const
+{
+ this->tria->levels[this->present_level]
+ ->global_active_cell_indices[this->present_index] = index;
+}
+
+
+
+template <int dim, int spacedim>
+types::global_cell_index
+CellAccessor<dim, spacedim>::global_active_cell_index() const
+{
+ Assert(this->used(), TriaAccessorExceptions::ExcCellNotUsed());
+ Assert(this->is_active(),
+ ExcMessage(
+ "global_active_cell_index() can only be called on active cells!"));
+
+ return this->tria->levels[this->present_level]
+ ->global_active_cell_indices[this->present_index];
+}
+
+
+
+template <int dim, int spacedim>
+void
+CellAccessor<dim, spacedim>::set_global_level_cell_index(
+ const types::global_cell_index index) const
+{
+ this->tria->levels[this->present_level]
+ ->global_level_cell_indices[this->present_index] = index;
+}
+
+
+
+template <int dim, int spacedim>
+types::global_cell_index
+CellAccessor<dim, spacedim>::global_level_cell_index() const
+{
+ return this->tria->levels[this->present_level]
+ ->global_level_cell_indices[this->present_index];
+}
+
+
+
template <int dim, int spacedim>
TriaIterator<CellAccessor<dim, spacedim>>
CellAccessor<dim, spacedim>::parent() const
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Check global active cell indices of a parallel::distributed::Triangulation.
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_description.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim>
+void
+test(int n_refinements, MPI_Comm comm)
+{
+ parallel::distributed::Triangulation<dim> tria(comm);
+ GridGenerator::hyper_cube(tria);
+ tria.refine_global(n_refinements);
+
+  for (const auto &cell : tria.active_cell_iterators())
+ if (!cell->is_artificial())
+ deallog << cell->id() << " -> " << cell->subdomain_id() << " "
+ << cell->global_active_cell_index() << std::endl;
+
+ const auto &part = tria.global_active_cell_index_partitioner();
+
+ part.locally_owned_range().print(deallog);
+ part.ghost_indices().print(deallog);
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ const int n_refinements = 2;
+ const MPI_Comm comm = MPI_COMM_WORLD;
+
+ {
+ deallog.push("2d");
+ test<2>(n_refinements, comm);
+ deallog.pop();
+ }
+ if (false)
+ {
+ deallog.push("3d");
+ test<3>(n_refinements, comm);
+ deallog.pop();
+ }
+}
--- /dev/null
+
+DEAL:0:2d::0_2:00 -> 0 0
+DEAL:0:2d::0_2:01 -> 0 1
+DEAL:0:2d::0_2:02 -> 0 2
+DEAL:0:2d::0_2:03 -> 0 3
+DEAL:0:2d::0_2:10 -> 0 4
+DEAL:0:2d::0_2:11 -> 0 5
+DEAL:0:2d::0_2:12 -> 0 6
+DEAL:0:2d::0_2:13 -> 0 7
+DEAL:0:2d::0_2:20 -> 0 8
+DEAL:0:2d::0_2:21 -> 0 9
+DEAL:0:2d::0_2:22 -> 0 10
+DEAL:0:2d::0_2:23 -> 0 11
+DEAL:0:2d::0_2:30 -> 0 12
+DEAL:0:2d::0_2:31 -> 0 13
+DEAL:0:2d::0_2:32 -> 0 14
+DEAL:0:2d::0_2:33 -> 0 15
+DEAL:0:2d::{[0,15]}
+DEAL:0:2d::{}
--- /dev/null
+
+DEAL:0:2d::0_2:00 -> 0 0
+DEAL:0:2d::0_2:01 -> 0 1
+DEAL:0:2d::0_2:02 -> 0 2
+DEAL:0:2d::0_2:03 -> 0 3
+DEAL:0:2d::0_2:10 -> 1 4
+DEAL:0:2d::0_2:12 -> 1 6
+DEAL:0:2d::0_2:20 -> 2 8
+DEAL:0:2d::0_2:21 -> 2 9
+DEAL:0:2d::0_2:30 -> 3 12
+DEAL:0:2d::{[0,3]}
+DEAL:0:2d::{4, 6, [8,9], 12}
+
+DEAL:1:2d::0_2:01 -> 0 1
+DEAL:1:2d::0_2:03 -> 0 3
+DEAL:1:2d::0_2:10 -> 1 4
+DEAL:1:2d::0_2:11 -> 1 5
+DEAL:1:2d::0_2:12 -> 1 6
+DEAL:1:2d::0_2:13 -> 1 7
+DEAL:1:2d::0_2:21 -> 2 9
+DEAL:1:2d::0_2:30 -> 3 12
+DEAL:1:2d::0_2:31 -> 3 13
+DEAL:1:2d::{[4,7]}
+DEAL:1:2d::{1, 3, 9, [12,13]}
+
+
+DEAL:2:2d::0_2:02 -> 0 2
+DEAL:2:2d::0_2:03 -> 0 3
+DEAL:2:2d::0_2:12 -> 1 6
+DEAL:2:2d::0_2:20 -> 2 8
+DEAL:2:2d::0_2:21 -> 2 9
+DEAL:2:2d::0_2:22 -> 2 10
+DEAL:2:2d::0_2:23 -> 2 11
+DEAL:2:2d::0_2:30 -> 3 12
+DEAL:2:2d::0_2:32 -> 3 14
+DEAL:2:2d::{[8,11]}
+DEAL:2:2d::{[2,3], 6, 12, 14}
+
+
+DEAL:3:2d::0_2:03 -> 0 3
+DEAL:3:2d::0_2:12 -> 1 6
+DEAL:3:2d::0_2:13 -> 1 7
+DEAL:3:2d::0_2:21 -> 2 9
+DEAL:3:2d::0_2:23 -> 2 11
+DEAL:3:2d::0_2:30 -> 3 12
+DEAL:3:2d::0_2:31 -> 3 13
+DEAL:3:2d::0_2:32 -> 3 14
+DEAL:3:2d::0_2:33 -> 3 15
+DEAL:3:2d::{[12,15]}
+DEAL:3:2d::{3, [6,7], 9, 11}
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Check global level cell indices when construct_multigrid_hierarchy is
+// enabled.
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_description.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim>
+void
+test(int n_refinements, MPI_Comm comm)
+{
+ parallel::distributed::Triangulation<dim> tria(
+ comm,
+ Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+ GridGenerator::hyper_cube(tria);
+ tria.refine_global(n_refinements);
+
+ for (unsigned int l = 0; l < tria.n_global_levels(); ++l)
+ {
+ deallog.push("level=" + std::to_string(l));
+      for (const auto &cell : tria.cell_iterators_on_level(l))
+ if (cell->level_subdomain_id() !=
+ dealii::numbers::artificial_subdomain_id)
+ deallog << cell->id() << " -> " << cell->level_subdomain_id() << " "
+ << cell->global_level_cell_index() << std::endl;
+
+ const auto &part = tria.global_level_cell_index_partitioner(l);
+
+ part.locally_owned_range().print(deallog);
+ part.ghost_indices().print(deallog);
+
+ deallog << std::endl;
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ const int n_refinements = 2;
+ const MPI_Comm comm = MPI_COMM_WORLD;
+
+ {
+ deallog.push("2d");
+ test<2>(n_refinements, comm);
+ deallog.pop();
+ }
+ if (false)
+ {
+ deallog.push("3d");
+ test<3>(n_refinements, comm);
+ deallog.pop();
+ }
+}
--- /dev/null
+
+DEAL:0:2d:level=0::0_0: -> 0 0
+DEAL:0:2d:level=0::{0}
+DEAL:0:2d:level=0::{}
+DEAL:0:2d:level=0::
+DEAL:0:2d:level=0:level=1::0_1:0 -> 0 0
+DEAL:0:2d:level=0:level=1::0_1:1 -> 0 1
+DEAL:0:2d:level=0:level=1::0_1:2 -> 0 2
+DEAL:0:2d:level=0:level=1::0_1:3 -> 0 3
+DEAL:0:2d:level=0:level=1::{[0,3]}
+DEAL:0:2d:level=0:level=1::{}
+DEAL:0:2d:level=0:level=1::
+DEAL:0:2d:level=0:level=1:level=2::0_2:00 -> 0 0
+DEAL:0:2d:level=0:level=1:level=2::0_2:01 -> 0 1
+DEAL:0:2d:level=0:level=1:level=2::0_2:02 -> 0 2
+DEAL:0:2d:level=0:level=1:level=2::0_2:03 -> 0 3
+DEAL:0:2d:level=0:level=1:level=2::0_2:10 -> 0 4
+DEAL:0:2d:level=0:level=1:level=2::0_2:11 -> 0 5
+DEAL:0:2d:level=0:level=1:level=2::0_2:12 -> 0 6
+DEAL:0:2d:level=0:level=1:level=2::0_2:13 -> 0 7
+DEAL:0:2d:level=0:level=1:level=2::0_2:20 -> 0 8
+DEAL:0:2d:level=0:level=1:level=2::0_2:21 -> 0 9
+DEAL:0:2d:level=0:level=1:level=2::0_2:22 -> 0 10
+DEAL:0:2d:level=0:level=1:level=2::0_2:23 -> 0 11
+DEAL:0:2d:level=0:level=1:level=2::0_2:30 -> 0 12
+DEAL:0:2d:level=0:level=1:level=2::0_2:31 -> 0 13
+DEAL:0:2d:level=0:level=1:level=2::0_2:32 -> 0 14
+DEAL:0:2d:level=0:level=1:level=2::0_2:33 -> 0 15
+DEAL:0:2d:level=0:level=1:level=2::{[0,15]}
+DEAL:0:2d:level=0:level=1:level=2::{}
+DEAL:0:2d:level=0:level=1:level=2::
--- /dev/null
+
+DEAL:0:2d:level=0::0_0: -> 0 0
+DEAL:0:2d:level=0::{0}
+DEAL:0:2d:level=0::{}
+DEAL:0:2d:level=0::
+DEAL:0:2d:level=0:level=1::0_1:0 -> 0 0
+DEAL:0:2d:level=0:level=1::0_1:1 -> 1 1
+DEAL:0:2d:level=0:level=1::0_1:2 -> 2 2
+DEAL:0:2d:level=0:level=1::0_1:3 -> 3 3
+DEAL:0:2d:level=0:level=1::{0}
+DEAL:0:2d:level=0:level=1::{[1,3]}
+DEAL:0:2d:level=0:level=1::
+DEAL:0:2d:level=0:level=1:level=2::0_2:00 -> 0 0
+DEAL:0:2d:level=0:level=1:level=2::0_2:01 -> 0 1
+DEAL:0:2d:level=0:level=1:level=2::0_2:02 -> 0 2
+DEAL:0:2d:level=0:level=1:level=2::0_2:03 -> 0 3
+DEAL:0:2d:level=0:level=1:level=2::0_2:10 -> 1 4
+DEAL:0:2d:level=0:level=1:level=2::0_2:12 -> 1 6
+DEAL:0:2d:level=0:level=1:level=2::0_2:20 -> 2 8
+DEAL:0:2d:level=0:level=1:level=2::0_2:21 -> 2 9
+DEAL:0:2d:level=0:level=1:level=2::0_2:30 -> 3 12
+DEAL:0:2d:level=0:level=1:level=2::{[0,3]}
+DEAL:0:2d:level=0:level=1:level=2::{4, 6, [8,9], 12}
+DEAL:0:2d:level=0:level=1:level=2::
+
+DEAL:1:2d:level=0::0_0: -> 0 0
+DEAL:1:2d:level=0::{}
+DEAL:1:2d:level=0::{0}
+DEAL:1:2d:level=0::
+DEAL:1:2d:level=0:level=1::0_1:0 -> 0 0
+DEAL:1:2d:level=0:level=1::0_1:1 -> 1 1
+DEAL:1:2d:level=0:level=1::0_1:2 -> 2 2
+DEAL:1:2d:level=0:level=1::0_1:3 -> 3 3
+DEAL:1:2d:level=0:level=1::{1}
+DEAL:1:2d:level=0:level=1::{0, [2,3]}
+DEAL:1:2d:level=0:level=1::
+DEAL:1:2d:level=0:level=1:level=2::0_2:01 -> 0 1
+DEAL:1:2d:level=0:level=1:level=2::0_2:03 -> 0 3
+DEAL:1:2d:level=0:level=1:level=2::0_2:10 -> 1 4
+DEAL:1:2d:level=0:level=1:level=2::0_2:11 -> 1 5
+DEAL:1:2d:level=0:level=1:level=2::0_2:12 -> 1 6
+DEAL:1:2d:level=0:level=1:level=2::0_2:13 -> 1 7
+DEAL:1:2d:level=0:level=1:level=2::0_2:21 -> 2 9
+DEAL:1:2d:level=0:level=1:level=2::0_2:30 -> 3 12
+DEAL:1:2d:level=0:level=1:level=2::0_2:31 -> 3 13
+DEAL:1:2d:level=0:level=1:level=2::{[4,7]}
+DEAL:1:2d:level=0:level=1:level=2::{1, 3, 9, [12,13]}
+DEAL:1:2d:level=0:level=1:level=2::
+
+
+DEAL:2:2d:level=0::0_0: -> 0 0
+DEAL:2:2d:level=0::{}
+DEAL:2:2d:level=0::{0}
+DEAL:2:2d:level=0::
+DEAL:2:2d:level=0:level=1::0_1:0 -> 0 0
+DEAL:2:2d:level=0:level=1::0_1:1 -> 1 1
+DEAL:2:2d:level=0:level=1::0_1:2 -> 2 2
+DEAL:2:2d:level=0:level=1::0_1:3 -> 3 3
+DEAL:2:2d:level=0:level=1::{2}
+DEAL:2:2d:level=0:level=1::{[0,1], 3}
+DEAL:2:2d:level=0:level=1::
+DEAL:2:2d:level=0:level=1:level=2::0_2:02 -> 0 2
+DEAL:2:2d:level=0:level=1:level=2::0_2:03 -> 0 3
+DEAL:2:2d:level=0:level=1:level=2::0_2:12 -> 1 6
+DEAL:2:2d:level=0:level=1:level=2::0_2:20 -> 2 8
+DEAL:2:2d:level=0:level=1:level=2::0_2:21 -> 2 9
+DEAL:2:2d:level=0:level=1:level=2::0_2:22 -> 2 10
+DEAL:2:2d:level=0:level=1:level=2::0_2:23 -> 2 11
+DEAL:2:2d:level=0:level=1:level=2::0_2:30 -> 3 12
+DEAL:2:2d:level=0:level=1:level=2::0_2:32 -> 3 14
+DEAL:2:2d:level=0:level=1:level=2::{[8,11]}
+DEAL:2:2d:level=0:level=1:level=2::{[2,3], 6, 12, 14}
+DEAL:2:2d:level=0:level=1:level=2::
+
+
+DEAL:3:2d:level=0::0_0: -> 0 0
+DEAL:3:2d:level=0::{}
+DEAL:3:2d:level=0::{0}
+DEAL:3:2d:level=0::
+DEAL:3:2d:level=0:level=1::0_1:0 -> 0 0
+DEAL:3:2d:level=0:level=1::0_1:1 -> 1 1
+DEAL:3:2d:level=0:level=1::0_1:2 -> 2 2
+DEAL:3:2d:level=0:level=1::0_1:3 -> 3 3
+DEAL:3:2d:level=0:level=1::{3}
+DEAL:3:2d:level=0:level=1::{[0,2]}
+DEAL:3:2d:level=0:level=1::
+DEAL:3:2d:level=0:level=1:level=2::0_2:03 -> 0 3
+DEAL:3:2d:level=0:level=1:level=2::0_2:12 -> 1 6
+DEAL:3:2d:level=0:level=1:level=2::0_2:13 -> 1 7
+DEAL:3:2d:level=0:level=1:level=2::0_2:21 -> 2 9
+DEAL:3:2d:level=0:level=1:level=2::0_2:23 -> 2 11
+DEAL:3:2d:level=0:level=1:level=2::0_2:30 -> 3 12
+DEAL:3:2d:level=0:level=1:level=2::0_2:31 -> 3 13
+DEAL:3:2d:level=0:level=1:level=2::0_2:32 -> 3 14
+DEAL:3:2d:level=0:level=1:level=2::0_2:33 -> 3 15
+DEAL:3:2d:level=0:level=1:level=2::{[12,15]}
+DEAL:3:2d:level=0:level=1:level=2::{3, [6,7], 9, 11}
+DEAL:3:2d:level=0:level=1:level=2::
+