This commit fixes issue #2798. The original implementation left
p::s::Triangulation in an invalid state after copy_triangulation()
and load().
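
After this change, copying a serial mesh into a p::s::Triangulation yields
valid partition information again. A minimal sketch (names are illustrative;
see the new tests below for complete examples):

    // serial_tria: an already created dealii::Triangulation<2>
    parallel::shared::Triangulation<2> shared_tria (MPI_COMM_WORLD);
    shared_tria.copy_triangulation (serial_tria);
    // locally_owned_subdomain() and n_locally_owned_active_cells()
    // now return consistent values on every MPI rank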
have been clarified and improved.
<br>
(Timo Heister, 2016/09/06)
+
+ <li> Fixed: Reimplement copy_triangulation and load in
+ dealii::parallel::shared::Triangulation. This avoids the loss of
+ partition information that previously left p::s::Triangulation in an invalid state.
+ <br>
+ (Ce Qin, 2016/09/05)
</li>
<li> Fixed: The build system now uses -fPIC instead of -fpic
* Create a triangulation.
*
- * This function also partitions triangulation based on the MPI
- * communicator provided to constructor.
+ * This function also partitions the triangulation based on the MPI
+ * communicator provided to the constructor.
*/
virtual void create_triangulation (const std::vector< Point< spacedim > > &vertices,
const std::vector< CellData< dim > > &cells,
const SubCellData &subcelldata);
+ /**
+ * Copy @p other_tria to this triangulation.
+ *
+ * This function also partitions the triangulation based on the MPI
+ * communicator provided to the constructor.
+ *
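+ * A typical use is the following sketch (names are illustrative):
+ * @code
+ *   // serial_tria is a fully created dealii::Triangulation<dim>
+ *   parallel::shared::Triangulation<dim> shared_tria (mpi_communicator);
+ *   shared_tria.copy_triangulation (serial_tria);
+ * @endcode
+ *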
+ * @note This function cannot be used with parallel::distributed::Triangulation,
+ * since such a triangulation only stores those cells that it locally owns, one
+ * layer of ghost cells around them, and a number of artificial cells.
+ */
+ virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria);
+
+ /**
+ * Read the data of this object from a stream for the purpose of
+ * serialization. Throw away the previous content.
+ *
+ * This function first does the same work as dealii::Triangulation::load
+ * and then partitions the triangulation based on the MPI communicator
+ * provided to the constructor.
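+ *
+ * A minimal sketch of loading through a BOOST text archive (the archive
+ * type and variable names are only illustrative, mirroring the
+ * accompanying tests):
+ * @code
+ *   std::istringstream in (serialized_data); // hypothetical input
+ *   boost::archive::text_iarchive ia (in, boost::archive::no_header);
+ *   shared_tria.load (ia, 0);
+ * @endcode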
+ */
+ template <class Archive>
+ void load (Archive &ar, const unsigned int version);
+
/**
* Return a vector of length Triangulation::n_active_cells() where each
* element stores the subdomain id of the owner of this cell. The
*/
std::vector<types::subdomain_id> true_subdomain_ids_of_cells;
};
+
+ template <int dim, int spacedim>
+ template <class Archive>
+ void
+ Triangulation<dim,spacedim>::load (Archive &ar,
+ const unsigned int version)
+ {
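+ // first read the serial triangulation data, then re-partition it
+ // among the processes of the communicator stored by this object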
+ dealii::Triangulation<dim,spacedim>::load (ar, version);
+ partition();
+ this->update_number_cache ();
+ }
}
#else
/**
* Implementation of the same function as in the base class.
*/
- virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria);
+ virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria);
/**
* Create a triangulation as documented in the base class.
std::vector<types::manifold_id> get_manifold_ids() const;
/**
- * Copy @p old_tria to this triangulation. This operation is not cheap, so
+ * Copy @p other_tria to this triangulation. This operation is not cheap, so
* you should be careful with using this. We do not implement this function
* as a copy constructor, since it makes it easier to maintain collections
* of triangulations if you can assign them values later on.
* The function is made @p virtual since some derived classes might want to
* disable or extend the functionality of this function.
*
- * @note Calling this function triggers the 'copy' signal on old_tria, i.e.
+ * @note Calling this function triggers the 'copy' signal on other_tria, i.e.
* the triangulation being copied <i>from</i>. It also triggers the
* 'create' signal of the current triangulation. See the section on signals
* in the general documentation for more information.
* how the old triangulation changes, not how any triangulation it may be
* copied to changes.
*/
- virtual void copy_triangulation (const Triangulation<dim, spacedim> &old_tria);
+ virtual void copy_triangulation (const Triangulation<dim, spacedim> &other_tria);
/**
* Create a triangulation from a list of vertices and a list of cells, each
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/tria.h>
DEAL_II_NAMESPACE_OPEN
this->update_number_cache ();
}
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim, spacedim>::
+ copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria)
+ {
+ Assert ((dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&other_tria) == NULL),
+ ExcMessage("Cannot use this function on parallel::distributed::Triangulation."));
+
+ dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (other_tria);
+ partition();
+ this->update_number_cache ();
+ }
+
}
}
template <int dim, int spacedim>
void
Triangulation<dim,spacedim>::
- copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+ copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria)
{
try
{
- dealii::Triangulation<dim,spacedim>::
- copy_triangulation (old_tria);
+ dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (other_tria);
}
catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
{
// separate)
triangulation_has_content = true;
- Assert (old_tria.n_levels() == 1,
+ Assert (other_tria.n_levels() == 1,
ExcMessage ("Parallel distributed triangulations can only be copied, "
"if they are not refined!"));
if (const dealii::parallel::distributed::Triangulation<dim,spacedim> *
- old_tria_x = dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&old_tria))
+ other_tria_x = dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&other_tria))
{
- Assert (!old_tria_x->refinement_in_progress,
+ Assert (!other_tria_x->refinement_in_progress,
ExcMessage ("Parallel distributed triangulations can only "
"be copied, if no refinement is in progress!"));
- // duplicate MPI communicator, stored in the base class
- dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (old_tria);
-
- coarse_cell_to_p4est_tree_permutation = old_tria_x->coarse_cell_to_p4est_tree_permutation;
- p4est_tree_to_coarse_cell_permutation = old_tria_x->p4est_tree_to_coarse_cell_permutation;
- attached_data_size = old_tria_x->attached_data_size;
- n_attached_datas = old_tria_x->n_attached_datas;
+ coarse_cell_to_p4est_tree_permutation = other_tria_x->coarse_cell_to_p4est_tree_permutation;
+ p4est_tree_to_coarse_cell_permutation = other_tria_x->p4est_tree_to_coarse_cell_permutation;
+ attached_data_size = other_tria_x->attached_data_size;
+ n_attached_datas = other_tria_x->n_attached_datas;
- settings = old_tria_x->settings;
+ settings = other_tria_x->settings;
}
else
{
template <int dim, int spacedim>
void
- Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+ Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria)
{
#ifndef DEAL_II_WITH_MPI
Assert(false, ExcNotImplemented());
#endif
+ dealii::Triangulation<dim,spacedim>::copy_triangulation (other_tria);
+
if (const dealii::parallel::Triangulation<dim,spacedim> *
- old_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&old_tria))
+ other_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&other_tria))
{
- mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
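+ // release the communicator duplicated in the constructor before
+ // storing a duplicate of the other triangulation's communicator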
+ MPI_Comm_free (&this->mpi_communicator);
+ mpi_communicator = Utilities::MPI::duplicate_communicator (other_tria_x->get_communicator ());
}
}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::
-copy_triangulation (const Triangulation<dim, spacedim> &old_tria)
+copy_triangulation (const Triangulation<dim, spacedim> &other_tria)
{
Assert ((vertices.size() == 0) &&
(levels.size () == 0) &&
(faces == NULL),
ExcTriangulationNotEmpty(vertices.size(), levels.size()));
- Assert ((old_tria.levels.size() != 0) &&
- (old_tria.vertices.size() != 0) &&
- (dim == 1 || old_tria.faces != NULL),
+ Assert ((other_tria.levels.size() != 0) &&
+ (other_tria.vertices.size() != 0) &&
+ (dim == 1 || other_tria.faces != NULL),
ExcMessage("When calling Triangulation::copy_triangulation(), "
"the target triangulation must be empty but the source "
"triangulation (the argument to this function) must contain "
// copy normal elements
- vertices = old_tria.vertices;
- vertices_used = old_tria.vertices_used;
- anisotropic_refinement = old_tria.anisotropic_refinement;
- smooth_grid = old_tria.smooth_grid;
+ vertices = other_tria.vertices;
+ vertices_used = other_tria.vertices_used;
+ anisotropic_refinement = other_tria.anisotropic_refinement;
+ smooth_grid = other_tria.smooth_grid;
if (dim > 1)
- faces.reset (new internal::Triangulation::TriaFaces<dim>(*old_tria.faces));
+ faces.reset (new internal::Triangulation::TriaFaces<dim>(*other_tria.faces));
typename std::map<types::manifold_id,
SmartPointer<const Manifold<dim,spacedim> , Triangulation<dim, spacedim> > >::const_iterator
- bdry_iterator = old_tria.manifold.begin();
- for (; bdry_iterator != old_tria.manifold.end() ; ++bdry_iterator)
+ bdry_iterator = other_tria.manifold.begin();
+ for (; bdry_iterator != other_tria.manifold.end() ; ++bdry_iterator)
manifold[bdry_iterator->first] = bdry_iterator->second;
- levels.reserve (old_tria.levels.size());
- for (unsigned int level=0; level<old_tria.levels.size(); ++level)
+ levels.reserve (other_tria.levels.size());
+ for (unsigned int level=0; level<other_tria.levels.size(); ++level)
levels.push_back (new
internal::Triangulation::
- TriaLevel<dim>(*old_tria.levels[level]));
+ TriaLevel<dim>(*other_tria.levels[level]));
- number_cache = old_tria.number_cache;
+ number_cache = other_tria.number_cache;
if (dim == 1)
{
vertex_to_boundary_id_map_1d
.reset(new std::map<unsigned int, types::boundary_id>
- (*old_tria.vertex_to_boundary_id_map_1d));
+ (*other_tria.vertex_to_boundary_id_map_1d));
vertex_to_manifold_id_map_1d
.reset(new std::map<unsigned int, types::manifold_id>
- (*old_tria.vertex_to_manifold_id_map_1d));
+ (*other_tria.vertex_to_manifold_id_map_1d));
}
- // inform those who are listening on old_tria of the copy operation
- old_tria.signals.copy (*this);
+ // inform those who are listening on other_tria of the copy operation
+ other_tria.signals.copy (*this);
// also inform all listeners of the current triangulation that the
// triangulation has been created
signals.create();
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// create a tria mesh and copy it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+
+template<int dim>
+void test()
+{
+ Triangulation<dim> tr1;
+
+ GridGenerator::subdivided_hyper_cube(tr1, 2);
+
+ deallog
+ << " n_active_cells: " << tr1.n_active_cells() << "\n"
+ << std::endl;
+
+ parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ tr2.copy_triangulation(tr1);
+
+ deallog
+ << " n_active_cells: " << tr2.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr2.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr2.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr2.n_global_active_cells() << "\n"
+ << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr3(MPI_COMM_WORLD);
+ tr3.copy_triangulation(tr1);
+
+ deallog
+ << " n_active_cells: " << tr3.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr3.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr3.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr3.n_global_active_cells() << "\n"
+ << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr4(MPI_COMM_WORLD);
+ tr4.copy_triangulation(tr2);
+
+ deallog
+ << " n_active_cells: " << tr4.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr4.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr4.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr4.n_global_active_cells() << "\n"
+ << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr5(MPI_COMM_WORLD);
+ tr5.copy_triangulation(tr3);
+
+ deallog
+ << " n_active_cells: " << tr5.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr5.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr5.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr5.n_global_active_cells() << "\n"
+ << std::endl;
+}
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:: n_active_cells: 4
+
+DEAL:0:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:0:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:0:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:0:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:0:3d:: n_active_cells: 8
+
+DEAL:0:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:0:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:0:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:0:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+
+DEAL:1:2d:: n_active_cells: 4
+
+DEAL:1:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:1:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:1:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:1:2d:: n_active_cells: 4
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 2
+ n_global_active_cells: 4
+
+DEAL:1:3d:: n_active_cells: 8
+
+DEAL:1:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:1:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:1:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+DEAL:1:3d:: n_active_cells: 8
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 4
+ n_global_active_cells: 8
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// create a shared tria mesh and copy it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/grid_generator.h>
+
+template<int dim>
+void test()
+{
+ Triangulation<dim> tr1;
+
+ GridGenerator::hyper_cube(tr1);
+ tr1.refine_global(2);
+
+ deallog
+ << " n_active_cells: " << tr1.n_active_cells() << "\n"
+ << std::endl;
+
+ parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ tr2.copy_triangulation(tr1);
+
+ deallog
+ << " n_active_cells: " << tr2.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr2.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr2.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr2.n_global_active_cells() << "\n"
+ << std::endl;
+
+ parallel::shared::Triangulation<dim> tr3(MPI_COMM_WORLD);
+ tr3.copy_triangulation(tr2);
+
+ deallog
+ << " n_active_cells: " << tr3.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr3.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr3.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr3.n_global_active_cells() << "\n"
+ << std::endl;
+}
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:: n_active_cells: 16
+
+DEAL:0:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:0:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:0:3d:: n_active_cells: 64
+
+DEAL:0:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+DEAL:0:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+
+DEAL:1:2d:: n_active_cells: 16
+
+DEAL:1:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:1:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:1:3d:: n_active_cells: 64
+
+DEAL:1:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+DEAL:1:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// save a serial tria mesh and load it into a p::s::Triangulation
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/grid_generator.h>
+
+#include <boost/archive/text_oarchive.hpp>
+#include <boost/archive/text_iarchive.hpp>
+
+#include <sstream>
+#include <fstream>
+
+template<int dim>
+void test()
+{
+ Triangulation<dim> tr1;
+
+ GridGenerator::hyper_cube(tr1);
+ tr1.refine_global(2);
+
+ deallog
+ << " n_active_cells: " << tr1.n_active_cells() << "\n"
+ << std::endl;
+
+ std::ostringstream oss;
+ {
+ boost::archive::text_oarchive oa(oss, boost::archive::no_header);
+ tr1.save(oa, 0);
+ }
+
+ parallel::shared::Triangulation<dim> tr2(MPI_COMM_WORLD);
+ {
+ std::istringstream iss(oss.str());
+ boost::archive::text_iarchive ia(iss, boost::archive::no_header);
+ tr2.load(ia, 0);
+ }
+
+ deallog
+ << " n_active_cells: " << tr2.n_active_cells() << "\n"
+ << " locally_owned_subdomain(): " << tr2.locally_owned_subdomain() << "\n"
+ << " n_locally_owned_active_cells: " << tr2.n_locally_owned_active_cells() << "\n"
+ << " n_global_active_cells: " << tr2.n_global_active_cells() << "\n"
+ << std::endl;
+}
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:: n_active_cells: 16
+
+DEAL:0:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:0:3d:: n_active_cells: 64
+
+DEAL:0:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 0
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+
+DEAL:1:2d:: n_active_cells: 16
+
+DEAL:1:2d:: n_active_cells: 16
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 8
+ n_global_active_cells: 16
+
+DEAL:1:3d:: n_active_cells: 64
+
+DEAL:1:3d:: n_active_cells: 64
+ locally_owned_subdomain(): 1
+ n_locally_owned_active_cells: 32
+ n_global_active_cells: 64
+
+