It encapsulates the distribution of cells among processors and in the majority of cases behaves like
its distributed triangulation counterpart.
<ol>
+ <li> New: The parallel::shared::Triangulation class extends the
+ Triangulation class to automatically partition the triangulation when run
+ with MPI. Functionality common to parallel::shared::Triangulation and
+ parallel::distributed::Triangulation is grouped in the parent class
+ parallel::Triangulation.
+ <br>
+ (Denis Davydov, 2015/08/14)
+ </li>
+
<li> New: The online documentation of all functions now includes
links to the file and line where that function is implemented. Both
are clickable to provide immediate access to the source code of a
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: tria.h 32739 2014-04-08 16:39:47Z denis.davydov $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef __deal2__distributed__shared_tria_h
+#define __deal2__distributed__shared_tria_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/subscriptor.h>
+#include <deal.II/base/smartpointer.h>
+#include <deal.II/base/template_constraints.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/distributed/tria_base.h>
+
+#include <deal.II/base/std_cxx1x/function.h>
+#include <deal.II/base/std_cxx1x/tuple.h>
+
+#include <set>
+#include <vector>
+#include <list>
+#include <utility>
+
+#ifdef DEAL_II_WITH_MPI
+# include <mpi.h>
+#endif
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class Triangulation;
+
+
+namespace parallel
+{
+
+#ifdef DEAL_II_WITH_MPI
+
+
+ namespace shared
+ {
+
+ /**
+ * This class is an extension of the dealii::Triangulation class that
+ * automatically partitions the triangulation when run with MPI.
+ * In contrast to parallel::distributed::Triangulation, the entire mesh
+ * is stored on each processor. However, cells are labeled according to
+ * the id of the processor which "owns" them. The partitioning is performed
+ * automatically by calling dealii::GridTools::partition_triangulation()
+ * (which uses METIS) whenever the triangulation is created or refined.
+ * This enables distributing DoFs among processors and therefore splitting
+ * matrices and vectors across processors.
+ * The usage of this class is demonstrated in step-18.
+ *
+ * @author Denis Davydov, 2015
+ * @ingroup distributed
+ *
+ */
+ template <int dim, int spacedim = dim>
+ class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
+ {
+ public:
+ typedef typename dealii::Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+ typedef typename dealii::Triangulation<dim,spacedim>::cell_iterator cell_iterator;
+
+ /**
+ * Constructor.
+ */
+ Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid =
+ (dealii::Triangulation<dim,spacedim>::none) );
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+ /**
+ * Coarsen and refine the mesh according to the refinement and
+ * coarsening flags that have been set.
+ *
+ * This function is equivalent to the one in the dealii::Triangulation class,
+ * with the addition of a call to dealii::GridTools::partition_triangulation() at the end.
+ */
+ virtual void execute_coarsening_and_refinement ();
+
+ /**
+ * Create a triangulation.
+ *
+ * This function also partitions the triangulation based on the
+ * MPI communicator provided to the constructor.
+ */
+ virtual void create_triangulation (const std::vector< Point< spacedim > > &vertices,
+ const std::vector< CellData< dim > > &cells,
+ const SubCellData &subcelldata);
+
+ };
+ }
+#else
+
+ namespace shared
+ {
+
+ /**
+ * Dummy class the compiler chooses for parallel shared
+ * triangulations if we didn't actually configure deal.II with the
+ * MPI library. The existence of this class allows us to refer
+ * to parallel::shared::Triangulation objects throughout the
+ * library even though MPI is disabled.
+ *
+ * Since the constructor of this class is private, no such objects
+ * can actually be created if MPI is not available.
+ */
+ template <int dim, int spacedim = dim>
+ class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
+ {
+ private:
+ /**
+ * Constructor.
+ */
+ Triangulation ();
+ public:
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+ /**
+ * Dummy declaration matching the out-of-class definition in the
+ * source file; since no objects of this class can be created, it is
+ * never called.
+ */
+ types::subdomain_id locally_owned_subdomain () const;
+
+ };
+ }
+
+
+#endif
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
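As an aside, here is a minimal usage sketch of the class declared in the header above; it is not part of the patch and assumes deal.II was configured with MPI and METIS. The choice of GridGenerator::hyper_cube, the 2D dimension, the refinement depth, and the FE_Q(1) element are arbitrary for illustration.

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>

int main (int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

  // each process stores the complete mesh; cells get subdomain ids
  dealii::parallel::shared::Triangulation<2> triangulation (MPI_COMM_WORLD);
  dealii::GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);   // repartitions after refinement

  // DoFs are distributed and, through the shared policy, split by subdomain
  dealii::FE_Q<2>       fe (1);
  dealii::DoFHandler<2> dof_handler (triangulation);
  dof_handler.distribute_dofs (fe);

  return 0;
}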
#include <deal.II/base/std_cxx11/function.h>
#include <deal.II/base/std_cxx11/tuple.h>
+#include <deal.II/distributed/tria_base.h>
+
#include <set>
#include <vector>
#include <list>
* @ingroup distributed
*/
template <int dim, int spacedim = dim>
- class Triangulation : public dealii::Triangulation<dim,spacedim>
+ class Triangulation : public dealii::parallel::Triangulation<dim,spacedim>
{
public:
/**
void
communicate_locally_moved_vertices (const std::vector<bool> &vertex_locally_moved);
- /**
- * Return the subdomain id of those cells that are owned by the current
- * processor. All cells in the triangulation that do not have this
- * subdomain id are either owned by another processor or have children
- * that only exist on other processors.
- */
- types::subdomain_id locally_owned_subdomain () const;
-
- /**
- * Return the number of active cells in the triangulation that are
- * locally owned, i.e. that have a subdomain_id equal to
- * locally_owned_subdomain(). Note that there may be more active cells
- * in the triangulation stored on the present processor, such as for
- * example ghost cells, or cells further away from the locally owned
- * block of cells but that are needed to ensure that the triangulation
- * that stores this processor's set of active cells still remains
- * balanced with respect to the 2:1 size ratio of adjacent cells.
- *
- * As a consequence of the remark above, the result of this function is
- * always smaller or equal to the result of the function with the same
- * name in the ::Triangulation base class, which includes the active
- * ghost and artificial cells (see also
- * @ref GlossArtificialCell
- * and
- * @ref GlossGhostCell).
- */
- unsigned int n_locally_owned_active_cells () const;
-
- /**
- * Return the sum over all processors of the number of active cells
- * owned by each processor. This equals the overall number of active
- * cells in the distributed triangulation.
- */
- virtual types::global_dof_index n_global_active_cells () const;
-
- /**
- * Returns the global maximum level. This may be bigger than the number
- * dealii::Triangulation::n_levels() (a function in this class's base
- * class) returns if the current processor only stores cells in parts of
- * the domain that are not very refined, but if other processors store
- * cells in more deeply refined parts of the domain.
- */
- virtual unsigned int n_global_levels () const;
/**
* Returns true if the triangulation has hanging nodes.
virtual
bool has_hanging_nodes() const;
- /**
- * Return the number of active cells owned by each of the MPI processes
- * that contribute to this triangulation. The element of this vector
- * indexed by locally_owned_subdomain() equals the result of
- * n_locally_owned_active_cells().
- */
- const std::vector<unsigned int> &
- n_locally_owned_active_cells_per_processor () const;
-
- /**
- * Return the MPI communicator used by this triangulation.
- */
- MPI_Comm get_communicator () const;
-
/**
* Return the local memory consumption in bytes.
*/
private:
- /**
- * MPI communicator to be used for the triangulation. We create a unique
- * communicator for this class, which is a duplicate of the one passed
- * to the constructor.
- */
- MPI_Comm mpi_communicator;
/**
* store the Settings.
*/
Settings settings;
- /**
- * The subdomain id to be used for the current processor.
- */
- types::subdomain_id my_subdomain;
-
/**
* A flag that indicates whether the triangulation has actual content.
*/
bool triangulation_has_content;
- /**
- * A structure that contains some numbers about the distributed
- * triangulation.
- */
- struct NumberCache
- {
- std::vector<unsigned int> n_locally_owned_active_cells;
- types::global_dof_index n_global_active_cells;
- unsigned int n_global_levels;
-
- NumberCache();
- };
-
- NumberCache number_cache;
-
/**
* A data structure that holds the connectivity between trees. Since
* each tree is rooted in a coarse grid cell, this data structure holds
*/
void copy_local_forest_to_triangulation ();
-
- /**
- * Update the number_cache variable after mesh creation or refinement.
- */
- void update_number_cache ();
-
/**
* Internal function notifying all registered classes to attach their
* data before repartitioning occurs. Called from
* all this class does is throw an exception.
*/
template <int spacedim>
- class Triangulation<1,spacedim> : public dealii::Triangulation<1,spacedim>
+ class Triangulation<1,spacedim> : public dealii::parallel::Triangulation<1,spacedim>
{
public:
/**
*/
virtual ~Triangulation ();
- /**
- * Return the MPI communicator used by this triangulation.
- */
- MPI_Comm get_communicator () const;
-
- /**
- * Return the sum over all processors of the number of active cells
- * owned by each processor. This equals the overall number of active
- * cells in the distributed triangulation.
- */
- types::global_dof_index n_global_active_cells () const;
- virtual unsigned int n_global_levels () const;
-
/**
* Returns a permutation vector for the order the coarse cells are
* handed of to p4est. For example the first element i in this vector
void
communicate_locally_moved_vertices (const std::vector<bool> &vertex_locally_moved);
- /**
- * Return the subdomain id of those cells that are owned by the current
- * processor. All cells in the triangulation that do not have this
- * subdomain id are either owned by another processor or have children
- * that only exist on other processors.
- */
- types::subdomain_id locally_owned_subdomain () const;
-
/**
* Dummy arrays. This class isn't usable but the compiler wants to see
* these variables at a couple places anyway.
*/
types::subdomain_id locally_owned_subdomain () const;
- /**
- * Return the MPI communicator used by this triangulation.
- */
-#ifdef DEAL_II_WITH_MPI
- MPI_Comm get_communicator () const;
-#endif
};
}
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef __deal2__distributed__tria_base_h
+#define __deal2__distributed__tria_base_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/subscriptor.h>
+#include <deal.II/base/smartpointer.h>
+#include <deal.II/base/template_constraints.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/base/std_cxx1x/function.h>
+#include <deal.II/base/std_cxx1x/tuple.h>
+
+#include <set>
+#include <vector>
+#include <list>
+#include <utility>
+
+#ifdef DEAL_II_WITH_MPI
+# include <mpi.h>
+#endif
+
+
+DEAL_II_NAMESPACE_OPEN
+
+template <int, int> class Triangulation;
+
+
+namespace parallel
+{
+ /**
+ * This class describes the interface for all triangulation classes that
+ * work in parallel, namely parallel::distributed::Triangulation
+ * and parallel::shared::Triangulation.
+ */
+ template <int dim, int spacedim = dim>
+ class Triangulation : public dealii::Triangulation<dim,spacedim>
+ {
+ public:
+
+ /**
+ * Constructor.
+ */
+#ifdef DEAL_II_WITH_MPI
+ Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid = (dealii::Triangulation<dim,spacedim>::none),
+ const bool check_for_distorted_cells = false);
+#else
+ Triangulation ();
+#endif
+
+ /**
+ * Destructor.
+ */
+ virtual ~Triangulation ();
+
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * Return the MPI communicator used by this triangulation.
+ */
+ virtual MPI_Comm get_communicator () const;
+#endif
+
+ /**
+ * Implementation of the same function as in the base class.
+ */
+ virtual void copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria);
+
+ /**
+ * Return the number of active cells owned by each of the MPI processes
+ * that contribute to this triangulation. The element of this vector
+ * indexed by locally_owned_subdomain() equals the result of
+ * n_locally_owned_active_cells().
+ */
+ const std::vector<unsigned int> &
+ n_locally_owned_active_cells_per_processor () const;
+
+
+ /**
+ * Return the number of active cells in the triangulation that are
+ * locally owned, i.e. that have a subdomain_id equal to
+ * locally_owned_subdomain(). Note that there may be more active cells
+ * in the triangulation stored on the present processor, such as for
+ * example ghost cells, or cells further away from the locally owned
+ * block of cells but that are needed to ensure that the triangulation
+ * that stores this processor's set of active cells still remains
+ * balanced with respect to the 2:1 size ratio of adjacent cells.
+ *
+ * As a consequence of the remark above, the result of this function is
+ * always smaller or equal to the result of the function with the same
+ * name in the ::Triangulation base class, which includes the active
+ * ghost and artificial cells (see also
+ * @ref GlossArtificialCell
+ * and
+ * @ref GlossGhostCell).
+ */
+ unsigned int n_locally_owned_active_cells () const;
+
+ /**
+ * Return the sum over all processors of the number of active cells
+ * owned by each processor. This equals the overall number of active
+ * cells in the triangulation.
+ */
+ virtual types::global_dof_index n_global_active_cells () const;
+
+ /**
+ * Return the local memory consumption in bytes.
+ */
+ virtual std::size_t memory_consumption () const;
+
+
+ /**
+ * Returns the global maximum level. This may be bigger than the number
+ * that dealii::Triangulation::n_levels() (a function in this class's base
+ * class) returns if the current processor only stores cells in parts of
+ * the domain that are not very refined, while other processors store
+ * cells in more deeply refined parts of the domain.
+ */
+ virtual unsigned int n_global_levels () const;
+
+ /**
+ * Return the subdomain id of those cells that are owned by the current
+ * processor. All cells in the triangulation that do not have this
+ * subdomain id are either owned by another processor or have children
+ * that only exist on other processors.
+ */
+ types::subdomain_id locally_owned_subdomain () const;
+
+
+ protected:
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * MPI communicator to be used for the triangulation. We create a unique
+ * communicator for this class, which is a duplicate of the one passed
+ * to the constructor.
+ */
+ MPI_Comm mpi_communicator;
+#endif
+
+ /**
+ * The subdomain id to be used for the current processor.
+ */
+ types::subdomain_id my_subdomain;
+
+ /**
+ * The total number of subdomains.
+ */
+ types::subdomain_id n_subdomains;
+
+ /**
+ * A structure that contains some numbers about the distributed
+ * triangulation.
+ */
+ struct NumberCache
+ {
+ std::vector<unsigned int> n_locally_owned_active_cells;
+ types::global_dof_index n_global_active_cells;
+ unsigned int n_global_levels;
+
+ NumberCache();
+ };
+
+ NumberCache number_cache;
+
+ /**
+ * Update the number_cache variable after mesh creation or refinement.
+ */
+ void update_number_cache ();
+
+
+ };
+
+} // namespace parallel
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
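For context, here is a hedged sketch of how code can now be written against the common base class declared above, without knowing whether the triangulation is shared or distributed; the helper function name n_owned_active_cells is made up for this example and mirrors the dynamic_cast pattern used later in this patch in tria_accessor.h.

#include <deal.II/distributed/tria_base.h>
#include <deal.II/grid/tria.h>

// Count the active cells owned by the current process for an arbitrary
// triangulation: parallel (shared or distributed) triangulations report
// their locally owned cells, a plain serial triangulation owns everything.
template <int dim, int spacedim>
unsigned int n_owned_active_cells (const dealii::Triangulation<dim,spacedim> &tria)
{
  if (const dealii::parallel::Triangulation<dim,spacedim> *parallel_tria
        = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim>*>(&tria))
    return parallel_tria->n_locally_owned_active_cells ();

  return tria.n_active_cells ();
}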
#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_renumbering.h>
#include <vector>
#include <map>
virtual ~PolicyBase ();
/**
- * Distribute degrees of freedom on the object given as last argument.
+ * Distribute degrees of freedom on the DoFHandler object given as the
+ * first argument. The NumberCache of that DoFHandler has to be passed
+ * as the last argument; this function updates it in place so that
+ * DoFHandler related functions work properly when called from within
+ * the policy classes.
*/
virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+ void
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const = 0;
/**
* Distribute the multigrid dofs on each level
std::vector<NumberCache> &number_caches) const = 0;
/**
- * Renumber degrees of freedom as specified by the first argument.
+ * Renumber degrees of freedom as specified by the first argument.
+ * The NumberCache of the DoFHandler object has to be passed as the
+ * last argument; this function updates it in place so that DoFHandler
+ * related functions work properly when called from within the policy
+ * classes.
*/
virtual
- NumberCache
+ void
renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const = 0;
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const = 0;
};
* Distribute degrees of freedom on the object given as last argument.
*/
virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ void
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
/**
* Distribute multigrid DoFs.
* Renumber degrees of freedom as specified by the first argument.
*/
virtual
- NumberCache
+ void
renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
+ };
+
+ /**
+ * This class implements the policy for operations when we use a
+ * parallel::shared::Triangulation object.
+ */
+ template <int dim, int spacedim>
+ class ParallelShared : public Sequential<dim,spacedim>
+ {
+ public:
+
+ /**
+ * Distribute degrees of freedom on the DoFHandler object given as the
+ * first argument.
+ *
+ * On distribution, DoFs are renumbered subdomain-wise and
+ * number_cache.n_locally_owned_dofs_per_processor[i] and
+ * number_cache.locally_owned_dofs are updated consistently.
+ */
+ virtual
+ void
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
+
+ /**
+ * This function is not yet implemented.
+ */
+ virtual
+ void
+ distribute_mg_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ std::vector<NumberCache> &number_caches) const;
+
+ /**
+ * Renumber degrees of freedom as specified by the first argument.
+ *
+ * The input argument @p new_numbers may either have as many entries
+ * as there are global degrees of freedom (i.e. dof_handler.n_dofs())
+ * or as many as dof_handler.locally_owned_dofs().n_elements().
+ * Therefore it can also be used with renumbering functions
+ * implemented for the parallel::distributed case.
+ */
+ virtual
+ void
+ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
+ private:
+
};
* Distribute degrees of freedom on the object given as last argument.
*/
virtual
- NumberCache
- distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ void
+ distribute_dofs (dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
/**
* Distribute multigrid DoFs.
* Renumber degrees of freedom as specified by the first argument.
*/
virtual
- NumberCache
+ void
renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const;
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const;
};
}
}
extract_locally_relevant_dofs (const DH &dof_handler,
IndexSet &dof_set);
+ /**
+ * For each processor, determine the set of locally owned degrees of freedom as an IndexSet.
+ * This function returns a vector of index sets, whose size equals the
+ * number of MPI processes that participate in the DoF handler object.
+ *
+ * The function can be used for objects of type dealii::Triangulation or parallel::shared::Triangulation.
+ * It will not work for objects of type parallel::distributed::Triangulation since for such triangulations
+ * we do not have information about all cells of the triangulation available locally,
+ * and consequently cannot say anything definitive about the degrees of freedom active on other
+ * processors' locally owned cells.
+ */
+ template <class DH>
+ std::vector<IndexSet>
+ locally_owned_dofs_per_subdomain (const DH &dof_handler);
+
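A hedged usage sketch for the function declared above (not part of the patch): the helper my_locally_owned_dofs is hypothetical and assumes the DoFHandler is built on a triangulation whose cells carry valid subdomain ids, e.g. a parallel::shared::Triangulation.

#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <vector>

// Return the IndexSet of DoFs owned by the current process, using the
// per-subdomain index sets computed by the new function.
template <int dim>
dealii::IndexSet my_locally_owned_dofs (const dealii::DoFHandler<dim> &dof_handler)
{
  // one IndexSet per participating process/subdomain
  const std::vector<dealii::IndexSet> owned_per_subdomain
    = dealii::DoFTools::locally_owned_dofs_per_subdomain (dof_handler);

  // pick the entry that corresponds to this process' subdomain
  return owned_per_subdomain[dof_handler.get_tria ().locally_owned_subdomain ()];
}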
/**
* For each DoF, return in the output array to which subdomain (as given by
* the <tt>cell->subdomain_id()</tt> function) it belongs. The output array
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.templates.h>
#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
#include <cmath>
{
template <int, int> class Triangulation;
}
+
+ namespace shared
+ {
+ template <int, int> class Triangulation;
+ }
}
{
Assert (this->active(),
ExcMessage("is_locally_owned() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
+#ifndef DEAL_II_WITH_MPI
return true;
#else
if (is_artificial())
return false;
- const parallel::distributed::Triangulation<dim,spacedim> *pdt
- = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
+ const parallel::Triangulation<dim,spacedim> *pt
+ = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
- if (pdt == 0)
+ if (pt == 0)
return true;
else
- return (this->subdomain_id() == pdt->locally_owned_subdomain());
+ return (this->subdomain_id() == pt->locally_owned_subdomain());
+
#endif
}
bool
CellAccessor<dim,spacedim>::is_locally_owned_on_level () const
{
-#ifndef DEAL_II_WITH_P4EST
+
+#ifndef DEAL_II_WITH_MPI
return true;
#else
- const parallel::distributed::Triangulation<dim,spacedim> *pdt
- = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
- if (pdt == 0)
+ const parallel::Triangulation<dim,spacedim> *pt
+ = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
+
+ if (pt == 0)
return true;
else
- return (this->level_subdomain_id() == pdt->locally_owned_subdomain());
+ return (this->level_subdomain_id() == pt->locally_owned_subdomain());
+
#endif
}
{
Assert (this->active(),
ExcMessage("is_ghost() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
- return false;
-#else
if (is_artificial() || this->has_children())
return false;
- const parallel::distributed::Triangulation<dim,spacedim> *pdt
- = dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim> *>(this->tria);
+#ifndef DEAL_II_WITH_MPI
+ return false;
+#else
+
+ const parallel::Triangulation<dim,spacedim> *pt
+ = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
- if (pdt == 0)
+ if (pt == 0)
return false;
else
- return (this->subdomain_id() != pdt->locally_owned_subdomain());
+ return (this->subdomain_id() != pt->locally_owned_subdomain());
+
#endif
}
{
Assert (this->active(),
ExcMessage("is_artificial() can only be called on active cells!"));
-#ifndef DEAL_II_WITH_P4EST
+#ifndef DEAL_II_WITH_MPI
return false;
#else
- return this->subdomain_id() == numbers::artificial_subdomain_id;
+
+ const parallel::Triangulation<dim,spacedim> *pt
+ = dynamic_cast<const parallel::Triangulation<dim,spacedim> *>(this->tria);
+
+ if (pt == 0)
+ return false;
+ else
+ return this->subdomain_id() == numbers::artificial_subdomain_id;
+
#endif
}
grid_refinement.cc
solution_transfer.cc
tria.cc
+ tria_base.cc
+ shared_tria.cc
)
SET(_inst
grid_refinement.inst.in
solution_transfer.inst.in
tria.inst.in
+ shared_tria.inst.in
+ tria_base.inst.in
)
FILE(GLOB _header
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: tria.cc 32807 2014-04-22 15:01:57Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+
+
+#include <algorithm>
+#include <numeric>
+#include <iostream>
+#include <fstream>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+#ifdef DEAL_II_WITH_MPI
+namespace parallel
+{
+ namespace shared
+ {
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid):
+ dealii::parallel::Triangulation<dim,spacedim>(mpi_communicator,smooth_grid,false)
+ {
+ }
+
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::~Triangulation ()
+ {
+
+ }
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::execute_coarsening_and_refinement ()
+ {
+ dealii::Triangulation<dim,spacedim>::execute_coarsening_and_refinement ();
+ dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+ this->update_number_cache ();
+ }
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::create_triangulation (const std::vector< Point< spacedim > > &vertices,
+ const std::vector< CellData< dim > > &cells,
+ const SubCellData &subcelldata)
+ {
+ try
+ {
+ dealii::Triangulation<dim,spacedim>::
+ create_triangulation (vertices, cells, subcelldata);
+ }
+ catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
+ {
+ // the underlying triangulation should not be checking for distorted
+ // cells
+ AssertThrow (false, ExcInternalError());
+ }
+ dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+ this->update_number_cache ();
+ }
+
+ }
+}
+
+#else
+
+namespace parallel
+{
+ namespace shared
+ {
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::Triangulation ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::~Triangulation ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+ template <int dim, int spacedim>
+ types::subdomain_id
+ Triangulation<dim,spacedim>::locally_owned_subdomain () const
+ {
+ Assert (false, ExcNotImplemented());
+ return 0;
+ }
+
+ }
+}
+
+
+#endif
+
+
+/*-------------- Explicit Instantiations -------------------------------*/
+#include "shared_tria.inst"
+
+DEAL_II_NAMESPACE_CLOSE
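To make the effect of the GridTools::partition_triangulation() calls above concrete, here is a sketch that is not part of the patch: after creation or refinement every active cell carries a subdomain id, and counting the cells with this process' id should reproduce n_locally_owned_active_cells(). The helper name count_owned_cells is made up for the example.

#include <deal.II/distributed/shared_tria.h>

// Count active cells whose subdomain id equals the id of this process;
// 'tria' is assumed to have been created or refined as in the code above.
template <int dim>
unsigned int count_owned_cells (const dealii::parallel::shared::Triangulation<dim> &tria)
{
  unsigned int n_owned = 0;
  for (typename dealii::Triangulation<dim>::active_cell_iterator
         cell = tria.begin_active (); cell != tria.end (); ++cell)
    if (cell->subdomain_id () == tria.locally_owned_subdomain ())
      ++n_owned;

  // this should agree with tria.n_locally_owned_active_cells()
  return n_owned;
}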
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $
+//
+// Copyright (C) 2010 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (deal_II_dimension : DIMENSIONS)
+ {
+ namespace parallel
+ \{
+ namespace shared
+ \{
+ template class Triangulation<deal_II_dimension>;
+# if deal_II_dimension < 3
+ template class Triangulation<deal_II_dimension, deal_II_dimension+1>;
+# endif
+# if deal_II_dimension < 2
+ template class Triangulation<deal_II_dimension, deal_II_dimension+2>;
+# endif
+ \}
+ \}
+
+ }
+
/* ---------------------- class Triangulation<dim,spacedim> ------------------------------ */
-
- template <int dim, int spacedim>
- Triangulation<dim,spacedim>::NumberCache::NumberCache()
- :
- n_global_active_cells(0),
- n_global_levels(0)
- {}
-
-
-
template <int dim, int spacedim>
Triangulation<dim,spacedim>::
Triangulation (MPI_Comm mpi_communicator,
const Settings settings_)
:
// do not check for distorted cells
- dealii::Triangulation<dim,spacedim>
- (smooth_grid,
+ dealii::parallel::Triangulation<dim,spacedim>
+ (mpi_communicator,
+ smooth_grid,
false),
- mpi_communicator (Utilities::MPI::
- duplicate_communicator(mpi_communicator)),
settings(settings_),
- my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
triangulation_has_content (false),
connectivity (0),
parallel_forest (0),
dealii::internal::p4est::InitFinalize::do_initialize ();
parallel_ghost = 0;
-
- number_cache.n_locally_owned_active_cells
- .resize (Utilities::MPI::n_mpi_processes (mpi_communicator));
}
Assert (connectivity == 0, ExcInternalError());
Assert (parallel_forest == 0, ExcInternalError());
Assert (refinement_in_progress == false, ExcInternalError());
-
- // get rid of the unique communicator used here again
- MPI_Comm_free (&mpi_communicator);
}
AssertThrow (false, ExcInternalError());
}
- update_number_cache ();
+ this->update_number_cache ();
}
dealii::Triangulation<dim,spacedim>::clear ();
- update_number_cache ();
+ this->update_number_cache ();
}
template <int dim, int spacedim>
bool
Triangulation<dim,spacedim>::has_hanging_nodes () const
{
- if (n_global_levels()<=1)
+ if (this->n_global_levels()<=1)
return false; // can not have hanging nodes without refined cells
// if there are any active cells with level less than n_global_levels()-1, then
// The problem is that we cannot just ask for the first active cell, but
// instead need to filter over locally owned cells.
bool have_coarser_cell = false;
- for (typename Triangulation<dim, spacedim>::active_cell_iterator cell = this->begin_active(n_global_levels()-2);
- cell != this->end(n_global_levels()-2);
+ for (typename Triangulation<dim, spacedim>::active_cell_iterator cell = this->begin_active(this->n_global_levels()-2);
+ cell != this->end(this->n_global_levels()-2);
++cell)
if (cell->is_locally_owned())
{
}
// return true if at least one process has a coarser cell
- return 0<Utilities::MPI::max(have_coarser_cell?1:0, mpi_communicator);
+ return 0<Utilities::MPI::max(have_coarser_cell?1:0, this->mpi_communicator);
}
Assert(this->n_cells()>0, ExcMessage("Can not save() an empty Triangulation."));
- if (my_subdomain==0)
+ if (this->my_subdomain==0)
{
std::string fname=std::string(filename)+".info";
std::ofstream f(fname.c_str());
f << "version nproc attached_bytes n_attached_objs n_coarse_cells" << std::endl
<< 2 << " "
- << Utilities::MPI::n_mpi_processes (mpi_communicator) << " "
+ << Utilities::MPI::n_mpi_processes (this->mpi_communicator) << " "
<< real_data_size << " "
<< attached_data_pack_callbacks.size() << " "
<< this->n_cells(0)
Assert(this->n_cells(0) == n_coarse_cells, ExcMessage("Number of coarse cells differ!"));
#if DEAL_II_P4EST_VERSION_GTE(0,3,4,3)
#else
- AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (mpi_communicator),
+ AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (this->mpi_communicator),
ExcMessage("parallel::distributed::Triangulation::load() only supports loading "
"saved data with a greater or equal number of processes than were used to "
"save() when using p4est 0.3.4.2."));
#if DEAL_II_P4EST_VERSION_GTE(0,3,4,3)
parallel_forest = dealii::internal::p4est::functions<dim>::load_ext (
- filename, mpi_communicator,
+ filename, this->mpi_communicator,
attached_size, attached_size>0,
autopartition, 0,
this,
#else
(void)autopartition;
parallel_forest = dealii::internal::p4est::functions<dim>::load (
- filename, mpi_communicator,
+ filename, this->mpi_communicator,
attached_size, attached_size>0,
this,
&connectivity);
#endif
- if (numcpus != Utilities::MPI::n_mpi_processes (mpi_communicator))
+ if (numcpus != Utilities::MPI::n_mpi_processes (this->mpi_communicator))
// We are changing the number of CPUs so we need to repartition.
// Note that p4est actually distributes the cells between the changed
// number of CPUs and so everything works without this call, but
AssertThrow (false, ExcInternalError());
}
- update_number_cache ();
+ this->update_number_cache ();
}
// now create a forest out of the connectivity data structure
parallel_forest
= dealii::internal::p4est::functions<2>::
- new_forest (mpi_communicator,
+ new_forest (this->mpi_communicator,
connectivity,
/* minimum initial number of quadrants per tree */ 0,
/* minimum level of upfront refinement */ 0,
// now create a forest out of the connectivity data structure
parallel_forest
= dealii::internal::p4est::functions<2>::
- new_forest (mpi_communicator,
+ new_forest (this->mpi_communicator,
connectivity,
/* minimum initial number of quadrants per tree */ 0,
/* minimum level of upfront refinement */ 0,
// now create a forest out of the connectivity data structure
parallel_forest
= dealii::internal::p4est::functions<3>::
- new_forest (mpi_communicator,
+ new_forest (this->mpi_communicator,
connectivity,
/* minimum initial number of quadrants per tree */ 0,
/* minimum level of upfront refinement */ 0,
match_tree_recursively<dim,spacedim> (*tree, cell,
p4est_coarse_cell,
*parallel_forest,
- my_subdomain);
+ this->my_subdomain);
}
}
cell != this->end();
++cell)
{
- if (cell->subdomain_id() != my_subdomain
+ if (cell->subdomain_id() != this->my_subdomain
&&
cell->subdomain_id() != numbers::artificial_subdomain_id)
++num_ghosts;
determine_level_subdomain_id_recursively<dim,spacedim> (*tree, tree_index, cell,
p4est_coarse_cell,
*parallel_forest,
- my_subdomain,
+ this->my_subdomain,
marked_vertices);
}
const unsigned int total_local_cells = this->n_active_cells();
(void)total_local_cells;
- if (Utilities::MPI::n_mpi_processes (mpi_communicator) == 1)
+ if (Utilities::MPI::n_mpi_processes (this->mpi_communicator) == 1)
Assert (static_cast<unsigned int>(parallel_forest->local_num_quadrants) ==
total_local_cells,
ExcInternalError())
cell = this->begin_active();
cell != this->end(); ++cell)
{
- if (cell->subdomain_id() == my_subdomain)
+ if (cell->subdomain_id() == this->my_subdomain)
++n_owned;
}
RefineAndCoarsenList<dim,spacedim>
refine_and_coarsen_list (*this,
p4est_tree_to_coarse_cell_permutation,
- my_subdomain);
+ this->my_subdomain);
// copy refine and coarsen flags into p4est and execute the refinement
// and coarsening. this uses the refine_and_coarsen_list just built,
refinement_in_progress = false;
- update_number_cache ();
+ this->update_number_cache ();
}
template <int dim, int spacedim>
PartitionWeights<dim,spacedim> partition_weights (*this,
cell_weights,
p4est_tree_to_coarse_cell_permutation,
- my_subdomain);
+ this->my_subdomain);
parallel_forest->user_pointer = &partition_weights;
dealii::internal::p4est::functions<dim>::
refinement_in_progress = false;
// update how many cells, edges, etc, we store locally
- update_number_cache ();
- }
-
-
- template <int dim, int spacedim>
- void
- Triangulation<dim,spacedim>::update_number_cache ()
- {
- Assert (number_cache.n_locally_owned_active_cells.size()
- ==
- Utilities::MPI::n_mpi_processes (mpi_communicator),
- ExcInternalError());
-
- std::fill (number_cache.n_locally_owned_active_cells.begin(),
- number_cache.n_locally_owned_active_cells.end(),
- 0);
-
- if (this->n_levels() == 0)
- {
- // Skip communication done below if we do not have any cells
- // (meaning the Triangulation is empty on all processors). This will
- // happen when called from the destructor of Triangulation, which
- // can get called during exception handling causing a hang in this
- // function.
- number_cache.n_global_active_cells = 0;
- number_cache.n_global_levels = 0;
- return;
- }
-
- if (this->n_levels() > 0)
- for (typename Triangulation<dim,spacedim>::active_cell_iterator
- cell = this->begin_active();
- cell != this->end(); ++cell)
- if (cell->subdomain_id() == my_subdomain)
- ++number_cache.n_locally_owned_active_cells[my_subdomain];
-
- unsigned int send_value
- = number_cache.n_locally_owned_active_cells[my_subdomain];
- MPI_Allgather (&send_value,
- 1,
- MPI_UNSIGNED,
- &number_cache.n_locally_owned_active_cells[0],
- 1,
- MPI_UNSIGNED,
- mpi_communicator);
-
- number_cache.n_global_active_cells
- = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
- number_cache.n_locally_owned_active_cells.end(),
- /* ensure sum is computed with correct data type:*/
- static_cast<types::global_dof_index>(0));
- number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), mpi_communicator);
+ this->update_number_cache ();
}
-
-
-
template <int dim, int spacedim>
void
Triangulation<dim,spacedim>::
ExcInternalError());
}
-
-
- template <int dim, int spacedim>
- types::subdomain_id
- Triangulation<dim,spacedim>::locally_owned_subdomain () const
- {
- Assert (dim > 1, ExcNotImplemented());
- return my_subdomain;
- }
-
-
-
- template <int dim, int spacedim>
- unsigned int
- Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
- {
- return number_cache.n_locally_owned_active_cells[my_subdomain];
- }
-
-
-
- template <int dim, int spacedim>
- types::global_dof_index
- Triangulation<dim,spacedim>::n_global_active_cells () const
- {
- return number_cache.n_global_active_cells;
- }
-
-
-
- template <int dim, int spacedim>
- unsigned int
- Triangulation<dim,spacedim>::n_global_levels () const
- {
- return number_cache.n_global_levels;
- }
-
-
-
- template <int dim, int spacedim>
- const std::vector<unsigned int> &
- Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
- {
- return number_cache.n_locally_owned_active_cells;
- }
-
-
-
template <int dim, int spacedim>
unsigned int
Triangulation<dim,spacedim>::
}
- template <int dim, int spacedim>
- MPI_Comm
- Triangulation<dim,spacedim>::get_communicator () const
- {
- return mpi_communicator;
- }
-
-
template<int dim, int spacedim>
void
Triangulation<dim,spacedim>::add_periodicity
dealii::internal::p4est::functions<dim>::destroy (parallel_forest);
parallel_forest
= dealii::internal::p4est::functions<dim>::
- new_forest (mpi_communicator,
+ new_forest (this->mpi_communicator,
connectivity,
/* minimum initial number of quadrants per tree */ 0,
/* minimum level of upfront refinement */ 0,
Triangulation<dim,spacedim>::memory_consumption () const
{
std::size_t mem=
- this->dealii::Triangulation<dim,spacedim>::memory_consumption()
- + MemoryConsumption::memory_consumption(mpi_communicator)
- + MemoryConsumption::memory_consumption(my_subdomain)
+ this->dealii::parallel::Triangulation<dim,spacedim>::memory_consumption()
+ MemoryConsumption::memory_consumption(triangulation_has_content)
- + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
- + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
- + MemoryConsumption::memory_consumption(number_cache.n_global_levels)
+ MemoryConsumption::memory_consumption(connectivity)
+ MemoryConsumption::memory_consumption(parallel_forest)
+ MemoryConsumption::memory_consumption(refinement_in_progress)
ExcMessage ("Parallel distributed triangulations can only "
"be copied, if no refinement is in progress!"));
- mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
+ // duplicate MPI communicator, stored in the base class
+ dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (old_tria);
coarse_cell_to_p4est_tree_permutation = old_tria_x->coarse_cell_to_p4est_tree_permutation;
p4est_tree_to_coarse_cell_permutation = old_tria_x->p4est_tree_to_coarse_cell_permutation;
AssertThrow (false, ExcInternalError());
}
- update_number_cache ();
+ this->update_number_cache ();
}
template <int spacedim>
Triangulation<1,spacedim>::Triangulation (MPI_Comm)
+ :
+ dealii::parallel::Triangulation<1,spacedim>(MPI_COMM_WORLD,
+ typename dealii::Triangulation<1,spacedim>::MeshSmoothing(),
+ false)
{
Assert (false, ExcNotImplemented());
}
}
- template <int spacedim>
- types::subdomain_id
- Triangulation<1,spacedim>::locally_owned_subdomain () const
- {
- Assert (false, ExcNotImplemented());
- return 0;
- }
-
-
- template <int spacedim>
- types::global_dof_index
- Triangulation<1,spacedim>::n_global_active_cells () const
- {
- Assert (false, ExcNotImplemented());
- return 0;
- }
-
-
- template <int spacedim>
- unsigned int
- Triangulation<1,spacedim>::n_global_levels () const
- {
- Assert (false, ExcNotImplemented());
- return 0;
- }
-
-
- template <int spacedim>
- MPI_Comm
- Triangulation<1,spacedim>::get_communicator () const
- {
- return MPI_COMM_WORLD;
- }
-
template <int spacedim>
const std::vector<types::global_dof_index> &
Triangulation<1,spacedim>::get_p4est_tree_to_coarse_cell_permutation() const
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria_base.h>
+
+
+#include <algorithm>
+#include <numeric>
+#include <iostream>
+#include <fstream>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+
+#ifdef DEAL_II_WITH_MPI
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
+ const bool check_for_distorted_cells)
+ :
+ dealii::Triangulation<dim,spacedim>(smooth_grid,check_for_distorted_cells),
+ mpi_communicator (Utilities::MPI::
+ duplicate_communicator(mpi_communicator)),
+ my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
+ n_subdomains(Utilities::MPI::n_mpi_processes(mpi_communicator))
+ {
+ number_cache.n_locally_owned_active_cells.resize (n_subdomains);
+ }
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+ {
+ if (const dealii::parallel::Triangulation<dim,spacedim> *
+ old_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&old_tria))
+ {
+ mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ());
+ }
+ }
+
+#else
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::Triangulation()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &old_tria)
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+
+#endif
+
+ template <int dim, int spacedim>
+ std::size_t
+ Triangulation<dim,spacedim>::memory_consumption() const
+ {
+ std::size_t mem=
+ this->dealii::Triangulation<dim,spacedim>::memory_consumption()
+ + MemoryConsumption::memory_consumption(mpi_communicator)
+ + MemoryConsumption::memory_consumption(my_subdomain)
+ + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
+ + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
+ + MemoryConsumption::memory_consumption(number_cache.n_global_levels);
+ return mem;
+
+ }
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::~Triangulation ()
+ {
+ // get rid of the unique communicator used here again
+ MPI_Comm_free (&this->mpi_communicator);
+ }
+
+ template <int dim, int spacedim>
+ Triangulation<dim,spacedim>::NumberCache::NumberCache()
+ :
+ n_global_active_cells(0),
+ n_global_levels(0)
+ {}
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
+ {
+ return number_cache.n_locally_owned_active_cells[my_subdomain];
+ }
+
+ template <int dim, int spacedim>
+ unsigned int
+ Triangulation<dim,spacedim>::n_global_levels () const
+ {
+ return number_cache.n_global_levels;
+ }
+
+ template <int dim, int spacedim>
+ types::global_dof_index
+ Triangulation<dim,spacedim>::n_global_active_cells () const
+ {
+ return number_cache.n_global_active_cells;
+ }
+
+ template <int dim, int spacedim>
+ const std::vector<unsigned int> &
+ Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
+ {
+ return number_cache.n_locally_owned_active_cells;
+ }
+
+#ifdef DEAL_II_WITH_MPI
+ template <int dim, int spacedim>
+ MPI_Comm
+ Triangulation<dim,spacedim>::get_communicator () const
+ {
+ return mpi_communicator;
+ }
+
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::update_number_cache ()
+ {
+ Assert (number_cache.n_locally_owned_active_cells.size()
+ ==
+ Utilities::MPI::n_mpi_processes (this->mpi_communicator),
+ ExcInternalError());
+
+ std::fill (number_cache.n_locally_owned_active_cells.begin(),
+ number_cache.n_locally_owned_active_cells.end(),
+ 0);
+
+ if (this->n_levels() == 0)
+ {
+ // Skip communication done below if we do not have any cells
+ // (meaning the Triangulation is empty on all processors). This will
+ // happen when called from the destructor of Triangulation, which
+ // can get called during exception handling causing a hang in this
+ // function.
+ number_cache.n_global_active_cells = 0;
+ number_cache.n_global_levels = 0;
+ return;
+ }
+
+ if (this->n_levels() > 0)
+ for (typename Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active();
+ cell != this->end(); ++cell)
+ if (cell->subdomain_id() == my_subdomain)
+ ++number_cache.n_locally_owned_active_cells[my_subdomain];
+
+ unsigned int send_value
+ = number_cache.n_locally_owned_active_cells[my_subdomain];
+ MPI_Allgather (&send_value,
+ 1,
+ MPI_UNSIGNED,
+ &number_cache.n_locally_owned_active_cells[0],
+ 1,
+ MPI_UNSIGNED,
+ this->mpi_communicator);
+
+ number_cache.n_global_active_cells
+ = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
+ number_cache.n_locally_owned_active_cells.end(),
+ /* ensure sum is computed with correct data type:*/
+ static_cast<types::global_dof_index>(0));
+ number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
+ }
+#else
+ template <int dim, int spacedim>
+ void
+ Triangulation<dim,spacedim>::update_number_cache ()
+ {
+ Assert (false, ExcNotImplemented());
+ }
+
+#endif
+
+ template <int dim, int spacedim>
+ types::subdomain_id
+ Triangulation<dim,spacedim>::locally_owned_subdomain () const
+ {
+ Assert (dim > 1, ExcNotImplemented());
+ return my_subdomain;
+ }
+
+
+}
+
+
+/*-------------- Explicit Instantiations -------------------------------*/
+#include "tria_base.inst"
+
+DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $
+//
+// Copyright (C) 2010 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (deal_II_dimension : DIMENSIONS)
+ {
+ namespace parallel
+ \{
+ template class Triangulation<deal_II_dimension>;
+# if deal_II_dimension < 3
+ template class Triangulation<deal_II_dimension, deal_II_dimension+1>;
+# endif
+# if deal_II_dimension < 2
+ template class Triangulation<deal_II_dimension, deal_II_dimension+2>;
+# endif
+ \}
+
+ }
+
policy_name = "Policy::Sequential<";
else if (dynamic_cast<const typename dealii::internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>*>(&policy))
policy_name = "Policy::ParallelDistributed<";
+ else if (dynamic_cast<const typename dealii::internal::DoFHandler::Policy::ParallelShared<dim,spacedim>*>(&policy))
+ policy_name = "Policy::ParallelShared<";
else
AssertThrow(false, ExcNotImplemented());
policy_name += Utilities::int_to_string(dim)+
// decide whether we need a
// sequential or a parallel
// distributed policy
- if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim>*>
(&tria)
- == 0)
+ != 0)
+ policy.reset (new internal::DoFHandler::Policy::ParallelShared<dim,spacedim>());
+ else if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&tria)
+ == 0)
policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
else
policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
// decide whether we need a
// sequential or a parallel
// distributed policy
- if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim>*>
(&t)
- == 0)
+ != 0)
+ policy.reset (new internal::DoFHandler::Policy::ParallelShared<dim,spacedim>());
+ else if (dynamic_cast<const parallel::distributed::Triangulation< dim, spacedim >*>
+ (&t)
+ == 0)
policy.reset (new internal::DoFHandler::Policy::Sequential<dim,spacedim>());
else
policy.reset (new internal::DoFHandler::Policy::ParallelDistributed<dim,spacedim>());
internal::DoFHandler::Implementation::reserve_space (*this);
// hand things off to the policy
- number_cache = policy->distribute_dofs (*this);
+ policy->distribute_dofs (*this,number_cache);
// initialize the block info object
// only if this is a sequential
ExcMessage ("New DoF index is not less than the total number of dofs."));
#endif
- number_cache = policy->renumber_dofs (new_numbers, *this);
+ policy->renumber_dofs (new_numbers, *this,number_cache);
}
template <int dim, int spacedim>
- NumberCache
+ void
Sequential<dim,spacedim>::
- distribute_dofs (DoFHandler<dim,spacedim> &dof_handler) const
+ distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache_current ) const
{
const types::global_dof_index n_dofs =
Implementation::distribute_dofs (0,
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
number_cache.locally_owned_dofs);
- return number_cache;
+ number_cache_current = number_cache;
}
}
template <int dim, int spacedim>
- NumberCache
+ void
Sequential<dim,spacedim>::
renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache_current) const
{
Implementation::renumber_dofs (new_numbers, IndexSet(0),
dof_handler, true);
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
number_cache.locally_owned_dofs);
- return number_cache;
+ number_cache_current = number_cache;
+ }
+
+ /* --------------------- class ParallelShared ---------------- */
+
+ template <int dim, int spacedim>
+ void
+ ParallelShared<dim,spacedim>::
+ distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const
+ {
+ Sequential<dim,spacedim>::distribute_dofs (dof_handler,number_cache);
+ DoFRenumbering::subdomain_wise (dof_handler);
+ number_cache.locally_owned_dofs_per_processor = DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ number_cache.locally_owned_dofs = number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
+ number_cache.n_locally_owned_dofs_per_processor.resize (number_cache.locally_owned_dofs_per_processor.size());
+ for (unsigned int i = 0; i < number_cache.n_locally_owned_dofs_per_processor.size(); i++)
+ number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements();
+ number_cache.n_locally_owned_dofs = number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
}
+ template <int dim, int spacedim>
+ void
+ ParallelShared<dim,spacedim>::
+ distribute_mg_dofs (DoFHandler<dim,spacedim> &dof_handler,
+ std::vector<NumberCache> &number_caches) const
+ {
+ // first, call the sequential function to distribute dofs
+ Sequential<dim,spacedim>:: distribute_mg_dofs (dof_handler, number_caches);
+ // now we need to update the number cache.
+ // This part is not yet implemented.
+ AssertThrow(false,ExcNotImplemented());
+ }
+
+ template <int dim, int spacedim>
+ void
+ ParallelShared<dim,spacedim>::
+ renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache) const
+ {
+
+#ifndef DEAL_II_WITH_MPI
+ (void)dof_handler;
+ Assert (false, ExcNotImplemented());
+
+#else
+ std::vector<types::global_dof_index> global_gathered_numbers (dof_handler.n_dofs (), 0);
+ // as we call DoFRenumbering::subdomain_wise (dof_handler) from distribute_dofs(),
+ // we need to support sequential-like input.
+ // Distributed-like input from, for example, component_wise renumbering is also supported.
+ if (new_numbers.size () == dof_handler.n_dofs ())
+ {
+ global_gathered_numbers = new_numbers;
+ }
+ else
+ {
+ Assert(new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(),
+ ExcInternalError());
+ const parallel::shared::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&dof_handler.get_tria ()));
+ Assert(tr != 0, ExcInternalError());
+ const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ());
+ const unsigned int this_process =
+ Utilities::MPI::this_mpi_process (tr->get_communicator ());
+ std::vector<types::global_dof_index> gathered_new_numbers (dof_handler.n_dofs (), 0);
+ Assert(this_process == dof_handler.get_tria ().locally_owned_subdomain (),
+ ExcInternalError())
+
+ // gather new numbers among processors into one vector
+ {
+ std::vector<types::global_dof_index> new_numbers_copy (new_numbers);
+ // displs:
+ // Entry i specifies the displacement (relative to recvbuf )
+ // at which to place the incoming data from process i
+ // rcounts:
+ // containing the number of elements that are to be received from each process
+ std::vector<int> displs(n_cpu),
+ rcounts(n_cpu);
+ types::global_dof_index shift = 0;
+ //set rcounts based on new_numbers:
+ int cur_count = new_numbers_copy.size ();
+ MPI_Allgather (&cur_count, 1, MPI_INT,
+ &rcounts[0], 1, MPI_INT,
+ tr->get_communicator ());
+
+ for (unsigned int i = 0; i < n_cpu; i++)
+ {
+ displs[i] = shift;
+ shift += rcounts[i];
+ }
+ Assert(((int)new_numbers_copy.size()) == rcounts[this_process],
+ ExcInternalError());
+ MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (),
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ &gathered_new_numbers[0], &rcounts[0],
+ &displs[0],
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ tr->get_communicator ());
+ }
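+ // at this point gathered_new_numbers holds the new indices contributed by
+ // all processes, concatenated in rank order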
+ // scatter the gathered new numbers into their global positions according
+ // to the current locally_owned_dofs_per_processor IndexSets
+ types::global_dof_index shift = 0;
+ // flag_1 and flag_2 are used to check that there is a one-to-one
+ // relation between old and new DoF indices.
+ std::vector<unsigned int> flag_1 (dof_handler.n_dofs (), 0),
+ flag_2 (dof_handler.n_dofs (), 0);
+ for (unsigned int i = 0; i < n_cpu; i++)
+ {
+ const IndexSet &iset =
+ number_cache.locally_owned_dofs_per_processor[i];
+ for (types::global_dof_index ind = 0;
+ ind < iset.n_elements (); ind++)
+ {
+ const types::global_dof_index target = iset.nth_index_in_set (ind);
+ const types::global_dof_index value = gathered_new_numbers[shift + ind];
+ Assert(target < dof_handler.n_dofs(), ExcInternalError());
+ Assert(value < dof_handler.n_dofs(), ExcInternalError());
+ global_gathered_numbers[target] = value;
+ flag_1[target]++;
+ flag_2[value]++;
+ }
+ shift += iset.n_elements ();
+ }
+
+ Assert(*std::max_element(flag_1.begin(), flag_1.end()) == 1,
+ ExcInternalError());
+ Assert(*std::min_element(flag_1.begin(), flag_1.end()) == 1,
+ ExcInternalError());
+ Assert((*std::max_element(flag_2.begin(), flag_2.end())) == 1,
+ ExcInternalError());
+ Assert((*std::min_element(flag_2.begin(), flag_2.end())) == 1,
+ ExcInternalError());
+ }
+ Sequential<dim, spacedim>::renumber_dofs (global_gathered_numbers, dof_handler, number_cache);
+ // correct number_cache:
+ number_cache.locally_owned_dofs_per_processor =
+ DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ number_cache.locally_owned_dofs =
+ number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+ // sequential renumbering returns a vector of size 1 here,
+ // correct this:
+ number_cache.n_locally_owned_dofs_per_processor.resize(number_cache.locally_owned_dofs_per_processor.size());
+ for (unsigned int i = 0;
+ i < number_cache.n_locally_owned_dofs_per_processor.size (); i++)
+ number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements ();
+
+ number_cache.n_locally_owned_dofs =
+ number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+#endif
+ }
/* --------------------- class ParallelDistributed ---------------- */
template <int dim, int spacedim>
- NumberCache
+ void
ParallelDistributed<dim, spacedim>::
- distribute_dofs (DoFHandler<dim,spacedim> &dof_handler) const
+ distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache_current) const
{
NumberCache number_cache;
#endif // DEBUG
#endif // DEAL_II_WITH_P4EST
- return number_cache;
+ number_cache_current = number_cache;
}
template <int dim, int spacedim>
- NumberCache
+ void
ParallelDistributed<dim, spacedim>::
renumber_dofs (const std::vector<dealii::types::global_dof_index> &new_numbers,
- dealii::DoFHandler<dim,spacedim> &dof_handler) const
+ dealii::DoFHandler<dim,spacedim> &dof_handler,
+ NumberCache &number_cache_current) const
{
(void)new_numbers;
(void)dof_handler;
}
#endif
- return number_cache;
+ number_cache_current = number_cache;
}
}
}
\{
template class PolicyBase<deal_II_dimension,deal_II_dimension>;
template class Sequential<deal_II_dimension,deal_II_dimension>;
+ template class ParallelShared<deal_II_dimension,deal_II_dimension>;
template class ParallelDistributed<deal_II_dimension,deal_II_dimension>;
#if deal_II_dimension==1 || deal_II_dimension==2
template class PolicyBase<deal_II_dimension,deal_II_dimension+1>;
template class Sequential<deal_II_dimension,deal_II_dimension+1>;
+ template class ParallelShared<deal_II_dimension,deal_II_dimension+1>;
template class ParallelDistributed<deal_II_dimension,deal_II_dimension+1>;
#endif
#if deal_II_dimension==3
template class PolicyBase<1,3>;
template class Sequential<1,3>;
+ template class ParallelShared<1,3>;
template class ParallelDistributed<1,3>;
#endif
\}
const unsigned int n_buckets = fe_collection.n_components();
std::vector<types::global_dof_index> shifts(n_buckets);
- if (const parallel::distributed::Triangulation<dim,spacedim> *tria
- = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
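+ // cast to parallel::Triangulation, the common base class of the shared and
+ // distributed triangulations, so that both kinds are handled here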
+ if (const parallel::Triangulation<dim,spacedim> *tria
+ = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
std::vector<types::global_dof_index> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
const unsigned int n_buckets = fe_collection.n_blocks();
std::vector<types::global_dof_index> shifts(n_buckets);
- if (const parallel::distributed::Triangulation<dim,spacedim> *tria
- = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+ if (const parallel::Triangulation<dim,spacedim> *tria
+ = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
std::vector<types::global_dof_index> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
active_fe_indices[cell->active_cell_index()] = cell->active_fe_index();
}
+ template <class DH>
+ std::vector<IndexSet>
+ locally_owned_dofs_per_subdomain (const DH &dof_handler)
+ {
+ // the following assignment involves an essentially arbitrary ("flip of a
+ // coin") choice for DoFs that could belong to more than one subdomain, so
+ // it should be computed only once and the result reused
+ std::vector< dealii::types::subdomain_id > subdomain_association (dof_handler.n_dofs ());
+ dealii::DoFTools::get_subdomain_association (dof_handler, subdomain_association);
+
+ const unsigned int n_subdomains = 1 + (*std::max_element (subdomain_association.begin (),
+ subdomain_association.end () ));
+
+ std::vector<dealii::IndexSet> index_sets (n_subdomains,dealii::IndexSet(dof_handler.n_dofs()));
+
+ // loop over subdomain_association and populate IndexSet when a
+ // change in subdomain ID is found
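+ // For example, subdomain_association = {0, 0, 1, 1, 1, 2} yields
+ // index_sets[0] = {0, 1}, index_sets[1] = {2, 3, 4}, index_sets[2] = {5}.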
+ dealii::types::global_dof_index i_min = 0;
+ dealii::types::subdomain_id this_subdomain = subdomain_association[0];
+
+ for (dealii::types::global_dof_index index = 1;
+ index < subdomain_association.size (); ++index)
+ {
+ // we found an index belonging to a different subdomain than the current one
+ if (subdomain_association[index] != this_subdomain)
+ {
+ index_sets[this_subdomain].add_range (i_min, index);
+ i_min = index;
+ this_subdomain = subdomain_association[index];
+ }
+ }
+ // the last run consists of only the very last element
+ if (i_min == subdomain_association.size () - 1)
+ {
+ index_sets[this_subdomain].add_index (i_min);
+ }
+
+ // otherwise the last run contains at least two elements
+ else
+ {
+ index_sets[this_subdomain].add_range (
+ i_min, subdomain_association.size ());
+ }
+
+ for (unsigned int i = 0; i < n_subdomains; i++)
+ index_sets[i].compress ();
+
+ return index_sets;
+ }
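+ // A minimal usage sketch (assuming DoFs have already been distributed on a
+ // partitioned triangulation; "my_subdomain" is only a placeholder name for
+ // the subdomain id of the current process):
+ //
+ //   const std::vector<IndexSet> owned =
+ //     DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ //   const IndexSet &my_dofs = owned[my_subdomain];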
template <class DH>
void
ExcDimensionMismatch(subdomain_association.size(),
dof_handler.n_dofs()));
+ Assert(dof_handler.n_dofs() > 0,
+ ExcMessage("The number of DoFs is not positive. "
+ "This can happen when the function is called before the NumberCache has been set up."));
// preset all values by an invalid value
std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(),
numbers::invalid_subdomain_id);
DoFTools::get_subdomain_association<hp::DoFHandler<deal_II_dimension> >
(const hp::DoFHandler<deal_II_dimension> &dof_handler,
std::vector<types::subdomain_id> &subdomain_association);
-
+
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<deal_II_dimension> >
+(const DoFHandler<deal_II_dimension> &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<deal_II_dimension> >
+(const hp::DoFHandler<deal_II_dimension> &dof_handler);
template
unsigned int
DoFTools::get_subdomain_association<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> >
(const hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler,
std::vector<types::subdomain_id> &subdomain_association);
+
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<deal_II_dimension,deal_II_dimension+1> >
+(const DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> >
+(const hp::DoFHandler<deal_II_dimension,deal_II_dimension+1> &dof_handler);
template
void
DoFTools::get_subdomain_association<hp::DoFHandler<1,3> >
(const hp::DoFHandler<1,3> &dof_handler,
std::vector<types::subdomain_id> &subdomain_association);
+
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<DoFHandler<1,3> >
+(const DoFHandler<1,3> &dof_handler);
+template
+std::vector<IndexSet>
+DoFTools::locally_owned_dofs_per_subdomain<hp::DoFHandler<1,3> >
+(const hp::DoFHandler<1,3> &dof_handler);
template
unsigned int
number_cache.n_global_dofs = next_free_dof;
number_cache.n_locally_owned_dofs = number_cache.n_global_dofs;
- number_cache.locally_owned_dofs
- = IndexSet (number_cache.n_global_dofs);
- number_cache.locally_owned_dofs.add_range (0,
- number_cache.n_global_dofs);
- Assert (number_cache.n_global_dofs < std::numeric_limits<unsigned int>::max (),
- ExcMessage ("Global number of degrees of freedom is too large."));
- number_cache.n_locally_owned_dofs_per_processor
- = std::vector<types::global_dof_index> (1,
- (types::global_dof_index) number_cache.n_global_dofs);
+ if (dynamic_cast<const parallel::shared::Triangulation< dim, spacedim >*>
+ (&this->get_tria())
+ == 0)
+ {
+ number_cache.locally_owned_dofs
+ = IndexSet (number_cache.n_global_dofs);
+ number_cache.locally_owned_dofs.add_range (0,
+ number_cache.n_global_dofs);
+ Assert (number_cache.n_global_dofs < std::numeric_limits<unsigned int>::max (),
+ ExcMessage ("Global number of degrees of freedom is too large."));
+ number_cache.n_locally_owned_dofs_per_processor
+ = std::vector<types::global_dof_index> (1,
+ (types::global_dof_index) number_cache.n_global_dofs);
+ }
+ else
+ {
+ AssertThrow(false, ExcNotImplemented() );
+ //number_cache.locally_owned_dofs = dealii::DoFTools::locally_owned_dofs_with_subdomain(this,tria->locally_owned_subdomain() );
+ //TODO: update n_locally_owned_dofs_per_processor as well
+ }
number_cache.locally_owned_dofs_per_processor
= std::vector<IndexSet> (1,
a-framework algorithms all-headers aniso arpack base bits build_tests
codim_one deal.II distributed_grids fe gla grid hp integrators lac lapack
manifold matrix_free metis mpi multigrid opencascade petsc serialization
- slepc trilinos umfpack
+ slepc trilinos umfpack sharedtria
)
IF(DEFINED DEAL_II_HAVE_TESTS_DIRECTORY)
# Only set up mesh_converter tests if the testsuite is set up as a
--- /dev/null
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9)
+INCLUDE(${DEAL_II_SOURCE_DIR}/tests/setup_testsubproject.cmake)
+PROJECT(testsuite CXX)
+INCLUDE(${DEAL_II_TARGET_CONFIG})
+DEAL_II_PICKUP_TESTS()
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD);
+
+ FESystem<dim> fe (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1);
+
+ DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
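+ // n_refinements is indexed by the space dimension:
+ // 3 refinement cycles in 2d, 2 in 3d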
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+ // refine one-fifth of cells randomly
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (flags[index])
+ cell->set_refine_flag();
+ ++index;
+ }
+
+ Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (!flags[index])
+ cell->set_coarsen_flag();
+ ++index;
+ }
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
+
+ deallog
+ << "n_dofs: " << dof_handler.n_dofs() << std::endl
+ << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+ deallog << "n_locally_owned_dofs_per_processor: ";
+ const std::vector<types::global_dof_index> &v = dof_handler.n_locally_owned_dofs_per_processor();
+ types::global_dof_index sum = 0;
+ for (unsigned int i=0;i<v.size();++i)
+ {
+ deallog << v[i] << " ";
+ sum += v[i];
+ }
+ deallog << " sum: " << sum << std::endl;
+
+ Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+ Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+ const unsigned int N = dof_handler.n_dofs();
+
+ Assert (dof_handler.n_locally_owned_dofs() <= N,
+ ExcInternalError());
+ Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+ dof_handler.n_locally_owned_dofs_per_processor().end(),
+ 0U) == N,
+ ExcInternalError());
+
+ IndexSet all (N);
+ for (unsigned int i=0;
+ i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+ {
+ IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+ Assert(intersect.n_elements()==0, ExcInternalError());
+ all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+ }
+
+ Assert(all == complete_index_set(N), ExcInternalError());
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 232
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 588
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 1020
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4450
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13833
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 818 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826 sum: 41826
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with renumbering
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD);
+
+ FESystem<dim> fe (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1);
+
+ DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
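+ // n_refinements is indexed by the space dimension:
+ // 3 refinement cycles in 2d, 2 in 3d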
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+ // refine one-fifth of cells randomly
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (flags[index])
+ cell->set_refine_flag();
+ ++index;
+ }
+
+ Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (!flags[index])
+ cell->set_coarsen_flag();
+ ++index;
+ }
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
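+ // component_wise renumbering hands a "distributed-like" set of new indices
+ // (one per locally owned DoF) to the policy's renumber_dofs(), exercising
+ // the gather path there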
+ DoFRenumbering::component_wise(dof_handler);
+
+ deallog
+ << "n_dofs: " << dof_handler.n_dofs() << std::endl
+ << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+ deallog << "n_locally_owned_dofs_per_processor: ";
+ const std::vector<types::global_dof_index> &v = dof_handler.n_locally_owned_dofs_per_processor();
+ types::global_dof_index sum = 0;
+ for (unsigned int i=0;i<v.size();++i)
+ {
+ deallog << v[i] << " ";
+ sum += v[i];
+ }
+ deallog << " sum: " << sum << std::endl;
+
+ Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+ Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+ const unsigned int N = dof_handler.n_dofs();
+
+ Assert (dof_handler.n_locally_owned_dofs() <= N,
+ ExcInternalError());
+ Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+ dof_handler.n_locally_owned_dofs_per_processor().end(),
+ 0U) == N,
+ ExcInternalError());
+
+ IndexSet all (N);
+ for (unsigned int i=0;
+ i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+ {
+ IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+ Assert(intersect.n_elements()==0, ExcInternalError());
+ all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+ }
+ Assert(all == complete_index_set(N), ExcInternalError());
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 232
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 588
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 1020
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4450
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13833
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 818 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826 sum: 41826
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: 3d_refinement_01.cc 31349 2013-10-20 19:07:06Z maier $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// create a shared tria mesh and refine it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/numerics/data_out.h>
+
+#include <fstream>
+
+template <int dim, int spacedim>
+void write_mesh (const parallel::shared::Triangulation<dim,spacedim> &tria,
+ const char *filename_)
+{
+ DataOut<dim> data_out;
+ data_out.attach_triangulation (tria);
+ Vector<float> subdomain (tria.n_active_cells());
+ for (unsigned int i=0; i<subdomain.size(); ++i)
+ subdomain(i) = tria.locally_owned_subdomain();
+ data_out.add_data_vector (subdomain, "subdomain");
+
+ data_out.build_patches ();
+ const std::string filename = (filename_ +
+ Utilities::int_to_string
+ (tria.locally_owned_subdomain(), 4));
+ {
+ std::ofstream output ((filename + ".vtu").c_str());
+ data_out.write_vtu (output);
+ }
+}
+
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+
+ GridGenerator::hyper_cube(tr);
+ tr.begin_active()->set_refine_flag();
+ tr.execute_coarsening_and_refinement ();
+ tr.begin_active()->set_refine_flag();
+ tr.execute_coarsening_and_refinement ();
+
+ deallog
+ << " locally_owned_subdomain(): " << tr.locally_owned_subdomain() << "\n"
+ << " n_active_cells: " << tr.n_active_cells() << "\n"
+ << " n_levels: " << tr.n_levels() << "\n"
+ << " n_global_levels: " << tr.n_global_levels() << "\n"
+ //<< " n_locally_owned_active_cells: " << tr.n_locally_owned_active_cells() << "\n"
+ //<< " n_global_active_cells: " << tr.n_global_active_cells() << "\n"
+ << std::endl;
+
+ /*deallog << "n_locally_owned_active_cells_per_processor: ";
+ std::vector<unsigned int> v = tr.n_locally_owned_active_cells_per_processor();
+ for (unsigned int i=0;i<v.size();++i)
+ deallog << v[i] << " ";
+ deallog << std::endl;*/
+
+ deallog << "subdomains: ";
+ typename parallel::shared::Triangulation<dim>::active_cell_iterator it = tr.begin_active();
+ for (; it!=tr.end(); ++it)
+ {
+ deallog << it->subdomain_id() << " ";
+ }
+ deallog << std::endl;
+
+ //write_mesh(tr, "mesh");
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
+DEAL:1:2d:: locally_owned_subdomain(): 1
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:1:3d:: locally_owned_subdomain(): 1
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
+
+DEAL:2:2d:: locally_owned_subdomain(): 2
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:2:3d:: locally_owned_subdomain(): 2
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
--- /dev/null
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 0 0 0 0 0 0
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0