/**
* Constructor.
+ *
+ * If @p allow_artificial_cells is true, this class behaves similarly
+ * to parallel::distributed::Triangulation in that there will be
+ * locally owned, ghost, and artificial cells.
+ *
+ * Otherwise, all cells that are not locally owned are considered ghost cells.
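+ *
+ * A minimal sketch of constructing such a triangulation (using standard
+ * deal.II calls, as in the tests accompanying this patch):
+ * @code
+ * parallel::shared::Triangulation<2> tria (MPI_COMM_WORLD,
+ *                                          Triangulation<2>::none,
+ *                                          true); // allow_artificial_cells
+ * GridGenerator::hyper_cube (tria);
+ * tria.refine_global (2);
+ * // cells outside the locally owned region and its ghost (halo) layer
+ * // now report cell->is_artificial() == true
+ * @endcode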
*/
Triangulation (MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing =
- (dealii::Triangulation<dim,spacedim>::none) );
+ (dealii::Triangulation<dim,spacedim>::none),
+ const bool allow_artificial_cells = false);
/**
* Destructor.
const std::vector< CellData< dim > > &cells,
const SubCellData &subcelldata);
+ /**
+ * Return a vector of length Triangulation::n_active_cells() where each
+ * element stores the subdomain id of the owner of this cell. The elements
+ * of the vector equal the subdomain ids for locally owned and ghost
+ * cells, but are also correct for artificial cells, whose subdomain_id
+ * field does not store who the owner of the cell is.
+ */
+ const std::vector<types::subdomain_id> &get_true_subdomain_ids_of_cells() const;
+
+ /**
+ * Return the value of allow_artificial_cells, i.e., whether artificial cells are allowed.
+ */
+ bool with_artificial_cells() const;
+
+ private:
+ /**
+ * A flag to decide whether or not artificial cells are allowed.
+ */
+ const bool allow_artificial_cells;
+
+ /**
+ * This function calls GridTools::partition_triangulation() and, if
+ * requested in the constructor of the class, marks artificial cells.
+ */
+ void partition();
+
+ /**
+ * A vector containing the subdomain IDs of cells as obtained by
+ * partitioning with METIS. In case allow_artificial_cells is false, this
+ * vector is consistent with the IDs stored in cell->subdomain_id() of the
+ * triangulation class. When allow_artificial_cells is true, artificial
+ * cells have cell->subdomain_id() == numbers::artificial_subdomain_id.
+ *
+ * The original partition information is stored to allow using the
+ * sequential DoF distribution and partitioning functions with
+ * semi-artificial cells.
+ */
+ std::vector<types::subdomain_id> true_subdomain_ids_of_cells;
};
}
#else
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/distributed/shared_tria.h>
+
+
#include <algorithm>
#include <numeric>
#include <iostream>
template <int dim, int spacedim>
Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
- const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid):
- dealii::parallel::Triangulation<dim,spacedim>(mpi_communicator,smooth_grid,false)
+ const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
+ const bool allow_artificial_cells):
+ dealii::parallel::Triangulation<dim,spacedim>(mpi_communicator,smooth_grid,false),
+ allow_artificial_cells(allow_artificial_cells)
{
}
+ template <int dim, int spacedim>
+ void Triangulation<dim,spacedim>::partition()
+ {
+ dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+
+ if (allow_artificial_cells)
+ {
+ true_subdomain_ids_of_cells.resize(this->n_active_cells());
+
+ // get the halo layer of (ghost) cells around the locally owned cells
+ std_cxx11::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator &)> predicate
+ = IteratorFilters::SubdomainEqualTo(this->my_subdomain);
+
+ const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator>
+ active_halo_layer_vector = GridTools::compute_active_cell_halo_layer (*this, predicate);
+ std::set<typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator>
+ active_halo_layer(active_halo_layer_vector.begin(), active_halo_layer_vector.end());
+
+ // loop over all cells and mark artificial:
+ typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
+ cell = this->begin_active(),
+ endc = this->end();
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ {
+ // store original/true subdomain ids:
+ true_subdomain_ids_of_cells[index] = cell->subdomain_id();
+
+ if (cell->is_locally_owned() == false &&
+ active_halo_layer.find(cell) == active_halo_layer.end())
+ cell->set_subdomain_id(numbers::artificial_subdomain_id);
+ }
+ }
+ }
+
+ template <int dim, int spacedim>
+ bool
+ Triangulation<dim,spacedim>::with_artificial_cells() const
+ {
+ return allow_artificial_cells;
+ }
+
+ template <int dim, int spacedim>
+ const std::vector<types::subdomain_id> &
+ Triangulation<dim,spacedim>::get_true_subdomain_ids_of_cells() const
+ {
+ return true_subdomain_ids_of_cells;
+ }
template <int dim, int spacedim>
Triangulation<dim,spacedim>::~Triangulation ()
Triangulation<dim,spacedim>::execute_coarsening_and_refinement ()
{
dealii::Triangulation<dim,spacedim>::execute_coarsening_and_refinement ();
- dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+ partition();
this->update_number_cache ();
}
// cells
AssertThrow (false, ExcInternalError());
}
- dealii::GridTools::partition_triangulation (this->n_subdomains, *this);
+ partition();
this->update_number_cache ();
}
distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
NumberCache &number_cache) const
{
+ // If the underlying shared::Tria allows artificial cells, we need to do
+ // some tricks here to make the sequential algorithms play nicely.
+ // Namely, we first restore the original partition (without artificial
+ // cells) and then turn artificial cells back on at the end of this function.
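+ // The save/restore pattern used below: remember the current (artificial)
+ // subdomain ids, write the true owner ids into the cells, run the
+ // sequential algorithms, and restore the saved ids at the end. The
+ // renumbering step overwrites the ids, so the true ids are re-applied
+ // once more in between.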
+ const parallel::shared::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&dof_handler.get_tria ()));
+ Assert(tr != 0, ExcInternalError());
+ typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.get_tria().begin_active(),
+ endc = dof_handler.get_tria().end();
+ std::vector<types::subdomain_id> current_subdomain_ids(tr->n_active_cells());
+ const std::vector<types::subdomain_id> &true_subdomain_ids = tr->get_true_subdomain_ids_of_cells();
+ if (tr->with_artificial_cells())
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ {
+ current_subdomain_ids[index] = cell->subdomain_id();
+ cell->set_subdomain_id(true_subdomain_ids[index]);
+ }
+
Sequential<dim,spacedim>::distribute_dofs (dof_handler,number_cache);
DoFRenumbering::subdomain_wise (dof_handler);
+ // DoFRenumbering will reset the subdomain ids; this is ugly, but we
+ // need to set the true ids again:
+ cell = tr->begin_active();
+ if (tr->with_artificial_cells())
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ cell->set_subdomain_id(true_subdomain_ids[index]);
+
number_cache.locally_owned_dofs_per_processor = DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
number_cache.locally_owned_dofs = number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
number_cache.n_locally_owned_dofs_per_processor.resize (number_cache.locally_owned_dofs_per_processor.size());
for (unsigned int i = 0; i < number_cache.n_locally_owned_dofs_per_processor.size(); i++)
number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements();
number_cache.n_locally_owned_dofs = number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()];
+
+ // restore current subdomain ids
+ cell = tr->begin_active();
+ if (tr->with_artificial_cells())
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ cell->set_subdomain_id(current_subdomain_ids[index]);
}
template <int dim, int spacedim>
(void)number_cache;
Assert (false, ExcNotImplemented());
#else
+ // Similarly to distribute_dofs(), we need special treatment in case
+ // artificial cells are present.
+ const parallel::shared::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&dof_handler.get_tria ()));
+ Assert(tr != 0, ExcInternalError());
+ typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
+ cell = dof_handler.get_tria().begin_active(),
+ endc = dof_handler.get_tria().end();
+ std::vector<types::subdomain_id> current_subdomain_ids(tr->n_active_cells());
+ const std::vector<types::subdomain_id> &true_subdomain_ids = tr->get_true_subdomain_ids_of_cells();
+ if (tr->with_artificial_cells())
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ {
+ current_subdomain_ids[index] = cell->subdomain_id();
+ cell->set_subdomain_id(true_subdomain_ids[index]);
+ }
+
std::vector<types::global_dof_index> global_gathered_numbers (dof_handler.n_dofs (), 0);
// as we call DoFRenumbering::subdomain_wise (dof_handler) from distribute_dofs(),
// we need to support sequential-like input.
{
Assert(new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(),
ExcInternalError());
- const parallel::shared::Triangulation<dim, spacedim> *tr =
- (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&dof_handler.get_tria ()));
- Assert(tr != 0, ExcInternalError());
const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ());
std::vector<types::global_dof_index> gathered_new_numbers (dof_handler.n_dofs (), 0);
Assert(Utilities::MPI::this_mpi_process (tr->get_communicator ()) ==
number_cache.n_locally_owned_dofs =
number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+
+ // restore the current subdomain ids, which re-marks the artificial cells
+ cell = tr->begin_active();
+ if (tr->with_artificial_cells())
+ for (unsigned int index=0; cell != endc; ++cell, ++index)
+ cell->set_subdomain_id(current_subdomain_ids[index]);
#endif
}
#include <deal.II/hp/fe_values.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/distributed/tria.h>
-
+#include <deal.II/distributed/shared_tria.h>
#include <algorithm>
#include <numeric>
Assert(dof_handler.n_dofs() > 0,
ExcMessage("Number of DoF is not positive. "
"This could happen when the function is called before NumberCache is written."));
+
+ // In case this function is executed with a parallel::shared::Triangulation
+ // with artificial cells, we need to take the "true" subdomain IDs (i.e.,
+ // the ones without artificial cells). Otherwise we can use the subdomain
+ // ids as stored in cell->subdomain_id().
+ std::vector<types::subdomain_id> cell_owners (dof_handler.get_tria().n_active_cells());
+ if (const parallel::shared::Triangulation<DH::dimension, DH::space_dimension> *tr =
+ (dynamic_cast<const parallel::shared::Triangulation<DH::dimension, DH::space_dimension>*> (&dof_handler.get_tria ())))
+ {
+ cell_owners = tr->get_true_subdomain_ids_of_cells();
+ }
+ else
+ {
+ for (typename DH::active_cell_iterator cell = dof_handler.begin_active();
+ cell != dof_handler.end(); ++cell)
+ if (cell->is_locally_owned())
+ cell_owners[cell->active_cell_index()] = cell->subdomain_id();
+ }
+
// preset all values by an invalid value
std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(),
numbers::invalid_subdomain_id);
endc = dof_handler.end();
for (; cell!=endc; ++cell)
{
- Assert (cell->is_artificial() == false,
- ExcMessage ("You can't call this function for meshes that "
- "have artificial cells."));
-
- const types::subdomain_id subdomain_id = cell->subdomain_id();
+ const types::subdomain_id subdomain_id = cell_owners[cell->active_cell_index()];
const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
local_dof_indices.resize (dofs_per_cell);
cell->get_dof_indices (local_dof_indices);
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013, 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with artificial cells
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD,
+ Triangulation<dim>::none,
+ /*artificial*/true);
+
+ FESystem<dim> fe (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1);
+
+ DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+ // refine one-fifth of cells randomly
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (flags[index])
+ cell->set_refine_flag();
+ ++index;
+ }
+
+ Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (!flags[index])
+ cell->set_coarsen_flag();
+ ++index;
+ }
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
+
+ deallog
+ << "n_dofs: " << dof_handler.n_dofs() << std::endl
+ << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+ deallog << "n_locally_owned_dofs_per_processor: ";
+ std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+ unsigned int sum = 0;
+ for (unsigned int i=0; i<v.size(); ++i)
+ {
+ deallog << v[i] << " ";
+ sum += v[i];
+ }
+ deallog << " sum: " << sum << std::endl;
+
+ Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+ Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+ const unsigned int N = dof_handler.n_dofs();
+
+ Assert (dof_handler.n_locally_owned_dofs() <= N,
+ ExcInternalError());
+ Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+ dof_handler.n_locally_owned_dofs_per_processor().end(),
+ 0U) == N,
+ ExcInternalError());
+
+ IndexSet all (N);
+ for (unsigned int i=0;
+ i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+ {
+ IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+ Assert(intersect.n_elements()==0, ExcInternalError());
+ all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+ }
+
+ Assert(all == complete_index_set(N), ExcInternalError());
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 301
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 301 261 256 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 615 561 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1011
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1011 1091 954 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4484
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4484 4322 4476 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13898
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13898 13950 13978 sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 261
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 301 261 256 sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 615
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 615 561 sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1091
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1011 1091 954 sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4322
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4484 4322 4476 sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 13950
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13898 13950 13978 sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 256
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 301 261 256 sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 561
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 615 561 sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 954
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1011 1091 954 sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4476
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4484 4322 4476 sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13978
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13898 13950 13978 sum: 41826
+
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013, 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with artificial cells and renumbering
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/intergrid_map.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim>
+ triangulation (MPI_COMM_WORLD,
+ Triangulation<dim>::none,
+ /*artificial*/true);
+
+ FESystem<dim> fe (FE_Q<dim>(3),2,
+ FE_DGQ<dim>(1),1);
+
+ DoFHandler<dim> dof_handler (triangulation);
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global (2);
+
+ const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+ for (unsigned int i=0; i<n_refinements[dim]; ++i)
+ {
+ // refine one-fifth of cells randomly
+ std::vector<bool> flags (triangulation.n_active_cells(), false);
+ for (unsigned int k=0; k<flags.size()/5 + 1; ++k)
+ flags[Testing::rand() % flags.size()] = true;
+ // make sure there's at least one that
+ // will be refined
+ flags[0] = true;
+
+ // refine triangulation
+ unsigned int index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (flags[index])
+ cell->set_refine_flag();
+ ++index;
+ }
+
+ Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+ // flag all other cells for coarsening
+ // (this should ensure that at least
+ // some of them will actually be
+ // coarsened)
+ index=0;
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ {
+ if (!flags[index])
+ cell->set_coarsen_flag();
+ ++index;
+ }
+
+ triangulation.execute_coarsening_and_refinement ();
+ dof_handler.distribute_dofs (fe);
+ DoFRenumbering::component_wise(dof_handler);
+
+ deallog
+ << "n_dofs: " << dof_handler.n_dofs() << std::endl
+ << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+ deallog << "n_locally_owned_dofs_per_processor: ";
+ std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+ unsigned int sum = 0;
+ for (unsigned int i=0; i<v.size(); ++i)
+ {
+ deallog << v[i] << " ";
+ sum += v[i];
+ }
+ deallog << " sum: " << sum << std::endl;
+
+ Assert(dof_handler.n_locally_owned_dofs() == dof_handler.n_locally_owned_dofs_per_processor()[triangulation.locally_owned_subdomain()], ExcInternalError());
+ Assert( dof_handler.n_locally_owned_dofs() == dof_handler.locally_owned_dofs().n_elements(), ExcInternalError());
+
+ const unsigned int N = dof_handler.n_dofs();
+
+ Assert (dof_handler.n_locally_owned_dofs() <= N,
+ ExcInternalError());
+ Assert (std::accumulate (dof_handler.n_locally_owned_dofs_per_processor().begin(),
+ dof_handler.n_locally_owned_dofs_per_processor().end(),
+ 0U) == N,
+ ExcInternalError());
+
+ IndexSet all (N);
+ for (unsigned int i=0;
+ i<dof_handler.locally_owned_dofs_per_processor().size(); ++i)
+ {
+ IndexSet intersect = all & dof_handler.locally_owned_dofs_per_processor()[i];
+ Assert(intersect.n_elements()==0, ExcInternalError());
+ all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+ }
+
+ Assert(all == complete_index_set(N), ExcInternalError());
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+
+DEAL:2:2d::n_dofs: 818
+DEAL:2:2d::n_locally_owned_dofs: 232
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:2:2d::n_dofs: 1754
+DEAL:2:2d::n_locally_owned_dofs: 588
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:2:2d::n_dofs: 3056
+DEAL:2:2d::n_locally_owned_dofs: 1020
+DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:2:3d::n_dofs: 13282
+DEAL:2:3d::n_locally_owned_dofs: 4450
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:2:3d::n_dofs: 41826
+DEAL:2:3d::n_locally_owned_dofs: 13833
+DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id: 3d_refinement_01.cc 31349 2013-10-20 19:07:06Z maier $
+//
+// Copyright (C) 2008 - 2013, 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// create a shared tria mesh with artificial cells and refine it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/numerics/data_out.h>
+
+#include <fstream>
+
+template <int dim, int spacedim>
+void write_mesh (const parallel::shared::Triangulation<dim,spacedim> &tria,
+ const char *filename_)
+{
+ DataOut<dim> data_out;
+ data_out.attach_triangulation (tria);
+ Vector<float> subdomain (tria.n_active_cells());
+ typename parallel::shared::Triangulation<dim>::active_cell_iterator it=tria.begin_active();
+ for (unsigned int i=0; it!=tria.end(); ++it,++i)
+ subdomain(i) = it->subdomain_id();
+
+ data_out.add_data_vector (subdomain, "subdomain");
+
+ data_out.build_patches ();
+ const std::string filename = (filename_ +
+ Utilities::int_to_string
+ (tria.locally_owned_subdomain(), 4));
+ {
+ std::ofstream output ((filename + ".vtu").c_str());
+ data_out.write_vtu (output);
+ }
+}
+
+
+
+template<int dim>
+void test()
+{
+ parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD,
+ Triangulation<dim>::none,
+ /*artificial*/true);
+
+
+ GridGenerator::hyper_cube(tr);
+ tr.refine_global();
+ tr.begin_active()->set_refine_flag();
+ tr.execute_coarsening_and_refinement ();
+ tr.begin_active()->set_refine_flag();
+ tr.execute_coarsening_and_refinement ();
+
+ deallog
+ << " locally_owned_subdomain(): " << tr.locally_owned_subdomain() << "\n"
+ << " n_active_cells: " << tr.n_active_cells() << "\n"
+ << " n_levels: " << tr.n_levels() << "\n"
+ << " n_global_levels: " << tr.n_global_levels() << "\n"
+ //<< " n_locally_owned_active_cells: " << tr.n_locally_owned_active_cells() << "\n"
+ //<< " n_global_active_cells: " << tr.n_global_active_cells() << "\n"
+ << std::endl;
+
+ /*deallog << "n_locally_owned_active_cells_per_processor: ";
+ std::vector<unsigned int> v = tr.n_locally_owned_active_cells_per_processor();
+ for (unsigned int i=0;i<v.size();++i)
+ deallog << v[i] << " ";
+ deallog << std::endl;*/
+
+ deallog << "subdomains: ";
+ typename parallel::shared::Triangulation<dim>::active_cell_iterator it=tr.begin_active();
+ for (; it!=tr.end(); ++it)
+ {
+ deallog << (int) it->subdomain_id() << " ";
+ }
+ deallog << std::endl;
+
+ const std::string filename = ("mesh_" +
+ Utilities::int_to_string(dim)+
+ "D_");
+ write_mesh(tr, filename.c_str());
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 10
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 1 1 0 0 0 0 2 -2 1 -2
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 22
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 2 0 2 0 2 0 -2 1 -2 2 -2 1 -2 2 1 1 1 0 1 0 0 0
+
+DEAL:1:2d:: locally_owned_subdomain(): 1
+ n_active_cells: 10
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:2d::subdomains: 1 1 -2 0 0 0 2 2 1 2
+DEAL:1:3d:: locally_owned_subdomain(): 1
+ n_active_cells: 22
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:3d::subdomains: 2 0 2 0 -2 -2 1 1 2 2 2 1 2 2 1 1 1 0 1 0 0 0
+
+
+DEAL:2:2d:: locally_owned_subdomain(): 2
+ n_active_cells: 10
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:2d::subdomains: -2 1 -2 0 -2 0 2 2 1 2
+DEAL:2:3d:: locally_owned_subdomain(): 2
+ n_active_cells: 22
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:3d::subdomains: 2 0 2 0 2 0 1 1 2 2 2 1 2 2 1 -2 1 -2 1 -2 0 -2
+