= (dynamic_cast<const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension> *>
(&dof_handler.get_triangulation()) == 0
?
- 1
+ (1+*std::max_element (subdomain_association.begin (),
+ subdomain_association.end ()))
:
Utilities::MPI::n_mpi_processes
(dynamic_cast<const parallel::Triangulation<DoFHandlerType::dimension,DoFHandlerType::space_dimension> *>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// Test DoFTools::locally_owned_dofs_per_subdomain for a standard (serial)
+// Triangulation whose cells have been assigned subdomain ids via
+// GridTools::partition_triangulation
+
+#include "../tests.h"
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/grid/grid_tools.h>
+
+
+template <int dim>
+void test ()
+{
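+  // Set up a coarse serial mesh on [-1,1]^dim and refine it once.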
+  Triangulation<dim> triangulation;
+  GridGenerator::hyper_cube(triangulation, -1.0, 1.0);
+  triangulation.refine_global(1);
+
+  const unsigned int nproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
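+  // Partition the serial triangulation into one subdomain per MPI rank;
+  // each active cell receives a subdomain id in [0,nproc).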
+  GridTools::partition_triangulation (nproc,
+                                      triangulation);
+
+  FE_Q<dim> fe(1);
+  DoFHandler<dim> dof_handler (triangulation);
+  dof_handler.distribute_dofs(fe);
+
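+  // For every subdomain, gather the set of DoF indices owned by that
+  // subdomain; the result is one IndexSet per subdomain id.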
+  std::vector<IndexSet> locally_owned_dofs_per_proc =
+    DoFTools::locally_owned_dofs_per_subdomain(dof_handler);
+
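+  // Print each subdomain's locally owned index set to the log file.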
+  for (unsigned int p=0; p<nproc; ++p)
+    {
+      deallog << "proc " << p << ": " << std::endl;
+      locally_owned_dofs_per_proc[p].print(deallog.get_file_stream());
+    }
+}
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
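+  // Generate one log file per MPI process.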
+  MPILogInitAll all;
+
+  test<2>();
+}