--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// this test does not use any cell weights, but the following ones will
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
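+  // construct the triangulation with the no_automatic_repartitioning
+  // flag so that the partition only changes when repartition() is
+  // called explicitly below, not as a side effect of refinement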
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
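+  // one global refinement yields a 32x32 mesh in 2d (32x32x32 in 3d),
+  // i.e., 1024 or 32768 active cells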
+ tr.refine_global(1);
+
+ // repartition the mesh; since no weights are attached here, the
+ // function will partition the mesh so that the number of cells is
+  // (roughly) equal among all processors
+ tr.repartition ();
+
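+  // only rank 0 writes to the log, but the vector returned by
+  // n_locally_owned_active_cells_per_processor() has one entry for
+  // every MPI rank, so rank 0 alone can report the full distribution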
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 256 locally owned active cells
+DEAL:2d::processor 1: 256 locally owned active cells
+DEAL:2d::processor 2: 256 locally owned active cells
+DEAL:2d::processor 3: 256 locally owned active cells
+DEAL:3d::processor 0: 8192 locally owned active cells
+DEAL:3d::processor 1: 8192 locally owned active cells
+DEAL:3d::processor 2: 8192 locally owned active cells
+DEAL:3d::processor 3: 8192 locally owned active cells
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// like _01, but first partition it with one set of weights, and then
+// partition it again with all equal weights. this should yield the
+// same mesh as if there had been no weights at all to begin with
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
+ tr.refine_global(1);
+
+ // repartition the mesh as described above, first in some arbitrary
+ // way, and then with all equal weights
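+  // (the weights used in the first step are arbitrary; they only need
+  // to be non-uniform so that the first repartition() call actually
+  // moves cells around)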
+ {
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (unsigned int i=0; i<weights.size(); ++i)
+ weights[i] = i+1;
+ tr.repartition (weights);
+ }
+ {
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (unsigned int i=0; i<weights.size(); ++i)
+ weights[i] = 1;
+ tr.repartition (weights);
+ }
+
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 256 locally owned active cells
+DEAL:2d::processor 1: 256 locally owned active cells
+DEAL:2d::processor 2: 256 locally owned active cells
+DEAL:2d::processor 3: 256 locally owned active cells
+DEAL:3d::processor 0: 8192 locally owned active cells
+DEAL:3d::processor 1: 8192 locally owned active cells
+DEAL:3d::processor 2: 8192 locally owned active cells
+DEAL:3d::processor 3: 8192 locally owned active cells
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// like _01_cell_weights_01, but upon the second repartitioning,
+// simply don't attach any weights at all
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
+ tr.refine_global(1);
+
+ // repartition the mesh as described above, first in some arbitrary
+ // way, and then with no weights
+ {
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (unsigned int i=0; i<weights.size(); ++i)
+ weights[i] = i+1;
+ tr.repartition (weights);
+ }
+ {
+ tr.repartition ();
+ }
+
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 256 locally owned active cells
+DEAL:2d::processor 1: 256 locally owned active cells
+DEAL:2d::processor 2: 256 locally owned active cells
+DEAL:2d::processor 3: 256 locally owned active cells
+DEAL:3d::processor 0: 8192 locally owned active cells
+DEAL:3d::processor 1: 8192 locally owned active cells
+DEAL:3d::processor 2: 8192 locally owned active cells
+DEAL:3d::processor 3: 8192 locally owned active cells
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// this test uses the same weight on every cell, so partitioning
+// should still be equal
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
+ tr.refine_global(1);
+
+ // repartition the mesh; attach equal weights to all cells
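+  // (the value 100 is arbitrary: only the relative weights matter, so a
+  // constant weight should reproduce the unweighted partition)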
+ const std::vector<unsigned int> weights (tr.n_active_cells(), 100U);
+ tr.repartition (weights);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 256 locally owned active cells
+DEAL:2d::processor 1: 256 locally owned active cells
+DEAL:2d::processor 2: 256 locally owned active cells
+DEAL:2d::processor 3: 256 locally owned active cells
+DEAL:3d::processor 0: 8192 locally owned active cells
+DEAL:3d::processor 1: 8192 locally owned active cells
+DEAL:3d::processor 2: 8192 locally owned active cells
+DEAL:3d::processor 3: 8192 locally owned active cells
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// this test uses different weights on different cells, so the
+// partition should yield unequal numbers of cells per processor
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
+ tr.refine_global(1);
+
+ // repartition the mesh; attach different weights to all cells
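+  // cells whose center has x >= 0.5 and y >= 0.5 (the top right
+  // quadrant in 2d) get weight 4; all other cells get weight 1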
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ weights[cell->active_cell_index()]
+ = (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 1
+ :
+ 4);
+ tr.repartition (weights);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+
+ // let each processor sum up its weights
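+  // the weight totals should come out roughly equal across processors
+  // even though the numbers of locally owned cells differ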
+ std::vector<double> integrated_weights (numproc, 0.0);
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ if (cell->is_locally_owned())
+ integrated_weights[myid]
+ += (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 1
+ :
+ 4);
+ Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << integrated_weights[p]
+ << " weight"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 360 locally owned active cells
+DEAL:2d::processor 1: 356 locally owned active cells
+DEAL:2d::processor 2: 128 locally owned active cells
+DEAL:2d::processor 3: 92 locally owned active cells
+DEAL:2d::processor 4: 88 locally owned active cells
+DEAL:2d::processor 0: 360.000 weight
+DEAL:2d::processor 1: 356.000 weight
+DEAL:2d::processor 2: 356.000 weight
+DEAL:2d::processor 3: 368.000 weight
+DEAL:2d::processor 4: 352.000 weight
+DEAL:3d::processor 0: 11472 locally owned active cells
+DEAL:3d::processor 1: 3480 locally owned active cells
+DEAL:3d::processor 2: 7168 locally owned active cells
+DEAL:3d::processor 3: 7784 locally owned active cells
+DEAL:3d::processor 4: 2864 locally owned active cells
+DEAL:3d::processor 0: 11472.0 weight
+DEAL:3d::processor 1: 11472.0 weight
+DEAL:3d::processor 2: 11464.0 weight
+DEAL:3d::processor 3: 11480.0 weight
+DEAL:3d::processor 4: 11456.0 weight
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, refine it once, and partition it
+//
+// like _03, but with a larger spread of weights
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+ GridGenerator::subdivided_hyper_cube(tr, 16);
+ tr.refine_global(1);
+
+ // repartition the mesh; attach different weights to all cells
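+  // same weighting pattern as in _03 (based on whether the cell center
+  // has x >= 0.5 and y >= 0.5), but with a 1:1000 ratio instead of 1:4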
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ weights[cell->active_cell_index()]
+ = (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 1
+ :
+ 1000);
+ tr.repartition (weights);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+
+ // let each processor sum up its weights
+ std::vector<double> integrated_weights (numproc, 0.0);
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ if (cell->is_locally_owned())
+ integrated_weights[myid]
+ += (cell->center()[0] < 0.5
+ ||
+ cell->center()[1] < 0.5
+ ?
+ 1
+ :
+ 1000);
+ Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << integrated_weights[p]
+ << " weight"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 820 locally owned active cells
+DEAL:2d::processor 1: 52 locally owned active cells
+DEAL:2d::processor 2: 52 locally owned active cells
+DEAL:2d::processor 3: 48 locally owned active cells
+DEAL:2d::processor 4: 52 locally owned active cells
+DEAL:2d::processor 0: 52768.0 weight
+DEAL:2d::processor 1: 52000.0 weight
+DEAL:2d::processor 2: 52000.0 weight
+DEAL:2d::processor 3: 48000.0 weight
+DEAL:2d::processor 4: 52000.0 weight
+DEAL:3d::processor 0: 13920 locally owned active cells
+DEAL:3d::processor 1: 1640 locally owned active cells
+DEAL:3d::processor 2: 13920 locally owned active cells
+DEAL:3d::processor 3: 1648 locally owned active cells
+DEAL:3d::processor 4: 1640 locally owned active cells
+DEAL:3d::processor 0: 1.64429e+06 weight
+DEAL:3d::processor 1: 1.64000e+06 weight
+DEAL:3d::processor 2: 1.64429e+06 weight
+DEAL:3d::processor 3: 1.64800e+06 weight
+DEAL:3d::processor 4: 1.64000e+06 weight
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 coarse mesh, and partition it
+//
+// like _03, but with a spread of weights that ensures that one
+// processor should end up with only a single cell. we do this by giving all
+// cells a weight of one, except for one cell that has a weight equal
+// to the total number of cells in the mesh (i.e., a weight one larger
+// than that of all other cells together).
+//
+// p4est does not seem to partition this correctly right now. it
+// should have one processor that stores exactly one cell, and the
+// remaining cells should be split equally among the other processors;
+// at present, however, the other processors do not split the
+// remaining cells even close to equally
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+  // create a 16x16 or 16x16x16 mesh where each cell has size 1x1 or 1x1x1
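+  // (unlike the previous tests, the mesh is not refined further, so it
+  // has 256 active cells in 2d and 4096 in 3d)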
+ GridGenerator::subdivided_hyper_cube(tr, 16, 0, 16);
+
+
+ // repartition the mesh; attach different weights to all cells
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ weights[cell->active_cell_index()]
+ = (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ // one cell has more weight than all others together
+ tr.n_global_active_cells()
+ :
+ 1);
+ tr.repartition (weights);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+
+ // let each processor sum up its weights
+ std::vector<double> integrated_weights (numproc, 0.0);
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ if (cell->is_locally_owned())
+ integrated_weights[myid]
+ += (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ tr.n_global_active_cells()
+ :
+ 1);
+ Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << integrated_weights[p]
+ << " weight"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 1 locally owned active cells
+DEAL:2d::processor 1: 84 locally owned active cells
+DEAL:2d::processor 2: 171 locally owned active cells
+DEAL:2d::processor 0: 256.000 weight
+DEAL:2d::processor 1: 84.0000 weight
+DEAL:2d::processor 2: 171.000 weight
+DEAL:3d::processor 0: 1 locally owned active cells
+DEAL:3d::processor 1: 1364 locally owned active cells
+DEAL:3d::processor 2: 2731 locally owned active cells
+DEAL:3d::processor 0: 4096.00 weight
+DEAL:3d::processor 1: 1364.00 weight
+DEAL:3d::processor 2: 2731.00 weight
--- /dev/null
+
+DEAL:2d::processor 0: 1 locally owned active cells
+DEAL:2d::processor 1: 0 locally owned active cells
+DEAL:2d::processor 2: 50 locally owned active cells
+DEAL:2d::processor 3: 102 locally owned active cells
+DEAL:2d::processor 4: 103 locally owned active cells
+DEAL:2d::processor 0: 256.000 weight
+DEAL:2d::processor 1: 0 weight
+DEAL:2d::processor 2: 50.0000 weight
+DEAL:2d::processor 3: 102.000 weight
+DEAL:2d::processor 4: 103.000 weight
+DEAL:3d::processor 0: 1 locally owned active cells
+DEAL:3d::processor 1: 0 locally owned active cells
+DEAL:3d::processor 2: 818 locally owned active cells
+DEAL:3d::processor 3: 1638 locally owned active cells
+DEAL:3d::processor 4: 1639 locally owned active cells
+DEAL:3d::processor 0: 4096.00 weight
+DEAL:3d::processor 1: 0 weight
+DEAL:3d::processor 2: 818.000 weight
+DEAL:3d::processor 3: 1638.00 weight
+DEAL:3d::processor 4: 1639.00 weight
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// just create a 16x16 mesh, and partition it
+//
+// like _05, but create the 16x16 mesh by starting with an 8x8 mesh
+// and refining it once. p4est then can't store just one cell on that
+// one processor: to make sure that local coarsening can work, it needs
+// to keep all 4 siblings (8 in 3d) on one processor, and partition
+// the rest equally among all other processors
+//
+// this also doesn't work correctly right now: with 3 processors, it
+// stores 0/84/172 cells in 2d, rather than the expected 4/126/126
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+template<int dim>
+void test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ parallel::distributed::Triangulation<dim> tr(MPI_COMM_WORLD,
+ dealii::Triangulation<dim>::none,
+ parallel::distributed::Triangulation<dim>::no_automatic_repartitioning);
+
+  // create a 16x16 or 16x16x16 mesh where each cell has size 1x1 or 1x1x1
+ GridGenerator::subdivided_hyper_cube(tr, 8, 0, 16);
+ tr.refine_global (1);
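+  // after this refinement, every active cell is one of 2^dim siblings
+  // from the same coarse cell, so p4est has to keep these sibling groups
+  // together to allow local coarsening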
+
+ // repartition the mesh; attach different weights to all cells
+ std::vector<unsigned int> weights (tr.n_active_cells());
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ weights[cell->active_cell_index()]
+ = (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ // one cell has more weight than all others together
+ tr.n_global_active_cells()
+ :
+ 1);
+ tr.repartition (weights);
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << tr.n_locally_owned_active_cells_per_processor ()[p]
+ << " locally owned active cells"
+ << std::endl;
+
+ // let each processor sum up its weights
+ std::vector<double> integrated_weights (numproc, 0.0);
+ for (typename Triangulation<dim>::active_cell_iterator
+ cell = tr.begin_active(); cell != tr.end(); ++cell)
+ if (cell->is_locally_owned())
+ integrated_weights[myid]
+ += (
+ // bottom left corner
+ (cell->center()[0] < 1)
+ &&
+ (cell->center()[1] < 1)
+ &&
+ (dim == 3 ?
+ (cell->center()[2] < 1) :
+ true)
+ ?
+ tr.n_global_active_cells()
+ :
+ 1);
+ Utilities::MPI::sum (integrated_weights, MPI_COMM_WORLD, integrated_weights);
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ for (unsigned int p=0; p<numproc; ++p)
+ deallog << "processor " << p
+ << ": "
+ << integrated_weights[p]
+ << " weight"
+ << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("2d");
+ test<2>();
+ deallog.pop();
+ deallog.push("3d");
+ test<3>();
+ deallog.pop();
+ }
+ else
+ {
+ test<2>();
+ test<3>();
+ }
+}
--- /dev/null
+
+DEAL:2d::processor 0: 0 locally owned active cells
+DEAL:2d::processor 1: 84 locally owned active cells
+DEAL:2d::processor 2: 172 locally owned active cells
+DEAL:2d::processor 0: 0 weight
+DEAL:2d::processor 1: 339.000 weight
+DEAL:2d::processor 2: 172.000 weight
+DEAL:3d::processor 0: 0 locally owned active cells
+DEAL:3d::processor 1: 1368 locally owned active cells
+DEAL:3d::processor 2: 2728 locally owned active cells
+DEAL:3d::processor 0: 0 weight
+DEAL:3d::processor 1: 5463.00 weight
+DEAL:3d::processor 2: 2728.00 weight