From: Wolfgang Bangerth
Date: Wed, 5 Sep 2018 22:36:19 +0000 (-0600)
Subject: Add another test for hp parallel DoFs.
X-Git-Tag: v9.1.0-rc1~736^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F7151%2Fhead;p=dealii.git

Add another test for hp parallel DoFs.
---

diff --git a/tests/mpi/hp_unify_dof_indices_08.cc b/tests/mpi/hp_unify_dof_indices_08.cc
new file mode 100644
index 0000000000..dd02c90afe
--- /dev/null
+++ b/tests/mpi/hp_unify_dof_indices_08.cc
@@ -0,0 +1,122 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Read in a large grid from a file and distribute hp DoFs on it using
+// FE_Q elements of different orders on different cells. The
+// active_fe_index on each cell is determined in a mostly random way,
+// but so that it is the same regardless of the number of processors.
+//
+// We used to treat hp DoF unification on vertices and faces
+// differently depending on whether we are in the interior of a
+// subdomain or at a processor boundary. But later versions of the
+// code did away with this distinction, and now the total number of
+// DoFs must be the same regardless of the number of subdomains.
+//
+// This test checks this on a large 2d mesh (~30k cells) and a large
+// 3d mesh (~14k cells).
+
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_in.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/dof_handler.h>
+#include <deal.II/hp/fe_collection.h>
+
+#include <fstream>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim> triangulation(
+    MPI_COMM_WORLD, Triangulation<dim>::limit_level_difference_at_vertices);
+
+  // First, read a complicated mesh
+  GridIn<dim> gi;
+  gi.attach_triangulation(triangulation);
+  if (dim == 2)
+    {
+      std::ifstream in(SOURCE_DIR
+                       "/../grid/grid_in_02/2d.xda"); // ~29k 2d cells
+      gi.read_xda(in);
+    }
+  else
+    {
+      std::ifstream in(SOURCE_DIR "/../grid/grid_in_3d/4.in"); // ~14k 3d cells
+      gi.read_xda(in);
+    }
+
+  // Then build a collection of FE_Q objects; to make things a bit
+  // more interesting, duplicate each element twice in the collection
+  // so that different active_fe_index values may correspond to
+  // different FE objects but the same underlying FE
+  hp::FECollection<dim> fe;
+  for (unsigned int i = 0; i < 2; ++i)
+    for (unsigned int p = 1; p <= 5; ++p)
+      fe.push_back(FE_Q<dim>(p));
+
+
+  // Then more or less randomly assign elements to cells. We use a
+  // coarse mesh, so the cell->active_cell_index() is globally unique
+  // regardless of the number of processors involved, and we can use
+  // that to build a hash value from it that is then used to assign an
+  // active_fe_index
+  hp::DoFHandler<dim> dof_handler(triangulation);
+  for (auto cell : dof_handler.active_cell_iterators())
+    if (cell->is_locally_owned())
+      cell->set_active_fe_index(
+        (cell->active_cell_index() +
+         13 * cell->active_cell_index() * cell->active_cell_index()) %
+        fe.size());
+  dof_handler.distribute_dofs(fe);
+
+  deallog << "Processor: " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)
+          << std::endl;
+  deallog << " n_globally_active_cells: "
+          << triangulation.n_global_active_cells() << std::endl;
+  deallog << " n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs()
+          << std::endl;
+  deallog << " n_global_dofs: " << dof_handler.n_dofs() << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/mpi/hp_unify_dof_indices_08.mpirun=1.output b/tests/mpi/hp_unify_dof_indices_08.mpirun=1.output
new file mode 100644
index 0000000000..250ac01364
--- /dev/null
+++ b/tests/mpi/hp_unify_dof_indices_08.mpirun=1.output
@@ -0,0 +1,9 @@
+
+DEAL:0:2d::Processor: 0
+DEAL:0:2d:: n_globally_active_cells: 29035
+DEAL:0:2d:: n_locally_owned_dofs: 485573
+DEAL:0:2d:: n_global_dofs: 485573
+DEAL:0:3d::Processor: 0
+DEAL:0:3d:: n_globally_active_cells: 13824
+DEAL:0:3d:: n_locally_owned_dofs: 1125219
+DEAL:0:3d:: n_global_dofs: 1125219
diff --git a/tests/mpi/hp_unify_dof_indices_08.mpirun=2.output b/tests/mpi/hp_unify_dof_indices_08.mpirun=2.output
new file mode 100644
index 0000000000..a9b832f5a0
--- /dev/null
+++ b/tests/mpi/hp_unify_dof_indices_08.mpirun=2.output
@@ -0,0 +1,19 @@
+
+DEAL:0:2d::Processor: 0
+DEAL:0:2d:: n_globally_active_cells: 29035
+DEAL:0:2d:: n_locally_owned_dofs: 242867
+DEAL:0:2d:: n_global_dofs: 485573
+DEAL:0:3d::Processor: 0
+DEAL:0:3d:: n_globally_active_cells: 13824
+DEAL:0:3d:: n_locally_owned_dofs: 567265
+DEAL:0:3d:: n_global_dofs: 1125219
+
+DEAL:1:2d::Processor: 1
+DEAL:1:2d:: n_globally_active_cells: 29035
+DEAL:1:2d:: n_locally_owned_dofs: 242706
+DEAL:1:2d:: n_global_dofs: 485573
+DEAL:1:3d::Processor: 1
+DEAL:1:3d:: n_globally_active_cells: 13824
+DEAL:1:3d:: n_locally_owned_dofs: 557954
+DEAL:1:3d:: n_global_dofs: 1125219
+
diff --git a/tests/mpi/hp_unify_dof_indices_08.mpirun=7.output b/tests/mpi/hp_unify_dof_indices_08.mpirun=7.output
new file mode 100644
index 0000000000..6f3f92110a
--- /dev/null
+++ b/tests/mpi/hp_unify_dof_indices_08.mpirun=7.output
@@ -0,0 +1,69 @@
+
+DEAL:0:2d::Processor: 0
+DEAL:0:2d:: n_globally_active_cells: 29035
+DEAL:0:2d:: n_locally_owned_dofs: 68395
+DEAL:0:2d:: n_global_dofs: 485573
+DEAL:0:3d::Processor: 0
+DEAL:0:3d:: n_globally_active_cells: 13824
+DEAL:0:3d:: n_locally_owned_dofs: 162762
+DEAL:0:3d:: n_global_dofs: 1125219
+
+DEAL:1:2d::Processor: 1
+DEAL:1:2d:: n_globally_active_cells: 29035
+DEAL:1:2d:: n_locally_owned_dofs: 72283
+DEAL:1:2d:: n_global_dofs: 485573
+DEAL:1:3d::Processor: 1
+DEAL:1:3d:: n_globally_active_cells: 13824
+DEAL:1:3d:: n_locally_owned_dofs: 162413
+DEAL:1:3d:: n_global_dofs: 1125219
+
+
+DEAL:2:2d::Processor: 2
+DEAL:2:2d:: n_globally_active_cells: 29035
+DEAL:2:2d:: n_locally_owned_dofs: 67839
+DEAL:2:2d:: n_global_dofs: 485573
+DEAL:2:3d::Processor: 2
+DEAL:2:3d:: n_globally_active_cells: 13824
+DEAL:2:3d:: n_locally_owned_dofs: 161549
+DEAL:2:3d:: n_global_dofs: 1125219
+
+
+DEAL:3:2d::Processor: 3
+DEAL:3:2d:: n_globally_active_cells: 29035
+DEAL:3:2d:: n_locally_owned_dofs: 69223
+DEAL:3:2d:: n_global_dofs: 485573
+DEAL:3:3d::Processor: 3
+DEAL:3:3d:: n_globally_active_cells: 13824
+DEAL:3:3d:: n_locally_owned_dofs: 160833
+DEAL:3:3d:: n_global_dofs: 1125219
+
+
+DEAL:4:2d::Processor: 4
+DEAL:4:2d:: n_globally_active_cells: 29035
+DEAL:4:2d:: n_locally_owned_dofs: 69483
+DEAL:4:2d:: n_global_dofs: 485573
+DEAL:4:3d::Processor: 4
+DEAL:4:3d:: n_globally_active_cells: 13824
+DEAL:4:3d:: n_locally_owned_dofs: 161477
+DEAL:4:3d:: n_global_dofs: 1125219
+
+
+DEAL:5:2d::Processor: 5
+DEAL:5:2d:: n_globally_active_cells: 29035
+DEAL:5:2d:: n_locally_owned_dofs: 70928
+DEAL:5:2d:: n_global_dofs: 485573
+DEAL:5:3d::Processor: 5
+DEAL:5:3d:: n_globally_active_cells: 13824
+DEAL:5:3d:: n_locally_owned_dofs: 159501
+DEAL:5:3d:: n_global_dofs: 1125219
+
+
+DEAL:6:2d::Processor: 6
+DEAL:6:2d:: n_globally_active_cells: 29035
+DEAL:6:2d:: n_locally_owned_dofs: 67422
+DEAL:6:2d:: n_global_dofs: 485573
+DEAL:6:3d::Processor: 6
+DEAL:6:3d:: n_globally_active_cells: 13824
+DEAL:6:3d:: n_locally_owned_dofs: 156684
+DEAL:6:3d:: n_global_dofs: 1125219
+
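
A side note on the expected-output files above: in every run, the per-rank
"n_locally_owned_dofs" values add up to "n_global_dofs" (for example,
242867 + 242706 = 485573 in the two-rank 2d case), which is exactly the
"same total number of DoFs regardless of the number of subdomains" property
the test description talks about. The snippet below is only a sketch, not
part of the committed test, of how that invariant could be asserted directly
at the end of test(); it assumes the dof_handler object defined there, with
Utilities::MPI::sum coming from <deal.II/base/mpi.h>, and the variable name
n_owned_sum is made up for illustration:

  // Hypothetical consistency check (not in the commit): the MPI sum of the
  // locally owned DoF counts over all ranks must equal the global DoF count.
  const types::global_dof_index n_owned_sum =
    Utilities::MPI::sum(dof_handler.n_locally_owned_dofs(), MPI_COMM_WORLD);
  AssertThrow(n_owned_sum == dof_handler.n_dofs(), ExcInternalError());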