From: Jean-Paul Pelteret
Date: Tue, 3 Jul 2018 14:37:23 +0000 (+0200)
Subject: Add some MPI based tests for SolutionTransfer class
X-Git-Tag: v9.1.0-rc1~931^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=62636f8a7322565527d013fa731a7de8616d4b99;p=dealii.git

Add some MPI based tests for SolutionTransfer class

These new tests check the transfer of BlockVectors, augmenting the
previously added tests for standard Vectors.
---

diff --git a/tests/hp/solution_transfer_15.cc b/tests/hp/solution_transfer_15.cc
new file mode 100644
index 0000000000..99d5458f40
--- /dev/null
+++ b/tests/hp/solution_transfer_15.cc
@@ -0,0 +1,185 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// Test to check if SolutionTransfer works in parallel for block vectors.
+// This test is based on hp/solution_transfer_14.cc and
+// mpi/solution_transfer_03.cc.
+
+#include <deal.II/base/index_set.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/dof_handler.h>
+
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+
+#include <deal.II/numerics/solution_transfer.h>
+
+#include <iostream>
+#include <numeric>
+
+#include "../tests.h"
+
+
+// Build the global and per-block index sets describing which DoFs this
+// process owns and which it needs read access to (locally relevant).
+template <typename DH>
+void
+initialize_indexsets(IndexSet &             locally_owned_dofs,
+                     IndexSet &             locally_relevant_dofs,
+                     std::vector<IndexSet> &locally_owned_partitioning,
+                     std::vector<IndexSet> &locally_relevant_partitioning,
+                     const DH &             dof_handler,
+                     const std::vector<unsigned int> &block_component,
+                     const unsigned int               this_mpi_process)
+{
+  locally_owned_dofs =
+    DoFTools::locally_owned_dofs_per_subdomain(dof_handler)[this_mpi_process];
+  locally_relevant_dofs = DoFTools::locally_relevant_dofs_per_subdomain(
+    dof_handler)[this_mpi_process];
+
+  const unsigned int n_blocks = block_component.size();
+  std::vector<types::global_dof_index> dofs_per_block(n_blocks);
+  DoFTools::count_dofs_per_block(dof_handler, dofs_per_block, block_component);
+
+  locally_owned_partitioning.clear();
+  locally_relevant_partitioning.clear();
+  locally_owned_partitioning.reserve(n_blocks);
+  locally_relevant_partitioning.reserve(n_blocks);
+
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      // Block b covers the half-open range [idx_begin, idx_end) of the
+      // component-wise renumbered global DoF indices.
+      const types::global_dof_index idx_begin =
+        std::accumulate(dofs_per_block.begin(),
+                        std::next(dofs_per_block.begin(), b),
+                        types::global_dof_index(0));
+      const types::global_dof_index idx_end =
+        std::accumulate(dofs_per_block.begin(),
+                        std::next(dofs_per_block.begin(), b + 1),
+                        types::global_dof_index(0));
+      locally_owned_partitioning.push_back(
+        locally_owned_dofs.get_view(idx_begin, idx_end));
+      locally_relevant_partitioning.push_back(
+        locally_relevant_dofs.get_view(idx_begin, idx_end));
+    }
+}
+
+
+template <int dim>
+void
+transfer(const MPI_Comm &mpi_communicator)
+{
+  const unsigned int this_mpi_process =
+    Utilities::MPI::this_mpi_process(mpi_communicator);
+
+  Triangulation<dim> tria;
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(1);
+  GridTools::partition_triangulation(
+    Utilities::MPI::n_mpi_processes(mpi_communicator), tria);
+
+  hp::FECollection<dim> fe;
+  fe.push_back(FESystem<dim>(FE_Q<dim>(1), 1, FE_Q<dim>(1), 1));
+  fe.push_back(FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(2), 1));
+
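+  // Group the two scalar components into two blocks; the component-wise
+  // renumbering below orders all DoFs of block 0 before those of block 1.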
+  const std::vector<unsigned int> block_component({0, 1});
+
+  hp::DoFHandler<dim> dof_handler(tria);
+  dof_handler.begin(0)->child(0)->set_active_fe_index(1);
+
+  TrilinosWrappers::MPI::BlockVector solution;
+
+  dof_handler.distribute_dofs(fe);
+  DoFRenumbering::component_wise(dof_handler, block_component);
+
+  IndexSet locally_owned_dofs, locally_relevant_dofs;
+  std::vector<IndexSet> locally_owned_partitioning,
+    locally_relevant_partitioning;
+  initialize_indexsets(locally_owned_dofs,
+                       locally_relevant_dofs,
+                       locally_owned_partitioning,
+                       locally_relevant_partitioning,
+                       dof_handler,
+                       block_component,
+                       this_mpi_process);
+
+  solution.reinit(locally_owned_partitioning, mpi_communicator);
+
+  // Fill the locally owned entries with their global index so that the
+  // interpolated values can be traced back after refinement.
+  for (unsigned int i = 0; i < solution.size(); ++i)
+    if (locally_owned_dofs.is_element(i))
+      solution(i) = i;
+
+  SolutionTransfer<dim,
+                   TrilinosWrappers::MPI::BlockVector,
+                   hp::DoFHandler<dim>>
+    soltrans(dof_handler);
+
+  // Flag all but the first two active cells for refinement.
+  typename Triangulation<dim>::active_cell_iterator cell = tria.begin_active(),
+                                                    endc = tria.end();
+  ++cell;
+  ++cell;
+  for (; cell != endc; ++cell)
+    cell->set_refine_flag();
+
+  TrilinosWrappers::MPI::BlockVector old_solution;
+  old_solution.reinit(locally_owned_partitioning,
+                      locally_relevant_partitioning,
+                      mpi_communicator);
+  old_solution = solution;
+
+  tria.prepare_coarsening_and_refinement();
+  soltrans.prepare_for_pure_refinement();
+  tria.execute_coarsening_and_refinement();
+
+  dof_handler.distribute_dofs(fe);
+  DoFRenumbering::component_wise(dof_handler, block_component);
+
+  initialize_indexsets(locally_owned_dofs,
+                       locally_relevant_dofs,
+                       locally_owned_partitioning,
+                       locally_relevant_partitioning,
+                       dof_handler,
+                       block_component,
+                       this_mpi_process);
+
+  solution.reinit(locally_owned_partitioning, mpi_communicator);
+  soltrans.refine_interpolate(old_solution, solution);
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+
+  deallog << " 1D solution transfer" << std::endl;
+  transfer<1>(mpi_communicator);
+
+  deallog << " 2D solution transfer" << std::endl;
+  transfer<2>(mpi_communicator);
+
+  deallog << " 3D solution transfer" << std::endl;
+  transfer<3>(mpi_communicator);
+}
diff --git a/tests/hp/solution_transfer_15.with_trilinos=true.mpirun=2.output b/tests/hp/solution_transfer_15.with_trilinos=true.mpirun=2.output
new file mode 100644
index 0000000000..d3343bcbd3
--- /dev/null
+++ b/tests/hp/solution_transfer_15.with_trilinos=true.mpirun=2.output
@@ -0,0 +1,9 @@
+
+DEAL:0:: 1D solution transfer
+DEAL:0:: 2D solution transfer
+DEAL:0:: 3D solution transfer
+
+DEAL:1:: 1D solution transfer
+DEAL:1:: 2D solution transfer
+DEAL:1:: 3D solution transfer
+
diff --git a/tests/hp/solution_transfer_15.with_trilinos=true.output b/tests/hp/solution_transfer_15.with_trilinos=true.output
new file mode 100644
index 0000000000..9764d32d2a
--- /dev/null
+++ b/tests/hp/solution_transfer_15.with_trilinos=true.output
@@ -0,0 +1,4 @@
+
+DEAL:0:: 1D solution transfer
+DEAL:0:: 2D solution transfer
+DEAL:0:: 3D solution transfer
diff --git a/tests/mpi/solution_transfer_03.cc b/tests/mpi/solution_transfer_03.cc
new file mode 100644
index 0000000000..79cd90e085
--- /dev/null
+++ b/tests/mpi/solution_transfer_03.cc
@@ -0,0 +1,180 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// Test to check if SolutionTransfer works in parallel for block vectors.
+// This test is based on mpi/solution_transfer_02.cc.
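+//
+// Unlike the hp test above, this variant uses a plain DoFHandler with a
+// fixed FESystem, so only the pure h-refinement code path is exercised.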
+
+#include <deal.II/base/index_set.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+
+#include <deal.II/numerics/solution_transfer.h>
+
+#include <iostream>
+#include <numeric>
+
+#include "../tests.h"
+
+
+// Build the global and per-block index sets describing which DoFs this
+// process owns and which it needs read access to (locally relevant).
+template <typename DH>
+void
+initialize_indexsets(IndexSet &             locally_owned_dofs,
+                     IndexSet &             locally_relevant_dofs,
+                     std::vector<IndexSet> &locally_owned_partitioning,
+                     std::vector<IndexSet> &locally_relevant_partitioning,
+                     const DH &             dof_handler,
+                     const std::vector<unsigned int> &block_component,
+                     const unsigned int               this_mpi_process)
+{
+  locally_owned_dofs =
+    DoFTools::locally_owned_dofs_per_subdomain(dof_handler)[this_mpi_process];
+  locally_relevant_dofs = DoFTools::locally_relevant_dofs_per_subdomain(
+    dof_handler)[this_mpi_process];
+
+  const unsigned int n_blocks = block_component.size();
+  std::vector<types::global_dof_index> dofs_per_block(n_blocks);
+  DoFTools::count_dofs_per_block(dof_handler, dofs_per_block, block_component);
+
+  locally_owned_partitioning.clear();
+  locally_relevant_partitioning.clear();
+  locally_owned_partitioning.reserve(n_blocks);
+  locally_relevant_partitioning.reserve(n_blocks);
+
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      // Block b covers the half-open range [idx_begin, idx_end) of the
+      // component-wise renumbered global DoF indices.
+      const types::global_dof_index idx_begin =
+        std::accumulate(dofs_per_block.begin(),
+                        std::next(dofs_per_block.begin(), b),
+                        types::global_dof_index(0));
+      const types::global_dof_index idx_end =
+        std::accumulate(dofs_per_block.begin(),
+                        std::next(dofs_per_block.begin(), b + 1),
+                        types::global_dof_index(0));
+      locally_owned_partitioning.push_back(
+        locally_owned_dofs.get_view(idx_begin, idx_end));
+      locally_relevant_partitioning.push_back(
+        locally_relevant_dofs.get_view(idx_begin, idx_end));
+    }
+}
+
+
+template <int dim>
+void
+transfer(const MPI_Comm &mpi_communicator)
+{
+  const unsigned int this_mpi_process =
+    Utilities::MPI::this_mpi_process(mpi_communicator);
+
+  Triangulation<dim> tria;
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(1);
+  GridTools::partition_triangulation(
+    Utilities::MPI::n_mpi_processes(mpi_communicator), tria);
+
+  const FESystem<dim>             fe(FE_Q<dim>(2), 1, FE_Q<dim>(1), 1);
+  const std::vector<unsigned int> block_component({0, 1});
+
+  DoFHandler<dim> dof_handler(tria);
+
+  TrilinosWrappers::MPI::BlockVector solution;
+
+  dof_handler.distribute_dofs(fe);
+  DoFRenumbering::component_wise(dof_handler, block_component);
+
+  IndexSet locally_owned_dofs, locally_relevant_dofs;
+  std::vector<IndexSet> locally_owned_partitioning,
+    locally_relevant_partitioning;
+  initialize_indexsets(locally_owned_dofs,
+                       locally_relevant_dofs,
+                       locally_owned_partitioning,
+                       locally_relevant_partitioning,
+                       dof_handler,
+                       block_component,
+                       this_mpi_process);
+
+  solution.reinit(locally_owned_partitioning, mpi_communicator);
+
+  // Fill the locally owned entries with their global index so that the
+  // interpolated values can be traced back after refinement.
+  for (unsigned int i = 0; i < solution.size(); ++i)
+    if (locally_owned_dofs.is_element(i))
+      solution(i) = i;
+
+  SolutionTransfer<dim, TrilinosWrappers::MPI::BlockVector> soltrans(
+    dof_handler);
+
+  // Flag all but the first two active cells for refinement.
+  typename Triangulation<dim>::active_cell_iterator cell = tria.begin_active(),
+                                                    endc = tria.end();
+  ++cell;
+  ++cell;
+  for (; cell != endc; ++cell)
+    cell->set_refine_flag();
+
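+  // Keep a ghosted copy of the solution: initializing old_solution with the
+  // locally relevant partitioning gives this process read access to the halo
+  // entries that SolutionTransfer needs when interpolating on cells it does
+  // not own.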
+  TrilinosWrappers::MPI::BlockVector old_solution;
+  old_solution.reinit(locally_owned_partitioning,
+                      locally_relevant_partitioning,
+                      mpi_communicator);
+  old_solution = solution;
+
+  tria.prepare_coarsening_and_refinement();
+  soltrans.prepare_for_pure_refinement();
+  tria.execute_coarsening_and_refinement();
+
+  dof_handler.distribute_dofs(fe);
+  DoFRenumbering::component_wise(dof_handler, block_component);
+
+  initialize_indexsets(locally_owned_dofs,
+                       locally_relevant_dofs,
+                       locally_owned_partitioning,
+                       locally_relevant_partitioning,
+                       dof_handler,
+                       block_component,
+                       this_mpi_process);
+
+  solution.reinit(locally_owned_partitioning, mpi_communicator);
+  soltrans.refine_interpolate(old_solution, solution);
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+  const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+
+  deallog << " 1D solution transfer" << std::endl;
+  transfer<1>(mpi_communicator);
+
+  deallog << " 2D solution transfer" << std::endl;
+  transfer<2>(mpi_communicator);
+
+  deallog << " 3D solution transfer" << std::endl;
+  transfer<3>(mpi_communicator);
+}
diff --git a/tests/mpi/solution_transfer_03.with_trilinos=true.mpirun=2.output b/tests/mpi/solution_transfer_03.with_trilinos=true.mpirun=2.output
new file mode 100644
index 0000000000..d3343bcbd3
--- /dev/null
+++ b/tests/mpi/solution_transfer_03.with_trilinos=true.mpirun=2.output
@@ -0,0 +1,9 @@
+
+DEAL:0:: 1D solution transfer
+DEAL:0:: 2D solution transfer
+DEAL:0:: 3D solution transfer
+
+DEAL:1:: 1D solution transfer
+DEAL:1:: 2D solution transfer
+DEAL:1:: 3D solution transfer
+
diff --git a/tests/mpi/solution_transfer_03.with_trilinos=true.output b/tests/mpi/solution_transfer_03.with_trilinos=true.output
new file mode 100644
index 0000000000..9764d32d2a
--- /dev/null
+++ b/tests/mpi/solution_transfer_03.with_trilinos=true.output
@@ -0,0 +1,4 @@
+
+DEAL:0:: 1D solution transfer
+DEAL:0:: 2D solution transfer
+DEAL:0:: 3D solution transfer