From 881e97e1a440ca834a4c0d7cfa86dba4c4a65a42 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Tue, 5 Feb 2019 19:06:12 +0100
Subject: [PATCH] Fix initialization of LA::d::Vector in multigrid.

---
 .../deal.II/multigrid/mg_transfer.templates.h |  59 ++-
 .../parallel_multigrid_adaptive_08.cc         | 423 ++++++++++++++++++
 ...4est=true.with_lapack=true.mpirun=3.output |  47 ++
 3 files changed, 498 insertions(+), 31 deletions(-)
 create mode 100644 tests/matrix_free/parallel_multigrid_adaptive_08.cc
 create mode 100644 tests/matrix_free/parallel_multigrid_adaptive_08.with_mpi=true.with_p4est=true.with_lapack=true.mpirun=3.output

diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 439f2dda30..c207fe593e 100644
--- a/include/deal.II/multigrid/mg_transfer.templates.h
+++ b/include/deal.II/multigrid/mg_transfer.templates.h
@@ -104,34 +104,6 @@ namespace internal
         }
     }
 
-    /**
-     * Adjust vectors on all levels to correct size. Here, we just count the
-     * numbers of degrees of freedom on each level and @p reinit each level
-     * vector to this length.
-     */
-    template <int dim, typename Number, int spacedim>
-    void
-    reinit_vector(const dealii::DoFHandler<dim, spacedim> &mg_dof,
-                  const std::vector<unsigned int> &,
-                  MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &v)
-    {
-      const parallel::Triangulation<dim, spacedim> *tria =
-        (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
-          &mg_dof.get_triangulation()));
-
-      for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
-        {
-          if (v[level].size() != mg_dof.locally_owned_mg_dofs(level).size() ||
-              v[level].local_size() !=
-                mg_dof.locally_owned_mg_dofs(level).n_elements())
-            v[level].reinit(mg_dof.locally_owned_mg_dofs(level),
-                            tria != nullptr ? tria->get_communicator() :
-                                              MPI_COMM_SELF);
-          else
-            v[level] = 0.;
-        }
-    }
-
 #ifdef DEAL_II_WITH_TRILINOS
 
     /**
@@ -453,12 +425,37 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::copy_to_mg(
     solution_transfer ? solution_copy_indices_level_mine :
                         copy_indices_level_mine;
 
+  (void)mg_dof_handler;
+
   AssertIndexRange(dst.max_level(),
                    mg_dof_handler.get_triangulation().n_global_levels());
   AssertIndexRange(dst.min_level(), dst.max_level() + 1);
-  internal::MGTransfer::reinit_vector(mg_dof_handler,
-                                      component_to_block_map,
-                                      dst);
+
+  for (unsigned int level = dst.min_level(); level <= dst.max_level(); ++level)
+    if (dst[level].size() != mg_dof_handler.n_dofs(level) ||
+        dst[level].local_size() !=
+          mg_dof_handler.locally_owned_mg_dofs(level).n_elements())
+      {
+        // In case a ghosted level vector has been initialized, we can simply
+        // use that as a template for the vector partitioning. If not, we
+        // resort to the locally owned range of the dof handler.
+        if (level <= ghosted_level_vector.max_level() &&
+            ghosted_level_vector[level].size() == mg_dof_handler.n_dofs(level))
+          dst[level].reinit(ghosted_level_vector[level], false);
+        else
+          {
+            const parallel::Triangulation<dim, spacedim> *tria =
+              (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+                &mg_dof_handler.get_triangulation()));
+            dst[level].reinit(mg_dof_handler.locally_owned_mg_dofs(level),
+                              tria != nullptr ?
+                                tria->get_communicator() :
+                                MPI_COMM_SELF);
+          }
+      }
+    else if ((perform_plain_copy == false &&
+              perform_renumbered_plain_copy == false) ||
+             level != dst.max_level())
+      dst[level] = 0;
 
   if (perform_plain_copy)
     {
diff --git a/tests/matrix_free/parallel_multigrid_adaptive_08.cc b/tests/matrix_free/parallel_multigrid_adaptive_08.cc
new file mode 100644
index 0000000000..4fb6c2b0c4
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_08.cc
@@ -0,0 +1,423 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 - 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// similar to parallel_multigrid_adaptive_02 but without using a separate
+// transfer class that builds the appropriate vectors. Furthermore, we want to
+// avoid setting some vectors to zero in the cell loop. Rather,
+// MatrixFreeOperators::LaplaceOperator::adjust_ghost_range_if_necessary()
+// will do that - or rather a variant of that, given that we want to modify
+// the cell loop of the LaplaceOperator class and provide our own. This forces
+// us to reimplement a few things, but all ideas are the same as in the
+// matrix-free operators.
+
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/solver_cg.h>
+
+#include <deal.II/matrix_free/fe_evaluation.h>
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/operators.h>
+
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_constrained_dofs.h>
+#include <deal.II/multigrid/mg_matrix.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
+#include <deal.II/multigrid/multigrid.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#include "../tests.h"
+
+std::ofstream logfile("output");
+
+using namespace dealii::MatrixFreeOperators;
+
+
+template <int dim, int fe_degree, typename Number>
+class MyLaplaceOperator : public MatrixFreeOperators::LaplaceOperator<
+                            dim,
+                            fe_degree,
+                            fe_degree + 1,
+                            1,
+                            LinearAlgebra::distributed::Vector<Number>>
+{
+public:
+  void
+  initialize(std::shared_ptr<const MatrixFree<dim, Number>> data,
+             const MGConstrainedDoFs &                      mg_constrained_dofs,
+             const unsigned int                             level)
+  {
+    MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<Number>>::
+      initialize(data,
+                 mg_constrained_dofs,
+                 level,
+                 std::vector<unsigned int>({0}));
+
+    std::vector<types::global_dof_index> interface_indices;
+    mg_constrained_dofs.get_refinement_edge_indices(level).fill_index_vector(
+      interface_indices);
+    vmult_edge_constrained_indices.clear();
+    vmult_edge_constrained_indices.reserve(interface_indices.size());
+    vmult_edge_constrained_values.resize(interface_indices.size());
+    const IndexSet &locally_owned =
+      this->data->get_dof_handler(0).locally_owned_mg_dofs(level);
+    for (unsigned int i = 0; i < interface_indices.size(); ++i)
+      if (locally_owned.is_element(interface_indices[i]))
+        vmult_edge_constrained_indices.push_back(
+          locally_owned.index_within_set(interface_indices[i]));
+  }
+
+  void
+  initialize(std::shared_ptr<const MatrixFree<dim, Number>> data,
+             const std::vector<unsigned int> &              mask = {})
+  {
+    MatrixFreeOperators::Base<dim, LinearAlgebra::distributed::Vector<Number>>::
+      initialize(data, mask);
+  }
+
+
+  void
+  vmult(LinearAlgebra::distributed::Vector<Number> &      dst,
+        const LinearAlgebra::distributed::Vector<Number> &src) const
+  {
+    adjust_ghost_range_if_necessary(src);
+    adjust_ghost_range_if_necessary(dst);
+
+    for (unsigned int i = 0; i < vmult_edge_constrained_indices.size(); ++i)
+      {
+        vmult_edge_constrained_values[i] =
+          src.local_element(vmult_edge_constrained_indices[i]);
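+        // save the src entries at the refinement-edge (interface) rows and
+        // zero them for the cell loop below; they are restored afterwards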
+        const_cast<LinearAlgebra::distributed::Vector<Number> &>(src)
+          .local_element(vmult_edge_constrained_indices[i]) = 0.;
+      }
+
+    // zero dst within the loop
+    this->data->cell_loop(
+      &MyLaplaceOperator::local_apply, this, dst, src, true);
+
+    for (auto i : this->data->get_constrained_dofs(0))
+      dst.local_element(i) = src.local_element(i);
+    for (unsigned int i = 0; i < vmult_edge_constrained_indices.size(); ++i)
+      {
+        dst.local_element(vmult_edge_constrained_indices[i]) =
+          vmult_edge_constrained_values[i];
+        const_cast<LinearAlgebra::distributed::Vector<Number> &>(src)
+          .local_element(vmult_edge_constrained_indices[i]) =
+          vmult_edge_constrained_values[i];
+      }
+  }
+
+private:
+  void
+  local_apply(const MatrixFree<dim, Number> &                   data,
+              LinearAlgebra::distributed::Vector<Number> &      dst,
+              const LinearAlgebra::distributed::Vector<Number> &src,
+              const std::pair<unsigned int, unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim, fe_degree, fe_degree + 1, 1, Number> phi(data);
+
+    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
+      {
+        phi.reinit(cell);
+        phi.gather_evaluate(src, false, true);
+        for (unsigned int q = 0; q < phi.n_q_points; ++q)
+          phi.submit_gradient(phi.get_gradient(q), q);
+        phi.integrate_scatter(false, true, dst);
+      }
+  }
+
+  void
+  adjust_ghost_range_if_necessary(
+    const LinearAlgebra::distributed::Vector<Number> &vec) const
+  {
+    if (vec.get_partitioner().get() ==
+        this->data->get_dof_info(0).vector_partitioner.get())
+      return;
+
+    Assert(vec.get_partitioner()->local_size() ==
+             this->data->get_dof_info(0).vector_partitioner->local_size(),
+           ExcMessage("The vector passed to the vmult() function does not have "
+                      "the correct size for compatibility with MatrixFree."));
+    LinearAlgebra::distributed::Vector<Number> copy_vec(vec);
+    const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec).reinit(
+      this->data->get_dof_info(0).vector_partitioner);
+    const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec)
+      .copy_locally_owned_data_from(copy_vec);
+  }
+
+  std::vector<unsigned int> vmult_edge_constrained_indices;
+
+  mutable std::vector<double> vmult_edge_constrained_values;
+};
+
+
+
+template <typename MatrixType, typename Number>
+class MGCoarseIterative
+  : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number>>
+{
+public:
+  MGCoarseIterative()
+  {}
+
+  void
+  initialize(const MatrixType &matrix)
+  {
+    coarse_matrix = &matrix;
+  }
+
+  virtual void
+  operator()(const unsigned int                                level,
+             LinearAlgebra::distributed::Vector<Number> &      dst,
+             const LinearAlgebra::distributed::Vector<Number> &src) const
+  {
+    ReductionControl solver_control(1e4, 1e-50, 1e-10);
+    SolverCG<LinearAlgebra::distributed::Vector<Number>> solver_coarse(
+      solver_control);
+    solver_coarse.solve(*coarse_matrix, dst, src, PreconditionIdentity());
+  }
+
+  const MatrixType *coarse_matrix;
+};
+
+
+
+template <int dim, int fe_degree, int n_q_points_1d, typename number>
+void
+do_test(const DoFHandler<dim> &dof)
+{
+  deallog << "Testing " << dof.get_fe().get_name();
+  deallog << std::endl;
+  deallog << "Number of degrees of freedom: " << dof.n_dofs() << std::endl;
+
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof, locally_relevant_dofs);
+
+  // Dirichlet BC
+  Functions::ZeroFunction<dim> zero_function;
+  std::map<types::boundary_id, const Function<dim> *> dirichlet_boundary;
+  dirichlet_boundary[0] = &zero_function;
+
+  // fine-level constraints
+  AffineConstraints<double> constraints;
+  constraints.reinit(locally_relevant_dofs);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values(dof,
+                                           dirichlet_boundary,
+                                           constraints);
+  constraints.close();
+
+  // level constraints:
+  MGConstrainedDoFs mg_constrained_dofs;
+  mg_constrained_dofs.initialize(dof, dirichlet_boundary);
+
+  MappingQ<dim> mapping(fe_degree + 1);
+
+  MyLaplaceOperator<dim, fe_degree, number> fine_matrix;
+  std::shared_ptr<MatrixFree<dim, number>> fine_level_data(
+    new MatrixFree<dim, number>());
+
+  typename MatrixFree<dim, number>::AdditionalData fine_level_additional_data;
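+  // configure the fine-level MatrixFree data; tasks_parallel_scheme = none
+  // turns off task-based threading of the cell loops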
+  fine_level_additional_data.tasks_parallel_scheme =
+    MatrixFree<dim, number>::AdditionalData::none;
+  fine_level_additional_data.tasks_block_size = 3;
+  fine_level_data->reinit(mapping,
+                          dof,
+                          constraints,
+                          QGauss<1>(n_q_points_1d),
+                          fine_level_additional_data);
+
+  fine_matrix.initialize(fine_level_data);
+  fine_matrix.compute_diagonal();
+
+
+  LinearAlgebra::distributed::Vector<number> in, sol;
+  fine_matrix.initialize_dof_vector(in);
+  fine_matrix.initialize_dof_vector(sol);
+
+  // set constant rhs vector
+  {
+    // this is to make it consistent with parallel_multigrid_adaptive.cc
+    AffineConstraints<double> hanging_node_constraints;
+    hanging_node_constraints.reinit(locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints(dof, hanging_node_constraints);
+    hanging_node_constraints.close();
+
+    for (unsigned int i = 0; i < in.local_size(); ++i)
+      if (!hanging_node_constraints.is_constrained(
+            in.get_partitioner()->local_to_global(i)))
+        in.local_element(i) = 1.;
+  }
+
+  // set up multigrid in analogy to step-37
+  using LevelMatrixType = MyLaplaceOperator<dim, fe_degree, number>;
+
+  MGLevelObject<LevelMatrixType> mg_matrices;
+  MGLevelObject<MatrixFree<dim, number>> mg_level_data;
+  mg_matrices.resize(0, dof.get_triangulation().n_global_levels() - 1);
+  mg_level_data.resize(0, dof.get_triangulation().n_global_levels() - 1);
+  for (unsigned int level = 0;
+       level < dof.get_triangulation().n_global_levels();
+       ++level)
+    {
+      typename MatrixFree<dim, number>::AdditionalData mg_additional_data;
+      mg_additional_data.tasks_parallel_scheme =
+        MatrixFree<dim, number>::AdditionalData::none;
+      mg_additional_data.tasks_block_size = 3;
+      mg_additional_data.level_mg_handler = level;
+
+      AffineConstraints<double> level_constraints;
+      IndexSet relevant_dofs;
+      DoFTools::extract_locally_relevant_level_dofs(dof, level, relevant_dofs);
+      level_constraints.reinit(relevant_dofs);
+      level_constraints.add_lines(
+        mg_constrained_dofs.get_boundary_indices(level));
+      level_constraints.close();
+
+      mg_level_data[level].reinit(mapping,
+                                  dof,
+                                  level_constraints,
+                                  QGauss<1>(n_q_points_1d),
+                                  mg_additional_data);
+      mg_matrices[level].initialize(std::make_shared<MatrixFree<dim, number>>(
+                                      mg_level_data[level]),
+                                    mg_constrained_dofs,
+                                    level);
+      mg_matrices[level].compute_diagonal();
+    }
+  MGLevelObject<MatrixFreeOperators::MGInterfaceOperator<LevelMatrixType>>
+    mg_interface_matrices;
+  mg_interface_matrices.resize(0,
+                               dof.get_triangulation().n_global_levels() - 1);
+  for (unsigned int level = 0;
+       level < dof.get_triangulation().n_global_levels();
+       ++level)
+    mg_interface_matrices[level].initialize(mg_matrices[level]);
+
+  MGTransferMatrixFree<dim, number> mg_transfer(mg_constrained_dofs);
+  mg_transfer.build(dof);
+
+  MGCoarseIterative<LevelMatrixType, number> mg_coarse;
+  mg_coarse.initialize(mg_matrices[0]);
+
+  typedef PreconditionChebyshev<LevelMatrixType,
+                                LinearAlgebra::distributed::Vector<number>>
+    SMOOTHER;
+  MGSmootherPrecondition<LevelMatrixType,
+                         SMOOTHER,
+                         LinearAlgebra::distributed::Vector<number>>
+    mg_smoother;
+
+  MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
+  smoother_data.resize(0, dof.get_triangulation().n_global_levels() - 1);
+  for (unsigned int level = 0;
+       level < dof.get_triangulation().n_global_levels();
+       ++level)
+    {
+      smoother_data[level].smoothing_range     = 15.;
+      smoother_data[level].degree              = 5;
+      smoother_data[level].eig_cg_n_iterations = 15;
+      smoother_data[level].preconditioner =
+        mg_matrices[level].get_matrix_diagonal_inverse();
+    }
+  mg_smoother.initialize(mg_matrices, smoother_data);
+
+  mg::Matrix<LinearAlgebra::distributed::Vector<number>> mg_matrix(mg_matrices);
+  mg::Matrix<LinearAlgebra::distributed::Vector<number>> mg_interface(
+    mg_interface_matrices);
+
+  Multigrid<LinearAlgebra::distributed::Vector<number>> mg(
+    dof, mg_matrix, mg_coarse, mg_transfer, mg_smoother, mg_smoother);
+  mg.set_edge_matrices(mg_interface, mg_interface);
+  PreconditionMG<dim,
+                 LinearAlgebra::distributed::Vector<number>,
+                 MGTransferMatrixFree<dim, number>>
+    preconditioner(dof, mg, mg_transfer);
+
+  {
+    // avoid output from inner (coarse-level) solver
+    deallog.depth_file(3);
+
+    ReductionControl control(30, 1e-20, 1e-7);
+    SolverCG<LinearAlgebra::distributed::Vector<number>> solver(control);
+    solver.solve(fine_matrix, sol, in, preconditioner);
+  }
+
+  fine_matrix.clear();
+  for (unsigned int level = 0;
+       level < dof.get_triangulation().n_global_levels();
+       ++level)
+    mg_matrices[level].clear();
+}
+
+
+
+template <int dim, int fe_degree>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim> tria(
+    MPI_COMM_WORLD,
+    Triangulation<dim>::limit_level_difference_at_vertices,
+    parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(6 - dim);
+  const unsigned int n_runs = fe_degree == 1 ? 6 - dim : 5 - dim;
+  for (unsigned int i = 0; i < n_runs; ++i)
+    {
+      for (typename Triangulation<dim>::active_cell_iterator cell =
+             tria.begin_active();
+           cell != tria.end();
+           ++cell)
+        if (cell->is_locally_owned() &&
+            ((cell->center().norm() < 0.5 &&
+              (cell->level() < 5 || cell->center().norm() > 0.45)) ||
+             (dim == 2 && cell->center().norm() > 1.2)))
+          cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+      FE_Q<dim>       fe(fe_degree);
+      DoFHandler<dim> dof(tria);
+      dof.distribute_dofs(fe);
+      dof.distribute_mg_dofs(fe);
+
+      do_test<dim, fe_degree, fe_degree + 1, double>(dof);
+    }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
+  mpi_initlog(true);
+
+  test<2, 2>();
+}
diff --git a/tests/matrix_free/parallel_multigrid_adaptive_08.with_mpi=true.with_p4est=true.with_lapack=true.mpirun=3.output b/tests/matrix_free/parallel_multigrid_adaptive_08.with_mpi=true.with_p4est=true.with_lapack=true.mpirun=3.output
new file mode 100644
index 0000000000..f00e1a40b4
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_08.with_mpi=true.with_p4est=true.with_lapack=true.mpirun=3.output
@@ -0,0 +1,47 @@
+
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 1935
+DEAL:cg::Starting value 43.0929
+DEAL:cg:cg::Starting value 2.63785
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 0.000574845
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 4.62010e-05
+DEAL:cg:cg::Convergence step 1 value 6.77626e-21
+DEAL:cg:cg::Starting value 6.27180e-07
+DEAL:cg:cg::Convergence step 1 value 1.05879e-22
+DEAL:cg:cg::Starting value 3.19916e-08
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg::Convergence step 5 value 2.10775e-06
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 3403
+DEAL:cg::Starting value 55.4346
+DEAL:cg:cg::Starting value 3.96150
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 0.00121318
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 9.44668e-05
+DEAL:cg:cg::Convergence step 1 value 1.35525e-20
+DEAL:cg:cg::Starting value 1.80128e-06
+DEAL:cg:cg::Convergence step 1 value 2.11758e-22
+DEAL:cg:cg::Starting value 1.21766e-07
+DEAL:cg:cg::Convergence step 1 value 2.64698e-23
+DEAL:cg:cg::Starting value 5.04178e-09
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg::Convergence step 6 value 4.24128e-07
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 8417
+DEAL:cg::Starting value 87.1149
+DEAL:cg:cg::Starting value 9.09582
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 0.00294550
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 0.000242151
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 3.21426e-06
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 1.60211e-07
+DEAL:cg:cg::Convergence step 1 value 0.00000
+DEAL:cg:cg::Starting value 3.38024e-09
+DEAL:cg:cg::Convergence step 1 value 4.13590e-25
+DEAL:cg::Convergence step 6 value 2.56705e-07
-- 
2.39.5