From: Denis Davydov
Date: Fri, 12 May 2017 09:34:58 +0000 (+0200)
Subject: add another MF GMG test which was failing prior to cb5c93d
X-Git-Tag: v9.0.0-rc1~1604^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F4374%2Fhead;p=dealii.git

add another MF GMG test which was failing prior to cb5c93d
---

diff --git a/tests/matrix_free/parallel_multigrid_adaptive_05.cc b/tests/matrix_free/parallel_multigrid_adaptive_05.cc
new file mode 100644
index 0000000000..8355c969bc
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_05.cc
@@ -0,0 +1,581 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2014 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// same test as parallel_multigrid_adaptive_03 but using partition_color
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_matrix.h>
+
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+
+
+template <int dim, int fe_degree, int n_q_points_1d = fe_degree+1, typename number = double>
+class LaplaceOperator : public Subscriptor
+{
+public:
+  typedef number value_type;
+
+  LaplaceOperator() {};
+
+
+  void initialize (const Mapping<dim> &mapping,
+                   const DoFHandler<dim> &dof_handler,
+                   const MGConstrainedDoFs &mg_constrained_dofs,
+                   const typename FunctionMap<dim>::type &dirichlet_boundary,
+                   const unsigned int level,
+                   const bool threaded)
+  {
+    const QGauss<1> quad (n_q_points_1d);
+    typename MatrixFree<dim,number>::AdditionalData addit_data;
+    if (threaded)
+      addit_data.tasks_parallel_scheme = MatrixFree<dim,number>::AdditionalData::partition_color;
+    else
+      addit_data.tasks_parallel_scheme = MatrixFree<dim,number>::AdditionalData::none;
+    addit_data.tasks_block_size = 3;
+    addit_data.level_mg_handler = level;
+    ConstraintMatrix constraints;
+    if (level == numbers::invalid_unsigned_int)
+      {
+        IndexSet relevant_dofs;
+        DoFTools::extract_locally_relevant_dofs(dof_handler, relevant_dofs);
+        constraints.reinit(relevant_dofs);
+        DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+        VectorTools::interpolate_boundary_values(dof_handler, dirichlet_boundary,
+                                                 constraints);
+      }
+    else
+      {
+        IndexSet relevant_dofs;
+        DoFTools::extract_locally_relevant_level_dofs(dof_handler, level,
+                                                      relevant_dofs);
+        constraints.reinit(relevant_dofs);
+        constraints.add_lines(mg_constrained_dofs.get_boundary_indices(level));
+
+        std::vector<types::global_dof_index> interface_indices;
+        mg_constrained_dofs.get_refinement_edge_indices(level).fill_index_vector(interface_indices);
+        edge_constrained_indices.clear();
+        edge_constrained_indices.reserve(interface_indices.size());
+        edge_constrained_values.resize(interface_indices.size());
+        const IndexSet &locally_owned = dof_handler.locally_owned_mg_dofs(level);
+        for (unsigned int i=0; i<interface_indices.size(); ++i)
+          if (locally_owned.is_element(interface_indices[i]))
+            edge_constrained_indices.push_back(locally_owned.index_within_set(interface_indices[i]));
+        have_interface_matrices = Utilities::MPI::max((unsigned int)edge_constrained_indices.size(),
+                                                      MPI_COMM_WORLD) > 0;
+      }
+    constraints.close();
+
+    data.reinit (mapping, dof_handler, constraints, quad, addit_data);
+
+    if (level != numbers::invalid_unsigned_int)
+      compute_inverse_diagonal();
+  }
+
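+  // The products below implement the level matrix with the rows and columns
+  // of refinement-edge (interface) degrees of freedom eliminated: those
+  // entries of src are zeroed before the cell loop and restored afterwards,
+  // while dst receives a unit-diagonal contribution for them. The interface
+  // coupling itself is applied separately by vmult_interface_down()/up().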
+  void vmult(LinearAlgebra::distributed::Vector<number> &dst,
+             const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult(LinearAlgebra::distributed::Vector<number> &dst,
+              const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+                  const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    vmult_add(dst, src);
+  }
+
+  void vmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+                 const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    // set zero Dirichlet values on the input vector (and remember the src and
+    // dst values because we need to reset them at the end)
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        edge_constrained_values[i] =
+          std::pair<number,number>(src.local_element(edge_constrained_indices[i]),
+                                   dst.local_element(edge_constrained_indices[i]));
+        const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+      }
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      dst.local_element(constrained_dofs[i]) += src.local_element(constrained_dofs[i]);
+
+    // reset edge constrained values, multiply by unit matrix and add into
+    // destination
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+        dst.local_element(edge_constrained_indices[i]) = edge_constrained_values[i].second + edge_constrained_values[i].first;
+      }
+  }
+
+  void vmult_interface_down(LinearAlgebra::distributed::Vector<number> &dst,
+                            const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    dst = 0;
+
+    if (!have_interface_matrices)
+      return;
+
+    // set zero Dirichlet values on the input vector (and remember the src and
+    // dst values because we need to reset them at the end)
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        const double src_val = src.local_element(edge_constrained_indices[i]);
+        const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+        edge_constrained_values[i] = std::pair<number,number>(src_val,
+                                                              dst.local_element(edge_constrained_indices[i]));
+      }
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src);
+
+    unsigned int c=0;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        for ( ; c<edge_constrained_indices[i]; ++c)
+          dst.local_element(c) = 0.;
+        ++c;
+
+        // reset the src values
+        const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+      }
+    for ( ; c<dst.local_size(); ++c)
+      dst.local_element(c) = 0.;
+  }
+
+  void vmult_interface_up(LinearAlgebra::distributed::Vector<number> &dst,
+                          const LinearAlgebra::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    dst = 0;
+
+    if (!have_interface_matrices)
+      return;
+
+    LinearAlgebra::distributed::Vector<number> src_cpy (src);
+    unsigned int c=0;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        for ( ; c<edge_constrained_indices[i]; ++c)
+          src_cpy.local_element(c) = 0.;
+        ++c;
+      }
+    for ( ; c<src_cpy.local_size(); ++c)
+      src_cpy.local_element(c) = 0.;
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src_cpy);
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        dst.local_element(edge_constrained_indices[i]) = 0.;
+      }
+  }
+
+  types::global_dof_index m() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  types::global_dof_index n() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  number el (const unsigned int row, const unsigned int col) const
+  {
+    AssertThrow(false, ExcMessage("Matrix-free does not allow for entry access"));
+    return number();
+  }
+
+  void
+  initialize_dof_vector(LinearAlgebra::distributed::Vector<number> &vector) const
+  {
+    if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
+      data.initialize_dof_vector(vector);
+    Assert(vector.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner),
+           ExcInternalError());
+  }
+
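+  // The diagonal is assembled in compute_inverse_diagonal() below by applying
+  // the cell operation to unit vectors (see local_diagonal_cell()); its
+  // inverse is exposed here so that do_test() can hand it to the Chebyshev
+  // smoother on each level.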
+  const LinearAlgebra::distributed::Vector<number> &
+  get_matrix_diagonal_inverse() const
+  {
+    Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
+    return inverse_diagonal_entries;
+  }
+
+
+private:
+  void
+  local_apply (const MatrixFree<dim,number> &data,
+               LinearAlgebra::distributed::Vector<number> &dst,
+               const LinearAlgebra::distributed::Vector<number> &src,
+               const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+        phi.read_dof_values(src);
+        phi.evaluate (false,true,false);
+        for (unsigned int q=0; q<phi.n_q_points; ++q)
+          phi.submit_gradient (phi.get_gradient(q), q);
+        phi.integrate (false,true);
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  void
+  compute_inverse_diagonal ()
+  {
+    data.initialize_dof_vector(inverse_diagonal_entries);
+    unsigned int dummy = 0;
+    data.cell_loop (&LaplaceOperator::local_diagonal_cell,
+                    this, inverse_diagonal_entries, dummy);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      inverse_diagonal_entries.local_element(constrained_dofs[i]) = 1.;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        inverse_diagonal_entries.local_element(edge_constrained_indices[i]) = 1.;
+      }
+
+    for (unsigned int i=0; i<inverse_diagonal_entries.local_size(); ++i)
+      if (std::abs(inverse_diagonal_entries.local_element(i)) > 1e-10)
+        inverse_diagonal_entries.local_element(i) = 1./inverse_diagonal_entries.local_element(i);
+      else
+        inverse_diagonal_entries.local_element(i) = 1.;
+  }
+
+  void
+  local_diagonal_cell (const MatrixFree<dim,number> &data,
+                       LinearAlgebra::distributed::Vector<number> &dst,
+                       const unsigned int &,
+                       const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+
+        VectorizedArray<number> local_diagonal_vector[phi.tensor_dofs_per_cell];
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          {
+            for (unsigned int j=0; j<phi.dofs_per_cell; ++j)
+              phi.begin_dof_values()[j] = VectorizedArray<number>();
+            phi.begin_dof_values()[i] = 1.;
+            phi.evaluate (false,true,false);
+            for (unsigned int q=0; q<phi.n_q_points; ++q)
+              phi.submit_gradient (phi.get_gradient(q), q);
+            phi.integrate (false,true);
+            local_diagonal_vector[i] = phi.begin_dof_values()[i];
+          }
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          phi.begin_dof_values()[i] = local_diagonal_vector[i];
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  MatrixFree<dim,number> data;
+  LinearAlgebra::distributed::Vector<number> inverse_diagonal_entries;
+  std::vector<unsigned int> edge_constrained_indices;
+  mutable std::vector<std::pair<number,number> > edge_constrained_values;
+  bool have_interface_matrices;
+};
+
+
+
+template <typename LAPLACEOPERATOR>
+class MGInterfaceMatrix : public Subscriptor
+{
+public:
+  void initialize (const LAPLACEOPERATOR &laplace)
+  {
+    this->laplace = &laplace;
+  }
+
+  void vmult (LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+              const LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+  {
+    laplace->vmult_interface_down(dst, src);
+  }
+
+  void Tvmult (LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+               const LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+  {
+    laplace->vmult_interface_up(dst, src);
+  }
+
+private:
+  SmartPointer<const LAPLACEOPERATOR> laplace;
+};
+
+
+
+template <int dim, typename LAPLACEOPERATOR>
+class MGTransferMF : public MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>
+{
+public:
+  MGTransferMF(const MGLevelObject<LAPLACEOPERATOR> &laplace,
+               const MGConstrainedDoFs &mg_constrained_dofs)
+    :
+    MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>(mg_constrained_dofs),
+    laplace_operator (laplace)
+  {
+  }
+
+  /**
+   * Overload copy_to_mg from MGTransferPrebuilt to get the vectors compatible
+   * with MatrixFree and bypass the crude vector initialization in
+   * MGTransferPrebuilt
+   */
+  template <class InVector, int spacedim>
+  void
+  copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof_handler,
+              MGLevelObject<LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> > &dst,
+              const InVector &src) const
+  {
+    for (unsigned int level=dst.min_level();
+         level<=dst.max_level(); ++level)
+      laplace_operator[level].initialize_dof_vector(dst[level]);
+    MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> >::
+    copy_to_mg(mg_dof_handler, dst, src);
+  }
+
+private:
+  const MGLevelObject<LAPLACEOPERATOR> &laplace_operator;
+};
+
+
+
+template <typename MatrixType, typename Number>
+class MGCoarseIterative : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number> >
+{
+public:
+  MGCoarseIterative() {}
+
+  void initialize(const MatrixType &matrix)
+  {
+    coarse_matrix = &matrix;
+  }
+
+  virtual void operator() (const unsigned int level,
+                           LinearAlgebra::distributed::Vector<Number> &dst,
+                           const LinearAlgebra::distributed::Vector<Number> &src) const
+  {
+    ReductionControl solver_control (1e4, 1e-50, 1e-10);
+    SolverCG<LinearAlgebra::distributed::Vector<Number> > solver_coarse (solver_control);
+    solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
+  }
+
+  const MatrixType *coarse_matrix;
+};
+
+
+
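+// Run a multigrid-preconditioned CG solve for the Laplace problem on the
+// given DoFHandler; 'threaded' selects the partition_color task scheme in
+// MatrixFree, which is the configuration this test exercises.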
+template <int dim, int fe_degree, int n_q_points_1d, typename number>
+void do_test (const DoFHandler<dim> &dof, const bool threaded)
+{
+  deallog << "Testing " << dof.get_fe().get_name();
+  deallog << std::endl;
+  deallog << "Number of degrees of freedom: " << dof.n_dofs() << std::endl;
+
+  ConstraintMatrix hanging_node_constraints;
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof, locally_relevant_dofs);
+  hanging_node_constraints.reinit(locally_relevant_dofs);
+  DoFTools::make_hanging_node_constraints(dof, hanging_node_constraints);
+  hanging_node_constraints.close();
+
+  MGConstrainedDoFs mg_constrained_dofs;
+  ZeroFunction<dim> zero_function;
+  typename FunctionMap<dim>::type dirichlet_boundary;
+  dirichlet_boundary[0] = &zero_function;
+  mg_constrained_dofs.initialize(dof, dirichlet_boundary);
+
+  MappingQ<dim> mapping(fe_degree+1);
+  LaplaceOperator<dim,fe_degree,n_q_points_1d,number> fine_matrix;
+  fine_matrix.initialize(mapping, dof, mg_constrained_dofs, dirichlet_boundary,
+                         numbers::invalid_unsigned_int, threaded);
+
+  LinearAlgebra::distributed::Vector<number> in, sol;
+  fine_matrix.initialize_dof_vector(in);
+  fine_matrix.initialize_dof_vector(sol);
+
+  // set constant rhs vector
+  for (unsigned int i=0; i<in.local_size(); ++i)
+    if (!hanging_node_constraints.is_constrained(in.get_partitioner()->local_to_global(i)))
+      in.local_element(i) = 1.;
+
+  // set up multigrid in analogy to step-37
+  typedef LaplaceOperator<dim,fe_degree,n_q_points_1d,number> LevelMatrixType;
+
+  MGLevelObject<LevelMatrixType> mg_matrices;
+  mg_matrices.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    {
+      mg_matrices[level].initialize(mapping, dof, mg_constrained_dofs,
+                                    dirichlet_boundary, level, threaded);
+    }
+  MGLevelObject<MGInterfaceMatrix<LevelMatrixType> > mg_interface_matrices;
+  mg_interface_matrices.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level=0; level<dof.get_triangulation().n_global_levels(); ++level)
+    mg_interface_matrices[level].initialize(mg_matrices[level]);
+
+  MGTransferMF<dim,LevelMatrixType> mg_transfer(mg_matrices,
+                                                mg_constrained_dofs);
+  mg_transfer.build(dof);
+
+  MGCoarseIterative<LevelMatrixType,number> mg_coarse;
+  mg_coarse.initialize(mg_matrices[0]);
+
+  typedef PreconditionChebyshev<LevelMatrixType,LinearAlgebra::distributed::Vector<number> > SMOOTHER;
+  MGSmootherPrecondition<LevelMatrixType, SMOOTHER, LinearAlgebra::distributed::Vector<number> >
+  mg_smoother;
+
+  MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
+  smoother_data.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    {
+      smoother_data[level].smoothing_range = 15.;
+      smoother_data[level].degree = 5;
+      smoother_data[level].eig_cg_n_iterations = 15;
+      smoother_data[level].preconditioner.reset(new DiagonalMatrix<LinearAlgebra::distributed::Vector<number> >());
+      smoother_data[level].preconditioner->get_vector() =
+        mg_matrices[level].get_matrix_diagonal_inverse();
+    }
+
+  mg_smoother.initialize(mg_matrices, smoother_data);
+
+  mg::Matrix<LinearAlgebra::distributed::Vector<number> >
+  mg_matrix(mg_matrices);
+  mg::Matrix<LinearAlgebra::distributed::Vector<number> >
+  mg_interface(mg_interface_matrices);
+
+  Multigrid<LinearAlgebra::distributed::Vector<number> > mg(dof,
+                                                            mg_matrix,
+                                                            mg_coarse,
+                                                            mg_transfer,
+                                                            mg_smoother,
+                                                            mg_smoother);
+  mg.set_edge_matrices(mg_interface, mg_interface);
+  PreconditionMG<dim, LinearAlgebra::distributed::Vector<number>,
+                 MGTransferMF<dim,LevelMatrixType> >
+                 preconditioner(dof, mg, mg_transfer);
+
+  {
+    // avoid output from inner (coarse-level) solver
+    deallog.depth_file(3);
+
+    ReductionControl control(30, 1e-20, 1e-7);
+    SolverCG<LinearAlgebra::distributed::Vector<number> > solver(control);
+    solver.solve(fine_matrix, sol, in, preconditioner);
+  }
+}
+
+
+
+template <int dim, int fe_degree, typename number>
+void test ()
+{
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD,
+                                                 Triangulation<dim>::limit_level_difference_at_vertices,
+                                                 parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(8-2*dim);
+  const unsigned int n_runs = fe_degree == 1 ? 6-dim : 5-dim;
+  for (unsigned int i=0; i<n_runs; ++i)
+    {
+      for (typename Triangulation<dim>::active_cell_iterator cell=tria.begin_active();
+           cell != tria.end(); ++cell)
+        if (cell->is_locally_owned() &&
+            ((cell->center().norm() < 0.5 && (cell->level() < 5 ||
+                                              cell->center().norm() > 0.45))
+             ||
+             (dim == 2 && cell->center().norm() > 1.2)))
+          cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+      FE_Q<dim> fe (fe_degree);
+      DoFHandler<dim> dof (tria);
+      dof.distribute_dofs(fe);
+      dof.distribute_mg_dofs(fe);
+
+      deallog.push("threaded");
+      do_test<dim, fe_degree, fe_degree+1, number> (dof, true);
+      deallog.pop();
+    }
+}
+
+
+
+int main (int argc, char **argv)
+{
+  // The original issue with partition_color
+  // is hit with 2 threads and 4 cores.
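+  // That configuration corresponds to this test's mpirun=4 run combined
+  // with the thread limit of 2 passed to MPI_InitFinalize below.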
+  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 2);
+
+  mpi_initlog();
+  deallog.threshold_double(1e-9);
+
+  test<2,1,double>();
+}
diff --git a/tests/matrix_free/parallel_multigrid_adaptive_05.with_mpi=true.with_p4est=true.mpirun=4.output b/tests/matrix_free/parallel_multigrid_adaptive_05.with_mpi=true.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..7916f86cb3
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_05.with_mpi=true.with_p4est=true.mpirun=4.output
@@ -0,0 +1,17 @@
+
+DEAL:threaded::Testing FE_Q<2>(1)
+DEAL:threaded::Number of degrees of freedom: 507
+DEAL:threaded:cg::Starting value 21.9317
+DEAL:threaded:cg::Convergence step 5 value 7.81205e-07
+DEAL:threaded::Testing FE_Q<2>(1)
+DEAL:threaded::Number of degrees of freedom: 881
+DEAL:threaded:cg::Starting value 27.7669
+DEAL:threaded:cg::Convergence step 6 value 9.24482e-07
+DEAL:threaded::Testing FE_Q<2>(1)
+DEAL:threaded::Number of degrees of freedom: 2147
+DEAL:threaded:cg::Starting value 43.2551
+DEAL:threaded:cg::Convergence step 6 value 1.14520e-06
+DEAL:threaded::Testing FE_Q<2>(1)
+DEAL:threaded::Number of degrees of freedom: 6517
+DEAL:threaded:cg::Starting value 76.9220
+DEAL:threaded:cg::Convergence step 6 value 1.55925e-06