From 2c14abc5f6e6e6a43464e7a78866ec40613df774 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Date: Mon, 11 Jan 2016 14:12:05 +0100
Subject: [PATCH] Framework tests (complete multigrid solver)

---
 .../parallel_multigrid_adaptive_mf.cc         | 580 ++++++++++++++++++
 ...h_mpi=true.with_p4est=true.mpirun=7.output |  49 ++
 tests/matrix_free/parallel_multigrid_mf.cc    | 433 +++++++++++++
 ...h_mpi=true.with_p4est=true.mpirun=7.output |  49 ++
 4 files changed, 1111 insertions(+)
 create mode 100644 tests/matrix_free/parallel_multigrid_adaptive_mf.cc
 create mode 100644 tests/matrix_free/parallel_multigrid_adaptive_mf.with_mpi=true.with_p4est=true.mpirun=7.output
 create mode 100644 tests/matrix_free/parallel_multigrid_mf.cc
 create mode 100644 tests/matrix_free/parallel_multigrid_mf.with_mpi=true.with_p4est=true.mpirun=7.output

diff --git a/tests/matrix_free/parallel_multigrid_adaptive_mf.cc b/tests/matrix_free/parallel_multigrid_adaptive_mf.cc
new file mode 100644
index 0000000000..fdc6862a8b
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_mf.cc
@@ -0,0 +1,580 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2014-2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// same test as parallel_multigrid_mf, but using adaptive meshes with
+// hanging nodes (doing local smoothing)
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_matrix.h>
+
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+
+std::ofstream logfile("output");
+
+
+template <int dim, int fe_degree, int n_q_points_1d = fe_degree+1, typename number = double>
+class LaplaceOperator : public Subscriptor
+{
+public:
+  typedef number value_type;
+
+  LaplaceOperator() {};
+
+
+  void initialize (const Mapping<dim> &mapping,
+                   const DoFHandler<dim> &dof_handler,
+                   const MGConstrainedDoFs &mg_constrained_dofs,
+                   const typename FunctionMap<dim>::type &dirichlet_boundary,
+                   const unsigned int level = numbers::invalid_unsigned_int)
+  {
+    const QGauss<1> quad (n_q_points_1d);
+    typename MatrixFree<dim,number>::AdditionalData addit_data;
+    addit_data.tasks_parallel_scheme = MatrixFree<dim,number>::AdditionalData::none;
+    addit_data.tasks_block_size = 3;
+    addit_data.level_mg_handler = level;
+    addit_data.mpi_communicator = MPI_COMM_WORLD;
+    ConstraintMatrix constraints;
+    if (level == numbers::invalid_unsigned_int)
+      {
+        IndexSet relevant_dofs;
+        DoFTools::extract_locally_relevant_dofs(dof_handler, relevant_dofs);
+        constraints.reinit(relevant_dofs);
+        DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+        VectorTools::interpolate_boundary_values(dof_handler, dirichlet_boundary,
+                                                 constraints);
+      }
+    else
+      {
+        IndexSet relevant_dofs;
+        DoFTools::extract_locally_relevant_level_dofs(dof_handler, level,
+                                                      relevant_dofs);
+        constraints.reinit(relevant_dofs);
+        constraints.add_lines(mg_constrained_dofs.get_boundary_indices(level));
+
+        std::vector<types::global_dof_index> interface_indices;
+        mg_constrained_dofs.get_refinement_edge_indices(level).fill_index_vector(interface_indices);
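+
+        // translate the global refinement-edge indices into local positions:
+        // the rows/columns at these indices are eliminated from the level
+        // matrix and applied separately by vmult_interface_down/up() below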
+        edge_constrained_indices.clear();
+        edge_constrained_indices.reserve(interface_indices.size());
+        edge_constrained_values.resize(interface_indices.size());
+        const IndexSet &locally_owned = dof_handler.locally_owned_mg_dofs(level);
+        for (unsigned int i=0; i<interface_indices.size(); ++i)
+          if (locally_owned.is_element(interface_indices[i]))
+            edge_constrained_indices.push_back(locally_owned.index_within_set(interface_indices[i]));
+        have_interface_matrices = Utilities::MPI::max((unsigned int)edge_constrained_indices.size(),
+                                                      MPI_COMM_WORLD) > 0;
+      }
+    constraints.close();
+
+    data.reinit (mapping, dof_handler, constraints, quad, addit_data);
+
+    if (level != numbers::invalid_unsigned_int)
+      compute_inverse_diagonal();
+  }
+
+  void vmult(parallel::distributed::Vector<number> &dst,
+             const parallel::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult(parallel::distributed::Vector<number> &dst,
+              const parallel::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult_add(parallel::distributed::Vector<number> &dst,
+                  const parallel::distributed::Vector<number> &src) const
+  {
+    vmult_add(dst, src);
+  }
+
+  void vmult_add(parallel::distributed::Vector<number> &dst,
+                 const parallel::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    // set zero Dirichlet values on the input vector (and remember the src and
+    // dst values because we need to reset them at the end)
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        edge_constrained_values[i] =
+          std::pair<number,number>(src.local_element(edge_constrained_indices[i]),
+                                   dst.local_element(edge_constrained_indices[i]));
+        const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+      }
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      dst.local_element(constrained_dofs[i]) += src.local_element(constrained_dofs[i]);
+
+    // reset edge constrained values, multiply by unit matrix and add into
+    // destination
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+        dst.local_element(edge_constrained_indices[i]) = edge_constrained_values[i].second + edge_constrained_values[i].first;
+      }
+  }
+
+  void vmult_interface_down(parallel::distributed::Vector<number> &dst,
+                            const parallel::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    dst = 0;
+
+    if (!have_interface_matrices)
+      return;
+
+    // set zero Dirichlet values on the input vector (and remember the src and
+    // dst values because we need to reset them at the end)
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        const double src_val = src.local_element(edge_constrained_indices[i]);
+        const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+        edge_constrained_values[i] = std::pair<number,number>(src_val,
+                                                              dst.local_element(edge_constrained_indices[i]));
+      }
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src);
+
+    unsigned int c=0;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        for ( ; c<edge_constrained_indices[i]; ++c)
+          dst.local_element(c) = 0.;
+        ++c;
+
+        // reset the src values
+        const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+      }
+    for ( ; c<dst.local_size(); ++c)
+      dst.local_element(c) = 0.;
+  }
+
+  void vmult_interface_up(parallel::distributed::Vector<number> &dst,
+                          const parallel::distributed::Vector<number> &src) const
+  {
+    Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+    Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
+
+    dst = 0;
+
+    if (!have_interface_matrices)
+      return;
+
+    parallel::distributed::Vector<number> src_cpy (src);
+    unsigned int c=0;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      {
+        for ( ; c<edge_constrained_indices[i]; ++c)
+          src_cpy.local_element(c) = 0.;
+        ++c;
+      }
+    for ( ; c<src_cpy.local_size(); ++c)
+      src_cpy.local_element(c) = 0.;
+
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src_cpy);
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      dst.local_element(edge_constrained_indices[i]) = 0.;
+  }
+
+  types::global_dof_index m() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  types::global_dof_index n() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  number el (const unsigned int row, const unsigned int col) const
+  {
+    AssertThrow(false, ExcMessage("Matrix-free does not allow for entry access"));
+    return number();
+  }
+
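+  // resize the vector to the parallel layout of the underlying MatrixFree
+  // object, but only if the current partitioning does not already match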
+  void
+  initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+  {
+    if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
+      data.initialize_dof_vector(vector);
+    Assert(vector.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner),
+           ExcInternalError());
+  }
+
+  const parallel::distributed::Vector<number> &
+  get_matrix_diagonal_inverse() const
+  {
+    Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
+    return inverse_diagonal_entries;
+  }
+
+
+private:
+  void
+  local_apply (const MatrixFree<dim,number> &data,
+               parallel::distributed::Vector<number> &dst,
+               const parallel::distributed::Vector<number> &src,
+               const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+        phi.read_dof_values(src);
+        phi.evaluate (false,true,false);
+        for (unsigned int q=0; q<phi.n_q_points; ++q)
+          phi.submit_gradient (phi.get_gradient(q), q);
+        phi.integrate (false,true);
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  void
+  compute_inverse_diagonal ()
+  {
+    data.initialize_dof_vector(inverse_diagonal_entries);
+    unsigned int dummy;
+    data.cell_loop (&LaplaceOperator::local_diagonal_cell,
+                    this, inverse_diagonal_entries, dummy);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      inverse_diagonal_entries.local_element(constrained_dofs[i]) = 1.;
+    for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
+      inverse_diagonal_entries.local_element(edge_constrained_indices[i]) = 1.;
+    for (unsigned int i=0; i<inverse_diagonal_entries.local_size(); ++i)
+      if (std::abs(inverse_diagonal_entries.local_element(i)) > 1e-10)
+        inverse_diagonal_entries.local_element(i) = 1./inverse_diagonal_entries.local_element(i);
+      else
+        inverse_diagonal_entries.local_element(i) = 1.;
+  }
+
+  void
+  local_diagonal_cell (const MatrixFree<dim,number> &data,
+                       parallel::distributed::Vector<number> &dst,
+                       const unsigned int &,
+                       const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+
+        VectorizedArray<number> local_diagonal_vector[phi.tensor_dofs_per_cell];
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          {
+            for (unsigned int j=0; j<phi.dofs_per_cell; ++j)
+              phi.begin_dof_values()[j] = VectorizedArray<number>();
+            phi.begin_dof_values()[i] = 1.;
+            phi.evaluate (false,true,false);
+            for (unsigned int q=0; q<phi.n_q_points; ++q)
+              phi.submit_gradient (phi.get_gradient(q), q);
+            phi.integrate (false,true);
+            local_diagonal_vector[i] = phi.begin_dof_values()[i];
+          }
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          phi.begin_dof_values()[i] = local_diagonal_vector[i];
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  MatrixFree<dim,number> data;
+  parallel::distributed::Vector<number> inverse_diagonal_entries;
+  std::vector<unsigned int> edge_constrained_indices;
+  mutable std::vector<std::pair<number,number> > edge_constrained_values;
+  bool have_interface_matrices;
+};
+
+
+
+template <typename LAPLACEOPERATOR>
+class MGInterfaceMatrix : public Subscriptor
+{
+public:
+  void initialize (const LAPLACEOPERATOR &laplace)
+  {
+    this->laplace = &laplace;
+  }
+
+  void vmult (parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+              const parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+  {
+    laplace->vmult_interface_down(dst, src);
+  }
+
+  void Tvmult (parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+               const parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+  {
+    laplace->vmult_interface_up(dst, src);
+  }
+
+private:
+  SmartPointer<const LAPLACEOPERATOR> laplace;
+};
+
+
+
+template <int dim, typename LAPLACEOPERATOR>
+class MGTransferMF : public MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>
+{
+public:
+  MGTransferMF(const MGLevelObject<LAPLACEOPERATOR> &laplace,
+               const MGConstrainedDoFs &mg_constrained_dofs)
+    :
+    MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>(mg_constrained_dofs),
+    laplace_operator (laplace)
+  {
+  }
+
+  /**
+   * Overload copy_to_mg from MGTransferPrebuilt to get the vectors compatible
+   * with MatrixFree and bypass the crude vector initialization in
+   * MGTransferPrebuilt
+   */
+  template <class InVector, int spacedim>
+  void
+  copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof_handler,
+              MGLevelObject<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> > &dst,
+              const InVector &src) const
+  {
+    for (unsigned int level=dst.min_level();
+         level<=dst.max_level(); ++level)
+      laplace_operator[level].initialize_dof_vector(dst[level]);
+    MGLevelGlobalTransfer<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> >::
+    copy_to_mg(mg_dof_handler, dst, src);
+  }
+
+private:
+  const MGLevelObject<LAPLACEOPERATOR> &laplace_operator;
+};
+
+
+
+template <typename MatrixType, typename Number>
+class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+{
+public:
+  MGCoarseIterative() {}
+
+  void initialize(const MatrixType &matrix)
+  {
+    coarse_matrix = &matrix;
+  }
+
+  virtual void operator() (const unsigned int level,
+                           parallel::distributed::Vector<Number> &dst,
+                           const parallel::distributed::Vector<Number> &src) const
+  {
+    ReductionControl solver_control (1e4, 1e-50, 1e-10);
+    SolverCG<parallel::distributed::Vector<Number> > solver_coarse (solver_control);
+    solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
+  }
+
+  const MatrixType *coarse_matrix;
+};
+
+
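+
+// do_test() assembles the pieces defined above into a complete V-cycle:
+// Chebyshev smoothing based on the matrix diagonal on every level, an
+// unpreconditioned CG solve on the coarsest level, and matrix-free transfer
+// between the levels; the resulting multigrid object preconditions an outer
+// CG iteration on the finest level.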
+
+template <int dim, int fe_degree, int n_q_points_1d, typename number>
+void do_test (const DoFHandler<dim> &dof)
+{
+  deallog << "Testing " << dof.get_fe().get_name();
+  deallog << std::endl;
+  deallog << "Number of degrees of freedom: " << dof.n_dofs() << std::endl;
+
+  ConstraintMatrix hanging_node_constraints;
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof, locally_relevant_dofs);
+  hanging_node_constraints.reinit(locally_relevant_dofs);
+  DoFTools::make_hanging_node_constraints(dof, hanging_node_constraints);
+  hanging_node_constraints.close();
+
+  MGConstrainedDoFs mg_constrained_dofs;
+  ZeroFunction<dim> zero_function;
+  typename FunctionMap<dim>::type dirichlet_boundary;
+  dirichlet_boundary[0] = &zero_function;
+  mg_constrained_dofs.initialize(dof, dirichlet_boundary);
+
+  MappingQ<dim> mapping(fe_degree+1);
+  LaplaceOperator<dim,fe_degree,n_q_points_1d,number> fine_matrix;
+  fine_matrix.initialize(mapping, dof, mg_constrained_dofs, dirichlet_boundary,
+                         numbers::invalid_unsigned_int);
+
+  parallel::distributed::Vector<number> in, sol;
+  fine_matrix.initialize_dof_vector(in);
+  fine_matrix.initialize_dof_vector(sol);
+
+  // set constant rhs vector
+  for (unsigned int i=0; i<in.local_size(); ++i)
+    if (!hanging_node_constraints.is_constrained(in.get_partitioner()->local_to_global(i)))
+      in.local_element(i) = 1.;
+
+  // set up multigrid in analogy to step-37
+  typedef LaplaceOperator<dim,fe_degree,n_q_points_1d,number> LevelMatrixType;
+
+  MGLevelObject<LevelMatrixType> mg_matrices;
+  mg_matrices.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    mg_matrices[level].initialize(mapping, dof, mg_constrained_dofs,
+                                  dirichlet_boundary, level);
+  MGLevelObject<MGInterfaceMatrix<LevelMatrixType> > mg_interface_matrices;
+  mg_interface_matrices.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level=0; level<dof.get_triangulation().n_global_levels(); ++level)
+    mg_interface_matrices[level].initialize(mg_matrices[level]);
+
+  MGTransferMF<dim,LevelMatrixType> mg_transfer(mg_matrices,
+                                                mg_constrained_dofs);
+  mg_transfer.build(dof);
+
+  MGCoarseIterative<LevelMatrixType,number> mg_coarse;
+  mg_coarse.initialize(mg_matrices[0]);
+
+  typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
+  MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+  mg_smoother;
+
+  MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
+  smoother_data.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    {
+      smoother_data[level].smoothing_range = 15.;
+      smoother_data[level].degree = 5;
+      smoother_data[level].eig_cg_n_iterations = 15;
+      smoother_data[level].matrix_diagonal_inverse =
+        mg_matrices[level].get_matrix_diagonal_inverse();
+    }
+  mg_smoother.initialize(mg_matrices, smoother_data);
+
+  mg::Matrix<parallel::distributed::Vector<number> >
+  mg_matrix(mg_matrices);
+  mg::Matrix<parallel::distributed::Vector<number> >
+  mg_interface(mg_interface_matrices);
+
+  Multigrid<parallel::distributed::Vector<number> > mg(dof,
+                                                       mg_matrix,
+                                                       mg_coarse,
+                                                       mg_transfer,
+                                                       mg_smoother,
+                                                       mg_smoother);
+  mg.set_edge_matrices(mg_interface, mg_interface);
+  PreconditionMG<dim, parallel::distributed::Vector<number>,
+                 MGTransferMF<dim,LevelMatrixType> >
+                 preconditioner(dof, mg, mg_transfer);
+
+  {
+    // avoid output from inner (coarse-level) solver
+    deallog.depth_file(2);
+    ReductionControl control(30, 1e-20, 1e-7);
+    SolverCG<parallel::distributed::Vector<number> > solver(control);
+    solver.solve(fine_matrix, sol, in, preconditioner);
+  }
+}
+
+
+
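+// test() refines the mesh adaptively before each solve: it flags the cells
+// inside the ball of radius 0.5 (restricting refinement past level 5 to the
+// annulus between radii 0.45 and 0.5) and, in 2d, the cells beyond radius
+// 1.2, so that every run contains hanging nodes.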
+template <int dim, int fe_degree, typename number>
+void test ()
+{
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD,
+                                                 dealii::Triangulation<dim>::none,parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(8-2*dim);
+  const unsigned int n_runs = fe_degree == 1 ? 6-dim : 5-dim;
+  for (unsigned int i=0; i<n_runs; ++i)
+    {
+      for (typename Triangulation<dim>::active_cell_iterator cell=tria.begin_active();
+           cell != tria.end(); ++cell)
+        if (cell->is_locally_owned() &&
+            (cell->center().norm() < 0.5 && (cell->level() < 5 ||
+                                             cell->center().norm() > 0.45)
+             ||
+             (dim == 2 && cell->center().norm() > 1.2)))
+          cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+      FE_Q<dim> fe (fe_degree);
+      DoFHandler<dim> dof (tria);
+      dof.distribute_dofs(fe);
+      dof.distribute_mg_dofs(fe);
+
+      do_test<dim, fe_degree, fe_degree+1, number> (dof);
+    }
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv);
+
+  mpi_initlog();
+  deallog.threshold_double(1e-9);
+
+  test<2,1,double>();
+  test<2,3,float>();
+  test<3,1,double>();
+  test<3,2,float>();
+}
diff --git a/tests/matrix_free/parallel_multigrid_adaptive_mf.with_mpi=true.with_p4est=true.mpirun=7.output b/tests/matrix_free/parallel_multigrid_adaptive_mf.with_mpi=true.with_p4est=true.mpirun=7.output
new file mode 100644
index 0000000000..2e57025e45
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_adaptive_mf.with_mpi=true.with_p4est=true.mpirun=7.output
@@ -0,0 +1,49 @@
+
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 507
+DEAL:cg::Starting value 21.9317
+DEAL:cg::Convergence step 5 value 7.81205e-07
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 881
+DEAL:cg::Starting value 27.7669
+DEAL:cg::Convergence step 6 value 9.24482e-07
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 2147
+DEAL:cg::Starting value 43.2551
+DEAL:cg::Convergence step 6 value 1.14520e-06
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 6517
+DEAL:cg::Starting value 76.9220
+DEAL:cg::Convergence step 6 value 1.55925e-06
+DEAL::Testing FE_Q<2>(3)
+DEAL::Number of degrees of freedom: 4259
+DEAL:cg::Starting value 64.2573
+DEAL:cg::Convergence step 5 value 1.14788e-06
+DEAL::Testing FE_Q<2>(3)
+DEAL::Number of degrees of freedom: 7457
+DEAL:cg::Starting value 83.1084
+DEAL:cg::Convergence step 5 value 6.03076e-06
+DEAL::Testing FE_Q<2>(3)
+DEAL::Number of degrees of freedom: 18535
+DEAL:cg::Starting value 130.977
+DEAL:cg::Convergence step 5 value 4.58984e-06
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 186
+DEAL:cg::Starting value 12.3693
+DEAL:cg::Convergence step 3 value 8.40297e-08
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 648
+DEAL:cg::Starting value 21.1424
+DEAL:cg::Convergence step 7 value 4.98971e-07
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 2930
+DEAL:cg::Starting value 47.1699
+DEAL:cg::Convergence step 7 value 1.64836e-06
+DEAL::Testing FE_Q<3>(2)
+DEAL::Number of degrees of freedom: 1106
+DEAL:cg::Starting value 30.8707
+DEAL:cg::Convergence step 5 value 5.33421e-08
+DEAL::Testing FE_Q<3>(2)
+DEAL::Number of degrees of freedom: 4268
+DEAL:cg::Starting value 57.4891
+DEAL:cg::Convergence step 8 value 7.61055e-07
diff --git a/tests/matrix_free/parallel_multigrid_mf.cc b/tests/matrix_free/parallel_multigrid_mf.cc
new file mode 100644
index 0000000000..1a38e6d86f
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_mf.cc
@@ -0,0 +1,433 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2014-2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// same test as parallel_multigrid but using matrix-free transfer operations
+// (and including single-precision multigrid preconditioning)
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/mapping_q.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <deal.II/multigrid/multigrid.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
+#include <deal.II/multigrid/mg_tools.h>
+#include <deal.II/multigrid/mg_coarse.h>
+#include <deal.II/multigrid/mg_smoother.h>
+#include <deal.II/multigrid/mg_matrix.h>
+
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+
+std::ofstream logfile("output");
+
+
+template <int dim, int fe_degree, int n_q_points_1d = fe_degree+1, typename number = double>
+class LaplaceOperator : public Subscriptor
+{
+public:
+  typedef number value_type;
+
+  LaplaceOperator() {};
+
+  void initialize (const Mapping<dim> &mapping,
+                   const DoFHandler<dim> &dof_handler,
+                   const std::set<types::boundary_id> &dirichlet_boundaries,
+                   const unsigned int level = numbers::invalid_unsigned_int)
+  {
+    const QGauss<1> quad (n_q_points_1d);
+    typename MatrixFree<dim,number>::AdditionalData addit_data;
+    addit_data.tasks_parallel_scheme = MatrixFree<dim,number>::AdditionalData::none;
+    addit_data.level_mg_handler = level;
+    addit_data.mpi_communicator = MPI_COMM_WORLD;
+
+    // extract the constraints due to Dirichlet boundary conditions
+    ConstraintMatrix constraints;
+    ZeroFunction<dim> zero;
+    typename FunctionMap<dim>::type functions;
+    for (std::set<types::boundary_id>::const_iterator it=dirichlet_boundaries.begin();
+         it != dirichlet_boundaries.end(); ++it)
+      functions[*it] = &zero;
+    if (level == numbers::invalid_unsigned_int)
+      VectorTools::interpolate_boundary_values(dof_handler, functions, constraints);
+    else
+      {
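+        // there is no interpolate_boundary_values on the levels: walk all
+        // cells of the level by hand and collect the face DoFs located on a
+        // Dirichlet boundary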
+        std::vector<types::global_dof_index> local_dofs;
+        typename DoFHandler<dim>::cell_iterator
+        cell = dof_handler.begin(level),
+        endc = dof_handler.end(level);
+        for (; cell!=endc; ++cell)
+          {
+            if (dof_handler.get_triangulation().locally_owned_subdomain()!=numbers::invalid_subdomain_id
+                && cell->level_subdomain_id()==numbers::artificial_subdomain_id)
+              continue;
+            const FiniteElement<dim> &fe = cell->get_fe();
+            local_dofs.resize(fe.dofs_per_face);
+
+            for (unsigned int face_no = 0; face_no < GeometryInfo<dim>::faces_per_cell;
+                 ++face_no)
+              if (cell->at_boundary(face_no) == true)
+                {
+                  const typename DoFHandler<dim>::face_iterator
+                  face = cell->face(face_no);
+                  const types::boundary_id bi = face->boundary_id();
+                  if (functions.find(bi) != functions.end())
+                    {
+                      face->get_mg_dof_indices(level, local_dofs);
+                      for (unsigned int i=0; i<fe.dofs_per_face; ++i)
+                        constraints.add_line(local_dofs[i]);
+                    }
+                }
+          }
+      }
+    constraints.close();
+
+    data.reinit (mapping, dof_handler, constraints, quad, addit_data);
+
+    if (level != numbers::invalid_unsigned_int)
+      compute_inverse_diagonal();
+  }
+
+  void vmult(parallel::distributed::Vector<number> &dst,
+             const parallel::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult(parallel::distributed::Vector<number> &dst,
+              const parallel::distributed::Vector<number> &src) const
+  {
+    dst = 0;
+    vmult_add(dst, src);
+  }
+
+  void Tvmult_add(parallel::distributed::Vector<number> &dst,
+                  const parallel::distributed::Vector<number> &src) const
+  {
+    vmult_add(dst, src);
+  }
+
+  void vmult_add(parallel::distributed::Vector<number> &dst,
+                 const parallel::distributed::Vector<number> &src) const
+  {
+    data.cell_loop (&LaplaceOperator::local_apply,
+                    this, dst, src);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      dst.local_element(constrained_dofs[i]) += src.local_element(constrained_dofs[i]);
+  }
+
+  types::global_dof_index m() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  types::global_dof_index n() const
+  {
+    return data.get_vector_partitioner()->size();
+  }
+
+  number el (const unsigned int row, const unsigned int col) const
+  {
+    AssertThrow(false,
+                ExcMessage("Matrix-free does not allow for entry access"));
+    return number();
+  }
+
+  void
+  initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+  {
+    if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
+      data.initialize_dof_vector(vector);
+    Assert(vector.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner),
+           ExcInternalError());
+  }
+
+  const parallel::distributed::Vector<number> &
+  get_matrix_diagonal_inverse() const
+  {
+    Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
+    return inverse_diagonal_entries;
+  }
+
+
+private:
+  void
+  local_apply (const MatrixFree<dim,number> &data,
+               parallel::distributed::Vector<number> &dst,
+               const parallel::distributed::Vector<number> &src,
+               const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+        phi.read_dof_values(src);
+        phi.evaluate (false,true,false);
+        for (unsigned int q=0; q<phi.n_q_points; ++q)
+          phi.submit_gradient (phi.get_gradient(q), q);
+        phi.integrate (false,true);
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  void
+  compute_inverse_diagonal ()
+  {
+    data.initialize_dof_vector(inverse_diagonal_entries);
+    unsigned int dummy;
+    data.cell_loop (&LaplaceOperator::local_diagonal_cell,
+                    this, inverse_diagonal_entries, dummy);
+
+    const std::vector<unsigned int> &
+    constrained_dofs = data.get_constrained_dofs();
+    for (unsigned int i=0; i<constrained_dofs.size(); ++i)
+      inverse_diagonal_entries.local_element(constrained_dofs[i]) = 1.;
+    for (unsigned int i=0; i<inverse_diagonal_entries.local_size(); ++i)
+      if (std::abs(inverse_diagonal_entries.local_element(i)) > 1e-10)
+        inverse_diagonal_entries.local_element(i) = 1./inverse_diagonal_entries.local_element(i);
+      else
+        inverse_diagonal_entries.local_element(i) = 1.;
+  }
+
+  void
+  local_diagonal_cell (const MatrixFree<dim,number> &data,
+                       parallel::distributed::Vector<number> &dst,
+                       const unsigned int &,
+                       const std::pair<unsigned int,unsigned int> &cell_range) const
+  {
+    FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
+
+    for (unsigned int cell=cell_range.first; cell<cell_range.second; ++cell)
+      {
+        phi.reinit (cell);
+
+        VectorizedArray<number> local_diagonal_vector[phi.tensor_dofs_per_cell];
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          {
+            for (unsigned int j=0; j<phi.dofs_per_cell; ++j)
+              phi.begin_dof_values()[j] = VectorizedArray<number>();
+            phi.begin_dof_values()[i] = 1.;
+            phi.evaluate (false,true,false);
+            for (unsigned int q=0; q<phi.n_q_points; ++q)
+              phi.submit_gradient (phi.get_gradient(q), q);
+            phi.integrate (false,true);
+            local_diagonal_vector[i] = phi.begin_dof_values()[i];
+          }
+        for (unsigned int i=0; i<phi.dofs_per_cell; ++i)
+          phi.begin_dof_values()[i] = local_diagonal_vector[i];
+        phi.distribute_local_to_global (dst);
+      }
+  }
+
+  MatrixFree<dim,number> data;
+  parallel::distributed::Vector<number> inverse_diagonal_entries;
+};
+
+
+
+template <int dim, typename LAPLACEOPERATOR>
+class MGTransferPrebuiltMF : public MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>
+{
+public:
+  MGTransferPrebuiltMF(const MGLevelObject<LAPLACEOPERATOR> &laplace,
+                       const MGConstrainedDoFs &mg_constrained_dofs)
+    :
+    MGTransferMatrixFree<dim, typename LAPLACEOPERATOR::value_type>(mg_constrained_dofs),
+    laplace_operator (laplace)
+  {};
+
+  /**
+   * Overload copy_to_mg from MGTransferPrebuilt to get the vectors compatible
+   * with MatrixFree and bypass the crude initialization in MGTransferPrebuilt
+   */
+  template <class InVector, int spacedim>
+  void
+  copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof,
+              MGLevelObject<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> > &dst,
+              const InVector &src) const
+  {
+    for (unsigned int level=dst.min_level();
+         level<=dst.max_level(); ++level)
+      laplace_operator[level].initialize_dof_vector(dst[level]);
+    MGLevelGlobalTransfer<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> >::copy_to_mg(mg_dof, dst, src);
+  }
+
+private:
+  const MGLevelObject<LAPLACEOPERATOR> &laplace_operator;
+};
+
+
+
+template <typename MatrixType, typename Number>
+class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+{
+public:
+  MGCoarseIterative() {}
+
+  void initialize(const MatrixType &matrix)
+  {
+    coarse_matrix = &matrix;
+  }
+
+  virtual void operator() (const unsigned int level,
+                           parallel::distributed::Vector<Number> &dst,
+                           const parallel::distributed::Vector<Number> &src) const
+  {
+    ReductionControl solver_control (1e4, 1e-50, 1e-10);
+    SolverCG<parallel::distributed::Vector<Number> > solver_coarse (solver_control);
+    solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
+  }
+
+  const MatrixType *coarse_matrix;
+};
+
+
+
+
+template <int dim, int fe_degree, int n_q_points_1d, typename number>
+void do_test (const DoFHandler<dim> &dof)
+{
+  deallog << "Testing " << dof.get_fe().get_name();
+  deallog << std::endl;
+  deallog << "Number of degrees of freedom: " << dof.n_dofs() << std::endl;
+
+  MappingQ<dim> mapping(fe_degree+1);
+  LaplaceOperator<dim,fe_degree,n_q_points_1d,number> fine_matrix;
+  std::set<types::boundary_id> dirichlet_boundaries;
+  dirichlet_boundaries.insert(0);
+  fine_matrix.initialize(mapping, dof, dirichlet_boundaries);
+
+  parallel::distributed::Vector<number> in, sol;
+  fine_matrix.initialize_dof_vector(in);
+  fine_matrix.initialize_dof_vector(sol);
+
+  // set constant rhs vector
+  in = 1.;
+
+  // set up multigrid in analogy to step-37
+  typedef LaplaceOperator<dim,fe_degree,n_q_points_1d,number> LevelMatrixType;
+
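+  // one level matrix per multigrid level, each with its own matrix-free
+  // storage and inverse diagonal for the Chebyshev smoother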
+  MGLevelObject<LevelMatrixType> mg_matrices;
+  mg_matrices.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    mg_matrices[level].initialize(mapping, dof, dirichlet_boundaries, level);
+
+  MGConstrainedDoFs mg_constrained_dofs;
+  ZeroFunction<dim> zero_function;
+  typename FunctionMap<dim>::type dirichlet_boundary;
+  dirichlet_boundary[0] = &zero_function;
+  mg_constrained_dofs.initialize(dof, dirichlet_boundary);
+
+  MGTransferPrebuiltMF<dim,LevelMatrixType> mg_transfer(mg_matrices, mg_constrained_dofs);
+  mg_transfer.build(dof);
+
+  MGCoarseIterative<LevelMatrixType,number> mg_coarse;
+  mg_coarse.initialize(mg_matrices[0]);
+
+  typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
+  MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+  mg_smoother;
+
+  MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
+  smoother_data.resize(0, dof.get_triangulation().n_global_levels()-1);
+  for (unsigned int level = 0; level<dof.get_triangulation().n_global_levels(); ++level)
+    {
+      smoother_data[level].smoothing_range = 15.;
+      smoother_data[level].degree = 5;
+      smoother_data[level].eig_cg_n_iterations = 15;
+      smoother_data[level].matrix_diagonal_inverse =
+        mg_matrices[level].get_matrix_diagonal_inverse();
+    }
+  mg_smoother.initialize(mg_matrices, smoother_data);
+
+  mg::Matrix<parallel::distributed::Vector<number> >
+  mg_matrix(mg_matrices);
+
+  Multigrid<parallel::distributed::Vector<number> > mg(dof,
+                                                       mg_matrix,
+                                                       mg_coarse,
+                                                       mg_transfer,
+                                                       mg_smoother,
+                                                       mg_smoother);
+  PreconditionMG<dim, parallel::distributed::Vector<number>,
+                 MGTransferPrebuiltMF<dim,LevelMatrixType> >
+                 preconditioner(dof, mg, mg_transfer);
+
+  {
+    // avoid output from inner (coarse-level) solver
+    deallog.depth_file(2);
+    ReductionControl control(30, 1e-20, 1e-7);
+    SolverCG<parallel::distributed::Vector<number> > solver(control);
+    solver.solve(fine_matrix, sol, in, preconditioner);
+  }
+}
+
+
+
+template <int dim, int fe_degree, typename number>
+void test ()
+{
+  for (unsigned int i=5; i<8; ++i)
+    {
+      parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD,
+                                                     dealii::Triangulation<dim>::none,parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+      GridGenerator::hyper_cube (tria);
+      tria.refine_global(i-dim);
+
+      FE_Q<dim> fe (fe_degree);
+      DoFHandler<dim> dof (tria);
+      dof.distribute_dofs(fe);
+      dof.distribute_mg_dofs(fe);
+
+      do_test<dim, fe_degree, fe_degree+1, number> (dof);
+    }
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv);
+
+  mpi_initlog();
+  deallog.threshold_double(1e-9);
+
+  {
+    test<2,1,double>();
+    test<2,2,float>();
+
+    test<3,1,double>();
+    test<3,2,float>();
+  }
+}
diff --git a/tests/matrix_free/parallel_multigrid_mf.with_mpi=true.with_p4est=true.mpirun=7.output b/tests/matrix_free/parallel_multigrid_mf.with_mpi=true.with_p4est=true.mpirun=7.output
new file mode 100644
index 0000000000..11d2719bc7
--- /dev/null
+++ b/tests/matrix_free/parallel_multigrid_mf.with_mpi=true.with_p4est=true.mpirun=7.output
@@ -0,0 +1,49 @@
+
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 81
+DEAL:cg::Starting value 9.00000
+DEAL:cg::Convergence step 3 value 3.51892e-08
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 289
+DEAL:cg::Starting value 17.0000
+DEAL:cg::Convergence step 3 value 3.00255e-08
+DEAL::Testing FE_Q<2>(1)
+DEAL::Number of degrees of freedom: 1089
+DEAL:cg::Starting value 33.0000
+DEAL:cg::Convergence step 3 value 1.23434e-06
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 289
+DEAL:cg::Starting value 17.0000
+DEAL:cg::Convergence step 3 value 2.83605e-07
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 1089
+DEAL:cg::Starting value 33.0000
+DEAL:cg::Convergence step 3 value 7.10474e-07
+DEAL::Testing FE_Q<2>(2)
+DEAL::Number of degrees of freedom: 4225
+DEAL:cg::Starting value 65.0000
+DEAL:cg::Convergence step 3 value 1.70919e-06
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 125
+DEAL:cg::Starting value 11.1803
+DEAL:cg::Convergence step 3 value 0
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 729
+DEAL:cg::Starting value 27.0000
+DEAL:cg::Convergence step 3 value 1.30226e-07
+DEAL::Testing FE_Q<3>(1)
+DEAL::Number of degrees of freedom: 4913
+DEAL:cg::Starting value 70.0928
+DEAL:cg::Convergence step 3 value 1.26015e-07
+DEAL::Testing FE_Q<3>(2)
+DEAL::Number of degrees of freedom: 729
+DEAL:cg::Starting value 27.0000
+DEAL:cg::Convergence step 3 value 2.86664e-08
+DEAL::Testing FE_Q<3>(2)
+DEAL::Number of degrees of freedom: 4913
+DEAL:cg::Starting value 70.0928
+DEAL:cg::Convergence step 3 value 1.13132e-06
+DEAL::Testing FE_Q<3>(2)
+DEAL::Number of degrees of freedom: 35937
+DEAL:cg::Starting value 189.571
+DEAL:cg::Convergence step 3 value 5.57593e-06
-- 
2.39.5
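
A note on running these tests: the suffixes in the expected-output file names
encode the configuration the deal.II test harness selects, i.e. a build with
MPI and p4est, executed on 7 MPI ranks, with the deallog output compared
against the .output files via numdiff. From a configured build directory the
tests can be invoked through CTest, for example (the exact regex is only a
suggestion):

    ctest -V -R "matrix_free/parallel_multigrid"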