From: Alexander Knieps
Date: Sun, 4 Mar 2018 19:52:34 +0000 (+0100)
Subject: Added support for PETSc in MGTransferPrebuilt, internal::MatrixSelector (mg_transfer...
X-Git-Tag: v9.1.0-rc1~1112^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=70da3c39f5b91ef32da3fb8a01776c1f2489e646;p=dealii.git

Added support for PETSc in MGTransferPrebuilt, internal::MatrixSelector (mg_transfer.h),
Removed local_columns.n_elements() > 0 assertion in PETScWrappers::MPI::SparseMatrix
---

diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in
index fc9466d78d..a41532a296 100644
--- a/cmake/config/template-arguments.in
+++ b/cmake/config/template-arguments.in
@@ -168,6 +168,7 @@ VECTORS_WITH_MATRIX := { Vector;
                         @DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
                         @DEAL_II_EXPAND_EPETRA_VECTOR@;
+                        @DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
                       }

 // Matrices
diff --git a/doc/news/changes/minor/20180521AlexanderKnieps b/doc/news/changes/minor/20180521AlexanderKnieps
new file mode 100644
index 0000000000..be296b5de4
--- /dev/null
+++ b/doc/news/changes/minor/20180521AlexanderKnieps
@@ -0,0 +1,5 @@
+Improved: MGTransferPrebuilt now supports
+PETScWrappers::MPI::Vector and
+PETScWrappers::MPI::SparseMatrix.
+
+(Alexander Knieps, 2018/05/21)
diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h
index 2608dedbf8..4067f4716b 100644
--- a/include/deal.II/multigrid/mg_transfer.h
+++ b/include/deal.II/multigrid/mg_transfer.h
@@ -24,6 +24,8 @@
 #include
 #include
 #include
+#include
+#include
 #include
@@ -47,6 +49,8 @@ namespace internal
     typedef ::dealii::SparsityPattern Sparsity;
     typedef ::dealii::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template
     static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const SparsityPatternType &sp, const DoFHandlerType &)
     {
@@ -63,6 +67,8 @@
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -86,6 +92,8 @@
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -109,6 +117,8 @@
     typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
     typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template
     static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, DoFHandlerType &dh)
     {
@@ -133,6 +143,8 @@
     typedef ::dealii::SparsityPattern Sparsity;
     typedef ::dealii::SparseMatrix Matrix;
 
+    static const bool requires_distributed_sparsity_pattern = false;
+
     template
     static void reinit(Matrix &, Sparsity &, int, const SparsityPatternType &, const DoFHandlerType &)
     {
@@ -143,6 +155,33 @@ namespace internal
     };
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+  template <>
+  struct MatrixSelector
+  {
+    typedef ::dealii::DynamicSparsityPattern Sparsity;
+    typedef ::dealii::PETScWrappers::MPI::SparseMatrix Matrix;
+
+    static const bool requires_distributed_sparsity_pattern = true;
+
+    template
+    static void reinit(Matrix &matrix, Sparsity &, int level, const SparsityPatternType &sp, const DoFHandlerType &dh)
+    {
+      const parallel::Triangulation *dist_tria =
+        dynamic_cast*>
+        (&(dh.get_triangulation()));
+      MPI_Comm communicator = dist_tria != nullptr ?
+                              dist_tria->get_communicator() :
+                              MPI_COMM_SELF;
+      // Reinit PETSc matrix
+      matrix.reinit(dh.locally_owned_mg_dofs(level+1),
+                    dh.locally_owned_mg_dofs(level),
+                    sp, communicator);
+    }
+
+  };
+#endif
 }

 /*
diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h
index 49c72ebc41..1130d794b9 100644
--- a/include/deal.II/multigrid/mg_transfer.templates.h
+++ b/include/deal.II/multigrid/mg_transfer.templates.h
@@ -19,6 +19,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -152,6 +153,31 @@ namespace
       }
   }
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+  /**
+   * Adjust vectors on all levels to correct size. Here, we just count the
+   * numbers of degrees of freedom on each level and @p reinit each level
+   * vector to this length.
+   */
+  template
+  void
+  reinit_vector (const dealii::DoFHandler &mg_dof,
+                 const std::vector &,
+                 MGLevelObject &v)
+  {
+    const dealii::parallel::Triangulation *tria =
+      (dynamic_cast*>
+       (&mg_dof.get_triangulation()));
+    AssertThrow(tria!=nullptr, ExcMessage("multigrid with parallel PETSc vectors only works with a parallel Triangulation!"));
+
+    for (unsigned int level=v.min_level();
+         level<=v.max_level(); ++level)
+      {
+        v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria->get_communicator());
+      }
+  }
+#endif
 }
diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc
index 08ac98f6df..85f34f07e4 100644
--- a/source/lac/petsc_parallel_sparse_matrix.cc
+++ b/source/lac/petsc_parallel_sparse_matrix.cc
@@ -382,7 +382,6 @@ namespace PETScWrappers
       //if (preset_nonzero_locations == true)
       if (local_rows.n_elements()>0)
         {
-          Assert(local_columns.n_elements()>0, ExcInternalError());
           // MatMPIAIJSetPreallocationCSR
           // can be used to allocate the sparsity
           // pattern of a matrix
diff --git a/source/multigrid/mg_level_global_transfer.inst.in b/source/multigrid/mg_level_global_transfer.inst.in
index d9adbd8cee..d67f14254b 100644
--- a/source/multigrid/mg_level_global_transfer.inst.in
+++ b/source/multigrid/mg_level_global_transfer.inst.in
@@ -81,4 +81,16 @@ for(deal_II_dimension : DIMENSIONS)
      MGLevelGlobalTransfer::copy_from_mg_add (const DoFHandler&, TrilinosWrappers::MPI::Vector&,
                                               const MGLevelObject&) const;
 #endif
+
+#ifdef DEAL_II_WITH_PETSC
+    template void
+    MGLevelGlobalTransfer::copy_to_mg (
+      const DoFHandler&, MGLevelObject&, const PETScWrappers::MPI::Vector&) const;
+    template void
+    MGLevelGlobalTransfer::copy_from_mg (const DoFHandler&, PETScWrappers::MPI::Vector&,
+                                         const MGLevelObject&) const;
+    template void
+    MGLevelGlobalTransfer::copy_from_mg_add (const DoFHandler&, PETScWrappers::MPI::Vector&,
+                                             const MGLevelObject&) const;
+#endif
 }
diff --git a/source/multigrid/mg_transfer_prebuilt.cc b/source/multigrid/mg_transfer_prebuilt.cc
index dffa44fdb7..b2b0f6f331 100644
--- a/source/multigrid/mg_transfer_prebuilt.cc
+++ b/source/multigrid/mg_transfer_prebuilt.cc
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -236,6 +237,41 @@ void MGTransferPrebuilt::build_matrices
           }
       }
+#ifdef DEAL_II_WITH_MPI
+      if (internal::MatrixSelector::requires_distributed_sparsity_pattern)
+        {
+          // Since PETSc matrices do not offer the functionality to fill up in-
+          // complete sparsity patterns on their own, the sparsity pattern must be
+          // manually distributed.
+
+          // Retrieve communicator from triangulation if it is parallel
+          const parallel::Triangulation *dist_tria =
+            dynamic_cast*>
+            (&(mg_dof.get_triangulation()));
+
+          MPI_Comm communicator = dist_tria != nullptr ?
+                                  dist_tria->get_communicator() :
+                                  MPI_COMM_SELF;
+
+          // Compute # of locally owned MG dofs / processor for distribution
+          const std::vector<::dealii::IndexSet> &locally_owned_mg_dofs_per_processor = mg_dof.locally_owned_mg_dofs_per_processor(level+1);
+          std::vector<::dealii::types::global_dof_index> n_locally_owned_mg_dofs_per_processor(locally_owned_mg_dofs_per_processor.size(), 0);
+
+          for (size_t index = 0; index < n_locally_owned_mg_dofs_per_processor.size(); ++index)
+            {
+              n_locally_owned_mg_dofs_per_processor[index] = locally_owned_mg_dofs_per_processor[index].n_elements();
+            }
+
+          // Distribute sparsity pattern
+          ::dealii::SparsityTools::distribute_sparsity_pattern(
+            dsp,
+            n_locally_owned_mg_dofs_per_processor,
+            communicator,
+            dsp.row_index_set()
+          );
+        }
+#endif
+
       internal::MatrixSelector::reinit(*prolongation_matrices[level],
                                        *prolongation_sparsities[level],
                                        level,
diff --git a/tests/multigrid/transfer_04a.cc b/tests/multigrid/transfer_04a.cc
index b4ec2728aa..3ca8048876 100644
--- a/tests/multigrid/transfer_04a.cc
+++ b/tests/multigrid/transfer_04a.cc
@@ -14,7 +14,7 @@
 // ---------------------------------------------------------------------
 
-// check mg transfer in parallel
+// check mg transfer in parallel for trilinos vectors
 
 #include "../tests.h"
 #include
diff --git a/tests/multigrid/transfer_04b.cc b/tests/multigrid/transfer_04b.cc
new file mode 100644
index 0000000000..dcc0a621c1
--- /dev/null
+++ b/tests/multigrid/transfer_04b.cc
@@ -0,0 +1,182 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2006 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+// +// --------------------------------------------------------------------- + + +// check mg transfer in parallel for PETSc vectors + +#include "../tests.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +using namespace std; + +template +void setup_tria(parallel::distributed::Triangulation &tr) +{ + GridGenerator::hyper_cube(tr); + tr.refine_global(1); + + for (typename parallel::distributed::Triangulation::active_cell_iterator cell = tr.begin_active(); + cell != tr.end(); ++cell) + { + if (cell->id().to_string() != "0_1:3") + cell->set_refine_flag(); + } + tr.execute_coarsening_and_refinement(); + + for (typename parallel::distributed::Triangulation::active_cell_iterator cell = tr.begin_active(); + cell != tr.end(); ++cell) + { + if (cell->id().to_string() == "0_2:00" + || cell->id().to_string() == "0_2:01" + || cell->id().to_string() == "0_2:02") + cell->set_refine_flag(); + } + tr.execute_coarsening_and_refinement(); + + + for (typename parallel::distributed::Triangulation::cell_iterator cell = tr.begin(); + cell != tr.end(); ++cell) + { + deallog << "cell=" << cell->id() + << " level_subdomain_id=" << cell->level_subdomain_id() + << std::endl; + } +} + + + + + + + + + +template +void check_fe(FiniteElement &fe) +{ + deallog << fe.get_name() << std::endl; + + parallel::distributed::Triangulation tr(MPI_COMM_WORLD, + Triangulation::limit_level_difference_at_vertices, + parallel::distributed::Triangulation::construct_multigrid_hierarchy); + setup_tria(tr); + + if (false) + { + DataOut data_out; + Vector subdomain (tr.n_active_cells()); + for (unsigned int i=0; i zero; + typename FunctionMap::type fmap; + fmap.insert(std::make_pair(0, &zero)); + + DoFHandler dofh(tr); + dofh.distribute_dofs(fe); + dofh.distribute_mg_dofs(fe); + typedef PETScWrappers::MPI::Vector vector_t; + { + + + + } + MGTransferPrebuilt transfer; + transfer.build_matrices(dofh); + transfer.print_indices(deallog.get_file_stream()); + + MGLevelObject u(0, tr.n_global_levels()-1); + for (unsigned int level=u.min_level(); level<=u.max_level(); ++level) + { + u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD); + for (unsigned int i=0; i +void check() +{ + FE_Q q1(1); + FE_Q q2(2); +// FE_DGQ dq1(1); + + FESystem s1(q1, 2, q2,1); + + check_fe(q1); + // check_fe(q2); + //check_fe(s1); +} + +int main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll log; + + check<2> (); + //check<3> (); +} diff --git a/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output new file mode 100644 index 0000000000..86ad6eb4a4 --- /dev/null +++ b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=1.debug.output @@ -0,0 +1,75 @@ + +DEAL:0::FE_Q<2>(1) +DEAL:0::cell=0_0: level_subdomain_id=0 +DEAL:0::cell=0_1:0 level_subdomain_id=0 +DEAL:0::cell=0_1:1 level_subdomain_id=0 +DEAL:0::cell=0_1:2 level_subdomain_id=0 +DEAL:0::cell=0_1:3 level_subdomain_id=0 +DEAL:0::cell=0_2:00 level_subdomain_id=0 +DEAL:0::cell=0_2:01 level_subdomain_id=0 +DEAL:0::cell=0_2:02 level_subdomain_id=0 +DEAL:0::cell=0_2:03 level_subdomain_id=0 +DEAL:0::cell=0_2:10 level_subdomain_id=0 +DEAL:0::cell=0_2:11 level_subdomain_id=0 +DEAL:0::cell=0_2:12 level_subdomain_id=0 +DEAL:0::cell=0_2:13 level_subdomain_id=0 +DEAL:0::cell=0_2:20 level_subdomain_id=0 
+DEAL:0::cell=0_2:21 level_subdomain_id=0 +DEAL:0::cell=0_2:22 level_subdomain_id=0 +DEAL:0::cell=0_2:23 level_subdomain_id=0 +DEAL:0::cell=0_3:000 level_subdomain_id=0 +DEAL:0::cell=0_3:001 level_subdomain_id=0 +DEAL:0::cell=0_3:002 level_subdomain_id=0 +DEAL:0::cell=0_3:003 level_subdomain_id=0 +DEAL:0::cell=0_3:010 level_subdomain_id=0 +DEAL:0::cell=0_3:011 level_subdomain_id=0 +DEAL:0::cell=0_3:012 level_subdomain_id=0 +DEAL:0::cell=0_3:013 level_subdomain_id=0 +DEAL:0::cell=0_3:020 level_subdomain_id=0 +DEAL:0::cell=0_3:021 level_subdomain_id=0 +DEAL:0::cell=0_3:022 level_subdomain_id=0 +DEAL:0::cell=0_3:023 level_subdomain_id=0 +copy_indices[1] 0 3 +copy_indices[1] 1 5 +copy_indices[1] 2 7 +copy_indices[1] 3 8 +copy_indices[2] 0 8 +copy_indices[2] 1 14 +copy_indices[2] 2 20 +copy_indices[2] 4 3 +copy_indices[2] 5 5 +copy_indices[2] 6 7 +copy_indices[2] 7 4 +copy_indices[2] 8 9 +copy_indices[2] 9 10 +copy_indices[2] 10 11 +copy_indices[2] 11 12 +copy_indices[2] 12 13 +copy_indices[2] 13 6 +copy_indices[2] 14 15 +copy_indices[2] 15 16 +copy_indices[2] 16 17 +copy_indices[2] 17 18 +copy_indices[2] 18 19 +copy_indices[3] 4 8 +copy_indices[3] 5 14 +copy_indices[3] 6 20 +copy_indices[3] 7 11 +copy_indices[3] 13 18 +copy_indices[3] 19 0 +copy_indices[3] 20 1 +copy_indices[3] 21 2 +copy_indices[3] 22 3 +copy_indices[3] 23 4 +copy_indices[3] 24 5 +copy_indices[3] 25 6 +copy_indices[3] 26 7 +copy_indices[3] 27 9 +copy_indices[3] 28 10 +copy_indices[3] 29 12 +copy_indices[3] 30 13 +copy_indices[3] 31 15 +copy_indices[3] 32 16 +copy_indices[3] 33 17 +copy_indices[3] 34 19 +DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok diff --git a/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output new file mode 100644 index 0000000000..c9a3860e32 --- /dev/null +++ b/tests/multigrid/transfer_04b.with_trilinos=true.mpirun=2.debug.output @@ -0,0 +1,109 @@ + +DEAL:0::FE_Q<2>(1) +DEAL:0::cell=0_0: level_subdomain_id=0 +DEAL:0::cell=0_1:0 level_subdomain_id=0 +DEAL:0::cell=0_1:1 level_subdomain_id=1 +DEAL:0::cell=0_1:2 level_subdomain_id=1 +DEAL:0::cell=0_1:3 level_subdomain_id=1 +DEAL:0::cell=0_2:00 level_subdomain_id=0 +DEAL:0::cell=0_2:01 level_subdomain_id=0 +DEAL:0::cell=0_2:02 level_subdomain_id=0 +DEAL:0::cell=0_2:03 level_subdomain_id=1 +DEAL:0::cell=0_2:10 level_subdomain_id=1 +DEAL:0::cell=0_2:11 level_subdomain_id=4294967294 +DEAL:0::cell=0_2:12 level_subdomain_id=1 +DEAL:0::cell=0_2:13 level_subdomain_id=4294967294 +DEAL:0::cell=0_2:20 level_subdomain_id=1 +DEAL:0::cell=0_2:21 level_subdomain_id=1 +DEAL:0::cell=0_2:22 level_subdomain_id=4294967294 +DEAL:0::cell=0_2:23 level_subdomain_id=4294967294 +DEAL:0::cell=0_3:000 level_subdomain_id=0 +DEAL:0::cell=0_3:001 level_subdomain_id=0 +DEAL:0::cell=0_3:002 level_subdomain_id=0 +DEAL:0::cell=0_3:003 level_subdomain_id=0 +DEAL:0::cell=0_3:010 level_subdomain_id=0 +DEAL:0::cell=0_3:011 level_subdomain_id=0 +DEAL:0::cell=0_3:012 level_subdomain_id=0 +DEAL:0::cell=0_3:013 level_subdomain_id=0 +DEAL:0::cell=0_3:020 level_subdomain_id=0 +DEAL:0::cell=0_3:021 level_subdomain_id=0 +DEAL:0::cell=0_3:022 level_subdomain_id=0 +DEAL:0::cell=0_3:023 level_subdomain_id=0 +copy_indices[2] 8 3 +copy_indices[2] 11 4 +copy_indices[2] 14 5 
+copy_indices[2] 18 6 +copy_indices[2] 20 7 +copy_indices[3] 0 0 +copy_indices[3] 1 1 +copy_indices[3] 2 2 +copy_indices[3] 3 3 +copy_indices[3] 4 4 +copy_indices[3] 5 5 +copy_indices[3] 6 6 +copy_indices[3] 7 7 +copy_indices[3] 8 8 +copy_indices[3] 9 9 +copy_indices[3] 10 10 +copy_indices[3] 11 11 +copy_indices[3] 12 12 +copy_indices[3] 13 13 +copy_indices[3] 14 14 +copy_indices[3] 15 15 +copy_indices[3] 16 16 +copy_indices[3] 17 17 +copy_indices[3] 18 18 +copy_indices[3] 19 19 +copy_indices[3] 20 20 +copy_ifrom [1] 21 3 +DEAL:0::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok + +DEAL:1::FE_Q<2>(1) +DEAL:1::cell=0_0: level_subdomain_id=0 +DEAL:1::cell=0_1:0 level_subdomain_id=0 +DEAL:1::cell=0_1:1 level_subdomain_id=1 +DEAL:1::cell=0_1:2 level_subdomain_id=1 +DEAL:1::cell=0_1:3 level_subdomain_id=1 +DEAL:1::cell=0_2:00 level_subdomain_id=0 +DEAL:1::cell=0_2:01 level_subdomain_id=0 +DEAL:1::cell=0_2:02 level_subdomain_id=0 +DEAL:1::cell=0_2:03 level_subdomain_id=1 +DEAL:1::cell=0_2:10 level_subdomain_id=1 +DEAL:1::cell=0_2:11 level_subdomain_id=1 +DEAL:1::cell=0_2:12 level_subdomain_id=1 +DEAL:1::cell=0_2:13 level_subdomain_id=1 +DEAL:1::cell=0_2:20 level_subdomain_id=1 +DEAL:1::cell=0_2:21 level_subdomain_id=1 +DEAL:1::cell=0_2:22 level_subdomain_id=1 +DEAL:1::cell=0_2:23 level_subdomain_id=1 +DEAL:1::cell=0_3:000 level_subdomain_id=4294967294 +DEAL:1::cell=0_3:001 level_subdomain_id=4294967294 +DEAL:1::cell=0_3:002 level_subdomain_id=4294967294 +DEAL:1::cell=0_3:003 level_subdomain_id=0 +DEAL:1::cell=0_3:010 level_subdomain_id=4294967294 +DEAL:1::cell=0_3:011 level_subdomain_id=0 +DEAL:1::cell=0_3:012 level_subdomain_id=0 +DEAL:1::cell=0_3:013 level_subdomain_id=0 +DEAL:1::cell=0_3:020 level_subdomain_id=4294967294 +DEAL:1::cell=0_3:021 level_subdomain_id=0 +DEAL:1::cell=0_3:022 level_subdomain_id=0 +DEAL:1::cell=0_3:023 level_subdomain_id=0 +copy_indices[1] 22 5 +copy_indices[1] 23 7 +copy_indices[1] 24 8 +copy_indices[2] 21 8 +copy_indices[2] 22 14 +copy_indices[2] 23 20 +copy_indices[2] 25 9 +copy_indices[2] 26 10 +copy_indices[2] 27 11 +copy_indices[2] 28 12 +copy_indices[2] 29 13 +copy_indices[2] 30 15 +copy_indices[2] 31 16 +copy_indices[2] 32 17 +copy_indices[2] 33 18 +copy_indices[2] 34 19 +copy_ito [1] 21 3 +DEAL:1::1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 1.00000 ok +
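
A note on how the pieces above fit together: internal::MatrixSelector maps the vector type used by MGTransferPrebuilt to matching sparsity-pattern and matrix types, and the new requires_distributed_sparsity_pattern flag tells build_matrices() whether the pattern must be completed across MPI ranks before the level matrix is created. The sketch below spells out the new PETSc specialization with explicit template arguments; the template parameter spellings, header paths, and formatting are assumptions based on deal.II conventions of this era, not a verbatim copy of the patch.

#include <deal.II/distributed/tria_base.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_parallel_vector.h>

// Primary template as declared in mg_transfer.h (repeated here only so the
// sketch is self-contained).
template <typename VectorType>
struct MatrixSelector;

#ifdef DEAL_II_WITH_PETSC
template <>
struct MatrixSelector<dealii::PETScWrappers::MPI::Vector>
{
  typedef dealii::DynamicSparsityPattern           Sparsity;
  typedef dealii::PETScWrappers::MPI::SparseMatrix Matrix;

  // PETSc cannot complete a partially built sparsity pattern on its own, so
  // build_matrices() has to distribute it first (see mg_transfer_prebuilt.cc).
  static const bool requires_distributed_sparsity_pattern = true;

  template <typename SparsityPatternType, typename DoFHandlerType>
  static void reinit(Matrix &matrix,
                     Sparsity &,
                     int                        level,
                     const SparsityPatternType &sp,
                     const DoFHandlerType      &dh)
  {
    // Use the triangulation's communicator if the mesh is a parallel one,
    // otherwise fall back to a single-process communicator.
    const dealii::parallel::Triangulation<DoFHandlerType::dimension,
                                          DoFHandlerType::space_dimension>
      *dist_tria = dynamic_cast<
        const dealii::parallel::Triangulation<DoFHandlerType::dimension,
                                              DoFHandlerType::space_dimension> *>(
        &(dh.get_triangulation()));

    const MPI_Comm communicator =
      dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;

    // Rows of the level prolongation live on level+1, columns on level.
    matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
                  dh.locally_owned_mg_dofs(level),
                  sp,
                  communicator);
  }
};
#endif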
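
The core of the change in mg_transfer_prebuilt.cc is the extra distribution step guarded by requires_distributed_sparsity_pattern. The sketch below isolates that step in a generic form using the same calls as the patch (SparsityTools::distribute_sparsity_pattern and the IndexSet-based SparseMatrix::reinit); the function name build_petsc_level_matrix and its parameters are illustrative placeholders, and the header paths follow the file names of this era of deal.II.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/sparsity_tools.h>

#include <vector>

using namespace dealii;

// Complete a DynamicSparsityPattern across MPI ranks and build a PETSc matrix
// from it. 'dsp' holds the couplings this rank generated, including entries in
// rows owned by other ranks; those are shipped to their owners first, because
// PETSc cannot complete a partially built sparsity pattern by itself.
void build_petsc_level_matrix(DynamicSparsityPattern           &dsp,
                              const std::vector<IndexSet>      &owned_rows_per_rank,
                              const IndexSet                   &owned_columns,
                              const MPI_Comm                    communicator,
                              PETScWrappers::MPI::SparseMatrix &matrix)
{
  // Number of locally owned rows on every rank, as required by
  // SparsityTools::distribute_sparsity_pattern().
  std::vector<types::global_dof_index> rows_per_rank(owned_rows_per_rank.size());
  for (std::size_t i = 0; i < owned_rows_per_rank.size(); ++i)
    rows_per_rank[i] = owned_rows_per_rank[i].n_elements();

  SparsityTools::distribute_sparsity_pattern(dsp,
                                             rows_per_rank,
                                             communicator,
                                             dsp.row_index_set());

  // Every locally owned row of dsp is now complete, so the PETSc matrix can be
  // preallocated from it (same reinit overload as in MatrixSelector::reinit).
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
  matrix.reinit(owned_rows_per_rank[my_rank],
                owned_columns,
                dsp,
                communicator);
}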
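
Finally, a minimal usage sketch of what the change enables, loosely following the new test transfer_04b.cc: MGTransferPrebuilt driven with PETScWrappers::MPI::Vector on a parallel::distributed::Triangulation that stores a multigrid hierarchy. This is not part of the patch; it assumes deal.II was configured with PETSc, p4est and MPI, and the header paths again follow the naming of this era.

#include <deal.II/base/mg_level_object.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/multigrid/mg_transfer.h>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // A distributed mesh that also stores the level (multigrid) hierarchy.
  parallel::distributed::Triangulation<2> tria(
    MPI_COMM_WORLD,
    Triangulation<2>::limit_level_difference_at_vertices,
    parallel::distributed::Triangulation<2>::construct_multigrid_hierarchy);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  FE_Q<2>       fe(1);
  DoFHandler<2> dofh(tria);
  dofh.distribute_dofs(fe);
  dofh.distribute_mg_dofs(fe); // level DoFs are required for the transfer

  // The level transfer matrices are now PETScWrappers::MPI::SparseMatrix.
  MGTransferPrebuilt<PETScWrappers::MPI::Vector> transfer;
  transfer.build_matrices(dofh);

  // Level vectors live on the locally owned level DoFs of each level.
  MGLevelObject<PETScWrappers::MPI::Vector> u(0, tria.n_global_levels() - 1);
  for (unsigned int level = u.min_level(); level <= u.max_level(); ++level)
    u[level].reinit(dofh.locally_owned_mg_dofs(level), MPI_COMM_WORLD);

  // Move data through the hierarchy and back to a global vector.
  transfer.prolongate(1, u[1], u[0]);

  PETScWrappers::MPI::Vector global;
  global.reinit(dofh.locally_owned_dofs(), MPI_COMM_WORLD);
  transfer.copy_from_mg(dofh, global, u);
}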