From f531c9dd7c425d93c916e6d9170b107a72845286 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Tue, 24 Jun 2014 07:21:44 +0000
Subject: [PATCH] Tentatively provide a fix that chooses the thread-safe
 assembly code path also in other cases with Trilinos version >= 11.9.
 However, it requires one more fix in Trilinos.

git-svn-id: https://svn.dealii.org/trunk@33082 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../deal.II/lac/trilinos_sparse_matrix.h      |   6 +-
 .../source/lac/trilinos_sparsity_pattern.cc   |  29 +-
 tests/trilinos/assemble_matrix_parallel_07.cc | 462 ++++++++++++++++++
 ...ssemble_matrix_parallel_07.mpirun=4.output |  13 +
 4 files changed, 497 insertions(+), 13 deletions(-)
 create mode 100644 tests/trilinos/assemble_matrix_parallel_07.cc
 create mode 100644 tests/trilinos/assemble_matrix_parallel_07.mpirun=4.output

diff --git a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
index 21b2f10545..4d2fdea844 100644
--- a/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
+++ b/deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
@@ -470,7 +470,11 @@ namespace TrilinosWrappers
    * TrilinosWrappers::SparsityPattern object that in turn has been
    * initialized with the reinit function specifying three index sets, one
    * for the rows, one for the columns and for the larger set of @p
-   * writeable_rows, and the operation is an addition.
+   * writeable_rows, and the operation is an addition. If Trilinos version
+   * 11.10 or newer is used, initializing from a
+   * TrilinosWrappers::SparsityPattern that has been filled by a function
+   * similar to DoFTools::make_sparsity_pattern always results in a matrix
+   * that allows several processes to write into the same matrix row.
    *
    *
    * Note that all other reinit methods and constructors of
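For illustration, the reinit variant that the new documentation paragraph
refers to looks roughly as follows in user code. This is a sketch only; the
dof_handler and constraints objects are placeholders for what an application
would provide (the same pattern appears in the test added below):

    // Build a sparsity pattern whose writeable_rows set is larger than the
    // locally owned row set, so several processes may add to the same row.
    IndexSet locally_owned = dof_handler.locally_owned_dofs();
    IndexSet relevant_set;
    DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_set);

    TrilinosWrappers::SparsityPattern csp;
    csp.reinit (locally_owned,    // row partitioning
                locally_owned,    // column partitioning
                relevant_set,     // writeable_rows: superset of owned rows
                MPI_COMM_WORLD);
    DoFTools::make_sparsity_pattern (dof_handler, csp, constraints, false);
    csp.compress ();

    TrilinosWrappers::SparseMatrix matrix;
    matrix.reinit (csp);          // additions into shared rows now allowed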
diff --git a/deal.II/source/lac/trilinos_sparsity_pattern.cc b/deal.II/source/lac/trilinos_sparsity_pattern.cc
index 072b1ceaa2..a9991e2666 100644
--- a/deal.II/source/lac/trilinos_sparsity_pattern.cc
+++ b/deal.II/source/lac/trilinos_sparsity_pattern.cc
@@ -354,19 +354,20 @@ namespace TrilinosWrappers
       graph.reset ();
       column_space_map.reset (new Epetra_Map (input_col_map));
 
-      // for more than one processor, need to
-      // specify only row map first and let the
-      // matrix entries decide about the column
-      // map (which says which columns are
-      // present in the matrix, not to be
-      // confused with the col_map that tells
-      // how the domain dofs of the matrix will
-      // be distributed). for only one
-      // processor, we can directly assign the
-      // columns as well.
+      // for more than one processor, need to specify only row map first and let
+      // the matrix entries decide about the column map (which says which
+      // columns are present in the matrix, not to be confused with the col_map
+      // that tells how the domain dofs of the matrix will be distributed). for
+      // only one processor, we can directly assign the columns as well. If we
+      // use a recent Trilinos version, we can also require building a non-local
+      // graph which gives us thread-safe initialization.
       if (input_row_map.Comm().NumProc() > 1)
         graph.reset (new Epetra_FECrsGraph(Copy, input_row_map,
-                                           n_entries_per_row, false));
+                                           n_entries_per_row, false
+#if DEAL_II_TRILINOS_VERSION_GTE(11,9,0)
+                                           , true
+#endif
+                                           ));
       else
         graph.reset (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
                                            n_entries_per_row, false));
@@ -413,7 +414,11 @@ namespace TrilinosWrappers
       if (input_row_map.Comm().NumProc() > 1)
         graph.reset(new Epetra_FECrsGraph(Copy, input_row_map,
                                           n_entries_per_row[min_my_gid(input_row_map)],
-                                          false));
+                                          false
+#if DEAL_II_TRILINOS_VERSION_GTE(11,9,0)
+                                          , true
+#endif
+                                          ));
       else
         graph.reset(new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
                                           n_entries_per_row[max_my_gid(input_row_map)],
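The boolean added behind the version guard corresponds to the last parameter
of the Epetra_FECrsGraph constructor. Sketched from the Epetra header as a
reminder of what is being switched on here; exact parameter names and types
should be checked against the Trilinos version actually in use:

    Epetra_FECrsGraph (Epetra_DataAccess        CV,    // Copy or View
                       const Epetra_BlockMap   &RowMap,
                       int                      NumIndicesPerRow,
                       bool                     ignoreNonLocalEntries = false,
                       bool                     buildNonlocalGraph    = false);

Passing true for buildNonlocalGraph asks Epetra to maintain a separate graph
for entries destined for rows owned by other processors, which is what
enables the thread-safe assembly code path on the deal.II side.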
diff --git a/tests/trilinos/assemble_matrix_parallel_07.cc b/tests/trilinos/assemble_matrix_parallel_07.cc
new file mode 100644
index 0000000000..b798296cdf
--- /dev/null
+++ b/tests/trilinos/assemble_matrix_parallel_07.cc
@@ -0,0 +1,462 @@
+// ---------------------------------------------------------------------
+// $Id$
+//
+// Copyright (C) 2009 - 2014 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// tests thread-parallel assembly of Trilinos matrices when at the same time
+// using MPI, taking a different code path for newer Trilinos versions
+
+#include "../tests.h"
+
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/work_stream.h>
+#include <deal.II/base/graph_coloring.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/filtered_iterator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iostream>
+#include <complex>
+
+std::ofstream logfile("output");
+
+using namespace dealii;
+
+
+namespace Assembly
+{
+  namespace Scratch
+  {
+    template <int dim>
+    struct Data
+    {
+      Data (const FiniteElement<dim> &fe,
+            const Quadrature<dim>    &quadrature)
+        :
+        fe_values(fe,
+                  quadrature,
+                  update_values | update_gradients |
+                  update_quadrature_points | update_JxW_values)
+      {}
+
+      Data (const Data &data)
+        :
+        fe_values(data.fe_values.get_mapping(),
+                  data.fe_values.get_fe(),
+                  data.fe_values.get_quadrature(),
+                  data.fe_values.get_update_flags())
+      {}
+
+      FEValues<dim> fe_values;
+    };
+  }
+
+  namespace Copy
+  {
+    struct Data
+    {
+      Data(const bool assemble_reference)
+        :
+        assemble_reference(assemble_reference)
+      {}
+      std::vector<types::global_dof_index> local_dof_indices;
+      FullMatrix<double> local_matrix;
+      Vector<double> local_rhs;
+      const bool assemble_reference;
+    };
+  }
+}
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem ();
+  ~LaplaceProblem ();
+
+  void run ();
+
+private:
+  void setup_system ();
+  void test_equality ();
+  void assemble_reference ();
+  void assemble_test ();
+  void solve ();
+  void create_coarse_grid ();
+  void postprocess ();
+
+  void local_assemble (const FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> &cell,
+                       Assembly::Scratch::Data<dim> &scratch,
+                       Assembly::Copy::Data &data);
+  void copy_local_to_global (const Assembly::Copy::Data &data);
+
+  std::vector<types::global_dof_index>
+  get_conflict_indices (FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> const &cell) const;
+
+  parallel::distributed::Triangulation<dim> triangulation;
+
+  DoFHandler<dim>      dof_handler;
+  FE_Q<dim>            fe;
+  QGauss<dim>          quadrature;
+
+  ConstraintMatrix     constraints;
+
+  TrilinosWrappers::SparseMatrix reference_matrix;
+  TrilinosWrappers::SparseMatrix test_matrix;
+
+  TrilinosWrappers::MPI::Vector reference_rhs;
+  TrilinosWrappers::MPI::Vector test_rhs;
+
+  std::vector<std::vector<FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> > > graph;
+};
+
+
+
+template <int dim>
+class BoundaryValues : public Function<dim>
+{
+public:
+  BoundaryValues () : Function<dim> () {}
+
+  virtual double value (const Point<dim>   &p,
+                        const unsigned int  component) const;
+};
+
+
+template <int dim>
+double
+BoundaryValues<dim>::value (const Point<dim>   &p,
+                            const unsigned int /*component*/) const
+{
+  double sum = 0;
+  for (unsigned int d=0; d<dim; ++d)
+    sum += std::sin(numbers::PI*p[d]);
+  return sum;
+}
+
+
+template <int dim>
+class RightHandSide : public Function<dim>
+{
+public:
+  RightHandSide () : Function<dim> () {}
+
+  virtual double value (const Point<dim>   &p,
+                        const unsigned int  component) const;
+};
+
+
+template <int dim>
+double
+RightHandSide<dim>::value (const Point<dim>   &p,
+                           const unsigned int /*component*/) const
+{
+  double product = 1;
+  for (unsigned int d=0; d<dim; ++d)
+    product *= (p[d]+1);
+  return product;
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem ()
+  :
+  triangulation (MPI_COMM_WORLD),
+  dof_handler (triangulation),
+  fe (1),
+  quadrature(fe.degree+1)
+{
+}
+
+
+template <int dim>
+LaplaceProblem<dim>::~LaplaceProblem ()
+{
+  dof_handler.clear ();
+}
+
+
+
+template <int dim>
+std::vector<types::global_dof_index>
+LaplaceProblem<dim>::
+get_conflict_indices (FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> const &cell) const
+{
+  std::vector<types::global_dof_index> local_dof_indices(cell->get_fe().dofs_per_cell);
+  cell->get_dof_indices(local_dof_indices);
+
+  constraints.resolve_indices(local_dof_indices);
+  return local_dof_indices;
+}
+
+template <int dim>
+void LaplaceProblem<dim>::setup_system ()
+{
+  reference_matrix.clear();
+  test_matrix.clear();
+  dof_handler.distribute_dofs (fe);
+
+  constraints.clear ();
+
+  DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+
+  // add boundary conditions as inhomogeneous constraints here; do it after
+  // having added the hanging node constraints in order to be consistent and
+  // skip dofs that are already constrained (i.e., are hanging nodes on the
+  // boundary in 3D). In contrast to step-27, we choose a sine function.
+  VectorTools::interpolate_boundary_values (dof_handler,
+                                            0,
+                                            BoundaryValues<dim>(),
+                                            constraints);
+  constraints.close ();
+
+  typedef FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> CellFilter;
+  CellFilter begin(IteratorFilters::LocallyOwnedCell(),dof_handler.begin_active());
+  CellFilter end(IteratorFilters::LocallyOwnedCell(),dof_handler.end());
+  graph = GraphColoring::make_graph_coloring(begin,end,
+                                             static_cast<std_cxx1x::function<std::vector<types::global_dof_index>
+                                             (FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> const &)> >
+                                             (std_cxx1x::bind(&LaplaceProblem<dim>::get_conflict_indices, this, std_cxx1x::_1)));
+
+  IndexSet locally_owned = dof_handler.locally_owned_dofs();
+  {
+    TrilinosWrappers::SparsityPattern csp;
+    csp.reinit(locally_owned, locally_owned, MPI_COMM_WORLD);
+    DoFTools::make_sparsity_pattern (dof_handler, csp,
+                                     constraints, false);
+    csp.compress();
+    reference_matrix.reinit (csp);
+    reference_rhs.reinit (locally_owned, MPI_COMM_WORLD);
+  }
+  {
+    TrilinosWrappers::SparsityPattern csp;
+    IndexSet relevant_set;
+    DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_set);
+#if DEAL_II_TRILINOS_VERSION_GTE(11,9,0)
+    // cannot pre-build the sparsity pattern for the writeable rows here;
+    // Trilinos must provide the non-local part of the graph itself
+    csp.reinit(locally_owned, locally_owned, MPI_COMM_WORLD);
+#else
+    // Trilinos is not new enough; use exactly the same approach as in
+    // assemble_matrix_parallel_02.cc
+    csp.reinit(locally_owned, locally_owned, relevant_set, MPI_COMM_WORLD);
+#endif
+    DoFTools::make_sparsity_pattern (dof_handler, csp,
+                                     constraints, false);
+    csp.compress();
+    test_matrix.reinit (csp);
+    test_rhs.reinit (locally_owned, relevant_set, MPI_COMM_WORLD, true);
+  }
+}
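What the two code paths above set up becomes visible during assembly: a cell
at a processor boundary produces contributions to matrix rows owned by a
neighboring process, and those rows must be writeable. A sketch of that
situation with made-up indices (row 42 assumed to be owned elsewhere):

    // entries for a row this process does not own are first queued locally
    const TrilinosWrappers::SparseMatrix::size_type cols[] = {42, 43};
    const double                                    vals[] = {1., -1.};
    test_matrix.add (/*row=*/42, /*n_cols=*/2, cols, vals);

    // ...and only shipped to the owning process in the collective exchange
    test_matrix.compress (VectorOperation::add);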
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::local_assemble (const FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> &cell,
+                                     Assembly::Scratch::Data<dim> &scratch,
+                                     Assembly::Copy::Data &data)
+{
+  const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell;
+
+  data.local_matrix.reinit (dofs_per_cell, dofs_per_cell);
+  data.local_matrix = 0;
+
+  data.local_rhs.reinit (dofs_per_cell);
+  data.local_rhs = 0;
+
+  scratch.fe_values.reinit (cell);
+
+  const FEValues<dim> &fe_values = scratch.fe_values;
+
+  const RightHandSide<dim> rhs_function;
+
+  for (unsigned int q_point=0;
+       q_point<fe_values.n_quadrature_points;
+       ++q_point)
+    {
+      const double rhs_value = rhs_function.value(fe_values.quadrature_point(q_point), 0);
+      for (unsigned int i=0; i<dofs_per_cell; ++i)
+        {
+          for (unsigned int j=0; j<dofs_per_cell; ++j)
+            data.local_matrix(i,j) += (fe_values.shape_grad(i,q_point) *
+                                       fe_values.shape_grad(j,q_point) *
+                                       fe_values.JxW(q_point));
+
+          data.local_rhs(i) += (fe_values.shape_value(i,q_point) *
+                                rhs_value *
+                                fe_values.JxW(q_point));
+        }
+    }
+
+  data.local_dof_indices.resize(dofs_per_cell);
+  cell->get_dof_indices (data.local_dof_indices);
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::copy_local_to_global (const Assembly::Copy::Data &data)
+{
+  if (data.assemble_reference)
+    constraints.distribute_local_to_global(data.local_matrix, data.local_rhs,
+                                           data.local_dof_indices,
+                                           reference_matrix, reference_rhs);
+  else
+    constraints.distribute_local_to_global(data.local_matrix, data.local_rhs,
+                                           data.local_dof_indices,
+                                           test_matrix, test_rhs);
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_reference ()
+{
+  reference_matrix = 0;
+  reference_rhs = 0;
+
+  Assembly::Copy::Data copy_data(true);
+  Assembly::Scratch::Data<dim> assembly_data(fe, quadrature);
+
+  for (unsigned int color=0; color<graph.size(); ++color)
+    for (typename std::vector<FilteredIterator<typename DoFHandler<dim>::active_cell_iterator> >::const_iterator p = graph[color].begin();
+         p != graph[color].end(); ++p)
+      {
+        local_assemble(*p, assembly_data, copy_data);
+        copy_local_to_global(copy_data);
+      }
+  reference_matrix.compress(VectorOperation::add);
+  reference_rhs.compress(VectorOperation::add);
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::assemble_test ()
+{
+  test_matrix = 0;
+  test_rhs = 0;
+
+  WorkStream::
+  run (graph,
+       std_cxx1x::bind (&LaplaceProblem<dim>::
+                        local_assemble,
+                        this,
+                        std_cxx1x::_1,
+                        std_cxx1x::_2,
+                        std_cxx1x::_3),
+       std_cxx1x::bind (&LaplaceProblem<dim>::
+                        copy_local_to_global,
+                        this,
+                        std_cxx1x::_1),
+       Assembly::Scratch::Data<dim>(fe, quadrature),
+       Assembly::Copy::Data (false),
+       2*multithread_info.n_threads(),
+       1);
+  test_matrix.compress(VectorOperation::add);
+  test_rhs.compress(VectorOperation::add);
+
+  test_matrix.add(-1, reference_matrix);
+
+  // there should not even be a roundoff difference between the matrices
+  deallog.threshold_double(1.e-30);
+  deallog << "error in matrix: " << test_matrix.frobenius_norm() << std::endl;
+  test_rhs.add(-1., reference_rhs);
+  deallog << "error in vector: " << test_rhs.l2_norm() << std::endl;
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::postprocess ()
+{
+  Vector<float> estimated_error_per_cell (triangulation.n_active_cells());
+  for (unsigned int i=0; i<estimated_error_per_cell.size(); ++i)
+    estimated_error_per_cell(i) = i;
+
+  parallel::distributed::GridRefinement::
+  refine_and_coarsen_fixed_number (triangulation,
+                                   estimated_error_per_cell,
+                                   0.3, 0.03);
+  triangulation.execute_coarsening_and_refinement ();
+}
+
+
+
+template <int dim>
+void LaplaceProblem<dim>::run ()
+{
+  for (unsigned int cycle=0; cycle<3; ++cycle)
+    {
+      if (cycle == 0)
+        {
+          GridGenerator::hyper_cube(triangulation, 0, 1);
+          triangulation.refine_global(6);
+        }
+
+      setup_system ();
+
+      assemble_reference ();
+      assemble_test ();
+      assemble_test ();
+
+      if (cycle < 2)
+        postprocess ();
+    }
+}
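The driver below caps the number of worker threads via the third argument to
MPI_InitFinalize. A sketch of the two interesting settings (only one such
object may exist per program run; the value 1 is an illustrative alternative,
not used by this test):

    // let deal.II use as many worker threads as the machine provides;
    // required here, since the test exercises the threaded assembly path
    Utilities::MPI::MPI_InitFinalize init (argc, argv,
                                           numbers::invalid_unsigned_int);

    // a plain MPI-only run would instead pass an explicit limit:
    // Utilities::MPI::MPI_InitFinalize init (argc, argv, /*max_num_threads=*/1);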
+
+
+
+int main (int argc, char **argv)
+{
+  deallog << std::setprecision (2);
+  logfile << std::setprecision (2);
+  deallog.attach(logfile);
+  deallog.depth_console(0);
+
+  Utilities::MPI::MPI_InitFinalize init(argc, argv, numbers::invalid_unsigned_int);
+
+  {
+    deallog.push("2d");
+    LaplaceProblem<2> laplace_problem;
+    laplace_problem.run ();
+    deallog.pop();
+  }
+}
diff --git a/tests/trilinos/assemble_matrix_parallel_07.mpirun=4.output b/tests/trilinos/assemble_matrix_parallel_07.mpirun=4.output
new file mode 100644
index 0000000000..abd7460d70
--- /dev/null
+++ b/tests/trilinos/assemble_matrix_parallel_07.mpirun=4.output
@@ -0,0 +1,13 @@
+
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
+DEAL:2d::error in matrix: 0
+DEAL:2d::error in vector: 0
-- 
2.39.5