//Forward type declarations to allow MPI sums over tensorial types
template <int rank, int dim, typename Number> class Tensor;
template <int rank, int dim, typename Number> class SymmetricTensor;
-
+template <typename Number> class SparseMatrix;
namespace Utilities
{
sum (const Tensor<rank,dim,Number> &local,
const MPI_Comm &mpi_communicator);
+ /**
+ * Perform an MPI sum of the entries of a SparseMatrix.
+ *
+ * @note @p local and @p global must have the same sparsity pattern,
+ * and that pattern has to be the same on all MPI processes.
+ *
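+ * A minimal usage sketch, assuming @p matrix is a SparseMatrix that
+ * every process has assembled with one and the same sparsity pattern
+ * (the input and output may be the same object):
+ * @code
+ *   Utilities::MPI::sum (matrix, MPI_COMM_WORLD, matrix);
+ * @endcode
+ *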
+ * @relatesalso SparseMatrix
+ */
+ template <typename Number>
+ void
+ sum (const SparseMatrix<Number> &local,
+ const MPI_Comm &mpi_communicator,
+ SparseMatrix<Number> &global);
+
/**
* Return the maximum over all processors of the value @p t. This function
* is collective over all processors given in the
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/lapack_full_matrix.h>
+#include <deal.II/lac/sparse_matrix.h>
#include <vector>
const MPI_Comm &mpi_communicator)
{
Tensor<rank, dim, Number> sums;
- dealii::Utilities::MPI::sum(local, mpi_communicator, sums);
+ sum(local, mpi_communicator, sums);
return sums;
}
entries[i] = local[ local.unrolled_to_component_indices(i) ];
Number global_entries[ SymmetricTensor<rank,dim,Number>::n_independent_components ];
- dealii::Utilities::MPI::sum( entries, mpi_communicator, global_entries );
+ sum( entries, mpi_communicator, global_entries );
SymmetricTensor<rank,dim,Number> global;
for (unsigned int i=0; i< n_entries; ++i)
+ template <typename Number>
+ void
+ sum (const SparseMatrix<Number> &local,
+ const MPI_Comm &mpi_communicator,
+ SparseMatrix<Number> &global)
+ {
+ Assert(local.get_sparsity_pattern() == global.get_sparsity_pattern(),
+ ExcMessage("The sparsity pattern of the local and the global matrices should match."));
+#ifdef DEAL_II_WITH_MPI
+ // Use the fact that SparseMatrix stores its entries in one contiguous
+ // array, so the reduction can be done element-wise over ArrayViews of
+ // that array.
+ sum(ArrayView<const Number> (&local.val[0], local.n_nonzero_elements()),
+ mpi_communicator,
+ ArrayView<Number>(&global.val[0], global.n_nonzero_elements()));
+#else
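+ // without MPI there is only one process, so the global sum is simply
+ // a copy of the local matrix (unless both arguments already refer to
+ // the same object)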
+ (void)mpi_communicator;
+ if (!PointerComparison::equal (&local, &global))
+ global = local;
+#endif
+ }
+
+
+
template <typename T>
T max (const T &t,
const MPI_Comm &mpi_communicator)
#include <deal.II/lac/identity_matrix.h>
#include <deal.II/lac/exceptions.h>
#include <deal.II/lac/vector_operation.h>
+#ifdef DEAL_II_WITH_MPI
+#include <mpi.h>
+#endif
#include <memory>
template <typename number> class FullMatrix;
template <typename Matrix> class BlockMatrixBase;
template <typename number> class SparseILU;
+#ifdef DEAL_II_WITH_MPI
+namespace Utilities
+{
+ namespace MPI
+ {
+ template <typename Number> void sum (const SparseMatrix<Number> &, const MPI_Comm &, SparseMatrix<Number> &);
+ }
+}
+#endif
#ifdef DEAL_II_WITH_TRILINOS
namespace TrilinosWrappers
* classes instead, since they are tailored better to a sparse matrix
* structure.
*/
- number operator () (const size_type i,
- const size_type j) const;
+ const number &operator () (const size_type i,
+ const size_type j) const;
+
+ /**
+ * In contrast to the function above, this variant returns a writable
+ * reference and therefore allows modifying the stored entry.
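+ *
+ * For example, for a non-const matrix <code>A</code> (a hypothetical
+ * name), an entry that is part of the sparsity pattern can be
+ * overwritten via
+ * @code
+ *   A(i,j) = 1.;
+ * @endcode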
+ */
+ number &operator () (const size_type i,
+ const size_type j);
/**
* This function is mostly like operator()() in that it returns the value of
*/
template <typename,bool> friend class SparseMatrixIterators::Iterator;
template <typename,bool> friend class SparseMatrixIterators::Accessor;
+
+#ifdef DEAL_II_WITH_MPI
+ /**
+ * Give Utilities::MPI::sum() access to the internal data structures so
+ * that it can perform MPI operations on the matrix entries.
+ */
+ template <typename Number> friend void Utilities::MPI::sum (const SparseMatrix<Number> &, const MPI_Comm &, SparseMatrix<Number> &);
+#endif
};
#ifndef DOXYGEN
template <typename number>
inline
-number SparseMatrix<number>::operator () (const size_type i,
- const size_type j) const
+const number &SparseMatrix<number>::operator () (const size_type i,
+ const size_type j) const
+{
+ Assert (cols != nullptr, ExcNotInitialized());
+ Assert (cols->operator()(i,j) != SparsityPattern::invalid_entry,
+ ExcInvalidIndex(i,j));
+ return val[cols->operator()(i,j)];
+}
+
+
+
+template <typename number>
+inline
+number &SparseMatrix<number>::operator () (const size_type i,
+ const size_type j)
{
Assert (cols != nullptr, ExcNotInitialized());
Assert (cols->operator()(i,j) != SparsityPattern::invalid_entry,
// ---------------------------------------------------------------------
+for (S : REAL_SCALARS)
+{
+ template
+ void sum<S> (const SparseMatrix<S> &, const MPI_Comm &, SparseMatrix<S> &);
+}
+
for (S : MPI_SCALARS)
{
template
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1998 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// Test Utilities::MPI::sum() for SparseMatrix: each process contributes
+// a scaled five-point stencil matrix, and the element-wise sum is
+// compared against the sum of the equivalent FullMatrix.
+
+#include "../tests.h"
+#include "../testmatrix.h"
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/base/mpi.h>
+
+
+
+void test()
+{
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int size = 50;
+
+ FDMatrix testproblem (size, size);
+ unsigned int dim = (size-1) * (size-1);
+
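+ // sparsity pattern of the 2d five-point stencil provided by FDMatrix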
+ SparsityPattern sparsity(dim, dim, size);
+ testproblem.five_point_structure(sparsity);
+ sparsity.compress();
+
+ SparseMatrix<double> matrix(sparsity);
+ const double val = std::pow(10,myid);
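+ // each process adds a process-dependent value: -val to every entry
+ // (i,j) in the sparsity pattern and +val to the corresponding
+ // diagonal entry (i,i)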
+ for (SparsityPattern::const_iterator it = sparsity.begin();
+ it != sparsity.end(); ++it)
+ {
+ const auto i = it->row();
+ const auto j = it->column();
+ matrix.add(i, j, -val);
+ matrix.add(i, i, val);
+ }
+
+ // compare with FullMatrix:
+ FullMatrix<double> full(dim,dim);
+ full.copy_from(matrix);
+
+ // deallog << "Local:" << std::endl;
+ // matrix.print(deallog.get_file_stream());
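+ // sum the sparse matrix in place across all processes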
+ Utilities::MPI::sum(matrix, MPI_COMM_WORLD, matrix);
+ // deallog << "Global:" << std::endl;
+ // matrix.print(deallog.get_file_stream());
+
+ Utilities::MPI::sum(full, MPI_COMM_WORLD, full);
+
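+ // every entry of the summed sparse matrix must agree with the
+ // corresponding entry of the summed full matrix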
+ for (SparsityPattern::const_iterator it = sparsity.begin();
+ it != sparsity.end(); ++it)
+ {
+ const auto i = it->row();
+ const auto j = it->column();
+ AssertThrow (matrix(i,j) == full(i,j),
+ ExcMessage(std::to_string(matrix(i,j)) +
+ " != " +
+ std::to_string(full(i,j)) +
+ " for i=" + std::to_string(i) +
+ " j=" + std::to_string(j)
+ ));
+ }
+
+ deallog << "Ok" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+ // MPILogInitAll log;
+ // test();
+
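+ // only the first process writes output; the other processes still run
+ // the test so that the collective MPI calls do not deadlock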
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ {
+ initlog();
+ test();
+ }
+ else
+ test();
+
+ return 0;
+}
--- /dev/null
+
+DEAL::Ok
--- /dev/null
+
+DEAL::Ok