#include <deal.II/base/tensor.h>
#include <deal.II/base/symmetric_tensor.h>
#include <deal.II/lac/vector.h>
+#include <deal.II/lac/full_matrix.h>
#include <vector>
inline MPI_Datatype mpi_type_id (const int *)
{
  return MPI_INT;
}
+
inline MPI_Datatype mpi_type_id (const long int *)
{
  return MPI_LONG;
}
+
inline MPI_Datatype mpi_type_id (const unsigned int *)
{
  return MPI_UNSIGNED;
}
+
inline MPI_Datatype mpi_type_id (const unsigned long int *)
{
  return MPI_UNSIGNED_LONG;
}
+
inline MPI_Datatype mpi_type_id (const unsigned long long int *)
{
  return MPI_UNSIGNED_LONG_LONG;
}
+
inline MPI_Datatype mpi_type_id (const float *)
{
  return MPI_FLOAT;
}
+
inline MPI_Datatype mpi_type_id (const double *)
{
  return MPI_DOUBLE;
}
+
inline MPI_Datatype mpi_type_id (const long double *)
{
  return MPI_LONG_DOUBLE;
}
#endif
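For context, the overload set above lets callers recover the MPI datatype id for a C++ type via ordinary overload resolution; the pointer argument exists only to drive that resolution. A minimal standalone sketch of the idea (plain MPI, outside deal.II, illustrative names only):

```cpp
#include <mpi.h>
#include <iostream>

// The pointed-to type selects the overload; the pointer value is unused.
inline MPI_Datatype mpi_type_id (const double *)       { return MPI_DOUBLE; }
inline MPI_Datatype mpi_type_id (const unsigned int *) { return MPI_UNSIGNED; }

int main (int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  double local = 1., global = 0.;
  // mpi_type_id(&local) resolves to the const double* overload, so the
  // reduction never hard-codes MPI_DOUBLE at the call site.
  MPI_Allreduce(&local, &global, 1, mpi_type_id(&local), MPI_SUM, MPI_COMM_WORLD);
  std::cout << "sum across ranks: " << global << std::endl;
  MPI_Finalize();
  return 0;
}
```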
+
+
template <typename T>
void all_reduce (const MPI_Op &mpi_op,
const T *const values,
}
}
+
+
template <typename T>
void all_reduce (const MPI_Op &mpi_op,
const std::complex<T> *const values,
}
}
+
+
template <typename T>
T all_reduce (const MPI_Op &mpi_op,
const T &t,
return output;
}
+
+
template <typename T>
void all_reduce (const MPI_Op &mpi_op,
const std::vector<T> &values,
all_reduce(mpi_op, &values[0], mpi_communicator, &output[0], values.size());
}
+
+
template <typename T>
void all_reduce (const MPI_Op &mpi_op,
const Vector<T> &values,
ExcDimensionMismatch(values.size(), output.size()));
all_reduce(mpi_op, values.begin(), mpi_communicator, output.begin(), values.size());
}
+
+
+
+ template <typename T>
+ void all_reduce (const MPI_Op &mpi_op,
+                  const FullMatrix<T> &values,
+                  const MPI_Comm &mpi_communicator,
+                  FullMatrix<T> &output)
+ {
+   Assert(values.m() == output.m(),
+          ExcDimensionMismatch(values.m(), output.m()));
+   Assert(values.n() == output.n(),
+          ExcDimensionMismatch(values.n(), output.n()));
+   all_reduce(mpi_op, &values[0][0], mpi_communicator, &output[0][0],
+              values.m() * values.n());
+ }
+
}
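The FullMatrix overload just added reduces all entries with a single call by treating the matrix as one contiguous buffer: `&values[0][0]` with a count of `values.m() * values.n()` covers the whole matrix, which relies on FullMatrix (via its Table<2,T> base class) storing its entries contiguously in row-major order. A sketch of the same trick with a plain buffer, assuming an MPI-3 `const void *` send-buffer signature:

```cpp
#include <mpi.h>
#include <vector>

// Element-wise sum of an m-by-n matrix stored row-major in a contiguous
// buffer, using a single MPI_Allreduce over all m*n entries.
void allreduce_matrix (const std::vector<double> &local,   // m*n entries
                       std::vector<double>       &global,
                       MPI_Comm                   comm)
{
  global.resize(local.size());
  MPI_Allreduce(local.data(), global.data(),
                static_cast<int>(local.size()),
                MPI_DOUBLE, MPI_SUM, comm);
}
```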
+
template <typename T>
T sum (const T &t,
const MPI_Comm &mpi_communicator)
}
+
template <typename T>
void sum (const std::vector<T> &values,
const MPI_Comm &mpi_communicator,
internal::all_reduce(MPI_SUM, values, mpi_communicator, sums);
}
+
+
template <typename T>
void sum (const Vector<T> &values,
const MPI_Comm &mpi_communicator,
}
+
+ template <typename T>
+ void sum (const FullMatrix<T> &values,
+           const MPI_Comm &mpi_communicator,
+           FullMatrix<T> &sums)
+ {
+   internal::all_reduce(MPI_SUM, values, mpi_communicator, sums);
+ }
+
+
+
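With this overload in place, a FullMatrix can be summed across ranks just like the std::vector and Vector variants above. A hedged usage sketch (the free function `example()` is illustrative, not part of the patch):

```cpp
#include <deal.II/base/mpi.h>
#include <deal.II/lac/full_matrix.h>

void example (const MPI_Comm comm)
{
  dealii::FullMatrix<double> local(2, 3), global(2, 3);
  for (unsigned int i = 0; i < local.m(); ++i)
    for (unsigned int j = 0; j < local.n(); ++j)
      local(i, j) = 1.;  // every rank contributes ones

  // element-wise sum over all ranks: global(i,j) ends up equal to the
  // number of processes in comm
  dealii::Utilities::MPI::sum(local, comm, global);
}
```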
template <int rank, int dim, typename Number>
Tensor<rank,dim,Number>
sum (const Tensor<rank,dim,Number> &local,
return global;
}
+
+
template <int rank, int dim, typename Number>
SymmetricTensor<rank,dim,Number>
sum (const SymmetricTensor<rank,dim,Number> &local,
return global;
}
+
+
template <typename T>
T max (const T &t,
const MPI_Comm &mpi_communicator)
}
+
template <typename T>
void max (const std::vector<T> &values,
const MPI_Comm &mpi_communicator,
}
+
template <typename T>
T min (const T &t,
const MPI_Comm &mpi_communicator)
}
+
template <typename T>
void min (const std::vector<T> &values,
const MPI_Comm &mpi_communicator,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check Utilities::MPI::sum() for FullMatrix objects
+
+#include "../tests.h"
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/vector.h>
+
+template <typename NumberType>
+void test(const unsigned int m = 13, const unsigned int n = 5)
+{
+  Assert(Utilities::MPI::job_supports_mpi(), ExcInternalError());
+
+  // fill the matrix with a known pattern, identical on every rank
+  FullMatrix<NumberType> full_matrix(m, n);
+  {
+    unsigned int index = 0;
+    for (unsigned int i = 0; i < full_matrix.m(); ++i)
+      for (unsigned int j = 0; j < full_matrix.n(); ++j)
+        full_matrix(i, j) = index++;
+  }
+
+  FullMatrix<NumberType> full_matrix_original(m, n);
+  full_matrix_original = full_matrix;
+
+  // in-place reduction: input and output are the same object, which the
+  // underlying internal::all_reduce() has to handle (e.g., via MPI_IN_PLACE)
+  Utilities::MPI::sum(full_matrix, MPI_COMM_WORLD, full_matrix);
+
+  // every rank contributed the same entries, so each summed entry must equal
+  // the original value times the number of processes
+  const unsigned int numprocs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  for (unsigned int i = 0; i < full_matrix.m(); ++i)
+    for (unsigned int j = 0; j < full_matrix.n(); ++j)
+      Assert(full_matrix(i, j) == full_matrix_original(i, j) * numprocs,
+             ExcInternalError());
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    deallog << "Ok" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv,
+                                                      testing_max_num_threads());
+
+  // only rank 0 writes the log; all ranks must still enter test(), since the
+  // reductions inside are collective over MPI_COMM_WORLD
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    {
+      initlog();
+      deallog.push("float");
+      test<float>();
+      deallog.pop();
+      deallog.push("double");
+      test<double>();
+      deallog.pop();
+    }
+  else
+    {
+      test<float>();
+      test<double>();
+    }
+}
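One detail the test leans on: the call `Utilities::MPI::sum(full_matrix, MPI_COMM_WORLD, full_matrix)` passes the same object as input and output. MPI forbids aliased send and receive buffers unless the send buffer is `MPI_IN_PLACE`, so the elided body of `internal::all_reduce()` presumably detects the aliasing and switches accordingly. A sketch of that pattern, again assuming an MPI-3 `const void *` send-buffer signature:

```cpp
#include <mpi.h>

// If sendbuf and recvbuf alias, MPI requires MPI_IN_PLACE instead of
// passing the same pointer twice.
void allreduce_maybe_in_place (const double *values, double *output,
                               int size, MPI_Comm comm)
{
  const void *sendbuf = (values == output)
                          ? MPI_IN_PLACE
                          : static_cast<const void *>(values);
  MPI_Allreduce(sendbuf, output, size, MPI_DOUBLE, MPI_SUM, comm);
}
```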