--- /dev/null
+New: The new function Utilities::MPI::reduce() makes it possible to reduce
+arbitrary types with a user-specified binary operation.
+<br>
+(Peter Munch, 2021/05/27)
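A minimal usage sketch of the new function (illustration only, not part of the patch; the sum example and variable names are made up). The combiner is passed as a lambda, and the result is defined only on the root process:

#include <deal.II/base/mpi.h>

#include <iostream>

int main(int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi(argc, argv);

  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

  // Sum up the ranks with a user-specified combiner; the result is
  // only defined on the root process (rank 0 by default).
  const unsigned int sum = dealii::Utilities::MPI::reduce<unsigned int>(
    my_rank,
    MPI_COMM_WORLD,
    [](const unsigned int &a, const unsigned int &b) { return a + b; });

  if (my_rank == 0)
    std::cout << "sum of ranks: " << sum << std::endl;
}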
const T & object_to_send,
const unsigned int root_process = 0);
+ /**
+ * A function that combines values @p local_value from all processes
+ * via a user-specified binary operation @p combiner on the @p root_process.
+ * In this regard, the function is similar to MPI_Reduce (and
+ * Utilities::MPI::min/max()): due to the user-specified binary operation
+ * it is slower for built-in types, but in return it can handle general
+ * object types, including ones that store variable amounts of data.
+ *
+ * In contrast to all_reduce(), the result is only available on the
+ * @p root_process. On all other processes, the returned value is
+ * undefined.
+ */
+ template <typename T>
+ T
+ reduce(const T & local_value,
+ const MPI_Comm & comm,
+ const std::function<T(const T &, const T &)> &combiner,
+ const unsigned int root_process = 0);
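To illustrate the "variable amounts of data" case, a hypothetical sketch (not part of the patch; it assumes an already initialized MPI context as in the example above, and the combiner `concatenate` is made up) that gathers per-rank vectors of different sizes onto a chosen root:

const unsigned int my_rank =
  dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

// Each rank contributes a vector whose size depends on its rank.
const std::vector<unsigned int> local(my_rank + 1, my_rank);

const std::function<std::vector<unsigned int>(const std::vector<unsigned int> &,
                                              const std::vector<unsigned int> &)>
  concatenate = [](const std::vector<unsigned int> &a,
                   const std::vector<unsigned int> &b) {
    std::vector<unsigned int> result = a;
    result.insert(result.end(), b.begin(), b.end());
    return result;
  };

// The concatenated data ends up on rank 2 only; on all other ranks the
// returned value is undefined.
const std::vector<unsigned int> gathered = dealii::Utilities::MPI::reduce(
  local, MPI_COMM_WORLD, concatenate, /*root_process=*/2);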
+
/**
* A function that combines values @p local_value from all processes
* via a user-specified binary operation @p combiner and distributes the
template <typename T>
T
- all_reduce(const T & vec,
- const MPI_Comm & comm,
- const std::function<T(const T &, const T &)> &combiner)
+ reduce(const T & vec,
+ const MPI_Comm & comm,
+ const std::function<T(const T &, const T &)> &combiner,
+ const unsigned int root_process)
{
#ifdef DEAL_II_WITH_MPI
- if (job_supports_mpi())
+ if (job_supports_mpi() && n_mpi_processes(comm) > 1)
{
// 1) perform custom reduction
T result = vec;
for (unsigned int stride = 1; stride < nproc; stride *= 2)
{
- const unsigned int rank_recv =
- (2 * stride) * (rank / (2 * stride));
- const unsigned int rank_send = rank_recv + stride;
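+ // Shift all ranks so that root_process plays the role of rank 0 in
+ // the usual power-of-two reduction tree, then rotate the resulting
+ // partner indices back into the original numbering.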
+ unsigned int rank_recv =
+ (2 * stride) *
+ ((rank + nproc - root_process) % nproc / (2 * stride)) +
+ root_process;
+ unsigned int rank_send = rank_recv + stride;
- if (rank_send >= nproc) // nothing to do
+ if (rank_send >= nproc + root_process) // nothing to do
continue;
+ rank_recv = rank_recv % nproc;
+ rank_send = rank_send % nproc;
+
if (rank_recv == rank) // process receives data
{
MPI_Status status;
}
}
- // 2) broadcast result
- return Utilities::MPI::broadcast(comm, result);
+ if (rank == root_process)
+ return result;
+ else
+ return {};
}
#endif
(void)comm;
(void)combiner;
+ (void)root_process;
return vec;
}
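The partner arithmetic above is easiest to see by tabulating it. The following standalone sketch (illustration only, not part of the patch) reuses the same formulas to print which rank sends to which rank at every stride for a given process count and root:

#include <iostream>

// Print the send->receive pairs of the rotated power-of-two reduction
// tree, using the same index arithmetic as reduce() above.
void
print_reduction_tree(const unsigned int nproc, const unsigned int root_process)
{
  for (unsigned int stride = 1; stride < nproc; stride *= 2)
    {
      std::cout << "stride " << stride << ":";
      for (unsigned int rank = 0; rank < nproc; ++rank)
        {
          unsigned int rank_recv =
            (2 * stride) *
              ((rank + nproc - root_process) % nproc / (2 * stride)) +
            root_process;
          unsigned int rank_send = rank_recv + stride;

          if (rank_send >= nproc + root_process) // nothing to do
            continue;

          rank_recv = rank_recv % nproc;
          rank_send = rank_send % nproc;

          if (rank == rank_send) // report each pair once, from the sender
            std::cout << " " << rank_send << "->" << rank_recv;
        }
      std::cout << std::endl;
    }
}

int main()
{
  // For example, 5 processes reducing onto root 2 communicate as
  //   stride 1: 0->4 3->2
  //   stride 2: 4->2
  //   stride 4: 1->2
  // so that the combined result ends up on rank 2.
  print_reduction_tree(5, 2);
}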
+
+ template <typename T>
+ T
+ all_reduce(const T & vec,
+ const MPI_Comm & comm,
+ const std::function<T(const T &, const T &)> &combiner)
+ {
+ if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+ {
+ // 1) perform reduction
+ const auto result = reduce(vec, comm, combiner);
+
+ // 2) broadcast result
+ return Utilities::MPI::broadcast(comm, result);
+ }
+ else
+ return vec;
+ }
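With this change, all_reduce() is simply the new reduce() onto the default root followed by a broadcast of the root's result. A hypothetical usage sketch (again assuming an initialized MPI context; the element-wise sum and the variable names are made up):

const unsigned int my_rank =
  dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

// Each rank contributes a small vector; all ranks obtain the
// element-wise sum (with reduce(), only the root process would).
const std::vector<double> local{1.0 * my_rank, 2.0 * my_rank};

const std::vector<double> global =
  dealii::Utilities::MPI::all_reduce<std::vector<double>>(
    local,
    MPI_COMM_WORLD,
    [](const std::vector<double> &a, const std::vector<double> &b) {
      std::vector<double> sum(a.size());
      for (unsigned int i = 0; i < a.size(); ++i)
        sum[i] = a[i] + b[i];
      return sum;
    });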
+
+
template <typename T>
std::vector<T>
compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm)
const MPI_Comm &,
const ArrayView<S> &);
+ template S reduce(const S & vec,
+ const MPI_Comm & comm,
+ const std::function<S(const S &, const S &)> &process,
+ const unsigned int root_process);
+
+ template std::vector<S> reduce(
+ const std::vector<S> & vec,
+ const MPI_Comm & comm,
+ const std::function<std::vector<S>(const std::vector<S> &,
+ const std::vector<S> &)> &process,
+ const unsigned int root_process);
+
template S all_reduce(
const S & vec,
const MPI_Comm & comm,
for (const auto r : result)
deallog << r << " ";
deallog << std::endl;
+
+ for (unsigned int rank = 0;
+ rank < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ ++rank)
+ {
+ const auto result = Utilities::MPI::reduce(
+ std::vector<T>{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)},
+ MPI_COMM_WORLD,
+ fu,
+ rank);
+
+ for (const auto r : result)
+ deallog << r << " ";
+ deallog << std::endl;
+ }
}
DEAL:0::0
DEAL:0::0
DEAL:0::0
+DEAL:0::0
+DEAL:0::0
+DEAL:0::0
+DEAL:0::0
DEAL:0::0
+DEAL:0::0
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::4
DEAL:0::4
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
DEAL:0::10
+DEAL:0::10
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::0 1 2 3 4
DEAL:0::0 1 2 3 4
+DEAL:0::
+DEAL:0::
+DEAL:0::
+DEAL:0::
DEAL:1::0
+DEAL:1::
+DEAL:1::0
+DEAL:1::
+DEAL:1::
+DEAL:1::
+DEAL:1::4
+DEAL:1::
DEAL:1::4
+DEAL:1::
+DEAL:1::
+DEAL:1::
DEAL:1::10
+DEAL:1::
+DEAL:1::10
+DEAL:1::
+DEAL:1::
+DEAL:1::
DEAL:1::0 1 2 3 4
+DEAL:1::
+DEAL:1::1 2 3 4 0
+DEAL:1::
+DEAL:1::
+DEAL:1::
DEAL:2::0
+DEAL:2::
+DEAL:2::
+DEAL:2::0
+DEAL:2::
+DEAL:2::
DEAL:2::4
+DEAL:2::
+DEAL:2::
+DEAL:2::4
+DEAL:2::
+DEAL:2::
+DEAL:2::10
+DEAL:2::
+DEAL:2::
DEAL:2::10
+DEAL:2::
+DEAL:2::
DEAL:2::0 1 2 3 4
+DEAL:2::
+DEAL:2::
+DEAL:2::2 3 4 0 1
+DEAL:2::
+DEAL:2::
DEAL:3::0
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::0
+DEAL:3::
+DEAL:3::4
+DEAL:3::
+DEAL:3::
+DEAL:3::
DEAL:3::4
+DEAL:3::
DEAL:3::10
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::10
+DEAL:3::
DEAL:3::0 1 2 3 4
+DEAL:3::
+DEAL:3::
+DEAL:3::
+DEAL:3::3 4 0 1 2
+DEAL:3::
+DEAL:4::0
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
DEAL:4::0
DEAL:4::4
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::4
+DEAL:4::10
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
DEAL:4::10
DEAL:4::0 1 2 3 4
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::
+DEAL:4::4 0 1 2 3