--- /dev/null
+New: Utilities::MPI::all_gather and Utilities::MPI::some_to_some functions have been
+added to perform general collective communications between processors.
+<br>
+(Giovanni Alzetta, Luca Heltai, 2017/11/18)
#include <deal.II/base/array_view.h>
#include <vector>
+#include <map>
#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// without MPI, we would still like to use
min_max_avg (const double my_value,
const MPI_Comm &mpi_communicator);
-
-
/**
* A class that is used to initialize the MPI system at the beginning of a
* program and to shut it down again at the end. It also allows you to
*/
bool job_supports_mpi ();
+ /**
+ * Initiate a some-to-some communication, and exchange arbitrary objects
+ * (the class T must be serializable using Boost.Serialization) between
+ * processes.
+ *
+ * @param[in] comm MPI communicator.
+ *
+ * @param[in] objects_to_send A map from the rank (unsigned int) of the
+ * process meant to receive the data to the object to send (the type T
+ * must be serializable for this function to work properly).
+ *
+ * @return A map from the rank (unsigned int) of the process
+ * which sent the data to the object that was received.
+ *
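+ * As a usage sketch (the variable names below are illustrative only),
+ * each process might send one number to its right neighbor and collect
+ * whatever was addressed to it:
+ * @code
+ * const MPI_Comm comm = MPI_COMM_WORLD;
+ * const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
+ * const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+ *
+ * // send one number to the process with the next-higher rank
+ * std::map<unsigned int, double> to_send;
+ * if (n_procs > 1)
+ *   to_send[(my_rank + 1) % n_procs] = 1.0 * my_rank;
+ *
+ * // received[r] is the object that process r addressed to this process
+ * const std::map<unsigned int, double> received =
+ *   Utilities::MPI::some_to_some(comm, to_send);
+ * @endcode
+ *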
+ * @author Giovanni Alzetta, Luca Heltai, 2017
+ */
+ template<typename T>
+ std::map<unsigned int, T>
+ some_to_some(const MPI_Comm &comm,
+ const std::map<unsigned int, T> &objects_to_send);
+
+ /**
+ * A generalization of the classic MPI_Allgather function that accepts
+ * arbitrary data types T, as long as T is serializable using
+ * Boost.Serialization.
+ *
+ * @param[in] comm MPI communicator.
+ * @param[in] object_to_send The object to send to all processes in the
+ * communicator (the type T must be serializable for this function to
+ * work properly).
+ *
+ * @return A vector of objects, with size equal to the number of
+ * processes in the MPI communicator. Each entry contains the object
+ * received from the processor with the corresponding rank within the
+ * communicator.
+ *
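+ * As a usage sketch (the variable names below are illustrative only),
+ * every process could contribute its own rank and obtain the ranks of
+ * all processes:
+ * @code
+ * const MPI_Comm comm = MPI_COMM_WORLD;
+ * const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
+ *
+ * // on every process, all_ranks[r] == r after the call
+ * const std::vector<unsigned int> all_ranks =
+ *   Utilities::MPI::all_gather(comm, my_rank);
+ * @endcode
+ *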
+ * @author Giovanni Alzetta, Luca Heltai, 2017
+ */
+ template<typename T>
+ std::vector<T>
+ all_gather(const MPI_Comm &comm,
+ const T &object_to_send);
+
#ifndef DOXYGEN
// declaration for an internal function that lives in mpi.templates.h
namespace internal
+ template<typename T>
+ std::map<unsigned int, T>
+ some_to_some(const MPI_Comm &comm,
+ const std::map<unsigned int, T> &objects_to_send)
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)comm;
+ (void)objects_to_send;
+ Assert (false, ExcMessage ("The function some_to_some doesn't make"
+ "any sense if you do not have MPI enabled. "));
+#else
+
+ std::vector<unsigned int> send_to(objects_to_send.size());
+ {
+ unsigned int i=0;
+ for (const auto &m: objects_to_send)
+ send_to[i++] = m.first;
+ }
+ AssertDimension(send_to.size(), objects_to_send.size());
+
+ const auto receive_from =
+ Utilities::MPI::compute_point_to_point_communication_pattern(comm, send_to);
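+ // receive_from now contains the ranks of all processes that will
+ // send a message to this process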
+
+ // Sending buffers
+ std::vector<std::vector<char> > buffers_to_send(send_to.size());
+ std::vector<MPI_Request> buffer_send_requests(send_to.size());
+ {
+ unsigned int i = 0;
+ for (const auto &rank_obj : objects_to_send)
+ {
+ const auto &rank = rank_obj.first;
+ buffers_to_send[i] = Utilities::pack(rank_obj.second);
+ const int ierr = MPI_Isend(buffers_to_send[i].data(),
+ buffers_to_send[i].size(), MPI_CHAR,
+ rank, 21, comm, &buffer_send_requests[i]);
+ AssertThrowMPI(ierr);
+ ++i;
+ }
+ }
+
+ // Receiving buffers
+ std::map<unsigned int, T> received_objects;
+ {
+ unsigned int i = 0;
+ std::vector<char> buffer;
+
+ // We do this on a first come/first served basis
+ while (i<receive_from.size())
+ {
+ // Probe what's going on. Take data from the first available sender
+ MPI_Status status;
+ int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
+ AssertThrowMPI(ierr);
+
+ // Length of the message
+ int len;
+ ierr = MPI_Get_count(&status, MPI_CHAR, &len);
+ AssertThrowMPI(ierr);
+ buffer.resize(len);
+
+ // Source rank
+ const unsigned int rank = status.MPI_SOURCE;
+
+ // Actually receive the message
+ ierr = MPI_Recv(buffer.data(), len, MPI_CHAR,
+ rank, 21, comm, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ received_objects[rank] = Utilities::unpack<T>(buffer);
+ ++i;
+ }
+ }
+
+ // Wait to have sent all objects.
+ const int ierr = MPI_Waitall(send_to.size(), buffer_send_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+
+ return received_objects;
+#endif // deal.II with MPI
+ }
+
+
+
+ template<typename T>
+ std::vector<T> all_gather(const MPI_Comm &comm,
+ const T &object)
+ {
+#ifndef DEAL_II_WITH_MPI
+ (void)comm;
+ Assert (false, ExcMessage ("The function all_gather doesn't make "
+ "any sense if you do not have MPI enabled."));
+ return std::vector<T>(1, object);
+#else
+ auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
+
+ std::vector<char> buffer = Utilities::pack(object);
+
+ int n_local_data = buffer.size();
+
+ // Vector to store the size of the local serialized buffer on every process
+ std::vector<int> size_all_data(n_procs,0);
+
+ // Step 1: exchange the size of each local buffer
+ int ierr = MPI_Allgather(&n_local_data, 1, MPI_INT,
+ size_all_data.data(), 1, MPI_INT,
+ comm);
+ AssertThrowMPI(ierr);
+
+ // Step 2: compute the displacement, relative to recvbuf,
+ // at which to store each incoming buffer
+ std::vector<int> rdispls(n_procs);
+ rdispls[0] = 0;
+ for (unsigned int i=1; i < n_procs; ++i)
+ rdispls[i] = rdispls[i-1] + size_all_data[i-1];
+
+ // Step 3: exchange the buffer:
+ std::vector<char> received_unrolled_buffer(rdispls.back() + size_all_data.back());
+
+ ierr = MPI_Allgatherv(buffer.data(), n_local_data, MPI_CHAR,
+ received_unrolled_buffer.data(), size_all_data.data(),
+ rdispls.data(), MPI_CHAR, comm);
+ AssertThrowMPI(ierr);
+
+ std::vector<T> received_objects(n_procs);
+ for (unsigned int i= 0; i < n_procs; ++i)
+ {
+ std::vector<char> local_buffer(received_unrolled_buffer.begin()+rdispls[i],
+ received_unrolled_buffer.begin()+rdispls[i]+size_all_data[i]);
+ received_objects[i] = Utilities::unpack<T>(local_buffer);
+ }
+
+ return received_objects;
+#endif
+ }
+
+
+
template <typename T>
T sum (const T &t,
const MPI_Comm &mpi_communicator)
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test the Utilities::MPI::all_gather function: every process gathers
+// the vectors of points contributed by all processes
+
+#include "../tests.h"
+#include "../../include/deal.II/base/mpi.templates.h"
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/point.h>
+
+#include <mpi.h>
+#include <vector>
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ initlog();
+
+ auto n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ auto my_proc = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ // Creating the local array of points
+ std::vector<Point<3> > local_points(my_proc + 1);
+ for (unsigned int i=0; i<my_proc+1; ++i)
+ local_points[i] = Point<3>(my_proc, -1.0*my_proc, i);
+
+ auto gathered_points = Utilities::MPI::all_gather(MPI_COMM_WORLD, local_points);
+ bool test_passed = true;
+ for (unsigned int i=0; i< n_procs; ++i)
+ {
+ if (gathered_points[i].size() != i+1)
+ {
+ test_passed = false;
+ deallog << "Error: Points received from rank " << i << " have wrong size. " << std::endl;
+ }
+ for (unsigned int p=0; p< gathered_points[i].size(); ++p)
+ if ( gathered_points[i][p][0] != (double) i ||
+ gathered_points[i][p][1] != -1.0*i ||
+ gathered_points[i][p][2] != (double) p)
+ {
+ test_passed = false;
+ deallog << "Error with point " << p << " from rank " << i << std::endl;
+ }
+ }
+
+ if (test_passed)
+ deallog << "Test: ok" << std::endl;
+ else
+ deallog << "Test: FAILED" << std::endl;
+}
--- /dev/null
+
+DEAL::Test: ok
--- /dev/null
+
+DEAL::Test: ok
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test the Utilities::MPI::some_to_some function: each process sends a
+// point to every other process and checks what it receives
+
+#include "../tests.h"
+#include "../../include/deal.II/base/mpi.templates.h"
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/point.h>
+
+#include <mpi.h>
+#include <vector>
+
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ initlog();
+
+ auto n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ auto my_proc = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ // Creating the map of points to be sent to each other process
+ std::map<unsigned int, std::vector< Point<2> > > m;
+ for (unsigned int i=0; i<n_procs; ++i)
+ if (i != my_proc)
+ m[i].push_back(Point<2>(my_proc, -2.0*my_proc));
+
+ auto received_pts = Utilities::MPI::some_to_some(MPI_COMM_WORLD, m);
+
+ bool test_passed = true;
+ for (auto const &pt: received_pts)
+ if ( std::abs(pt.first - pt.second[0][0]) > 1e-12 ||
+ std::abs(2.0*pt.first + pt.second[0][1]) > 1e-12 )
+ {
+ test_passed = false;
+ deallog << "Error with point " << pt.second[0] << " received from rank " << pt.first << std::endl;
+ }
+ if (test_passed)
+ deallog << "Test: ok" << std::endl;
+ else
+ deallog << "Test: FAILED" << std::endl;
+}
--- /dev/null
+
+DEAL::Test: ok
--- /dev/null
+
+DEAL::Test: ok