--- /dev/null
+Added: The new function
+Utilities::MPI::compute_n_point_to_point_communications()
+computes the number of processes in an MPI universe from which
+the current process should expect to receive messages.
+<br>
+(Eldar Khattatov, 2018/09/04)
const MPI_Comm & mpi_comm,
const std::vector<unsigned int> &destinations);
+ /**
+ * Simplified (for efficiency reasons) version of
+ * compute_point_to_point_communication_pattern()
+ * that only computes the number of processes in an MPI universe from
+ * which the current process expects communication.
+ *
+ * @param mpi_comm A
+ * @ref GlossMPICommunicator "communicator"
+ * that describes the processors that are going to communicate with each
+ * other.
+ *
+ * @param destinations The list of processors the current process wants to
+ * send information to. This list need not be sorted in any way. If it
+ * contains duplicate entries that means that multiple messages are
+ * intended for a given destination.
+ *
+ * @return The number of processors that want to send something to the
+ * current processor.
+ */
+ unsigned int
+ compute_n_point_to_point_communications(
+ const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> &destinations);
+
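
As a minimal usage sketch of the new function (the send pattern and the helper function below are purely illustrative and not part of this patch), each rank announces the ranks it wants to send to and learns how many incoming messages to expect; duplicate destinations count as separate messages:

#include <deal.II/base/mpi.h>

#include <vector>

void
example(const MPI_Comm comm)
{
  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(comm);

  // Every rank other than 0 sends two messages to rank 0; duplicate
  // entries are allowed and simply mean two separate messages.
  std::vector<unsigned int> destinations;
  if (my_rank != 0)
    {
      destinations.push_back(0);
      destinations.push_back(0);
    }

  // Rank 0 should learn that 2 * (number of other ranks) messages are
  // headed its way; every other rank should see zero.
  const unsigned int n_senders =
    dealii::Utilities::MPI::compute_n_point_to_point_communications(
      comm, destinations);
  (void)n_senders;
}
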
/**
* Given a
* @ref GlossMPICommunicator "communicator",
Assert(destinations.size() == cells_to_send.size(), ExcInternalError());
- std::vector<unsigned int> senders =
- Utilities::MPI::compute_point_to_point_communication_pattern(
- communicator, destinations);
+ const unsigned int n_senders =
+ Utilities::MPI::compute_n_point_to_point_communications(communicator,
+ destinations);
// receive data
std::vector<char> receive;
CellData cell_data;
- for (unsigned int index = 0; index < senders.size(); ++index)
+ for (unsigned int index = 0; index < n_senders; ++index)
{
MPI_Status status;
int len;
}
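
Since only the number of senders is known after this change, a receive loop like the one above has to probe for a message from any source in each iteration. A minimal sketch of that pattern, assuming char payloads and tag 0 purely for illustration (the surrounding deal.II code uses its own tags and unpacking of cell_data):

for (unsigned int index = 0; index < n_senders; ++index)
  {
    MPI_Status status;
    int        ierr = MPI_Probe(MPI_ANY_SOURCE, 0, communicator, &status);
    AssertThrowMPI(ierr);

    // Ask how long the pending message is before receiving it.
    int len = 0;
    ierr = MPI_Get_count(&status, MPI_CHAR, &len);
    AssertThrowMPI(ierr);

    receive.resize(len);
    ierr = MPI_Recv(receive.data(),
                    len,
                    MPI_CHAR,
                    status.MPI_SOURCE,
                    status.MPI_TAG,
                    communicator,
                    MPI_STATUS_IGNORE);
    AssertThrowMPI(ierr);

    // ... deserialize the buffer into cell_data here ...
  }
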
+
+ unsigned int
+ compute_n_point_to_point_communications(
+ const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> &destinations)
+ {
+ const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
+ const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
+
+ for (unsigned int i = 0; i < destinations.size(); ++i)
+ {
+ Assert(destinations[i] < n_procs,
+ ExcIndexRange(destinations[i], 0, n_procs));
+ Assert(destinations[i] != myid,
+ ExcMessage(
+ "There is no point in communicating with ourselves."));
+ }
+
+ // Calculate the number of messages to send to each process
+ std::vector<unsigned int> dest_vector(n_procs);
+ for (const auto &el : destinations)
+ ++dest_vector[el];
+
+# if DEAL_II_MPI_VERSION_GTE(2, 2)
+ // Find out how many processes will send to this one
+ // MPI_Reduce_scatter(_block) does exactly this
+ unsigned int n_recv_from;
+ const int ierr = MPI_Reduce_scatter_block(
+ &dest_vector[0], &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
+
+ AssertThrowMPI(ierr);
+
+ return n_recv_from;
+# else
+ // Find out how many processes will send to this one
+ // by reducing with sum and then scattering the
+ // results over all processes
+ std::vector<unsigned int> buffer(dest_vector.size());
+ unsigned int n_recv_from;
+
+ int ierr = MPI_Reduce(&dest_vector[0],
+                       &buffer[0],
+                       dest_vector.size(),
+                       MPI_UNSIGNED,
+                       MPI_SUM,
+                       0,
+                       mpi_comm);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Scatter(&buffer[0],
+                    1,
+                    MPI_UNSIGNED,
+                    &n_recv_from,
+                    1,
+                    MPI_UNSIGNED,
+                    0,
+                    mpi_comm);
+ AssertThrowMPI(ierr);
+
+ return n_recv_from;
+# endif
+ }
+
+
+
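
For reference, the reduce-scatter trick used above can be seen in isolation. The following stand-alone sketch (illustrative only, not part of this patch) has every rank send one message to its right neighbor, so each rank should be told to expect exactly one incoming message (zero when run on a single process):

#include <mpi.h>

#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank, n_procs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &n_procs);

  // dest_vector[p] = number of messages this rank will send to rank p.
  std::vector<unsigned int> dest_vector(n_procs, 0);
  if (n_procs > 1)
    ++dest_vector[(rank + 1) % n_procs];

  // Sum the vectors element-wise over all ranks and deliver entry r of the
  // result to rank r: exactly the number of incoming messages to expect.
  unsigned int n_recv_from = 0;
  MPI_Reduce_scatter_block(dest_vector.data(),
                           &n_recv_from,
                           1,
                           MPI_UNSIGNED,
                           MPI_SUM,
                           MPI_COMM_WORLD);

  std::printf("rank %d expects %u incoming message(s)\n", rank, n_recv_from);

  MPI_Finalize();
  return 0;
}
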
namespace
{
-  // custom MIP_Op for calculate_collective_mpi_min_max_avg
+  // custom MPI_Op for calculate_collective_mpi_min_max_avg
-    // collect the neighbors
-    // that are going to send stuff to us
+    // determine how many of the neighbors
+    // are going to send stuff to us
- const std::vector<unsigned int> senders =
- Utilities::MPI::compute_point_to_point_communication_pattern(
+ const unsigned int n_senders =
+ Utilities::MPI::compute_n_point_to_point_communications(
this->get_communicator(), destinations);
// receive ghostcelldata
std::vector<char> receive;
CommunicateLocallyMovedVertices::CellInfo<dim, spacedim> cellinfo;
- for (unsigned int i = 0; i < senders.size(); ++i)
+ for (unsigned int i = 0; i < n_senders; ++i)
{
MPI_Status status;
int len;
// check all msgs got sent and received
Assert(Utilities::MPI::sum(needs_to_get_cells.size(),
this->get_communicator()) ==
- Utilities::MPI::sum(senders.size(), this->get_communicator()),
+ Utilities::MPI::sum(n_senders, this->get_communicator()),
ExcInternalError());
}
send_to.push_back(it->first);
num_receive =
- Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm,
- send_to)
- .size();
+ Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
+ send_to);
}
std::vector<MPI_Request> requests(send_data.size());
send_to.push_back(it->first);
num_receive =
- Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm,
- send_to)
- .size();
+ Utilities::MPI::compute_n_point_to_point_communications(mpi_comm,
+ send_to);
}
std::vector<MPI_Request> requests(send_data.size());
// Check if point-to-point communication
// works correctly with MPI_Reduce_scatter_block
-// implementation
+// implementation.
+// Also check that the simplified point-to-point communication
+// computation agrees with the original function: the size of the
+// vector returned by compute_point_to_point_communication_pattern()
+// should equal the number returned by
+// compute_n_point_to_point_communications().
#include <deal.II/base/utilities.h>
std::vector<unsigned int> origins =
Utilities::MPI::compute_point_to_point_communication_pattern(MPI_COMM_WORLD,
destinations);
+
+ const unsigned int n_origins =
+ Utilities::MPI::compute_n_point_to_point_communications(MPI_COMM_WORLD,
+ destinations);
+
+ if (origins.size() != n_origins)
+ deallog << "Size mismatch!" << std::endl;
+
std::sort(origins.begin(), origins.end());
if (myid == 0)