const unsigned int n_point_point_communications =
Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
+ // Protect the following communication:
+ static CollectiveMutex mutex;
+ CollectiveMutex::ScopedLock lock(mutex, comm);
+
// If we have something to send, or we expect something from other
// processors, we need to visit one of the two scopes below. Otherwise,
// no other action is required by this mpi process, and we can safely
// return.
if (send_to.size() == 0 && n_point_point_communications == 0)
return received_objects;
- // Protect the following communication:
- static CollectiveMutex mutex;
- CollectiveMutex::ScopedLock lock(mutex, comm);
-
const int mpi_tag =
internal::Tags::compute_point_to_point_communication_pattern;
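
For reference, Utilities::MPI::CollectiveMutex is documented as a mutex that must be acquired and released by all processes in the communicator. With the lock taken only after the early return, ranks that have nothing to send or receive would skip it while the remaining ranks' collective lock()/unlock() calls go unmatched; moving the acquisition above the early return, as in the hunk above, keeps all ranks in step. Below is a self-contained sketch of that ordering; the function name exchange_guarded and the std::vector<double> payload are illustrative and not part of the patch.

#include <deal.II/base/mpi.h>

#include <map>
#include <vector>

using namespace dealii;

// Sketch: acquire the CollectiveMutex before any early return so that every
// rank in 'comm' participates in the lock, mirroring the reordering above.
std::map<unsigned int, std::vector<double>>
exchange_guarded(const MPI_Comm                                     comm,
                 const std::map<unsigned int, std::vector<double>> &objects_to_send)
{
  std::map<unsigned int, std::vector<double>> received_objects;

  std::vector<unsigned int> send_to;
  for (const auto &pair : objects_to_send)
    send_to.push_back(pair.first);

  const unsigned int n_point_point_communications =
    Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);

  // Taken unconditionally: all ranks lock (and later unlock) together.
  static Utilities::MPI::CollectiveMutex            mutex;
  Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

  // Only now may a rank bail out early; the ScopedLock destructor still
  // releases the mutex collectively on this code path.
  if (send_to.size() == 0 && n_point_point_communications == 0)
    return received_objects;

  // ... point-to-point sends and receives, protected by the lock ...
  return received_objects;
}
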
deallog.push(Utilities::int_to_string(myid));
deallog << "**** proc " << myid << ": \n\n";
- deallog << "Sparsity pattern:\n";
+ deallog << "Sparsity pattern:" << std::endl;
sp.print_gnuplot(deallog.get_file_stream());
MPI_Barrier(MPI_COMM_WORLD);
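
A note on the deallog output that follows: deal.II's LogStream buffers text until it receives std::endl, and only then writes the buffered content to the attached stream with the DEAL prefix placed once at its head. That is why only the "**** proc N:" line carries a prefix in the expected output below, while the embedded blank line, the "Sparsity pattern:" header, and print_gnuplot()'s coordinate pairs (written directly to deallog.get_file_stream()) do not. A minimal sketch of this behavior, independent of the test above:

#include <deal.II/base/logstream.h>

#include <fstream>
#include <iostream>

int main()
{
  std::ofstream logfile("output");
  dealii::deallog.attach(logfile);
  dealii::deallog.depth_console(0);

  // Buffered only: the embedded "\n" does not end the logical log line.
  dealii::deallog << "**** proc 0: \n\n";
  // std::endl flushes the buffer; the whole chunk gets a single prefix.
  dealii::deallog << "Sparsity pattern:" << std::endl;
}
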
+DEAL:0:0:0::**** proc 0:
+
+Sparsity pattern:
0 0
1 0
2 0
90 -91
91 -91
+DEAL:1:1:1::**** proc 1:
+
+Sparsity pattern:
1 -1
3 -1
5 -1
119 -119
+DEAL:2:2:2::**** proc 2:
+
+Sparsity pattern:
2 -2
3 -2
7 -2
133 -133
+DEAL:3:3:3::**** proc 3:
+
+Sparsity pattern:
3 -3
50 -3
53 -3
146 -146
+DEAL:4:4:4::**** proc 4:
+
+Sparsity pattern: