const unsigned int root_process)
{
#ifdef DEAL_II_WITH_MPI
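+ // n_mpi_processes() itself returns 1 when MPI has not been initialized
+ // (see its new implementation below), so an explicit job_supports_mpi()
+ // guard is redundant here and in the similar checks that follow.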
- if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+ if (n_mpi_processes(comm) > 1)
{
// 1) perform custom reduction
T result = vec;
const MPI_Comm comm,
const std::function<T(const T &, const T &)> &combiner)
{
- if (job_supports_mpi() && n_mpi_processes(comm) > 1)
+ if (n_mpi_processes(comm) > 1)
{
// 1) perform reduction
const auto result = Utilities::MPI::reduce<T>(vec, comm, combiner);
const MPI_Comm comm)
{
(void)comm;
- Assert((Utilities::MPI::job_supports_mpi() == false) ||
- (Utilities::MPI::n_mpi_processes(comm) == 1),
+ Assert(Utilities::MPI::n_mpi_processes(comm) == 1,
ExcMessage("You shouldn't use the 'Serial' class on "
"communicators that have more than one process "
"associated with it."));
}
// make sure all messages have been sent
- const int ierr = MPI_Waitall(send_requests.size(),
- send_requests.data(),
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
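+ // skip the call if no requests were started: a serial run that never
+ // initialized MPI creates no requests and must not call MPI functions,
+ // and an empty vector's data() pointer may be null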
+ if (!send_requests.empty())
+ {
+ const int ierr = MPI_Waitall(send_requests.size(),
+ send_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
#endif
}
buffer_eval[send_permutation_inv[i]] = recv_buffer_unpacked[c];
}
- const int ierr = MPI_Waitall(send_requests.size(),
- send_requests.data(),
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
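+ // same guard as above: avoid calling MPI_Waitall when no requests
+ // were started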
+ if (!send_requests.empty())
+ {
+ const int ierr = MPI_Waitall(send_requests.size(),
+ send_requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
// evaluate function at points
evaluation_function(buffer_eval, cell_data);
const IndexSet &locally_relevant_dofs,
const MPI_Comm mpi_communicator)
{
- if (Utilities::MPI::job_supports_mpi() == false ||
- Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
+ if (Utilities::MPI::n_mpi_processes(mpi_communicator) == 1)
return; // nothing to do, since serial
Assert(sorted == false, ExcMatrixIsClosed());
{
Assert(this->n_blocks() > 0, ExcEmptyObject());
- // use int instead of bool. in order to make global reduction operations
- // work also when MPI_Init was not called, only call MPI_Allreduce
- // commands when there is more than one processor (note that reinit()
- // functions handle this case correctly through the job_supports_mpi()
- // query). this is the same in all the functions below
+ // use int instead of bool, since the result is reduced over MPI below
int local_result = -1;
for (unsigned int i = 0; i < this->n_blocks(); ++i)
local_result =
#ifdef DEAL_II_WITH_MPI
# ifdef DEBUG
- if (Utilities::MPI::job_supports_mpi())
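+ // without an MPI runtime, no communication requests can ever have
+ // been started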
+ Assert(Utilities::MPI::job_supports_mpi() ||
+ (update_ghost_values_requests.empty() &&
+ compress_requests.empty()),
+ ExcInternalError());
+
+ // make sure that there are no outstanding requests from updating
+ // ghost values or compress
+ int flag = 1;
+ if (update_ghost_values_requests.size() > 0)
{
- // make sure that there are not outstanding requests from updating
- // ghost values or compress
- int flag = 1;
- if (update_ghost_values_requests.size() > 0)
- {
- const int ierr = MPI_Testall(update_ghost_values_requests.size(),
- update_ghost_values_requests.data(),
- &flag,
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- Assert(flag == 1,
- ExcMessage(
- "MPI found unfinished update_ghost_values() requests "
- "when calling swap, which is not allowed."));
- }
- if (compress_requests.size() > 0)
- {
- const int ierr = MPI_Testall(compress_requests.size(),
- compress_requests.data(),
- &flag,
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- Assert(flag == 1,
- ExcMessage("MPI found unfinished compress() requests "
- "when calling swap, which is not allowed."));
- }
+ const int ierr = MPI_Testall(update_ghost_values_requests.size(),
+ update_ghost_values_requests.data(),
+ &flag,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ Assert(flag == 1,
+ ExcMessage(
+ "MPI found unfinished update_ghost_values() requests "
+ "when calling swap, which is not allowed."));
+ }
+ if (compress_requests.size() > 0)
+ {
+ const int ierr = MPI_Testall(compress_requests.size(),
+ compress_requests.data(),
+ &flag,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ Assert(flag == 1,
+ ExcMessage("MPI found unfinished compress() requests "
+ "when calling swap, which is not allowed."));
}
# endif
task_info.allow_ghosted_vectors_in_loops =
additional_data.allow_ghosted_vectors_in_loops;
- // set variables that are independent of FE
- if (Utilities::MPI::job_supports_mpi() == true)
- {
- task_info.communicator = dof_handler[0]->get_communicator();
- task_info.my_pid =
- Utilities::MPI::this_mpi_process(task_info.communicator);
- task_info.n_procs =
- Utilities::MPI::n_mpi_processes(task_info.communicator);
-
- task_info.communicator_sm = additional_data.communicator_sm;
- }
- else
- {
- task_info.communicator = MPI_COMM_SELF;
- task_info.communicator_sm = MPI_COMM_SELF;
- task_info.my_pid = 0;
- task_info.n_procs = 1;
- }
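+ // set variables that are independent of FE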
+ task_info.communicator = dof_handler[0]->get_communicator();
+ task_info.communicator_sm = additional_data.communicator_sm;
+ task_info.my_pid =
+ Utilities::MPI::this_mpi_process(task_info.communicator);
+ task_info.n_procs =
+ Utilities::MPI::n_mpi_processes(task_info.communicator);
#ifdef DEBUG
for (const auto &constraint : constraints)
unsigned int
n_mpi_processes(const MPI_Comm mpi_communicator)
{
- int n_jobs = 1;
- const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
- AssertThrowMPI(ierr);
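+ // default to a single process if MPI has not been initialized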
+ int n_jobs = 1;
+ if (job_supports_mpi())
+ {
+ const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
+ AssertThrowMPI(ierr);
+ }
return n_jobs;
}
unsigned int
this_mpi_process(const MPI_Comm mpi_communicator)
{
- int rank = 0;
- const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
- AssertThrowMPI(ierr);
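+ // default to rank 0 if MPI has not been initialized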
+ int rank = 0;
+ if (job_supports_mpi())
+ {
+ const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
+ AssertThrowMPI(ierr);
+ }
return rank;
}
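// Illustration (hypothetical caller, not part of this patch): with the
// guards centralized in the two functions above, call sites reduce to
//
//   if (Utilities::MPI::n_mpi_processes(comm) > 1)
//     {
//       /* parallel path */
//     }
//   else
//     {
//       /* serial path, also taken when MPI was never initialized */
//     }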
{
// If MPI was not started, we have a serial computation and cannot run
// the other MPI commands
- if (job_supports_mpi() == false ||
- Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
+ if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
{
for (unsigned int i = 0; i < my_values.size(); ++i)
{
#ifdef DEAL_II_WITH_MPI
- // TODO: For now, we implement this mutex with a blocking barrier
- // in the lock and unlock. It needs to be tested, if we can move
- // to a nonblocking barrier (code disabled below).
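+ // only call into MPI if it is actually available; locking is a no-op
+ // on a single, serial process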
+ if (job_supports_mpi())
+ {
+ // TODO: For now, we implement this mutex with a blocking barrier in
+ // the lock and unlock. It needs to be tested whether we can move to a
+ // nonblocking barrier (code disabled below).
- const int ierr = MPI_Barrier(comm);
- AssertThrowMPI(ierr);
+ const int ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
# if 0
- // wait for non-blocking barrier to finish. This is a noop the
- // first time we lock().
- const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
- AssertThrowMPI(ierr);
+ // wait for the non-blocking barrier to finish. This is a no-op the
+ // first time we lock().
+ const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
# else
- // nothing to do as blocking barrier already completed
+ // nothing to do as blocking barrier already completed
# endif
+ }
#endif
locked = true;
#ifdef DEAL_II_WITH_MPI
- // TODO: For now, we implement this mutex with a blocking barrier
- // in the lock and unlock. It needs to be tested, if we can move
- // to a nonblocking barrier (code disabled below):
+ if (job_supports_mpi())
+ {
+ // TODO: For now, we implement this mutex with a blocking barrier
+ // in the lock and unlock. It needs to be tested whether we can move
+ // to a nonblocking barrier (code disabled below):
# if 0
const int ierr = MPI_Ibarrier(comm, &request);
AssertThrowMPI(ierr);
# else
- const int ierr = MPI_Barrier(comm);
- AssertThrowMPI(ierr);
+ const int ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
# endif
+ }
#endif
locked = false;
// 1) set up the partition
this->partition(owned_indices, comm);
-#ifdef DEAL_II_WITH_MPI
unsigned int my_rank = this_mpi_process(comm);
types::global_dof_index dic_local_received = 0;
Assert(next_index > index_range.first, ExcInternalError());
-# ifdef DEBUG
+#ifdef DEBUG
// make sure that the owner is the same on the current
// interval
for (types::global_dof_index i = index_range.first + 1;
i < next_index;
++i)
AssertDimension(owner, dof_to_dict_rank(i));
-# endif
+#endif
// add the interval, either to the local range or into a
// buffer to be sent to another processor
}
}
+#ifdef DEAL_II_WITH_MPI
n_dict_procs_in_owned_indices = buffers.size();
std::vector<MPI_Request> request;
}
#else
- (void)owned_indices;
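+ // without MPI the whole index range is local, so no send buffers can
+ // have been filled above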
+ Assert(buffers.size() == 0, ExcInternalError());
(void)comm;
+ (void)dic_local_received;
#endif
}
Dictionary::partition(const IndexSet &owned_indices,
const MPI_Comm comm)
{
-#ifdef DEAL_II_WITH_MPI
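+ // n_mpi_processes()/this_mpi_process() fall back to 1/0 without MPI,
+ // so this body is now valid in serial builds as well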
const unsigned int n_procs = n_mpi_processes(comm);
const unsigned int my_rank = this_mpi_process(comm);
local_range.second = get_index_offset(my_rank + 1);
locally_owned_size = local_range.second - local_range.first;
-#else
- (void)owned_indices;
- (void)comm;
-#endif
}
void
Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
{
- if (Utilities::MPI::job_supports_mpi() == true)
- {
- my_pid = Utilities::MPI::this_mpi_process(communicator);
- n_procs = Utilities::MPI::n_mpi_processes(communicator);
- }
- else
- {
- my_pid = 0;
- n_procs = 1;
- }
+ my_pid = Utilities::MPI::this_mpi_process(communicator);
+ n_procs = Utilities::MPI::n_mpi_processes(communicator);
// set the local range
Assert(locally_owned_indices.is_contiguous() == true,
else if (n_locally_owned_dofs_per_processor.empty() == false)
{
AssertDimension(n_locally_owned_dofs_per_processor.size(),
- (Utilities::MPI::job_supports_mpi() ?
- Utilities::MPI::n_mpi_processes(mpi_communicator) :
- 1));
+ Utilities::MPI::n_mpi_processes(mpi_communicator));
return n_locally_owned_dofs_per_processor;
}
else
else if (locally_owned_dofs_per_processor.empty() == false)
{
AssertDimension(locally_owned_dofs_per_processor.size(),
- (Utilities::MPI::job_supports_mpi() ?
- Utilities::MPI::n_mpi_processes(mpi_communicator) :
- 1));
+ Utilities::MPI::n_mpi_processes(mpi_communicator));
return locally_owned_dofs_per_processor;
}
else