From 838d6b32c4d37ee2afefe9506a34de99c2c25535 Mon Sep 17 00:00:00 2001 From: Martin Kronbichler Date: Wed, 28 Jun 2023 19:46:18 +0200 Subject: [PATCH] Move job_supports_mpi() to a more central place --- include/deal.II/base/mpi.templates.h | 4 +- .../deal.II/base/mpi_consensus_algorithms.h | 3 +- .../base/mpi_remote_point_evaluation.h | 22 ++++--- .../lac/affine_constraints.templates.h | 3 +- .../lac/la_parallel_block_vector.templates.h | 6 +- .../lac/la_parallel_vector.templates.h | 56 +++++++++--------- .../matrix_free/matrix_free.templates.h | 24 ++------ source/base/mpi.cc | 57 +++++++++++-------- .../base/mpi_compute_index_owner_internal.cc | 14 ++--- source/base/partitioner.cc | 12 +--- source/dofs/number_cache.cc | 8 +-- 11 files changed, 97 insertions(+), 112 deletions(-) diff --git a/include/deal.II/base/mpi.templates.h b/include/deal.II/base/mpi.templates.h index 3be70a21fd..64a93dd632 100644 --- a/include/deal.II/base/mpi.templates.h +++ b/include/deal.II/base/mpi.templates.h @@ -406,7 +406,7 @@ namespace Utilities const unsigned int root_process) { #ifdef DEAL_II_WITH_MPI - if (job_supports_mpi() && n_mpi_processes(comm) > 1) + if (n_mpi_processes(comm) > 1) { // 1) perform custom reduction T result = vec; @@ -488,7 +488,7 @@ namespace Utilities const MPI_Comm comm, const std::function &combiner) { - if (job_supports_mpi() && n_mpi_processes(comm) > 1) + if (n_mpi_processes(comm) > 1) { // 1) perform reduction const auto result = Utilities::MPI::reduce(vec, comm, combiner); diff --git a/include/deal.II/base/mpi_consensus_algorithms.h b/include/deal.II/base/mpi_consensus_algorithms.h index bde1118f1c..8f24df3370 100644 --- a/include/deal.II/base/mpi_consensus_algorithms.h +++ b/include/deal.II/base/mpi_consensus_algorithms.h @@ -2405,8 +2405,7 @@ namespace Utilities const MPI_Comm comm) { (void)comm; - Assert((Utilities::MPI::job_supports_mpi() == false) || - (Utilities::MPI::n_mpi_processes(comm) == 1), + Assert(Utilities::MPI::n_mpi_processes(comm) == 1, ExcMessage("You shouldn't use the 'Serial' class on " "communicators that have more than one process " "associated with it.")); diff --git a/include/deal.II/base/mpi_remote_point_evaluation.h b/include/deal.II/base/mpi_remote_point_evaluation.h index b412790630..7b6c53def0 100644 --- a/include/deal.II/base/mpi_remote_point_evaluation.h +++ b/include/deal.II/base/mpi_remote_point_evaluation.h @@ -476,10 +476,13 @@ namespace Utilities } // make sure all messages have been sent - const int ierr = MPI_Waitall(send_requests.size(), - send_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); + if (!send_requests.empty()) + { + const int ierr = MPI_Waitall(send_requests.size(), + send_requests.data(), + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } #endif } @@ -644,10 +647,13 @@ namespace Utilities buffer_eval[send_permutation_inv[i]] = recv_buffer_unpacked[c]; } - const int ierr = MPI_Waitall(send_requests.size(), - send_requests.data(), - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); + if (!send_requests.empty()) + { + const int ierr = MPI_Waitall(send_requests.size(), + send_requests.data(), + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } // evaluate function at points evaluation_function(buffer_eval, cell_data); diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index 8df61f8e87..d74f42a881 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -490,8 +490,7 @@ 
AffineConstraints::make_consistent_in_parallel( const IndexSet &locally_relevant_dofs, const MPI_Comm mpi_communicator) { - if (Utilities::MPI::job_supports_mpi() == false || - Utilities::MPI::n_mpi_processes(mpi_communicator) == 1) + if (Utilities::MPI::n_mpi_processes(mpi_communicator) == 1) return; // nothing to do, since serial Assert(sorted == false, ExcMatrixIsClosed()); diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h index e7476f0396..fd417fcd05 100644 --- a/include/deal.II/lac/la_parallel_block_vector.templates.h +++ b/include/deal.II/lac/la_parallel_block_vector.templates.h @@ -725,11 +725,7 @@ namespace LinearAlgebra { Assert(this->n_blocks() > 0, ExcEmptyObject()); - // use int instead of bool. in order to make global reduction operations - // work also when MPI_Init was not called, only call MPI_Allreduce - // commands when there is more than one processor (note that reinit() - // functions handle this case correctly through the job_supports_mpi() - // query). this is the same in all the functions below + // use int instead of bool int local_result = -1; for (unsigned int i = 0; i < this->n_blocks(); ++i) local_result = diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index 999f6204ad..13e3c5a11b 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -1341,34 +1341,36 @@ namespace LinearAlgebra #ifdef DEAL_II_WITH_MPI # ifdef DEBUG - if (Utilities::MPI::job_supports_mpi()) + Assert(Utilities::MPI::job_supports_mpi() || + (update_ghost_values_requests.empty() && + compress_requests.empty()), + ExcInternalError()); + + // make sure that there are not outstanding requests from updating + // ghost values or compress + int flag = 1; + if (update_ghost_values_requests.size() > 0) { - // make sure that there are not outstanding requests from updating - // ghost values or compress - int flag = 1; - if (update_ghost_values_requests.size() > 0) - { - const int ierr = MPI_Testall(update_ghost_values_requests.size(), - update_ghost_values_requests.data(), - &flag, - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - Assert(flag == 1, - ExcMessage( - "MPI found unfinished update_ghost_values() requests " - "when calling swap, which is not allowed.")); - } - if (compress_requests.size() > 0) - { - const int ierr = MPI_Testall(compress_requests.size(), - compress_requests.data(), - &flag, - MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - Assert(flag == 1, - ExcMessage("MPI found unfinished compress() requests " - "when calling swap, which is not allowed.")); - } + const int ierr = MPI_Testall(update_ghost_values_requests.size(), + update_ghost_values_requests.data(), + &flag, + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + Assert(flag == 1, + ExcMessage( + "MPI found unfinished update_ghost_values() requests " + "when calling swap, which is not allowed.")); + } + if (compress_requests.size() > 0) + { + const int ierr = MPI_Testall(compress_requests.size(), + compress_requests.data(), + &flag, + MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + Assert(flag == 1, + ExcMessage("MPI found unfinished compress() requests " + "when calling swap, which is not allowed.")); } # endif diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h index 1323f58aa0..0d6e09c878 100644 --- 
a/include/deal.II/matrix_free/matrix_free.templates.h +++ b/include/deal.II/matrix_free/matrix_free.templates.h @@ -441,24 +441,12 @@ MatrixFree::internal_reinit( task_info.allow_ghosted_vectors_in_loops = additional_data.allow_ghosted_vectors_in_loops; - // set variables that are independent of FE - if (Utilities::MPI::job_supports_mpi() == true) - { - task_info.communicator = dof_handler[0]->get_communicator(); - task_info.my_pid = - Utilities::MPI::this_mpi_process(task_info.communicator); - task_info.n_procs = - Utilities::MPI::n_mpi_processes(task_info.communicator); - - task_info.communicator_sm = additional_data.communicator_sm; - } - else - { - task_info.communicator = MPI_COMM_SELF; - task_info.communicator_sm = MPI_COMM_SELF; - task_info.my_pid = 0; - task_info.n_procs = 1; - } + task_info.communicator = dof_handler[0]->get_communicator(); + task_info.communicator_sm = additional_data.communicator_sm; + task_info.my_pid = + Utilities::MPI::this_mpi_process(task_info.communicator); + task_info.n_procs = + Utilities::MPI::n_mpi_processes(task_info.communicator); #ifdef DEBUG for (const auto &constraint : constraints) diff --git a/source/base/mpi.cc b/source/base/mpi.cc index dab88e2b97..76558a6064 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -149,9 +149,12 @@ namespace Utilities unsigned int n_mpi_processes(const MPI_Comm mpi_communicator) { - int n_jobs = 1; - const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs); - AssertThrowMPI(ierr); + int n_jobs = 1; + if (job_supports_mpi()) + { + const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs); + AssertThrowMPI(ierr); + } return n_jobs; } @@ -160,9 +163,12 @@ namespace Utilities unsigned int this_mpi_process(const MPI_Comm mpi_communicator) { - int rank = 0; - const int ierr = MPI_Comm_rank(mpi_communicator, &rank); - AssertThrowMPI(ierr); + int rank = 0; + if (job_supports_mpi()) + { + const int ierr = MPI_Comm_rank(mpi_communicator, &rank); + AssertThrowMPI(ierr); + } return rank; } @@ -541,8 +547,7 @@ namespace Utilities { // If MPI was not started, we have a serial computation and cannot run // the other MPI commands - if (job_supports_mpi() == false || - Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1) + if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1) { for (unsigned int i = 0; i < my_values.size(); ++i) { @@ -1184,21 +1189,24 @@ namespace Utilities #ifdef DEAL_II_WITH_MPI - // TODO: For now, we implement this mutex with a blocking barrier - // in the lock and unlock. It needs to be tested, if we can move - // to a nonblocking barrier (code disabled below). + if (job_supports_mpi()) + { + // TODO: For now, we implement this mutex with a blocking barrier in + // the lock and unlock. It needs to be tested, if we can move to a + // nonblocking barrier (code disabled below). - const int ierr = MPI_Barrier(comm); - AssertThrowMPI(ierr); + const int ierr = MPI_Barrier(comm); + AssertThrowMPI(ierr); # if 0 - // wait for non-blocking barrier to finish. This is a noop the - // first time we lock(). - const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); + // wait for non-blocking barrier to finish. This is a noop the + // first time we lock(). 
+ const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); # else - // nothing to do as blocking barrier already completed + // nothing to do as blocking barrier already completed # endif + } #endif locked = true; @@ -1222,16 +1230,19 @@ namespace Utilities #ifdef DEAL_II_WITH_MPI - // TODO: For now, we implement this mutex with a blocking barrier - // in the lock and unlock. It needs to be tested, if we can move - // to a nonblocking barrier (code disabled below): + if (job_supports_mpi()) + { + // TODO: For now, we implement this mutex with a blocking barrier + // in the lock and unlock. It needs to be tested, if we can move + // to a nonblocking barrier (code disabled below): # if 0 const int ierr = MPI_Ibarrier(comm, &request); AssertThrowMPI(ierr); # else - const int ierr = MPI_Barrier(comm); - AssertThrowMPI(ierr); + const int ierr = MPI_Barrier(comm); + AssertThrowMPI(ierr); # endif + } #endif locked = false; diff --git a/source/base/mpi_compute_index_owner_internal.cc b/source/base/mpi_compute_index_owner_internal.cc index f3a333089c..778e92371b 100644 --- a/source/base/mpi_compute_index_owner_internal.cc +++ b/source/base/mpi_compute_index_owner_internal.cc @@ -170,7 +170,6 @@ namespace Utilities // 1) set up the partition this->partition(owned_indices, comm); -#ifdef DEAL_II_WITH_MPI unsigned int my_rank = this_mpi_process(comm); types::global_dof_index dic_local_received = 0; @@ -217,14 +216,14 @@ namespace Utilities Assert(next_index > index_range.first, ExcInternalError()); -# ifdef DEBUG +#ifdef DEBUG // make sure that the owner is the same on the current // interval for (types::global_dof_index i = index_range.first + 1; i < next_index; ++i) AssertDimension(owner, dof_to_dict_rank(i)); -# endif +#endif // add the interval, either to the local range or into a // buffer to be sent to another processor @@ -245,6 +244,7 @@ namespace Utilities } } +#ifdef DEAL_II_WITH_MPI n_dict_procs_in_owned_indices = buffers.size(); std::vector request; @@ -421,8 +421,9 @@ namespace Utilities } #else - (void)owned_indices; + Assert(buffers.size() == 0, ExcInternalError()); (void)comm; + (void)dic_local_received; #endif } @@ -432,7 +433,6 @@ namespace Utilities Dictionary::partition(const IndexSet &owned_indices, const MPI_Comm comm) { -#ifdef DEAL_II_WITH_MPI const unsigned int n_procs = n_mpi_processes(comm); const unsigned int my_rank = this_mpi_process(comm); @@ -452,10 +452,6 @@ namespace Utilities local_range.second = get_index_offset(my_rank + 1); locally_owned_size = local_range.second - local_range.first; -#else - (void)owned_indices; - (void)comm; -#endif } diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc index 4c0177ee40..6c23bba98a 100644 --- a/source/base/partitioner.cc +++ b/source/base/partitioner.cc @@ -150,16 +150,8 @@ namespace Utilities void Partitioner::set_owned_indices(const IndexSet &locally_owned_indices) { - if (Utilities::MPI::job_supports_mpi() == true) - { - my_pid = Utilities::MPI::this_mpi_process(communicator); - n_procs = Utilities::MPI::n_mpi_processes(communicator); - } - else - { - my_pid = 0; - n_procs = 1; - } + my_pid = Utilities::MPI::this_mpi_process(communicator); + n_procs = Utilities::MPI::n_mpi_processes(communicator); // set the local range Assert(locally_owned_indices.is_contiguous() == true, diff --git a/source/dofs/number_cache.cc b/source/dofs/number_cache.cc index b5085c4a0b..e7b4e233ca 100644 --- a/source/dofs/number_cache.cc +++ b/source/dofs/number_cache.cc @@ -91,9 +91,7 @@ namespace internal else 
if (n_locally_owned_dofs_per_processor.empty() == false) { AssertDimension(n_locally_owned_dofs_per_processor.size(), - (Utilities::MPI::job_supports_mpi() ? - Utilities::MPI::n_mpi_processes(mpi_communicator) : - 1)); + Utilities::MPI::n_mpi_processes(mpi_communicator)); return n_locally_owned_dofs_per_processor; } else @@ -115,9 +113,7 @@ namespace internal else if (locally_owned_dofs_per_processor.empty() == false) { AssertDimension(locally_owned_dofs_per_processor.size(), - (Utilities::MPI::job_supports_mpi() ? - Utilities::MPI::n_mpi_processes(mpi_communicator) : - 1)); + Utilities::MPI::n_mpi_processes(mpi_communicator)); return locally_owned_dofs_per_processor; } else -- 2.39.5
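For readers outside the deal.II tree, the standalone sketch below (the names mirror Utilities::MPI, but it is not the library code itself) illustrates the pattern this patch centralizes: n_mpi_processes() and this_mpi_process() check job_supports_mpi() internally and fall back to a size of 1 and rank 0, so call sites such as all_reduce() or make_consistent_in_parallel() can drop their own guards and simply branch on the process count.

// Standalone sketch of the centralized guard pattern (assumed helper names
// mirror deal.II's Utilities::MPI, but this is not the library code).
#include <mpi.h>

#include <cstdio>

namespace sketch
{
  // True only if MPI_Init() has already run in this job.
  bool job_supports_mpi()
  {
    int initialized = 0;
    MPI_Initialized(&initialized);
    return initialized != 0;
  }

  // Like the patched n_mpi_processes(): falls back to 1 without MPI.
  unsigned int n_mpi_processes(const MPI_Comm comm)
  {
    int n_jobs = 1;
    if (job_supports_mpi())
      MPI_Comm_size(comm, &n_jobs);
    return n_jobs;
  }

  // Like the patched this_mpi_process(): falls back to rank 0 without MPI.
  unsigned int this_mpi_process(const MPI_Comm comm)
  {
    int rank = 0;
    if (job_supports_mpi())
      MPI_Comm_rank(comm, &rank);
    return rank;
  }
} // namespace sketch

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // A caller no longer needs its own job_supports_mpi() check: branching on
  // the process count alone covers both the serial and the parallel case.
  if (sketch::n_mpi_processes(MPI_COMM_WORLD) == 1)
    std::printf("serial path, rank %u\n",
                sketch::this_mpi_process(MPI_COMM_WORLD));
  else
    std::printf("parallel path, rank %u\n",
                sketch::this_mpi_process(MPI_COMM_WORLD));

  MPI_Finalize();
  return 0;
}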