namespace internal
{
- template <typename T>
- std::tuple<T, T>
- compute_prefix_sum(const T &value, const MPI_Comm comm)
- {
-# ifndef DEAL_II_WITH_MPI
- (void)comm;
- return {0, value};
-# else
- if (Utilities::MPI::n_mpi_processes(comm) == 1)
- return {0, value};
- else
- {
- T prefix = {};
-
- // First obtain every process's prefix sum:
- int ierr =
- MPI_Exscan(&value,
- &prefix,
- 1,
- Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
- MPI_SUM,
- comm);
- AssertThrowMPI(ierr);
-
- // Then we also need the total sum. We could obtain it by
- // calling Utilities::MPI::sum(), but it is cheaper if we
- // broadcast it from the last process, which can compute it
- // from its own prefix sum plus its own value.
- T sum = Utilities::MPI::broadcast(
- comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
-
- return {prefix, sum};
- }
-# endif
- }
-
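// For orientation: the call sites below assume that
// Utilities::MPI::partial_and_total_sum(value, comm) returns a pair-like
// object holding the same two quantities as the helper removed above, namely
// {exclusive prefix sum of `value` over the ranks of `comm`, sum of
// `value` over all ranks}. A minimal usage sketch:
//
//   const auto [prefix, total] =
//     Utilities::MPI::partial_and_total_sum(local_size, comm);
//   // prefix == 0 on rank 0; prefix + local_size == total on the last rank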
template <typename T>
using get_mpi_communicator_t =
decltype(std::declval<const T>().get_mpi_communicator());
{
std::vector<unsigned int> dummy(locally_active_dofs.n_elements());
- const auto local_size = get_local_size(system_matrix);
- const auto [prefix_sum, total_sum] = compute_prefix_sum(local_size, comm);
+ const auto local_size = get_local_size(system_matrix);
+ const auto [prefix_sum, total_sum] =
+ Utilities::MPI::partial_and_total_sum(local_size, comm);
IndexSet locally_owned_dofs(total_sum);
locally_owned_dofs.add_range(prefix_sum, prefix_sum + local_size);
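// For illustration (hypothetical sizes): if get_local_size() reports 4, 6,
// and 5 locally owned rows on ranks 0, 1, and 2, then total_sum == 15 on
// every rank and the ranks add the disjoint ranges [0,4), [4,10), and
// [10,15) to an IndexSet of size 15.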
{
// 0) determine which rows are locally owned and which ones are remote
const auto local_size = internal::get_local_size(system_matrix);
- const auto prefix_sum = internal::compute_prefix_sum(
+ const auto prefix_sum = Utilities::MPI::partial_and_total_sum(
local_size, internal::get_mpi_communicator(system_matrix));
IndexSet locally_owned_dofs(std::get<1>(prefix_sum));
locally_owned_dofs.add_range(std::get<0>(prefix_sum),
for (const auto &weight : weights)
process_local_weight += weight;
- // determine partial sum of weights of this process
- std::uint64_t process_local_weight_offset = 0;
-
- int ierr = MPI_Exscan(
- &process_local_weight,
- &process_local_weight_offset,
- 1,
- Utilities::MPI::mpi_type_id_for_type<decltype(process_local_weight)>,
- MPI_SUM,
- tria->get_communicator());
- AssertThrowMPI(ierr);
-
- // total weight of all processes
- std::uint64_t total_weight =
- process_local_weight_offset + process_local_weight;
-
- ierr =
- MPI_Bcast(&total_weight,
- 1,
- Utilities::MPI::mpi_type_id_for_type<decltype(total_weight)>,
- n_subdomains - 1,
- mpi_communicator);
- AssertThrowMPI(ierr);
+ // determine partial sum of weights of this process, as well as the total
+ // weight
+ const auto [process_local_weight_offset, total_weight] =
+ Utilities::MPI::partial_and_total_sum(process_local_weight,
+ tria->get_communicator());
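// For example (hypothetical weights): with process-local weights {3, 5, 2}
// on three ranks, the offsets come out as {0, 3, 8} and total_weight == 10
// on every rank.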
// set up partition
LinearAlgebra::distributed::Vector<double> partition(partitioner);
// --------- Phase 4: shift indices so that each processor has a unique
// range of indices
- dealii::types::global_dof_index my_shift = 0;
- const int ierr = MPI_Exscan(&n_locally_owned_dofs,
- &my_shift,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- triangulation->get_communicator());
- AssertThrowMPI(ierr);
+ const auto [my_shift, n_global_dofs] =
+ Utilities::MPI::partial_and_total_sum(
+ n_locally_owned_dofs, triangulation->get_communicator());
+
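// Since my_shift equals the number of dofs owned by all lower-ranked
// processes, the consecutive indices handed out below, i.e. the range
// [my_shift, my_shift + n_locally_owned_dofs), are disjoint between
// processes and together cover [0, n_global_dofs).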
// make dof indices globally consecutive
Implementation::enumerate_dof_indices_for_renumbering(
*dof_handler,
/*check_validity=*/false);
- // now a little bit of housekeeping
- const dealii::types::global_dof_index n_global_dofs =
- Utilities::MPI::sum(n_locally_owned_dofs,
- triangulation->get_communicator());
-
NumberCache number_cache;
number_cache.n_global_dofs = n_global_dofs;
number_cache.n_locally_owned_dofs = n_locally_owned_dofs;
//* 3. communicate local dofcount and shift ids to make
// them unique
- dealii::types::global_dof_index my_shift = 0;
- int ierr = MPI_Exscan(&level_number_cache.n_locally_owned_dofs,
- &my_shift,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- triangulation->get_communicator());
- AssertThrowMPI(ierr);
-
- // The last processor knows about the total number of dofs, so we
- // can use a cheaper broadcast rather than an MPI_Allreduce via
- // MPI::sum().
- level_number_cache.n_global_dofs =
- my_shift + level_number_cache.n_locally_owned_dofs;
- ierr = MPI_Bcast(&level_number_cache.n_global_dofs,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- Utilities::MPI::n_mpi_processes(
- triangulation->get_communicator()) -
- 1,
- triangulation->get_communicator());
- AssertThrowMPI(ierr);
+ const auto [my_shift, n_global_dofs] =
+ Utilities::MPI::partial_and_total_sum(
+ level_number_cache.n_locally_owned_dofs,
+ triangulation->get_communicator());
+ level_number_cache.n_global_dofs = n_global_dofs;
// assign appropriate indices
+ types::global_dof_index next_free_index = my_shift;
for (types::global_dof_index &index : renumbering)
if (index == enumeration_dof_index)
- index = my_shift++;
+ index = next_free_index++;
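// The loop above hands out exactly n_locally_owned_dofs consecutive
// indices, so next_free_index now equals
// my_shift + level_number_cache.n_locally_owned_dofs; the add_range()
// call below relies on this to describe the contiguous locally owned
// range.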
// now re-enumerate all dofs to this shifted and condensed
// numbering form. we renumber some dofs as invalid, so
level_number_cache.locally_owned_dofs =
IndexSet(level_number_cache.n_global_dofs);
level_number_cache.locally_owned_dofs.add_range(
- my_shift - level_number_cache.n_locally_owned_dofs, my_shift);
+ next_free_index - level_number_cache.n_locally_owned_dofs,
+ next_free_index);
level_number_cache.locally_owned_dofs.compress();
number_caches.emplace_back(level_number_cache);
cumulative_cell_weights.back() :
0.0;
- double global_weight_integral;
+
+ double local_start_weight = numbers::signaling_nan<double>();
+ double global_weight_integral = numbers::signaling_nan<double>();
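// (numbers::signaling_nan() fills these with a signaling NaN so that
// accidentally using either value before one of the branches below assigns
// it triggers a floating-point exception where FP exceptions are enabled.)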
if (const auto tria =
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&triangulation))
{
- global_weight_integral =
- Utilities::MPI::sum(local_weight_integral,
- tria->get_communicator());
+ std::tie(local_start_weight, global_weight_integral) =
+ Utilities::MPI::partial_and_total_sum(local_weight_integral,
+ tria->get_communicator());
}
else
{
+ local_start_weight = 0;
global_weight_integral = local_weight_integral;
}
"part of the domain; also check the syntax of "
"the function."));
- // Determine the starting weight of this process, which is the sum of
- // the weights of all processes with a lower rank
- double local_start_weight = 0.0;
-
-#ifdef DEAL_II_WITH_MPI
- if (const auto tria =
- dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &triangulation))
- {
- const int ierr = MPI_Exscan(&local_weight_integral,
- &local_start_weight,
- 1,
- MPI_DOUBLE,
- MPI_SUM,
- tria->get_communicator());
- AssertThrowMPI(ierr);
- }
-#endif
-
// Calculate start id
start_particle_id =
std::llround(static_cast<double>(n_particles_to_create) *