*/
namespace MPI
{
+#ifdef DOXYGEN
+ /**
+ * Given a pointer to an object of class T, return the matching
+ * `MPI_Datatype` to be used for MPI communication.
+ *
+ * As an example, passing an `int*` to this function returns `MPI_INT`.
+ *
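+ * As a minimal usage sketch (the variable `value` and the use of
+ * `MPI_COMM_WORLD` are purely illustrative), a broadcast of a single
+ * integer could be written as:
+ * @code
+ *   int value = 42;
+ *   // mpi_type_id(&value) picks the overload that returns MPI_INT:
+ *   MPI_Bcast(&value, 1, mpi_type_id(&value), 0, MPI_COMM_WORLD);
+ * @endcode
+ *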
+ * @note In reality, these functions are not template functions templated
+ * on the parameter T, but free-standing inline function overloads. This
+ * templated version exists only so that it shows up in the documentation.
+ */
+ template <typename T>
+ MPI_Datatype
+ mpi_type_id(const T *);
+#endif
+
/**
* Return the number of MPI processes that exist in the given
* @ref GlossMPICommunicator "communicator"
compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
+
#ifndef DOXYGEN
- // declaration for an internal function that lives in mpi.templates.h
+
+ /* --------------------------- inline functions ------------------------- */
+
+# ifdef DEAL_II_WITH_MPI
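+ // Each of the following overloads maps one built-in C++ type to its
+ // MPI datatype. Ordinary overload resolution on the pointer argument
+ // selects the right function, so no template machinery is needed.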
+ inline MPI_Datatype
+ mpi_type_id(const bool *)
+ {
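+ // Prefer the datatype MPI provides specifically for the C++ 'bool'
+ // type where the MPI version makes it available; otherwise fall back
+ // to the datatype for the C99 '_Bool' type.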
+#  if DEAL_II_MPI_VERSION_GTE(2, 2)
+ return MPI_CXX_BOOL;
+#  else
+ return MPI_C_BOOL;
+#  endif
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const char *)
+ {
+ return MPI_CHAR;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const signed char *)
+ {
+ return MPI_SIGNED_CHAR;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const short *)
+ {
+ return MPI_SHORT;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const int *)
+ {
+ return MPI_INT;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const long int *)
+ {
+ return MPI_LONG;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const unsigned char *)
+ {
+ return MPI_UNSIGNED_CHAR;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const unsigned short *)
+ {
+ return MPI_UNSIGNED_SHORT;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const unsigned int *)
+ {
+ return MPI_UNSIGNED;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const unsigned long int *)
+ {
+ return MPI_UNSIGNED_LONG;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const unsigned long long int *)
+ {
+ return MPI_UNSIGNED_LONG_LONG;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const float *)
+ {
+ return MPI_FLOAT;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const double *)
+ {
+ return MPI_DOUBLE;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const long double *)
+ {
+ return MPI_LONG_DOUBLE;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const std::complex<float> *)
+ {
+ return MPI_COMPLEX;
+ }
+
+
+
+ inline MPI_Datatype
+ mpi_type_id(const std::complex<double> *)
+ {
+ return MPI_DOUBLE_COMPLEX;
+ }
+# endif
+
+
namespace internal
{
+ // declaration for an internal function that lives in mpi.templates.h
template <typename T>
void
all_reduce(const MPI_Op & mpi_op,
const ArrayView<const T> &values,
const MPI_Comm & mpi_communicator,
const ArrayView<T> & output);
- }
+ } // namespace internal
{
namespace internal
{
-#ifdef DEAL_II_WITH_MPI
- /**
- * Return the corresponding MPI data type id for the argument given.
- */
- inline MPI_Datatype
- mpi_type_id(const bool *)
- {
-# if DEAL_II_MPI_VERSION_GTE(2, 2)
- return MPI_CXX_BOOL;
-# else
- return MPI_C_BOOL;
-# endif
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const char *)
- {
- return MPI_CHAR;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const signed char *)
- {
- return MPI_SIGNED_CHAR;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const short *)
- {
- return MPI_SHORT;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const int *)
- {
- return MPI_INT;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const long int *)
- {
- return MPI_LONG;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const unsigned char *)
- {
- return MPI_UNSIGNED_CHAR;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const unsigned short *)
- {
- return MPI_UNSIGNED_SHORT;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const unsigned int *)
- {
- return MPI_UNSIGNED;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const unsigned long int *)
- {
- return MPI_UNSIGNED_LONG;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const unsigned long long int *)
- {
- return MPI_UNSIGNED_LONG_LONG;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const float *)
- {
- return MPI_FLOAT;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const double *)
- {
- return MPI_DOUBLE;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const long double *)
- {
- return MPI_LONG_DOUBLE;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const std::complex<float> *)
- {
- return MPI_COMPLEX;
- }
-
-
-
- inline MPI_Datatype
- mpi_type_id(const std::complex<double> *)
- {
- return MPI_DOUBLE_COMPLEX;
- }
-#endif
-
-
template <typename T>
void
all_reduce(const MPI_Op & mpi_op,
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size()),
- internal::mpi_type_id(values.data()),
+ mpi_type_id(values.data()),
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size() * 2),
- internal::mpi_type_id(static_cast<T *>(nullptr)),
+ mpi_type_id(static_cast<T *>(nullptr)),
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
const int ierr =
MPI_Irecv(buffers.data() + recv_ptr[i],
recv_ptr[i + 1] - recv_ptr[i],
- Utilities::MPI::internal::mpi_type_id(buffers.data()),
+ Utilities::MPI::mpi_type_id(buffers.data()),
recv_ranks[i],
tag,
communicator,
const int ierr =
MPI_Isend(buffers.data() + send_ptr[i],
send_ptr[i + 1] - send_ptr[i],
- Utilities::MPI::internal::mpi_type_id(buffers.data()),
+ Utilities::MPI::mpi_type_id(buffers.data()),
send_ranks[i],
tag,
communicator,
const auto ierr_1 = MPI_Isend(
buffer.data(),
buffer.size(),
- Utilities::MPI::internal::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id(buffer.data()),
i.first,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
int message_length;
const int ierr_2 =
MPI_Get_count(&status,
- Utilities::MPI::internal::mpi_type_id(
- buffer.data()),
+ Utilities::MPI::mpi_type_id(buffer.data()),
&message_length);
AssertThrowMPI(ierr_2);
const int ierr_3 = MPI_Recv(
buffer.data(),
buffer.size(),
- Utilities::MPI::internal::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id(buffer.data()),
status.MPI_SOURCE,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
types::global_dof_index prefix_sum = 0;
#ifdef DEAL_II_WITH_MPI
- const int ierr =
- MPI_Exscan(&local_size,
- &prefix_sum,
- 1,
- Utilities::MPI::internal::mpi_type_id(&prefix_sum),
- MPI_SUM,
- communicator);
+ const int ierr = MPI_Exscan(&local_size,
+ &prefix_sum,
+ 1,
+ Utilities::MPI::mpi_type_id(&prefix_sum),
+ MPI_SUM,
+ communicator);
AssertThrowMPI(ierr);
#endif
Assert(count > 0, ExcInternalError());
if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
{
- const int ierr =
- MPI_Bcast(value,
- count,
- Utilities::MPI::internal::mpi_type_id(value),
- 0 /*from root*/,
- mpi_communicator_inactive_with_root);
+ const int ierr = MPI_Bcast(value,
+ count,
+ Utilities::MPI::mpi_type_id(value),
+ 0 /*from root*/,
+ mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
}
}
const int ierr = MPI_Exscan(&process_has_active_locally_owned_cells,
&offset,
1,
- Utilities::MPI::internal::mpi_type_id(
+ Utilities::MPI::mpi_type_id(
&process_has_active_locally_owned_cells),
MPI_SUM,
comm);
// determine partial sum of weights of this process
uint64_t process_local_weight_offset = 0;
- int ierr =
- MPI_Exscan(&process_local_weight,
- &process_local_weight_offset,
- 1,
- Utilities::MPI::internal::mpi_type_id(&process_local_weight),
- MPI_SUM,
- tria->get_communicator());
+ int ierr = MPI_Exscan(&process_local_weight,
+ &process_local_weight_offset,
+ 1,
+ Utilities::MPI::mpi_type_id(&process_local_weight),
+ MPI_SUM,
+ tria->get_communicator());
AssertThrowMPI(ierr);
// total weight of all processes
ierr = MPI_Bcast(&total_weight,
1,
- Utilities::MPI::internal::mpi_type_id(&total_weight),
+ Utilities::MPI::mpi_type_id(&total_weight),
n_subdomains - 1,
mpi_communicator);
AssertThrowMPI(ierr);
MPI_Exscan(&n_locally_owned_cells,
&cell_index,
1,
- Utilities::MPI::internal::mpi_type_id(&n_locally_owned_cells),
+ Utilities::MPI::mpi_type_id(&n_locally_owned_cells),
MPI_SUM,
this->mpi_communicator);
AssertThrowMPI(ierr);
std::vector<types::global_cell_index> cell_index(
this->n_global_levels(), 0);
- int ierr = MPI_Exscan(n_locally_owned_cells.data(),
- cell_index.data(),
- this->n_global_levels(),
- Utilities::MPI::internal::mpi_type_id(
- n_locally_owned_cells.data()),
- MPI_SUM,
- this->mpi_communicator);
+ int ierr =
+ MPI_Exscan(n_locally_owned_cells.data(),
+ cell_index.data(),
+ this->n_global_levels(),
+ Utilities::MPI::mpi_type_id(n_locally_owned_cells.data()),
+ MPI_SUM,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 3) determine global number of "active" cells on each level
for (unsigned int l = 0; l < this->n_global_levels(); ++l)
n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
- ierr =
- MPI_Bcast(n_cells_level.data(),
- this->n_global_levels(),
- Utilities::MPI::internal::mpi_type_id(n_cells_level.data()),
- this->n_subdomains - 1,
- this->mpi_communicator);
+ ierr = MPI_Bcast(n_cells_level.data(),
+ this->n_global_levels(),
+ Utilities::MPI::mpi_type_id(n_cells_level.data()),
+ this->n_subdomains - 1,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 4) give global indices to locally-owned cells on level and mark
}
int ierr = MPI_Bcast(&n_rows,
1,
- Utilities::MPI::internal::mpi_type_id(&n_rows),
+ Utilities::MPI::mpi_type_id(&n_rows),
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
ierr = MPI_Bcast(&n_columns,
1,
- Utilities::MPI::internal::mpi_type_id(&n_columns),
+ Utilities::MPI::mpi_type_id(&n_columns),
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
const int ierr =
MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
ghost_targets_data[i][2],
- Utilities::MPI::internal::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id(buffer.data()),
ghost_targets_data[i][0],
communication_channel + 1,
comm,
const int ierr =
MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
import_targets_data[i][2],
- Utilities::MPI::internal::mpi_type_id(data_this.data()),
+ Utilities::MPI::mpi_type_id(data_this.data()),
import_targets_data[i][0],
communication_channel + 1,
comm,
const int ierr =
MPI_Isend(buffer.data() + ghost_targets_data[i][1],
ghost_targets_data[i][2],
- Utilities::MPI::internal::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id(buffer.data()),
ghost_targets_data[i][0],
communication_channel + 0,
comm,
for (unsigned int i = 0; i < import_targets_data.size(); ++i)
{
- const int ierr = MPI_Irecv(
- temporary_storage.data() + import_targets_data[i][1],
- import_targets_data[i][2],
- Utilities::MPI::internal::mpi_type_id(temporary_storage.data()),
- import_targets_data[i][0],
- communication_channel + 0,
- comm,
- requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
- ghost_targets_data.size() + i);
+ const int ierr =
+ MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
+ import_targets_data[i][2],
+ Utilities::MPI::mpi_type_id(temporary_storage.data()),
+ import_targets_data[i][0],
+ communication_channel + 0,
+ comm,
+ requests.data() + sm_ghost_ranks.size() +
+ sm_import_ranks.size() + ghost_targets_data.size() +
+ i);
AssertThrowMPI(ierr);
}
#endif
if (myid == i)
MPI_Send(&renumbering[0],
renumbering.size(),
- Utilities::MPI::internal::mpi_type_id(
- &complete_renumbering[0]),
+ Utilities::MPI::mpi_type_id(&complete_renumbering[0]),
0,
i,
MPI_COMM_WORLD);
else if (myid == 0)
MPI_Recv(&complete_renumbering[offset],
dofs_per_proc[i].n_elements(),
- Utilities::MPI::internal::mpi_type_id(
- &complete_renumbering[0]),
+ Utilities::MPI::mpi_type_id(&complete_renumbering[0]),
i,
i,
MPI_COMM_WORLD,
if (myid == i)
MPI_Send(&renumbering[0],
renumbering.size(),
- Utilities::MPI::internal::mpi_type_id(
- &complete_renumbering[0]),
+ Utilities::MPI::mpi_type_id(&complete_renumbering[0]),
0,
i,
MPI_COMM_WORLD);
else if (myid == 0)
MPI_Recv(&complete_renumbering[offset],
locally_owned_dofs_per_processor[i].n_elements(),
- Utilities::MPI::internal::mpi_type_id(
- &complete_renumbering[0]),
+ Utilities::MPI::mpi_type_id(&complete_renumbering[0]),
i,
i,
MPI_COMM_WORLD,