/* --------------------------- inline functions ------------------------- */
- /**
- * Given a pointer to an object of class T, return the matching
- * `MPI_Datatype` to be used for MPI communication.
- *
- * As an example, passing an `int*` to this function returns `MPI_INT`.
- *
- * @note In reality, these functions are not template functions templated
- * on the parameter T, but free standing inline function overloads. This
- * templated version only exists so that it shows up in the documentation.
- * The `=delete` statement at the end of the declaration ensures that the
- * compiler will never choose this general template and instead look
- * for one of the overloads.
- */
- template <typename T>
- inline MPI_Datatype
- mpi_type_id(const T *) = delete;
+ namespace internal
+ {
+ namespace MPIDataTypes
+ {
+ /**
+ * Given a pointer to an object of class T, return the matching
+ * `MPI_Datatype` to be used for MPI communication.
+ *
+ * As an example, passing an `int*` to this function returns `MPI_INT`.
+ *
+ * @note In reality, these functions are not a single function template
+ * parameterized on T, but a set of free-standing inline function
+ * overloads. This templated version only exists so that it shows up in
+ * the documentation. The `=delete` at the end of the declaration ensures
+ * that the compiler never chooses this general template but instead
+ * picks one of the overloads.
+ */
+ template <typename T>
+ inline MPI_Datatype
+ mpi_type_id(const T *) = delete;
#ifndef DOXYGEN
# ifdef DEAL_II_WITH_MPI
- inline MPI_Datatype
- mpi_type_id(const bool *)
- {
- return MPI_CXX_BOOL;
- }
+ inline MPI_Datatype
+ mpi_type_id(const bool *)
+ {
+ return MPI_CXX_BOOL;
+ }
- inline MPI_Datatype
- mpi_type_id(const char *)
- {
- return MPI_CHAR;
- }
+ inline MPI_Datatype
+ mpi_type_id(const char *)
+ {
+ return MPI_CHAR;
+ }
- inline MPI_Datatype
- mpi_type_id(const signed char *)
- {
- return MPI_SIGNED_CHAR;
- }
+ inline MPI_Datatype
+ mpi_type_id(const signed char *)
+ {
+ return MPI_SIGNED_CHAR;
+ }
- inline MPI_Datatype
- mpi_type_id(const short *)
- {
- return MPI_SHORT;
- }
+ inline MPI_Datatype
+ mpi_type_id(const short *)
+ {
+ return MPI_SHORT;
+ }
- inline MPI_Datatype
- mpi_type_id(const int *)
- {
- return MPI_INT;
- }
+ inline MPI_Datatype
+ mpi_type_id(const int *)
+ {
+ return MPI_INT;
+ }
- inline MPI_Datatype
- mpi_type_id(const long int *)
- {
- return MPI_LONG;
- }
+ inline MPI_Datatype
+ mpi_type_id(const long int *)
+ {
+ return MPI_LONG;
+ }
- inline MPI_Datatype
- mpi_type_id(const unsigned char *)
- {
- return MPI_UNSIGNED_CHAR;
- }
+ inline MPI_Datatype
+ mpi_type_id(const unsigned char *)
+ {
+ return MPI_UNSIGNED_CHAR;
+ }
- inline MPI_Datatype
- mpi_type_id(const unsigned short *)
- {
- return MPI_UNSIGNED_SHORT;
- }
+ inline MPI_Datatype
+ mpi_type_id(const unsigned short *)
+ {
+ return MPI_UNSIGNED_SHORT;
+ }
- inline MPI_Datatype
- mpi_type_id(const unsigned int *)
- {
- return MPI_UNSIGNED;
- }
+ inline MPI_Datatype
+ mpi_type_id(const unsigned int *)
+ {
+ return MPI_UNSIGNED;
+ }
- inline MPI_Datatype
- mpi_type_id(const unsigned long int *)
- {
- return MPI_UNSIGNED_LONG;
- }
+ inline MPI_Datatype
+ mpi_type_id(const unsigned long int *)
+ {
+ return MPI_UNSIGNED_LONG;
+ }
- inline MPI_Datatype
- mpi_type_id(const unsigned long long int *)
- {
- return MPI_UNSIGNED_LONG_LONG;
- }
+ inline MPI_Datatype
+ mpi_type_id(const unsigned long long int *)
+ {
+ return MPI_UNSIGNED_LONG_LONG;
+ }
- inline MPI_Datatype
- mpi_type_id(const float *)
- {
- return MPI_FLOAT;
- }
+ inline MPI_Datatype
+ mpi_type_id(const float *)
+ {
+ return MPI_FLOAT;
+ }
- inline MPI_Datatype
- mpi_type_id(const double *)
- {
- return MPI_DOUBLE;
- }
+ inline MPI_Datatype
+ mpi_type_id(const double *)
+ {
+ return MPI_DOUBLE;
+ }
- inline MPI_Datatype
- mpi_type_id(const long double *)
- {
- return MPI_LONG_DOUBLE;
- }
+ inline MPI_Datatype
+ mpi_type_id(const long double *)
+ {
+ return MPI_LONG_DOUBLE;
+ }
- inline MPI_Datatype
- mpi_type_id(const std::complex<float> *)
- {
- return MPI_COMPLEX;
- }
+ inline MPI_Datatype
+ mpi_type_id(const std::complex<float> *)
+ {
+ return MPI_COMPLEX;
+ }
- inline MPI_Datatype
- mpi_type_id(const std::complex<double> *)
- {
- return MPI_DOUBLE_COMPLEX;
- }
+ inline MPI_Datatype
+ mpi_type_id(const std::complex<double> *)
+ {
+ return MPI_DOUBLE_COMPLEX;
+ }
# endif
+#endif
+ } // namespace MPIDataTypes
+ } // namespace internal
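
To make the technique described in the documentation comment above concrete, here is a minimal, self-contained sketch of the same pattern (an editorial illustration, not part of this patch): a deleted catch-all function template plus concrete overloads, so that unsupported types are rejected at compile time. The names `type_of` and `S` are hypothetical.

    #include <mpi.h>

    // Deleted catch-all: never a valid call target, but it documents the
    // intended interface and turns calls with unsupported types into
    // compile-time errors.
    template <typename T>
    inline MPI_Datatype
    type_of(const T *) = delete;

    // Concrete overloads; a non-template function is preferred over the
    // template when both match equally well, so these are selected.
    inline MPI_Datatype
    type_of(const int *)
    {
      return MPI_INT;
    }

    inline MPI_Datatype
    type_of(const double *)
    {
      return MPI_DOUBLE;
    }

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      int    i = 0;
      double d = 0.;
      MPI_Datatype t_int    = type_of(&i); // MPI_INT
      MPI_Datatype t_double = type_of(&d); // MPI_DOUBLE
      (void)t_int;
      (void)t_double;

      // struct S {}; S s; type_of(&s);   // would not compile: only the
      //                                  // deleted template matches

      MPI_Finalize();
      return 0;
    }
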
+
+
+
+ /**
+ * A template variable that translates from the data type given as
+ * template argument to the corresponding
+ * `MPI_Datatype` to be used for MPI communication.
+ *
+ * As an example, the value of `mpi_type_id<int>` is `MPI_INT`. A
+ * common use of this variable is when sending an object `obj` to
+ * another process via MPI functions: `mpi_type_id<decltype(obj)>`
+ * then yields the correct MPI type to use for the communication.
+ */
+ template <typename T>
+ const MPI_Datatype mpi_type_id = internal::MPIDataTypes::mpi_type_id(
+ static_cast<std::remove_cv_t<
+ std::remove_reference_t<std::remove_all_extents_t<T>>> *>(nullptr));
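
To show the intended call pattern, here is a minimal usage sketch (an editorial illustration, not part of this patch). It assumes, as the call sites further down suggest, that the variable template lives in `dealii::Utilities::MPI` and is declared in `<deal.II/base/mpi.h>`; `broadcast_count`, `comm`, and `n_entries` are hypothetical names.

    #include <deal.II/base/mpi.h>

    #include <mpi.h>

    void broadcast_count(MPI_Comm comm)
    {
      unsigned int n_entries = 42;

      // decltype(n_entries) is 'unsigned int'; the remove_cv /
      // remove_reference / remove_all_extents chain in the variable
      // template's initializer strips qualifiers, so this evaluates to
      // MPI_UNSIGNED.
      const int ierr =
        MPI_Bcast(&n_entries,
                  1,
                  dealii::Utilities::MPI::mpi_type_id<decltype(n_entries)>,
                  /*root=*/0,
                  comm);
      (void)ierr;
    }
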
+#ifndef DOXYGEN
namespace internal
{
// declaration for an internal function that lives in mpi.templates.h
const int ierr = MPI_Bcast(buffer + total_sent_count,
current_count,
- mpi_type_id(buffer),
+ mpi_type_id<decltype(*buffer)>,
root,
comm);
AssertThrowMPI(ierr);
}
// Exchange the size of buffer
- int ierr = MPI_Bcast(
- &buffer_size, 1, mpi_type_id(&buffer_size), root_process, comm);
+ int ierr = MPI_Bcast(&buffer_size,
+ 1,
+ mpi_type_id<decltype(buffer_size)>,
+ root_process,
+ comm);
AssertThrowMPI(ierr);
// If not on the root process, correctly size the buffer to
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size()),
- mpi_type_id(values.data()),
+ mpi_type_id<decltype(*values.data())>,
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size() * 2),
- mpi_type_id(static_cast<T *>(nullptr)),
+ mpi_type_id<T>,
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
AssertIndexRange(recv_ranks.size(), recv_ptr.size());
for (types::global_dof_index i = 0; i < recv_ranks.size(); ++i)
{
- const int ierr =
- MPI_Irecv(buffers.data() + recv_ptr[i],
- recv_ptr[i + 1] - recv_ptr[i],
- Utilities::MPI::mpi_type_id(buffers.data()),
- recv_ranks[i],
- tag,
- communicator,
- &requests[i + send_ranks.size()]);
+ const int ierr = MPI_Irecv(buffers.data() + recv_ptr[i],
+ recv_ptr[i + 1] - recv_ptr[i],
+ Utilities::MPI::mpi_type_id<Number>,
+ recv_ranks[i],
+ tag,
+ communicator,
+ &requests[i + send_ranks.size()]);
AssertThrowMPI(ierr);
}
(send_ptr[i] == buffers.size() &&
send_ptr[i + 1] == send_ptr[i]),
ExcMessage("The input buffer doesn't contain enough entries"));
- const int ierr =
- MPI_Isend(buffers.data() + send_ptr[i],
- send_ptr[i + 1] - send_ptr[i],
- Utilities::MPI::mpi_type_id(buffers.data()),
- send_ranks[i],
- tag,
- communicator,
- &requests[i]);
+ const int ierr = MPI_Isend(buffers.data() + send_ptr[i],
+ send_ptr[i + 1] - send_ptr[i],
+ Utilities::MPI::mpi_type_id<Number>,
+ send_ranks[i],
+ tag,
+ communicator,
+ &requests[i]);
AssertThrowMPI(ierr);
}
#endif
const auto ierr_1 = MPI_Isend(
buffer.data(),
buffer.size(),
- Utilities::MPI::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
i.first,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
std::vector<types::global_dof_index> buffer;
int message_length;
- const int ierr_2 =
- MPI_Get_count(&status,
- Utilities::MPI::mpi_type_id(buffer.data()),
- &message_length);
+ const int ierr_2 = MPI_Get_count(
+ &status,
+ Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
+ &message_length);
AssertThrowMPI(ierr_2);
buffer.resize(message_length);
const int ierr_3 = MPI_Recv(
buffer.data(),
buffer.size(),
- Utilities::MPI::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
status.MPI_SOURCE,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
namespace MPI
{
+#ifdef DEAL_II_WITH_MPI
+ // Provide explicit instantiation definitions of the mpi_type_id variable
+ // template for all supported types.
+ template const MPI_Datatype mpi_type_id<bool>;
+ template const MPI_Datatype mpi_type_id<char>;
+ template const MPI_Datatype mpi_type_id<signed char>;
+ template const MPI_Datatype mpi_type_id<short>;
+ template const MPI_Datatype mpi_type_id<int>;
+ template const MPI_Datatype mpi_type_id<long int>;
+ template const MPI_Datatype mpi_type_id<unsigned char>;
+ template const MPI_Datatype mpi_type_id<unsigned short>;
+ template const MPI_Datatype mpi_type_id<unsigned int>;
+ template const MPI_Datatype mpi_type_id<unsigned long int>;
+ template const MPI_Datatype mpi_type_id<unsigned long long int>;
+ template const MPI_Datatype mpi_type_id<float>;
+ template const MPI_Datatype mpi_type_id<double>;
+ template const MPI_Datatype mpi_type_id<long double>;
+ template const MPI_Datatype mpi_type_id<std::complex<float>>;
+ template const MPI_Datatype mpi_type_id<std::complex<double>>;
+#endif
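
For readers unfamiliar with explicit instantiation definitions of variable templates, the following is a generic, self-contained sketch of the C++ mechanism the block above relies on (an editorial illustration, independent of deal.II; all names are hypothetical):

    #include <cstddef>
    #include <iostream>

    // Header-style part: a variable template defined together with its
    // initializer.
    template <typename T>
    const std::size_t byte_count = sizeof(T);

    // Source-style part: explicit instantiation definitions, analogous to
    // the lines above. They force the named specializations to be
    // instantiated in this translation unit rather than implicitly at each
    // point of use.
    template const std::size_t byte_count<int>;
    template const std::size_t byte_count<double>;

    int main()
    {
      std::cout << byte_count<int> << ' ' << byte_count<double> << '\n';
      return 0;
    }
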
+
+
MinMaxAvg
min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
{
types::global_dof_index prefix_sum = 0;
#ifdef DEAL_II_WITH_MPI
- const int ierr = MPI_Exscan(&local_size,
- &prefix_sum,
- 1,
- Utilities::MPI::mpi_type_id(&prefix_sum),
- MPI_SUM,
- communicator);
+ const int ierr =
+ MPI_Exscan(&local_size,
+ &prefix_sum,
+ 1,
+ Utilities::MPI::mpi_type_id<decltype(prefix_sum)>,
+ MPI_SUM,
+ communicator);
AssertThrowMPI(ierr);
#endif
Assert(count > 0, ExcInternalError());
if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
{
- const int ierr = MPI_Bcast(value,
- count,
- Utilities::MPI::mpi_type_id(value),
- 0 /*from root*/,
- mpi_communicator_inactive_with_root);
+ const int ierr =
+ MPI_Bcast(value,
+ count,
+ Utilities::MPI::mpi_type_id<decltype(*value)>,
+ 0 /*from root*/,
+ mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
}
}
const int ierr = MPI_Exscan(&process_has_active_locally_owned_cells,
&offset,
1,
- Utilities::MPI::mpi_type_id(
- &process_has_active_locally_owned_cells),
+ Utilities::MPI::mpi_type_id<decltype(
+ process_has_active_locally_owned_cells)>,
MPI_SUM,
comm);
AssertThrowMPI(ierr);
// determine partial sum of weights of this process
uint64_t process_local_weight_offset = 0;
- int ierr = MPI_Exscan(&process_local_weight,
- &process_local_weight_offset,
- 1,
- Utilities::MPI::mpi_type_id(&process_local_weight),
- MPI_SUM,
- tria->get_communicator());
+ int ierr =
+ MPI_Exscan(&process_local_weight,
+ &process_local_weight_offset,
+ 1,
+ Utilities::MPI::mpi_type_id<decltype(process_local_weight)>,
+ MPI_SUM,
+ tria->get_communicator());
AssertThrowMPI(ierr);
// total weight of all processes
ierr = MPI_Bcast(&total_weight,
1,
- Utilities::MPI::mpi_type_id(&total_weight),
+ Utilities::MPI::mpi_type_id<decltype(total_weight)>,
n_subdomains - 1,
mpi_communicator);
AssertThrowMPI(ierr);
MPI_Exscan(&n_locally_owned_cells,
&cell_index,
1,
- Utilities::MPI::mpi_type_id(&n_locally_owned_cells),
+ Utilities::MPI::mpi_type_id<decltype(n_locally_owned_cells)>,
MPI_SUM,
this->mpi_communicator);
AssertThrowMPI(ierr);
std::vector<types::global_cell_index> cell_index(
this->n_global_levels(), 0);
- int ierr =
- MPI_Exscan(n_locally_owned_cells.data(),
- cell_index.data(),
- this->n_global_levels(),
- Utilities::MPI::mpi_type_id(n_locally_owned_cells.data()),
- MPI_SUM,
- this->mpi_communicator);
+ int ierr = MPI_Exscan(
+ n_locally_owned_cells.data(),
+ cell_index.data(),
+ this->n_global_levels(),
+ Utilities::MPI::mpi_type_id<decltype(*n_locally_owned_cells.data())>,
+ MPI_SUM,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 3) determine global number of "active" cells on each level
for (unsigned int l = 0; l < this->n_global_levels(); ++l)
n_cells_level[l] = n_locally_owned_cells[l] + cell_index[l];
- ierr = MPI_Bcast(n_cells_level.data(),
- this->n_global_levels(),
- Utilities::MPI::mpi_type_id(n_cells_level.data()),
- this->n_subdomains - 1,
- this->mpi_communicator);
+ ierr = MPI_Bcast(
+ n_cells_level.data(),
+ this->n_global_levels(),
+ Utilities::MPI::mpi_type_id<decltype(*n_cells_level.data())>,
+ this->n_subdomains - 1,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 4) give global indices to locally-owned cells on level and mark
}
int ierr = MPI_Bcast(&n_rows,
1,
- Utilities::MPI::mpi_type_id(&n_rows),
+ Utilities::MPI::mpi_type_id<decltype(n_rows)>,
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
ierr = MPI_Bcast(&n_columns,
1,
- Utilities::MPI::mpi_type_id(&n_columns),
+ Utilities::MPI::mpi_type_id<decltype(n_columns)>,
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
const int ierr =
MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
ghost_targets_data[i][2],
- Utilities::MPI::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
ghost_targets_data[i][0],
communication_channel + 1,
comm,
data_this[import_indices_data.second[j].first + l];
// send data away
- const int ierr =
- MPI_Isend(temporary_storage.data() + import_targets_data[i][1],
- import_targets_data[i][2],
- Utilities::MPI::mpi_type_id(data_this.data()),
- import_targets_data[i][0],
- communication_channel + 1,
- comm,
- requests.data() + sm_import_ranks.size() +
- sm_ghost_ranks.size() + ghost_targets_data.size() +
- i);
+ const int ierr = MPI_Isend(
+ temporary_storage.data() + import_targets_data[i][1],
+ import_targets_data[i][2],
+ Utilities::MPI::mpi_type_id<decltype(*data_this.data())>,
+ import_targets_data[i][0],
+ communication_channel + 1,
+ comm,
+ requests.data() + sm_import_ranks.size() + sm_ghost_ranks.size() +
+ ghost_targets_data.size() + i);
AssertThrowMPI(ierr);
}
#endif
const int ierr =
MPI_Isend(buffer.data() + ghost_targets_data[i][1],
ghost_targets_data[i][2],
- Utilities::MPI::mpi_type_id(buffer.data()),
+ Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
ghost_targets_data[i][0],
communication_channel + 0,
comm,
for (unsigned int i = 0; i < import_targets_data.size(); ++i)
{
- const int ierr =
- MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
- import_targets_data[i][2],
- Utilities::MPI::mpi_type_id(temporary_storage.data()),
- import_targets_data[i][0],
- communication_channel + 0,
- comm,
- requests.data() + sm_ghost_ranks.size() +
- sm_import_ranks.size() + ghost_targets_data.size() +
- i);
+ const int ierr = MPI_Irecv(
+ temporary_storage.data() + import_targets_data[i][1],
+ import_targets_data[i][2],
+ Utilities::MPI::mpi_type_id<decltype(*temporary_storage.data())>,
+ import_targets_data[i][0],
+ communication_channel + 0,
+ comm,
+ requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
+ ghost_targets_data.size() + i);
AssertThrowMPI(ierr);
}
#endif