* template argument to the corresponding
* `MPI_Datatype` to be used for MPI communication.
*
- * As an example, the value of `mpi_type_id<int>` is `MPI_INT`. A
+ * As an example, the value of `mpi_type_id_for_type<int>` is `MPI_INT`. A
* common way to use this variable is when sending an object `obj`
* via MPI functions to another process, and using
- * `mpi_type_id<decltype(obj)>` to infer the correct MPI type to
+ * `mpi_type_id_for_type<decltype(obj)>` to infer the correct MPI type to
* use for the communication.
*/
template <typename T>
- const MPI_Datatype mpi_type_id = internal::MPIDataTypes::mpi_type_id(
- static_cast<std::remove_cv_t<
- std::remove_reference_t<std::remove_all_extents_t<T>>> *>(nullptr));
+ const MPI_Datatype
+ mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
+ static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
#endif
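// For illustration, a minimal usage sketch of this variable template,
// following the documentation above; the object `obj` and the
// communicator `comm` are hypothetical:
//
//   double obj = 42.0;
//   const int ierr = MPI_Send(&obj,
//                             1,
//                             mpi_type_id_for_type<decltype(obj)>,
//                             /*destination=*/1,
//                             /*tag=*/0,
//                             comm);
//   AssertThrowMPI(ierr);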
#ifndef DOXYGEN
const int ierr = MPI_Bcast(buffer + total_sent_count,
current_count,
- mpi_type_id<decltype(*buffer)>,
+ mpi_type_id_for_type<decltype(*buffer)>,
root,
comm);
AssertThrowMPI(ierr);
// Broadcast the size of the buffer from the root process
int ierr = MPI_Bcast(&buffer_size,
1,
- mpi_type_id<decltype(buffer_size)>,
+ mpi_type_id_for_type<decltype(buffer_size)>,
root_process,
comm);
AssertThrowMPI(ierr);
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size()),
- mpi_type_id<decltype(*values.data())>,
+ mpi_type_id_for_type<decltype(*values.data())>,
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
MPI_IN_PLACE,
static_cast<void *>(output.data()),
static_cast<int>(values.size() * 2),
- mpi_type_id<T>,
+ mpi_type_id_for_type<T>,
mpi_op,
mpi_communicator);
AssertThrowMPI(ierr);
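// Note: in the second reduction above, each element is presumably a
// std::complex<T>, so the call passes twice the element count together
// with the underlying real type T instead of a complex MPI datatype.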
AssertIndexRange(recv_ranks.size(), recv_ptr.size());
for (types::global_dof_index i = 0; i < recv_ranks.size(); ++i)
{
- const int ierr = MPI_Irecv(buffers.data() + recv_ptr[i],
- recv_ptr[i + 1] - recv_ptr[i],
- Utilities::MPI::mpi_type_id<Number>,
- recv_ranks[i],
- tag,
- communicator,
- &requests[i + send_ranks.size()]);
+ const int ierr =
+ MPI_Irecv(buffers.data() + recv_ptr[i],
+ recv_ptr[i + 1] - recv_ptr[i],
+ Utilities::MPI::mpi_type_id_for_type<Number>,
+ recv_ranks[i],
+ tag,
+ communicator,
+ &requests[i + send_ranks.size()]);
AssertThrowMPI(ierr);
}
(send_ptr[i] == buffers.size() &&
send_ptr[i + 1] == send_ptr[i]),
ExcMessage("The input buffer doesn't contain enough entries"));
- const int ierr = MPI_Isend(buffers.data() + send_ptr[i],
- send_ptr[i + 1] - send_ptr[i],
- Utilities::MPI::mpi_type_id<Number>,
- send_ranks[i],
- tag,
- communicator,
- &requests[i]);
+ const int ierr =
+ MPI_Isend(buffers.data() + send_ptr[i],
+ send_ptr[i + 1] - send_ptr[i],
+ Utilities::MPI::mpi_type_id_for_type<Number>,
+ send_ranks[i],
+ tag,
+ communicator,
+ &requests[i]);
AssertThrowMPI(ierr);
}
#endif
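// For context: the two loops above post nonblocking receives and sends
// into a single `requests` array; the exchange must be completed before
// the buffers are reused, e.g. via
//
//   const int ierr = MPI_Waitall(static_cast<int>(requests.size()),
//                                requests.data(),
//                                MPI_STATUSES_IGNORE);
//   AssertThrowMPI(ierr);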
const auto ierr_1 = MPI_Isend(
buffer.data(),
buffer.size(),
- Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*buffer.data())>,
i.first,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
int message_length;
const int ierr_2 = MPI_Get_count(
&status,
- Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*buffer.data())>,
&message_length);
AssertThrowMPI(ierr_2);
const int ierr_3 = MPI_Recv(
buffer.data(),
buffer.size(),
- Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*buffer.data())>,
status.MPI_SOURCE,
Utilities::MPI::internal::Tags::fine_dof_handler_view_reinit,
communicator,
{
#ifdef DEAL_II_WITH_MPI
// Provide definitions of template variables for all valid instantiations.
- template const MPI_Datatype mpi_type_id<bool>;
- template const MPI_Datatype mpi_type_id<char>;
- template const MPI_Datatype mpi_type_id<signed char>;
- template const MPI_Datatype mpi_type_id<short>;
- template const MPI_Datatype mpi_type_id<int>;
- template const MPI_Datatype mpi_type_id<long int>;
- template const MPI_Datatype mpi_type_id<unsigned char>;
- template const MPI_Datatype mpi_type_id<unsigned short>;
- template const MPI_Datatype mpi_type_id<unsigned long int>;
- template const MPI_Datatype mpi_type_id<unsigned long long int>;
- template const MPI_Datatype mpi_type_id<float>;
- template const MPI_Datatype mpi_type_id<double>;
- template const MPI_Datatype mpi_type_id<long double>;
- template const MPI_Datatype mpi_type_id<std::complex<float>>;
- template const MPI_Datatype mpi_type_id<std::complex<double>>;
+ template const MPI_Datatype mpi_type_id_for_type<bool>;
+ template const MPI_Datatype mpi_type_id_for_type<char>;
+ template const MPI_Datatype mpi_type_id_for_type<signed char>;
+ template const MPI_Datatype mpi_type_id_for_type<short>;
+ template const MPI_Datatype mpi_type_id_for_type<int>;
+ template const MPI_Datatype mpi_type_id_for_type<long int>;
+ template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
+ template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
+ template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
+ template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
+ template const MPI_Datatype mpi_type_id_for_type<float>;
+ template const MPI_Datatype mpi_type_id_for_type<double>;
+ template const MPI_Datatype mpi_type_id_for_type<long double>;
+ template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
+ template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
#endif
MPI_Exscan(&local_size,
&prefix_sum,
1,
- Utilities::MPI::mpi_type_id<decltype(prefix_sum)>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(prefix_sum)>,
MPI_SUM,
communicator);
AssertThrowMPI(ierr);
const int ierr =
MPI_Bcast(value,
count,
- Utilities::MPI::mpi_type_id<decltype(*value)>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*value)>,
0 /*from root*/,
mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
const int ierr = MPI_Exscan(&process_has_active_locally_owned_cells,
&offset,
1,
- Utilities::MPI::mpi_type_id<decltype(
+ Utilities::MPI::mpi_type_id_for_type<decltype(
process_has_active_locally_owned_cells)>,
MPI_SUM,
comm);
// determine the partial sum of the weights of all processes with a lower
// rank (exclusive prefix sum)
uint64_t process_local_weight_offset = 0;
- int ierr =
- MPI_Exscan(&process_local_weight,
- &process_local_weight_offset,
- 1,
- Utilities::MPI::mpi_type_id<decltype(process_local_weight)>,
- MPI_SUM,
- tria->get_communicator());
+ int ierr = MPI_Exscan(
+ &process_local_weight,
+ &process_local_weight_offset,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<decltype(process_local_weight)>,
+ MPI_SUM,
+ tria->get_communicator());
AssertThrowMPI(ierr);
// total weight of all processes; the sum is only complete on the last
// rank, from which it is broadcast below
uint64_t total_weight = process_local_weight_offset + process_local_weight;
- ierr = MPI_Bcast(&total_weight,
- 1,
- Utilities::MPI::mpi_type_id<decltype(total_weight)>,
- n_subdomains - 1,
- mpi_communicator);
+ ierr =
+ MPI_Bcast(&total_weight,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<decltype(total_weight)>,
+ n_subdomains - 1,
+ mpi_communicator);
AssertThrowMPI(ierr);
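// A worked example of the scan-and-broadcast above: with local weights
// {3, 5, 2} on ranks {0, 1, 2}, MPI_Exscan computes the exclusive prefix
// sums {-, 3, 8} (the result on rank 0 is left undefined by MPI, hence
// the zero-initialization of the offset above); the last rank then knows
// the total 8 + 2 = 10, which the MPI_Bcast rooted at rank
// n_subdomains - 1 distributes to all ranks.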
// set up the partition
// 2) determine the offset of each process
types::global_cell_index cell_index = 0;
- const int ierr =
- MPI_Exscan(&n_locally_owned_cells,
- &cell_index,
- 1,
- Utilities::MPI::mpi_type_id<decltype(n_locally_owned_cells)>,
- MPI_SUM,
- this->mpi_communicator);
+ const int ierr = MPI_Exscan(
+ &n_locally_owned_cells,
+ &cell_index,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<decltype(n_locally_owned_cells)>,
+ MPI_SUM,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 3) give global indices to locally-owned cells and mark all other cells as
std::vector<types::global_cell_index> cell_index(
this->n_global_levels(), 0);
- int ierr = MPI_Exscan(
- n_locally_owned_cells.data(),
- cell_index.data(),
- this->n_global_levels(),
- Utilities::MPI::mpi_type_id<decltype(*n_locally_owned_cells.data())>,
- MPI_SUM,
- this->mpi_communicator);
+ int ierr = MPI_Exscan(n_locally_owned_cells.data(),
+ cell_index.data(),
+ this->n_global_levels(),
+ Utilities::MPI::mpi_type_id_for_type<decltype(
+ *n_locally_owned_cells.data())>,
+ MPI_SUM,
+ this->mpi_communicator);
AssertThrowMPI(ierr);
// 3) determine global number of "active" cells on each level
ierr = MPI_Bcast(
n_cells_level.data(),
this->n_global_levels(),
- Utilities::MPI::mpi_type_id<decltype(*n_cells_level.data())>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*n_cells_level.data())>,
this->n_subdomains - 1,
this->mpi_communicator);
AssertThrowMPI(ierr);
}
int ierr = MPI_Bcast(&n_rows,
1,
- Utilities::MPI::mpi_type_id<decltype(n_rows)>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(n_rows)>,
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
ierr = MPI_Bcast(&n_columns,
1,
- Utilities::MPI::mpi_type_id<decltype(n_columns)>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(n_columns)>,
0 /*from root*/,
process_grid->mpi_communicator);
AssertThrowMPI(ierr);
n_ghost_indices_in_larger_set_by_remote_rank[i] -
ghost_targets_data[i][2];
- const int ierr =
- MPI_Irecv(buffer.data() + ghost_targets_data[i][1] + offset,
- ghost_targets_data[i][2],
- Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
- ghost_targets_data[i][0],
- communication_channel + 1,
- comm,
- requests.data() + sm_import_ranks.size() +
- sm_ghost_ranks.size() + i);
+ const int ierr = MPI_Irecv(
+ buffer.data() + ghost_targets_data[i][1] + offset,
+ ghost_targets_data[i][2],
+ Utilities::MPI::mpi_type_id_for_type<decltype(*buffer.data())>,
+ ghost_targets_data[i][0],
+ communication_channel + 1,
+ comm,
+ requests.data() + sm_import_ranks.size() + sm_ghost_ranks.size() +
+ i);
AssertThrowMPI(ierr);
}
const int ierr = MPI_Isend(
temporary_storage.data() + import_targets_data[i][1],
import_targets_data[i][2],
- Utilities::MPI::mpi_type_id<decltype(*data_this.data())>,
+ Utilities::MPI::mpi_type_id_for_type<decltype(*data_this.data())>,
import_targets_data[i][0],
communication_channel + 1,
comm,
}
}
- const int ierr =
- MPI_Isend(buffer.data() + ghost_targets_data[i][1],
- ghost_targets_data[i][2],
- Utilities::MPI::mpi_type_id<decltype(*buffer.data())>,
- ghost_targets_data[i][0],
- communication_channel + 0,
- comm,
- requests.data() + sm_ghost_ranks.size() +
- sm_import_ranks.size() + i);
+ const int ierr = MPI_Isend(
+ buffer.data() + ghost_targets_data[i][1],
+ ghost_targets_data[i][2],
+ Utilities::MPI::mpi_type_id_for_type<decltype(*buffer.data())>,
+ ghost_targets_data[i][0],
+ communication_channel + 0,
+ comm,
+ requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
+ i);
AssertThrowMPI(ierr);
}
for (unsigned int i = 0; i < import_targets_data.size(); ++i)
{
- const int ierr = MPI_Irecv(
- temporary_storage.data() + import_targets_data[i][1],
- import_targets_data[i][2],
- Utilities::MPI::mpi_type_id<decltype(*temporary_storage.data())>,
- import_targets_data[i][0],
- communication_channel + 0,
- comm,
- requests.data() + sm_ghost_ranks.size() + sm_import_ranks.size() +
- ghost_targets_data.size() + i);
+ const int ierr =
+ MPI_Irecv(temporary_storage.data() + import_targets_data[i][1],
+ import_targets_data[i][2],
+ Utilities::MPI::mpi_type_id_for_type<decltype(
+ *temporary_storage.data())>,
+ import_targets_data[i][0],
+ communication_channel + 0,
+ comm,
+ requests.data() + sm_ghost_ranks.size() +
+ sm_import_ranks.size() + ghost_targets_data.size() +
+ i);
AssertThrowMPI(ierr);
}
#endif
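// Note: the offsets into `requests.data()` used above correspond to one
// contiguous request array laid out as [shared-memory ghost ranks |
// shared-memory import ranks | ghost targets | import targets], with each
// loop filling its own slice.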