/**
* An identifier that denotes the MPI type associated with
* types::global_vertex_index.
+ *
+ * This preprocessor variable is deprecated. Use the variable
+ * `Utilities::MPI::mpi_type_id_for_type<types::global_vertex_index>`
+ * instead.
*/
#define DEAL_II_VERTEX_INDEX_MPI_TYPE MPI_UINT64_T
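// Illustrative sketch of the replacement pattern (not part of this patch;
// the buffer, rank, tag, and communicator names are placeholders): where a
// call site previously passed DEAL_II_VERTEX_INDEX_MPI_TYPE as the MPI
// datatype, it now passes the variable template for the corresponding C++
// type.
std::vector<types::global_vertex_index> send_buffer(n_local_vertices);
const int ierr =
  MPI_Send(send_buffer.data(),
           send_buffer.size(),
           Utilities::MPI::mpi_type_id_for_type<types::global_vertex_index>,
           destination_rank,
           mpi_tag,
           mpi_communicator);
AssertThrowMPI(ierr);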
/**
* An identifier that denotes the MPI type associated with
* types::global_dof_index.
+ *
+ * This preprocessor variable is deprecated. Use the variable
+ * `Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>`
+ * instead.
*/
#ifdef DEAL_II_WITH_64BIT_INDICES
# define DEAL_II_DOF_INDEX_MPI_TYPE MPI_UINT64_T
/**
* An identifier that denotes the MPI type associated with
* types::particle_index.
+ *
+ * This preprocessor variable is deprecated. Use the variable
+ * `Utilities::MPI::mpi_type_id_for_type<types::particle_index>`
+ * instead.
*/
# define DEAL_II_PARTICLE_INDEX_MPI_TYPE MPI_UINT64_T
# endif
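// Illustrative sketch (not part of this patch; dof_handler and
// mpi_communicator are placeholders): the variable template resolves to the
// MPI datatype that matches the actual C++ type, so the same call is correct
// with and without DEAL_II_WITH_64BIT_INDICES, whereas the macros above are
// fixed to MPI_UINT64_T in this branch of the #ifdef.
types::global_dof_index n_locally_owned_dofs =
  dof_handler.locally_owned_dofs().n_elements();
types::global_dof_index n_global_dofs = 0;
const int ierr =
  MPI_Allreduce(&n_locally_owned_dofs,
                &n_global_dofs,
                1,
                Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
                MPI_SUM,
                mpi_communicator);
AssertThrowMPI(ierr);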
for (const auto &rank_pair : buffers)
{
request.push_back(MPI_Request());
- const int ierr = MPI_Isend(rank_pair.second.data(),
- rank_pair.second.size() * 2,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- rank_pair.first,
- mpi_tag,
- comm,
- &request.back());
+ const int ierr =
+ MPI_Isend(rank_pair.second.data(),
+ rank_pair.second.size() * 2,
+ Utilities::MPI::mpi_type_id_for_type<
+ types::global_dof_index>,
+ rank_pair.first,
+ mpi_tag,
+ comm,
+ &request.back());
AssertThrowMPI(ierr);
}
// retrieve size of incoming message
int number_amount;
ierr = MPI_Get_count(&status,
- DEAL_II_DOF_INDEX_MPI_TYPE,
+ Utilities::MPI::mpi_type_id_for_type<
+ types::global_dof_index>,
&number_amount);
AssertThrowMPI(ierr);
buffer(number_amount / 2);
ierr = MPI_Recv(buffer.data(),
number_amount,
- DEAL_II_DOF_INDEX_MPI_TYPE,
+ Utilities::MPI::mpi_type_id_for_type<
+ types::global_dof_index>,
status.MPI_SOURCE,
status.MPI_TAG,
comm,
types::global_dof_index my_shift = 0;
{
- const int ierr = MPI_Exscan(&my_size,
- &my_shift,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- communicator);
+ const int ierr = MPI_Exscan(
+ &my_size,
+ &my_shift,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ communicator);
AssertThrowMPI(ierr);
}
rcounts[Utilities::MPI::this_mpi_process(
tr->get_mpi_communicator())]),
ExcInternalError());
- ierr = MPI_Allgatherv(new_numbers_copy.data(),
- new_numbers_copy.size(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- gathered_new_numbers.data(),
- rcounts.data(),
- displacements.data(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- tr->get_mpi_communicator());
+ ierr = MPI_Allgatherv(
+ new_numbers_copy.data(),
+ new_numbers_copy.size(),
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ gathered_new_numbers.data(),
+ rcounts.data(),
+ displacements.data(),
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ tr->get_mpi_communicator());
AssertThrowMPI(ierr);
}
local_dof_count[c] = component_to_dof_map[c].size();
std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
- const int ierr = MPI_Exscan(local_dof_count.data(),
- prefix_dof_count.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Exscan(
+ local_dof_count.data(),
+ prefix_dof_count.data(),
+ n_buckets,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
std::vector<types::global_dof_index> global_dof_count(n_buckets);
local_dof_count[c] = block_to_dof_map[c].size();
std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
- const int ierr = MPI_Exscan(local_dof_count.data(),
- prefix_dof_count.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Exscan(
+ local_dof_count.data(),
+ prefix_dof_count.data(),
+ n_buckets,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
std::vector<types::global_dof_index> global_dof_count(n_buckets);
#ifdef DEAL_II_WITH_MPI
types::global_dof_index locally_owned_size =
dof_handler.locally_owned_dofs().n_elements();
- const int ierr = MPI_Exscan(&locally_owned_size,
- &my_starting_index,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Exscan(
+ &locally_owned_size,
+ &my_starting_index,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
#endif
}
std::vector<types::global_dof_index> local_dof_count =
dofs_per_component;
- const int ierr = MPI_Allreduce(local_dof_count.data(),
- dofs_per_component.data(),
- n_target_components,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Allreduce(
+ local_dof_count.data(),
+ dofs_per_component.data(),
+ n_target_components,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
{
std::vector<types::global_dof_index> local_dof_count =
dofs_per_block;
- const int ierr = MPI_Allreduce(local_dof_count.data(),
- dofs_per_block.data(),
- n_target_blocks,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Allreduce(
+ local_dof_count.data(),
+ dofs_per_block.data(),
+ n_target_blocks,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
// Make indices global by getting the number of vertices owned by each
// processor and shifting the indices accordingly
types::global_vertex_index shift = 0;
- int ierr = MPI_Exscan(&next_index,
- &shift,
- 1,
- DEAL_II_VERTEX_INDEX_MPI_TYPE,
- MPI_SUM,
- triangulation.get_mpi_communicator());
+ int ierr = MPI_Exscan(
+ &next_index,
+ &shift,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<types::global_vertex_index>,
+ MPI_SUM,
+ triangulation.get_mpi_communicator());
AssertThrowMPI(ierr);
for (auto &global_index_it : local_to_global_vertex_index)
}
// Send the message
- ierr = MPI_Isend(vertices_send_buffers[i].data(),
- buffer_size,
- DEAL_II_VERTEX_INDEX_MPI_TYPE,
- destination,
- mpi_tag,
- triangulation.get_mpi_communicator(),
- &first_requests[i]);
+ ierr = MPI_Isend(
+ vertices_send_buffers[i].data(),
+ buffer_size,
+ Utilities::MPI::mpi_type_id_for_type<types::global_vertex_index>,
+ destination,
+ mpi_tag,
+ triangulation.get_mpi_communicator(),
+ &first_requests[i]);
AssertThrowMPI(ierr);
}
vertices_recv_buffers[i].resize(buffer_size);
// Receive the message
- ierr = MPI_Recv(vertices_recv_buffers[i].data(),
- buffer_size,
- DEAL_II_VERTEX_INDEX_MPI_TYPE,
- source,
- mpi_tag,
- triangulation.get_mpi_communicator(),
- MPI_STATUS_IGNORE);
+ ierr = MPI_Recv(
+ vertices_recv_buffers[i].data(),
+ buffer_size,
+ Utilities::MPI::mpi_type_id_for_type<types::global_vertex_index>,
+ source,
+ mpi_tag,
+ triangulation.get_mpi_communicator(),
+ MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
}
unsigned int idx = 0;
for (const auto &sparsity_line : send_data)
{
- const int ierr = MPI_Isend(sparsity_line.second.data(),
- sparsity_line.second.size(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- sparsity_line.first,
- mpi_tag,
- mpi_comm,
- &requests[idx++]);
+ const int ierr = MPI_Isend(
+ sparsity_line.second.data(),
+ sparsity_line.second.size(),
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ sparsity_line.first,
+ mpi_tag,
+ mpi_comm,
+ &requests[idx++]);
AssertThrowMPI(ierr);
}
}
AssertThrowMPI(ierr);
int len;
- ierr = MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
+ ierr = MPI_Get_count(
+ &status,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ &len);
AssertThrowMPI(ierr);
recv_buf.resize(len);
- ierr = MPI_Recv(recv_buf.data(),
- len,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- status.MPI_SOURCE,
- status.MPI_TAG,
- mpi_comm,
- &status);
+ ierr = MPI_Recv(
+ recv_buf.data(),
+ len,
+ Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
+ status.MPI_SOURCE,
+ status.MPI_TAG,
+ mpi_comm,
+ &status);
AssertThrowMPI(ierr);
std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator
// The local particle start index is the total number of particles
// generated on lower MPI ranks.
- const int ierr = MPI_Exscan(&n_particles_to_generate,
- &particle_index,
- 1,
- DEAL_II_PARTICLE_INDEX_MPI_TYPE,
- MPI_SUM,
- tria->get_mpi_communicator());
+ const int ierr = MPI_Exscan(
+ &n_particles_to_generate,
+ &particle_index,
+ 1,
+ Utilities::MPI::mpi_type_id_for_type<types::particle_index>,
+ MPI_SUM,
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
MPI_Scan(&particles_to_add_locally,
&local_start_index,
1,
- DEAL_II_PARTICLE_INDEX_MPI_TYPE,
+ Utilities::MPI::mpi_type_id_for_type<types::particle_index>,
MPI_SUM,
parallel_triangulation->get_mpi_communicator());
AssertThrowMPI(ierr);