We use the latter name more often than the former.
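For downstream code, the change is a mechanical rename: call get_mpi_communicator() where get_communicator() was used before; the old spelling keeps compiling but now triggers the deprecation warning added in this patch. The following is a minimal, hypothetical sketch of the migration (it assumes deal.II built with MPI and p4est; the mesh, element, and variable names are illustrative only, not taken from this patch):

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // Illustrative setup: a small distributed mesh with Q1 elements.
  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(FE_Q<2>(1));

  // Old spelling, now deprecated (still compiles, emits a warning):
  //   const MPI_Comm comm = dof_handler.get_communicator();
  // New spelling introduced by this patch:
  const MPI_Comm comm = dof_handler.get_mpi_communicator();

  if (Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "n_dofs = " << dof_handler.n_dofs() << std::endl;
}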
LinearAlgebra::distributed::Vector<double> copy_vec(solution);
solution.reinit(dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- triangulation.get_communicator());
+ triangulation.get_mpi_communicator());
solution.copy_locally_owned_data_from(copy_vec);
constraints.distribute(solution);
solution.update_ghost_values();
TrilinosWrappers::SparsityPattern dsp(
dof_handler.locally_owned_dofs(),
- dof_handler.get_triangulation().get_communicator());
+ dof_handler.get_triangulation().get_mpi_communicator());
DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints);
{
support_points.reinit(dof_handler.locally_owned_dofs(),
DoFTools::extract_locally_active_dofs(dof_handler),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
const auto &fe = dof_handler.get_fe();
data_out.build_patches(mapping, degree, DataOut<dim>::curved_inner_cells);
data_out.write_vtu_in_parallel(name_prefix + ".vtu",
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
} // namespace HelperFunctions
(cell->vertex(1) - cell->vertex(0)).norm_square());
h_local_min = std::sqrt(h_local_min);
const double h_min =
- Utilities::MPI::min(h_local_min, dof_handler.get_communicator());
+ Utilities::MPI::min(h_local_min, dof_handler.get_mpi_communicator());
const double dt =
cr * HelperFunctions::compute_dt_cfl(h_min, degree, speed_of_sound);
(void)sort_data;
#else
static CollectiveMutex mutex;
- CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
+ CollectiveMutex::ScopedLock lock(mutex, tria->get_mpi_communicator());
const unsigned int my_rank =
- Utilities::MPI::this_mpi_process(tria->get_communicator());
+ Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
// allocate memory for output and buffer
output.resize(point_ptrs.back() * n_components);
(send_ptrs[i + 1] - send_ptrs[i]) * n_components),
send_ranks[i],
internal::Tags::remote_point_evaluation,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
send_buffers_packed,
send_requests);
}
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
internal::Tags::remote_point_evaluation,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
(recv_ptrs[j + 1] - recv_ptrs[j]) * n_components);
internal::recv_and_unpack(recv_buffer,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
status,
recv_buffer_packed);
(void)sort_data;
#else
static CollectiveMutex mutex;
- CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
+ CollectiveMutex::ScopedLock lock(mutex, tria->get_mpi_communicator());
const unsigned int my_rank =
- Utilities::MPI::this_mpi_process(tria->get_communicator());
+ Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
// allocate memory for buffer
const auto &point_ptrs = this->get_point_ptrs();
(recv_ptrs[i + 1] - recv_ptrs[i]) * n_components),
recv_ranks[i],
internal::Tags::remote_point_evaluation,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
send_buffers_packed,
send_requests);
}
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
internal::Tags::remote_point_evaluation,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
(send_ptrs[j + 1] - send_ptrs[j]) * n_components);
internal::recv_and_unpack(recv_buffer,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
status,
recv_buffer_packed);
* Return MPI communicator used by this triangulation.
*/
virtual MPI_Comm
- get_communicator() const override;
+ get_mpi_communicator() const override;
/**
* Return if multilevel hierarchy is supported and has been constructed.
* Return MPI communicator used by the underlying triangulation.
*/
MPI_Comm
+ get_mpi_communicator() const;
+
+ /**
+ * Return MPI communicator used by the underlying triangulation.
+ *
+ * @deprecated Use get_mpi_communicator() instead.
+ */
+ DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+ "Access the MPI communicator with get_mpi_communicator() instead.")
+ MPI_Comm
get_communicator() const;
/**
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
-inline MPI_Comm DoFHandler<dim, spacedim>::get_communicator() const
+inline MPI_Comm DoFHandler<dim, spacedim>::get_mpi_communicator() const
{
Assert(tria != nullptr,
ExcMessage("This DoFHandler object has not been associated "
"with a triangulation."));
- return tria->get_communicator();
+ return tria->get_mpi_communicator();
+}
+
+
+
+template <int dim, int spacedim>
+DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
+inline MPI_Comm DoFHandler<dim, spacedim>::get_communicator() const
+{
+ return get_mpi_communicator();
}
ExcMessage(
"Extrapolate in parallel only works for parallel distributed triangulations!"));
- communicator = tr->get_communicator();
+ communicator = tr->get_mpi_communicator();
compute_all_non_local_data(dof2, u2_relevant);
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
- vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
}
#endif // DEAL_II_WITH_PETSC
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
- vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
}
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
- vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
}
# endif
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
- vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
}
# endif
#endif // DEAL_II_WITH_TRILINOS
Assert(parallel_tria != nullptr, ExcNotImplemented());
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
- vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
}
DoFTools::extract_locally_relevant_dofs(dh);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
}
#endif // DEAL_II_WITH_PETSC
DoFTools::extract_locally_relevant_dofs(dh);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
}
#endif // DEAL_II_WITH_TRILINOS
DoFTools::extract_locally_relevant_dofs(dh);
vector.reinit(locally_owned_dofs,
locally_relevant_dofs,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
}
// a mutex:
static Utilities::MPI::CollectiveMutex mutex;
Utilities::MPI::CollectiveMutex::ScopedLock lock(
- mutex, tria->get_communicator());
+ mutex, tria->get_mpi_communicator());
const int mpi_tag =
Utilities::MPI::internal::Tags::exchange_cell_data_request;
MPI_BYTE,
it.first,
mpi_tag,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&requests[idx]);
AssertThrowMPI(ierr);
++idx;
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
mpi_tag,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
MPI_BYTE,
status.MPI_SOURCE,
status.MPI_TAG,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
MPI_BYTE,
status.MPI_SOURCE,
mpi_tag_reply,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&reply_requests[idx]);
AssertThrowMPI(ierr);
}
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
mpi_tag_reply,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
MPI_BYTE,
status.MPI_SOURCE,
status.MPI_TAG,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
clear();
/**
- * Return MPI communicator used by this triangulation. In the case of
- * a serial Triangulation object, MPI_COMM_SELF is returned.
+ * Return the MPI communicator used by this triangulation. In the case of a
+ * serial Triangulation object, MPI_COMM_SELF is returned.
*/
virtual MPI_Comm
+ get_mpi_communicator() const;
+
+ /**
+ * Return the MPI communicator used by this triangulation. In the case of
+ * a serial Triangulation object, MPI_COMM_SELF is returned.
+ *
+ * @deprecated Use get_mpi_communicator() instead.
+ */
+ DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+ "Access the MPI communicator with get_mpi_communicator() instead.")
+ MPI_Comm
get_communicator() const;
/**
if (const dealii::parallel::TriangulationBase<dim> *ptria =
dynamic_cast<const dealii::parallel::TriangulationBase<dim>
*>(&triangulation))
- comm = ptria->get_communicator();
+ comm = ptria->get_mpi_communicator();
MPI_Status status;
unsigned int mysize = inner_face.second.shared_faces.size();
task_info.allow_ghosted_vectors_in_loops =
additional_data.allow_ghosted_vectors_in_loops;
- task_info.communicator = dof_handler[0]->get_communicator();
+ task_info.communicator = dof_handler[0]->get_mpi_communicator();
task_info.communicator_sm = additional_data.communicator_sm;
task_info.my_pid =
Utilities::MPI::this_mpi_process(task_info.communicator);
quad,
iterator_filter,
std::make_shared<const MPI_Comm>(
- parallel_triangulation->get_communicator()),
+ parallel_triangulation->get_mpi_communicator()),
additional_data);
else
internal_reinit(mapping,
const SparsityPatternType &sp,
const DoFHandler<dim, spacedim> &dh)
{
- const MPI_Comm communicator = dh.get_communicator();
+ const MPI_Comm communicator = dh.get_mpi_communicator();
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
const SparsityPatternType &sp,
const DoFHandler<dim, spacedim> &dh)
{
- const MPI_Comm communicator = dh.get_communicator();
+ const MPI_Comm communicator = dh.get_mpi_communicator();
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
const SparsityPatternType &sp,
const DoFHandler<dim, spacedim> &dh)
{
- const MPI_Comm communicator = dh.get_communicator();
+ const MPI_Comm communicator = dh.get_mpi_communicator();
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
const SparsityPatternType &sp,
const DoFHandler<dim, spacedim> &dh)
{
- const MPI_Comm communicator = dh.get_communicator();
+ const MPI_Comm communicator = dh.get_mpi_communicator();
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
dh.locally_owned_mg_dofs(level),
const SparsityPatternType &sp,
const DoFHandler<dim, spacedim> &dh)
{
- const MPI_Comm communicator = dh.get_communicator();
+ const MPI_Comm communicator = dh.get_mpi_communicator();
// Reinit PETSc matrix
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
{
v[level].reinit(dof_handler.locally_owned_mg_dofs(level),
- tria->get_communicator());
+ tria->get_mpi_communicator());
}
}
#endif
for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
{
v[level].reinit(dof_handler.locally_owned_mg_dofs(level),
- tria->get_communicator());
+ tria->get_mpi_communicator());
}
}
#endif
internal::MGTransfer::reinit_vector(dof_handler, component_to_block_map, dst);
#ifdef DEBUG_OUTPUT
std::cout << "copy_to_mg src " << src.l2_norm() << std::endl;
- int ierr = MPI_Barrier(dof_handler.get_communicator());
+ int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
#endif
{
--level;
#ifdef DEBUG_OUTPUT
- ierr = MPI_Barrier(dof_handler.get_communicator());
+ ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
#endif
dst_level.compress(VectorOperation::insert);
#ifdef DEBUG_OUTPUT
- ierr = MPI_Barrier(dof_handler.get_communicator());
+ ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
std::cout << "copy_to_mg dst " << level << ' ' << dst_level.l2_norm()
<< std::endl;
for (unsigned int level = src.min_level(); level <= src.max_level(); ++level)
{
#ifdef DEBUG_OUTPUT
- int ierr = MPI_Barrier(dof_handler.get_communicator());
+ int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
std::cout << "copy_from_mg src " << level << ' ' << src[level].l2_norm()
<< std::endl;
- ierr = MPI_Barrier(dof_handler.get_communicator());
+ ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
#endif
#ifdef DEBUG_OUTPUT
{
dst.compress(VectorOperation::insert);
- ierr = MPI_Barrier(dof_handler.get_communicator());
+ ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
std::cout << "copy_from_mg level=" << level << ' ' << dst.l2_norm()
<< std::endl;
}
dst.compress(VectorOperation::insert);
#ifdef DEBUG_OUTPUT
- const int ierr = MPI_Barrier(dof_handler.get_communicator());
+ const int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
AssertThrowMPI(ierr);
std::cout << "copy_from_mg " << dst.l2_norm() << std::endl;
#endif
dst[level].reinit(ghosted_level_vector[level], false);
else
dst[level].reinit(dof_handler.locally_owned_mg_dofs(level),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
else if ((perform_plain_copy == false &&
perform_renumbered_plain_copy == false) ||
, dof_handler_coarse(dof_handler_coarse)
, mg_level_fine(mg_level_fine)
, communicator(
- dof_handler_fine.get_communicator() /*TODO: fix for different comms*/)
+ dof_handler_fine
+ .get_mpi_communicator() /*TODO: fix for different comms*/)
, cell_id_translator(
dof_handler_fine.get_triangulation().n_global_coarse_cells(),
dof_handler_fine.get_triangulation().n_global_levels())
});
return Utilities::MPI::min(static_cast<unsigned int>(flag),
- dof_handler_fine.get_communicator()) == 1;
+ dof_handler_fine.get_mpi_communicator()) ==
+ 1;
}
else
{
dof_handler_coarse.locally_owned_dofs() :
dof_handler_coarse.locally_owned_mg_dofs(mg_level_coarse),
locally_relevant_dofs,
- dof_handler_coarse.get_communicator());
+ dof_handler_coarse.get_mpi_communicator());
}
cell->active_fe_index());
});
- const auto comm = dof_handler_fine.get_communicator();
+ const auto comm = dof_handler_fine.get_mpi_communicator();
- Assert(comm == dof_handler_coarse.get_communicator(),
+ Assert(comm == dof_handler_coarse.get_mpi_communicator(),
ExcNotImplemented());
ArrayView<unsigned int> temp_min(min_active_fe_indices);
{
transfer.partitioner_coarse = transfer.constraint_info_coarse.finalize(
- dof_handler_coarse.get_communicator());
+ dof_handler_coarse.get_mpi_communicator());
transfer.vec_coarse.reinit(transfer.partitioner_coarse);
transfer.partitioner_fine = transfer.constraint_info_fine.finalize(
- dof_handler_fine.get_communicator());
+ dof_handler_fine.get_mpi_communicator());
transfer.vec_fine.reinit(transfer.partitioner_fine);
}
{
transfer.partitioner_coarse = transfer.constraint_info_coarse.finalize(
- dof_handler_coarse.get_communicator());
+ dof_handler_coarse.get_mpi_communicator());
transfer.vec_coarse.reinit(transfer.partitioner_coarse);
transfer.partitioner_fine = transfer.constraint_info_fine.finalize(
- dof_handler_fine.get_communicator());
+ dof_handler_fine.get_mpi_communicator());
transfer.vec_fine.reinit(transfer.partitioner_fine);
}
&fine_triangulation_in))
return std::make_shared<
parallel::distributed::Triangulation<dim, spacedim>>(
- fine_triangulation->get_communicator());
+ fine_triangulation->get_mpi_communicator());
else
#endif
#ifdef DEAL_II_WITH_MPI
const parallel::shared::Triangulation<dim, spacedim> *>(
&fine_triangulation_in))
return std::make_shared<parallel::shared::Triangulation<dim, spacedim>>(
- fine_triangulation->get_communicator(),
+ fine_triangulation->get_mpi_communicator(),
Triangulation<dim, spacedim>::none,
fine_triangulation->with_artificial_cells());
else
Assert(fine_triangulation, ExcNotImplemented());
- const auto comm = fine_triangulation->get_communicator();
+ const auto comm = fine_triangulation->get_mpi_communicator();
if (keep_fine_triangulation == true &&
repartition_fine_triangulation == false)
IteratorFilters::LocallyOwnedCell())
is_locally_owned_coarse.add_index(cell_id_translator.translate(cell));
- const MPI_Comm communicator = dof_handler_fine.get_communicator();
+ const MPI_Comm communicator = dof_handler_fine.get_mpi_communicator();
std::vector<unsigned int> owning_ranks(
is_locally_owned_coarse.n_elements());
this->perform_plain_copy =
Utilities::MPI::max(this->perform_plain_copy ? 1 : 0,
- dof_handler_out.get_communicator()) != 0;
+ dof_handler_out.get_mpi_communicator()) != 0;
if (this->perform_plain_copy)
{
const Utilities::MPI::Partitioner partitioner_support_points(
dof_handler_support_points.locally_owned_dofs(),
- dof_handler_support_points.get_communicator());
+ dof_handler_support_points.get_mpi_communicator());
const Utilities::MPI::Partitioner partitioner_dof(
dof_handler.locally_owned_dofs(),
DoFTools::extract_locally_relevant_dofs(dof_handler),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
std::vector<bool> dof_processed(partitioner_dof.locally_owned_size() +
partitioner_dof.n_ghost_indices(),
this->partitioner_fine.reset(
new Utilities::MPI::Partitioner(dof_handler_fine.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler_fine.get_communicator()));
+ dof_handler_fine.get_mpi_communicator()));
this->vec_fine.reinit(this->partitioner_fine);
}
const parallel::TriangulationBase<dim> *ptria =
dynamic_cast<const parallel::TriangulationBase<dim> *>(&tria);
Assert(ptria != nullptr, ExcInternalError());
- return ptria->get_communicator();
+ return ptria->get_mpi_communicator();
}
dst.block(b).reinit(locally_owned_dofs_b[b],
locally_relevant_dofs_b[b],
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
copy_locally_owned_data_from(src.block(b), dst.block(b));
}
dst.block(0).reinit(locally_owned_dofs,
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
copy_locally_owned_data_from(src, dst.block(0));
dst.collect_sizes();
}
#endif
- const MPI_Comm comm = tria.get_communicator();
+ const MPI_Comm comm = tria.get_mpi_communicator();
switch (norm)
{
u.reinit(locally_owned_dofs,
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
3,
MPI_DOUBLE,
MPI_SUM,
- p_triangulation->get_communicator());
+ p_triangulation->get_mpi_communicator());
AssertThrowMPI(ierr);
internal::set_possibly_complex_number(global_values[0],
template <typename VectorType>
const MPI_Comm &
- get_communicator(N_Vector v);
+ get_mpi_communicator(N_Vector v);
# if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
/**
*/
template <typename VectorType>
inline SUNComm
- get_communicator_by_value(N_Vector v);
+ get_mpi_communicator_by_value(N_Vector v);
# else
/**
* Sundials likes a void* but we want to use the above functions
*/
template <typename VectorType>
inline void *
- get_communicator_as_void_ptr(N_Vector v);
+ get_mpi_communicator_as_void_ptr(N_Vector v);
# endif
} // namespace NVectorOperations
} // namespace internal
template <typename VectorType>
const MPI_Comm &
- get_communicator(N_Vector v)
+ get_mpi_communicator(N_Vector v)
{
Assert(v != nullptr, ExcInternalError());
Assert(v->content != nullptr, ExcInternalError());
# if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
template <typename VectorType>
SUNComm
- get_communicator_by_value(N_Vector v)
+ get_mpi_communicator_by_value(N_Vector v)
{
# ifndef DEAL_II_WITH_MPI
(void)v;
//
// Further, we need to cast away const here, as SUNDIALS demands the
// communicator by value.
- return const_cast<SUNComm>(get_communicator<VectorType>(v));
+ return const_cast<SUNComm>(get_mpi_communicator<VectorType>(v));
else
return SUN_COMM_NULL;
# endif
# else
template <typename VectorType>
void *
- get_communicator_as_void_ptr(N_Vector v)
+ get_mpi_communicator_as_void_ptr(N_Vector v)
{
# ifndef DEAL_II_WITH_MPI
(void)v;
if (is_serial_vector<VectorType>::value == false)
// We need to cast away const here, as SUNDIALS demands a pure
// `void*`.
- return &(const_cast<MPI_Comm &>(get_communicator<VectorType>(v)));
+ return &(const_cast<MPI_Comm &>(get_mpi_communicator<VectorType>(v)));
else
return nullptr;
# endif
{
ArrayView<realtype> products(d, nv);
Utilities::MPI::sum(products,
- get_communicator<VectorType>(x),
+ get_mpi_communicator<VectorType>(x),
products);
return 0;
}
local_elements.end(),
indexed_less_than);
return Utilities::MPI::min((*vector)[local_min],
- get_communicator<VectorType>(x));
+ get_mpi_communicator<VectorType>(x));
}
}
return Utilities::MPI::min(proc_local_min,
- get_communicator<VectorType>(x));
+ get_mpi_communicator<VectorType>(x));
}
// v->ops->nvspace = undef;
# if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
v->ops->nvgetcommunicator =
- &NVectorOperations::get_communicator_by_value<VectorType>;
+ &NVectorOperations::get_mpi_communicator_by_value<VectorType>;
# else
v->ops->nvgetcommunicator =
- &NVectorOperations::get_communicator_as_void_ptr<VectorType>;
+ &NVectorOperations::get_mpi_communicator_as_void_ptr<VectorType>;
# endif
v->ops->nvgetlength = &NVectorOperations::get_global_length<VectorType>;
const auto n_owning_processes_global =
Utilities::MPI::all_reduce<std::tuple<unsigned int, unsigned int>>(
n_owning_processes_local,
- tria.get_communicator(),
+ tria.get_mpi_communicator(),
[&](const auto &a,
const auto &b) -> std::tuple<unsigned int, unsigned int> {
if (a == n_owning_processes_default)
Vector<Number> locally_owned_indicators(n_locally_owned_active_cells(tria));
get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
- MPI_Comm mpi_communicator = tria.get_communicator();
+ MPI_Comm mpi_communicator = tria.get_mpi_communicator();
// figure out the global max and min of the indicators. we don't need it
// here, but it's a collective communication call
n_locally_owned_active_cells(tria));
get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
- MPI_Comm mpi_communicator = tria.get_communicator();
+ MPI_Comm mpi_communicator = tria.get_mpi_communicator();
// figure out the global max and min of the indicators. we don't need it
// here, but it's a collective communication call
return {};
#else
- const auto comm = tria->get_communicator();
+ const auto comm = tria->get_mpi_communicator();
const unsigned int process_has_active_locally_owned_cells =
tria->n_locally_owned_active_cells() > 0;
FirstChildPolicy<dim, spacedim>::partition(
const Triangulation<dim, spacedim> &tria_coarse_in) const
{
- const auto communicator = tria_coarse_in.get_communicator();
+ const auto communicator = tria_coarse_in.get_mpi_communicator();
const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
n_global_levels);
tria_in.end()),
[](const auto &cell) { return cell.is_locally_owned(); });
- const auto comm = tria_in.get_communicator();
+ const auto comm = tria_in.get_mpi_communicator();
if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
return {}; // all processes have enough cells
std::vector<unsigned int> weights(partitioner->locally_owned_size());
- const auto mpi_communicator = tria_in.get_communicator();
+ const auto mpi_communicator = tria_in.get_mpi_communicator();
const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);
// determine weight of each cell
// weight
const auto [process_local_weight_offset, total_weight] =
Utilities::MPI::partial_and_total_sum(process_local_weight,
- tria->get_communicator());
+ tria->get_mpi_communicator());
// set up partition
LinearAlgebra::distributed::Vector<double> partition(partitioner);
// Check that all meshes are the same (or at least have the same
// total number of active cells):
const unsigned int max_active_cells =
- Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
+ Utilities::MPI::max(this->n_active_cells(),
+ this->get_mpi_communicator());
Assert(
max_active_cells == this->n_active_cells(),
ExcMessage(
[](const auto &i) { return (i.is_locally_owned()); });
const unsigned int total_cells =
- Utilities::MPI::sum(n_my_cells, this->get_communicator());
+ Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
Assert(total_cells == this->n_active_cells(),
ExcMessage("Not all cells are assigned to a processor."));
}
const unsigned int total_cells =
- Utilities::MPI::sum(n_my_cells, this->get_communicator());
+ Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
Assert(total_cells == this->n_cells(),
ExcMessage("Not all cells are assigned to a processor."));
}
}
Utilities::MPI::max(refinement_configurations,
- this->get_communicator(),
+ this->get_mpi_communicator(),
refinement_configurations);
for (const auto &cell : this->active_cell_iterators())
this->local_cell_relations,
this->cell_attached_data.pack_callbacks_fixed,
this->cell_attached_data.pack_callbacks_variable,
- this->get_communicator());
+ this->get_mpi_communicator());
}
// finally copy back from local part of tree to deal.II
this->local_cell_relations,
this->cell_attached_data.pack_callbacks_fixed,
this->cell_attached_data.pack_callbacks_variable,
- this->get_communicator());
+ this->get_mpi_communicator());
}
try
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
- MPI_Comm TriangulationBase<dim, spacedim>::get_communicator() const
+ MPI_Comm TriangulationBase<dim, spacedim>::get_mpi_communicator() const
{
return mpi_communicator;
}
cell->active_fe_index();
Utilities::MPI::sum(active_fe_indices,
- tr->get_communicator(),
+ tr->get_mpi_communicator(),
active_fe_indices);
// now go back and fill the active FE index on all other
.hp_cell_future_fe_indices[cell->level()][cell->index()];
Utilities::MPI::sum(future_fe_indices,
- tr->get_communicator(),
+ tr->get_mpi_communicator(),
future_fe_indices);
for (const auto &cell : dof_handler.active_cell_iterators())
Assert(tr != nullptr, ExcInternalError());
const unsigned int n_procs =
- Utilities::MPI::n_mpi_processes(tr->get_communicator());
+ Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
// If an underlying shared::Tria allows artificial cells, we need to
// restore the true cell owners temporarily.
"is set in the constructor."));
const unsigned int n_procs =
- Utilities::MPI::n_mpi_processes(tr->get_communicator());
+ Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
const unsigned int n_levels = tr->n_global_levels();
std::vector<NumberCache> number_caches;
Utilities::MPI::internal::all_reduce<bool>(
MPI_LAND,
ArrayView<const bool>(&uses_sequential_numbering, 1),
- tr->get_communicator(),
+ tr->get_mpi_communicator(),
ArrayView<bool>(&all_use_sequential_numbering, 1));
if (all_use_sequential_numbering)
{
this->dof_handler->locally_owned_dofs().n_elements(),
ExcInternalError());
const unsigned int n_cpu =
- Utilities::MPI::n_mpi_processes(tr->get_communicator());
+ Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
std::vector<types::global_dof_index> gathered_new_numbers(
this->dof_handler->n_dofs(), 0);
- Assert(Utilities::MPI::this_mpi_process(tr->get_communicator()) ==
+ Assert(Utilities::MPI::this_mpi_process(
+ tr->get_mpi_communicator()) ==
this->dof_handler->get_triangulation()
.locally_owned_subdomain(),
ExcInternalError());
rcounts.data(),
1,
MPI_INT,
- tr->get_communicator());
+ tr->get_mpi_communicator());
AssertThrowMPI(ierr);
// compute the displacements (relative to recvbuf)
Assert(new_numbers_copy.size() ==
static_cast<unsigned int>(
rcounts[Utilities::MPI::this_mpi_process(
- tr->get_communicator())]),
+ tr->get_mpi_communicator())]),
ExcInternalError());
ierr = MPI_Allgatherv(new_numbers_copy.data(),
new_numbers_copy.size(),
rcounts.data(),
displacements.data(),
DEAL_II_DOF_INDEX_MPI_TYPE,
- tr->get_communicator());
+ tr->get_mpi_communicator());
AssertThrowMPI(ierr);
}
std::vector<unsigned int> flag_2(this->dof_handler->n_dofs(), 0);
std::vector<IndexSet> locally_owned_dofs_per_processor =
Utilities::MPI::all_gather(
- tr->get_communicator(),
+ tr->get_mpi_communicator(),
this->dof_handler->locally_owned_dofs());
for (unsigned int i = 0; i < n_cpu; ++i)
{
// range of indices
const auto [my_shift, n_global_dofs] =
Utilities::MPI::partial_and_total_sum(
- n_locally_owned_dofs, triangulation->get_communicator());
+ n_locally_owned_dofs, triangulation->get_mpi_communicator());
// make dof indices globally consecutive
const auto [my_shift, n_global_dofs] =
Utilities::MPI::partial_and_total_sum(
level_number_cache.n_locally_owned_dofs,
- triangulation->get_communicator());
+ triangulation->get_mpi_communicator());
level_number_cache.n_global_dofs = n_global_dofs;
// assign appropriate indices
// If we don't have a renumbering (i.e., when there is 1 component) then
// return
if (Utilities::MPI::max(renumbering.size(),
- dof_handler.get_communicator()) == 0)
+ dof_handler.get_mpi_communicator()) == 0)
return;
// verify that the last numbered
// If we don't have a renumbering (i.e., when there is 1 component) then
// return
if (Utilities::MPI::max(renumbering.size(),
- dof_handler.get_communicator()) == 0)
+ dof_handler.get_mpi_communicator()) == 0)
return;
// verify that the last numbered
n_buckets,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
std::vector<types::global_dof_index> global_dof_count(n_buckets);
Utilities::MPI::sum(local_dof_count,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
global_dof_count);
// calculate shifts
ExcInternalError());
if (Utilities::MPI::max(renumbering.size(),
- dof_handler.get_communicator()) > 0)
+ dof_handler.get_mpi_communicator()) > 0)
dof_handler.renumber_dofs(level, renumbering);
}
n_buckets,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
std::vector<types::global_dof_index> global_dof_count(n_buckets);
Utilities::MPI::sum(local_dof_count,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
global_dof_count);
// calculate shifts
1,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
#endif
}
// If there is only one component then there is nothing to do, so check
// first:
if (Utilities::MPI::max(renumbering.size(),
- dof_handler.get_communicator()) > 0)
+ dof_handler.get_mpi_communicator()) > 0)
dof_handler.renumber_dofs(renumbering);
}
Utilities::MPI::n_mpi_processes(
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&dof_handler.get_triangulation())
- ->get_communicator()));
+ ->get_mpi_communicator()));
Assert(n_subdomains > *std::max_element(subdomain_association.begin(),
subdomain_association.end()),
ExcInternalError());
n_target_components,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
n_target_blocks,
DEAL_II_DOF_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
TriangulationBase<dim, spacedim> &>(
coarse_to_fine_grid_map.get_destination_grid()
.get_triangulation());
- communicator = tria.get_communicator();
+ communicator = tria.get_mpi_communicator();
is_called_in_parallel = true;
}
catch (std::bad_cast &)
DoFTools::extract_locally_relevant_dofs(dof_handler);
vector_ghosted.reinit(dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
copy_locally_owned_data_from(vector, vector_ghosted);
vector_ghosted.update_ghost_values();
DoFTools::extract_locally_relevant_level_dofs(dof_handler, l);
vectors_ghosted[l].reinit(dof_handler.locally_owned_mg_dofs(l),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
copy_locally_owned_data_from(vectors[l], vectors_ghosted[l]);
vectors_ghosted[l].update_ghost_values();
}
else
pos += 1;
const unsigned int n_procs =
- Utilities::MPI::n_mpi_processes(tr->get_communicator());
+ Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
for (unsigned int i = 0; i < n_procs; ++i)
filenames.push_back(filename_without_extension.substr(pos) +
".proc" + Utilities::int_to_string(i, 4) +
}
// Get the size of the largest CellID string
- max_cellid_size =
- Utilities::MPI::max(max_cellid_size, triangulation.get_communicator());
+ max_cellid_size = Utilities::MPI::max(max_cellid_size,
+ triangulation.get_mpi_communicator());
// Make indices global by getting the number of vertices owned by each
// processors and shifting the indices accordingly
1,
DEAL_II_VERTEX_INDEX_MPI_TYPE,
MPI_SUM,
- triangulation.get_communicator());
+ triangulation.get_mpi_communicator());
AssertThrowMPI(ierr);
for (auto &global_index_it : local_to_global_vertex_index)
DEAL_II_VERTEX_INDEX_MPI_TYPE,
destination,
mpi_tag,
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
&first_requests[i]);
AssertThrowMPI(ierr);
}
DEAL_II_VERTEX_INDEX_MPI_TYPE,
source,
mpi_tag,
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
}
MPI_CHAR,
destination,
mpi_tag2,
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
&second_requests[i]);
AssertThrowMPI(ierr);
}
MPI_CHAR,
source,
mpi_tag2,
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
}
dynamic_cast<parallel::shared::Triangulation<dim, spacedim> *>(
&triangulation))
Utilities::MPI::sum(cell_weights,
- shared_tria->get_communicator(),
+ shared_tria->get_mpi_communicator(),
cell_weights);
// verify that the global sum of weights is larger than 0
dynamic_cast<parallel::shared::Triangulation<dim, spacedim> *>(
&triangulation))
Utilities::MPI::sum(cell_weights,
- shared_tria->get_communicator(),
+ shared_tria->get_mpi_communicator(),
cell_weights);
// verify that the global sum of weights is larger than 0
cell_id.get_coarse_cell_id(),
&p4est_cell,
Utilities::MPI::this_mpi_process(
- triangulation.get_communicator()));
+ triangulation.get_mpi_communicator()));
Assert(owner >= 0, ExcMessage("p4est should know the owner."));
&cache.get_locally_owned_cell_bounding_boxes_rtree());
const unsigned int my_rank = Utilities::MPI::this_mpi_process(
- cache.get_triangulation().get_communicator());
+ cache.get_triangulation().get_mpi_communicator());
cell_hint = first_cell.first;
if (cell_hint.state() == IteratorState::valid)
auto &send_components = result.send_components;
auto &recv_components = result.recv_components;
- const auto comm = cache.get_triangulation().get_communicator();
+ const auto comm = cache.get_triangulation().get_mpi_communicator();
const auto potential_owners = internal::guess_owners_of_entities(
comm, global_bboxes, points, tolerance);
// indices assigned at recv side needed to fill send_components
indices_of_rank = communicate_indices(result.recv_components,
- tria.get_communicator());
+ tria.get_mpi_communicator());
}
for (const auto &send_component : send_components)
structdim,
spacedim>::IntersectionType;
- const auto comm = cache.get_triangulation().get_communicator();
+ const auto comm = cache.get_triangulation().get_mpi_communicator();
DistributedComputeIntersectionLocationsInternal<structdim, spacedim>
result;
&(*tria)))
{
covering_rtree[level] = GridTools::build_global_description_tree(
- boxes, tria_mpi->get_communicator());
+ boxes, tria_mpi->get_mpi_communicator());
}
else
{
}
const double global_volume =
- Utilities::MPI::sum(local_volume, triangulation.get_communicator());
+ Utilities::MPI::sum(local_volume, triangulation.get_mpi_communicator());
return global_volume;
}
min_diameter = std::min(min_diameter, cell->diameter(mapping));
const double global_min_diameter =
- Utilities::MPI::min(min_diameter, triangulation.get_communicator());
+ Utilities::MPI::min(min_diameter, triangulation.get_mpi_communicator());
return global_min_diameter;
}
max_diameter = std::max(max_diameter, cell->diameter(mapping));
const double global_max_diameter =
- Utilities::MPI::max(max_diameter, triangulation.get_communicator());
+ Utilities::MPI::max(max_diameter, triangulation.get_mpi_communicator());
return global_max_diameter;
}
} /* namespace GridTools */
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
-MPI_Comm Triangulation<dim, spacedim>::get_communicator() const
+MPI_Comm Triangulation<dim, spacedim>::get_mpi_communicator() const
{
return MPI_COMM_SELF;
}
+template <int dim, int spacedim>
+DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
+MPI_Comm Triangulation<dim, spacedim>::get_communicator() const
+{
+ return get_mpi_communicator();
+}
+
+
+
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
std::weak_ptr<const Utilities::MPI::Partitioner> Triangulation<dim, spacedim>::
this->local_cell_relations,
this->cell_attached_data.pack_callbacks_fixed,
this->cell_attached_data.pack_callbacks_variable,
- this->get_communicator());
+ this->get_mpi_communicator());
// dummy copy of data
this->data_serializer.dest_data_fixed =
tria->local_cell_relations,
tria->cell_attached_data.pack_callbacks_fixed,
tria->cell_attached_data.pack_callbacks_variable,
- this->get_communicator());
+ this->get_mpi_communicator());
// then store buffers in file
tria->data_serializer.save(global_first_cell,
global_num_cells,
file_basename,
- this->get_communicator());
+ this->get_mpi_communicator());
// and release the memory afterwards
tria->data_serializer.clear();
file_basename,
n_attached_deserialize_fixed,
n_attached_deserialize_variable,
- this->get_communicator());
+ this->get_mpi_communicator());
this->data_serializer.unpack_cell_status(this->local_cell_relations);
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&tria))
{
- Assert(comm == ptria->get_communicator(),
+ Assert(comm == ptria->get_mpi_communicator(),
ExcMessage("MPI communicators do not match."));
Assert(my_rank_in == numbers::invalid_unsigned_int ||
my_rank_in == dealii::Utilities::MPI::this_mpi_process(comm),
const TriangulationDescription::Settings settings_in)
{
#ifdef DEAL_II_WITH_MPI
- if (tria.get_communicator() == MPI_COMM_NULL)
+ if (tria.get_mpi_communicator() == MPI_COMM_NULL)
AssertDimension(partition.locally_owned_size(), 0);
#endif
if (partition.size() == 0)
{
AssertDimension(partitions_mg.size(), 0);
- return create_description_from_triangulation(tria,
- tria.get_communicator(),
- settings_in);
+ return create_description_from_triangulation(
+ tria, tria.get_mpi_communicator(), settings_in);
}
// Update partitioner ghost elements because we will later want
mg_cell_to_future_owner,
coinciding_vertex_groups,
vertex_to_coinciding_vertex_group,
- tria.get_communicator(),
+ tria.get_mpi_communicator(),
rank,
settings));
{
max_criterion_refine =
Utilities::MPI::max(max_criterion_refine,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
min_criterion_refine =
Utilities::MPI::min(min_criterion_refine,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
max_criterion_coarsen =
Utilities::MPI::max(max_criterion_coarsen,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
min_criterion_coarsen =
Utilities::MPI::min(min_criterion_coarsen,
- parallel_tria->get_communicator());
+ parallel_tria->get_mpi_communicator());
}
// Absent any better strategies, we will set the threshold by linear
// parallel implementation with distributed memory
//
- MPI_Comm mpi_communicator = parallel_tria->get_communicator();
+ MPI_Comm mpi_communicator = parallel_tria->get_mpi_communicator();
// 2.) Communicate the number of cells scheduled for p-adaptation
// globally.
levels_changed_in_cycle =
Utilities::MPI::logical_or(levels_changed_in_cycle,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
levels_changed |= levels_changed_in_cycle;
}
while (levels_changed_in_cycle);
if (const parallel::TriangulationBase<dim, spacedim> *ptria =
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&mg_dof.get_triangulation()))
- perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
- ptria->get_communicator()) == 1);
+ perform_plain_copy =
+ (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
+ ptria->get_mpi_communicator()) == 1);
else
perform_plain_copy = my_perform_plain_copy;
}
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
- const MPI_Comm mpi_communicator = mg_dof.get_communicator();
+ const MPI_Comm mpi_communicator = mg_dof.get_mpi_communicator();
fill_internal(mg_dof,
mg_constrained_dofs,
if (const parallel::TriangulationBase<dim, spacedim> *tr =
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
&tria))
- global_min = Utilities::MPI::min(min_level, tr->get_communicator());
+ global_min = Utilities::MPI::min(min_level, tr->get_mpi_communicator());
AssertIndexRange(global_min, tria.n_global_levels());
workload_imbalance(const Triangulation<dim, spacedim> &tria)
{
return internal::workload_imbalance(local_workload(tria),
- tria.get_communicator());
+ tria.get_mpi_communicator());
}
&trias)
{
return internal::workload_imbalance(local_workload(trias),
- trias.back()->get_communicator());
+ trias.back()->get_mpi_communicator());
}
std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
cells(n_global_levels);
- const MPI_Comm communicator = tria.get_communicator();
+ const MPI_Comm communicator = tria.get_mpi_communicator();
const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
cells(n_global_levels);
- const MPI_Comm communicator = trias.back()->get_communicator();
+ const MPI_Comm communicator = trias.back()->get_mpi_communicator();
const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
vertical_communication_efficiency(const Triangulation<dim, spacedim> &tria)
{
return internal::vertical_communication_efficiency(
- local_vertical_communication_cost(tria), tria.get_communicator());
+ local_vertical_communication_cost(tria), tria.get_mpi_communicator());
}
{
return internal::vertical_communication_efficiency(
local_vertical_communication_cost(trias),
- trias.back()->get_communicator());
+ trias.back()->get_mpi_communicator());
}
} // namespace MGTools
#ifdef DEAL_II_WITH_MPI
if (tria && Utilities::MPI::sum(send_data_temp.size(),
- tria->get_communicator()) > 0)
+ tria->get_mpi_communicator()) > 0)
{
const std::set<types::subdomain_id> &neighbors =
tria->level_ghost_owners();
AssertThrow(level_dof_indices.size() == is_ghost.n_elements(),
ExcMessage("Size does not match!"));
- const auto index_owner =
- Utilities::MPI::compute_index_owner(owned_level_dofs,
- is_ghost,
- tria->get_communicator());
+ const auto index_owner = Utilities::MPI::compute_index_owner(
+ owned_level_dofs, is_ghost, tria->get_mpi_communicator());
AssertThrow(level_dof_indices.size() == index_owner.size(),
ExcMessage("Size does not match!"));
// Protect the send/recv logic with a mutex:
static Utilities::MPI::CollectiveMutex mutex;
Utilities::MPI::CollectiveMutex::ScopedLock lock(
- mutex, tria->get_communicator());
+ mutex, tria->get_mpi_communicator());
const int mpi_tag =
Utilities::MPI::internal::Tags::mg_transfer_fill_copy_indices;
MPI_BYTE,
dest,
mpi_tag,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&*requests.rbegin());
AssertThrowMPI(ierr);
}
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
mpi_tag,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
int len;
MPI_BYTE,
status.MPI_SOURCE,
status.MPI_TAG,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
continue;
MPI_BYTE,
status.MPI_SOURCE,
status.MPI_TAG,
- tria->get_communicator(),
+ tria->get_mpi_communicator(),
&status);
AssertThrowMPI(ierr);
// Make sure in debug mode, that everybody sent/received all packages
// on this level. If a deadlock occurs here, the list of expected
// senders is not computed correctly.
- const int ierr = MPI_Barrier(tria->get_communicator());
+ const int ierr = MPI_Barrier(tria->get_mpi_communicator());
AssertThrowMPI(ierr);
# endif
}
external_partitioners.empty() ?
nullptr :
external_partitioners[level],
- tria.get_communicator(),
+ tria.get_mpi_communicator(),
target_partitioners[level],
copy_indices_global_mine[level]);
external_partitioners.empty() ?
nullptr :
external_partitioners[0],
- tria.get_communicator(),
+ tria.get_mpi_communicator(),
target_partitioners[0],
copy_indices_global_mine[0]);
::dealii::SparsityTools::distribute_sparsity_pattern(
dsp,
dof_handler.locally_owned_mg_dofs(level + 1),
- dof_handler.get_communicator(),
+ dof_handler.get_mpi_communicator(),
dsp.row_index_set());
}
#endif
1,
DEAL_II_PARTICLE_INDEX_MPI_TYPE,
MPI_SUM,
- tria->get_communicator());
+ tria->get_mpi_communicator());
AssertThrowMPI(ierr);
}
#endif
&triangulation))
{
const unsigned int my_rank =
- Utilities::MPI::this_mpi_process(tria->get_communicator());
+ Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
combined_seed += my_rank;
}
std::mt19937 random_number_generator(combined_seed);
&triangulation))
{
std::tie(local_start_weight, global_weight_integral) =
- Utilities::MPI::partial_and_total_sum(local_weight_integral,
- tria->get_communicator());
+ Utilities::MPI::partial_and_total_sum(
+ local_weight_integral, tria->get_mpi_communicator());
}
else
{
global_number_of_particles =
dealii::Utilities::MPI::sum(number_of_locally_owned_particles,
- triangulation->get_communicator());
+ triangulation->get_mpi_communicator());
if (global_number_of_particles == 0)
{
}
else
{
- Utilities::MPI::max(result, triangulation->get_communicator(), result);
+ Utilities::MPI::max(result,
+ triangulation->get_mpi_communicator(),
+ result);
next_free_particle_index = result[1] + 1;
global_max_particles_per_cell = result[0];
&*triangulation))
{
types::particle_index particles_to_add_locally = positions.size();
- const int ierr = MPI_Scan(&particles_to_add_locally,
- &local_start_index,
- 1,
- DEAL_II_PARTICLE_INDEX_MPI_TYPE,
- MPI_SUM,
- parallel_triangulation->get_communicator());
+ const int ierr =
+ MPI_Scan(&particles_to_add_locally,
+ &local_start_index,
+ 1,
+ DEAL_II_PARTICLE_INDEX_MPI_TYPE,
+ MPI_SUM,
+ parallel_triangulation->get_mpi_communicator());
AssertThrowMPI(ierr);
local_start_index -= particles_to_add_locally;
}
if (!ids.empty())
AssertDimension(ids.size(), positions.size());
- const auto comm = triangulation->get_communicator();
+ const auto comm = triangulation->get_mpi_communicator();
const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
&*triangulation))
{
if (dealii::Utilities::MPI::n_mpi_processes(
- parallel_triangulation->get_communicator()) > 1)
+ parallel_triangulation->get_mpi_communicator()) > 1)
send_recv_particles(moved_particles, moved_cells);
}
#endif
if (parallel_triangulation != nullptr)
{
if (dealii::Utilities::MPI::n_mpi_processes(
- parallel_triangulation->get_communicator()) == 1)
+ parallel_triangulation->get_mpi_communicator()) == 1)
return;
}
else
&*triangulation);
if (parallel_triangulation == nullptr ||
dealii::Utilities::MPI::n_mpi_processes(
- parallel_triangulation->get_communicator()) == 1)
+ parallel_triangulation->get_mpi_communicator()) == 1)
{
return;
}
std::vector<MPI_Request> n_requests(2 * n_neighbors);
for (unsigned int i = 0; i < n_neighbors; ++i)
{
- const int ierr = MPI_Irecv(&(n_recv_data[i]),
- 1,
- MPI_UNSIGNED,
- neighbors[i],
- mpi_tag,
- parallel_triangulation->get_communicator(),
- &(n_requests[2 * i]));
+ const int ierr =
+ MPI_Irecv(&(n_recv_data[i]),
+ 1,
+ MPI_UNSIGNED,
+ neighbors[i],
+ mpi_tag,
+ parallel_triangulation->get_mpi_communicator(),
+ &(n_requests[2 * i]));
AssertThrowMPI(ierr);
}
for (unsigned int i = 0; i < n_neighbors; ++i)
{
- const int ierr = MPI_Isend(&(n_send_data[i]),
- 1,
- MPI_UNSIGNED,
- neighbors[i],
- mpi_tag,
- parallel_triangulation->get_communicator(),
- &(n_requests[2 * i + 1]));
+ const int ierr =
+ MPI_Isend(&(n_send_data[i]),
+ 1,
+ MPI_UNSIGNED,
+ neighbors[i],
+ mpi_tag,
+ parallel_triangulation->get_mpi_communicator(),
+ &(n_requests[2 * i + 1]));
AssertThrowMPI(ierr);
}
const int ierr =
MPI_CHAR,
neighbors[i],
mpi_tag,
- parallel_triangulation->get_communicator(),
+ parallel_triangulation->get_mpi_communicator(),
&(requests[send_ops]));
AssertThrowMPI(ierr);
++send_ops;
MPI_CHAR,
neighbors[i],
mpi_tag,
- parallel_triangulation->get_communicator(),
+ parallel_triangulation->get_mpi_communicator(),
&(requests[send_ops + recv_ops]));
AssertThrowMPI(ierr);
++recv_ops;
MPI_CHAR,
neighbors[i],
mpi_tag,
- parallel_triangulation->get_communicator(),
+ parallel_triangulation->get_mpi_communicator(),
&(requests[send_ops]));
AssertThrowMPI(ierr);
++send_ops;
MPI_CHAR,
neighbors[i],
mpi_tag,
- parallel_triangulation->get_communicator(),
+ parallel_triangulation->get_mpi_communicator(),
&(requests[send_ops + recv_ops]));
AssertThrowMPI(ierr);
++recv_ops;
affine_constraints.close();
TrilinosWrappers::SparsityPattern dsp(dof_handler.locally_owned_dofs(),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
DoFTools::make_sparsity_pattern(dof_handler, dsp, affine_constraints);
dsp.compress();
false);
Teuchos::RCP<Epetra_MultiVector> B, X;
- LinearAlgebra::EpetraWrappers::Vector x_(dof_handler.locally_owned_dofs(),
- dof_handler.get_communicator());
+ LinearAlgebra::EpetraWrappers::Vector x_(
+ dof_handler.locally_owned_dofs(), dof_handler.get_mpi_communicator());
LinearAlgebra::ReadWriteVector<Number> x_temp(
dof_handler.locally_owned_dofs());
x_temp.import_elements(x, VectorOperation::insert);
x_.import_elements(x_temp, VectorOperation::insert);
- LinearAlgebra::EpetraWrappers::Vector r_(dof_handler.locally_owned_dofs(),
- dof_handler.get_communicator());
+ LinearAlgebra::EpetraWrappers::Vector r_(
+ dof_handler.locally_owned_dofs(), dof_handler.get_mpi_communicator());
LinearAlgebra::ReadWriteVector<Number> r_temp(
dof_handler.locally_owned_dofs());
r_temp.import_elements(r, VectorOperation::insert);
test(DoFHandler<2> &dof_handler, const hp::MappingCollection<2> &mappings)
{
DoFRenumbering::support_point_wise(dof_handler);
- const MPI_Comm comm = dof_handler.get_communicator();
+ const MPI_Comm comm = dof_handler.get_mpi_communicator();
const IndexSet &local_dofs = dof_handler.locally_owned_dofs();
deallog << "new case with locally owned dofs = ";
const std::string &label)
{
deallog.push(label);
- const auto comm = trias.front()->get_communicator();
+ const auto comm = trias.front()->get_mpi_communicator();
const auto my_rank = Utilities::MPI::this_mpi_process(comm);
for (unsigned int i = 0; i < trias.size(); ++i)
partition_distributed_triangulation(const Triangulation<dim, spacedim> &tria_in,
const MPI_Comm comm)
{
- const auto comm_tria = tria_in.get_communicator();
+ const auto comm_tria = tria_in.get_mpi_communicator();
const auto n_global_active_cells = Utilities::MPI::max(
comm_tria == MPI_COMM_SELF ? 0 : tria_in.n_global_active_cells(), comm);
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
"data_background_" +
std::to_string(n_subdivisions),
0,
- triangulation.get_communicator());
+ triangulation.get_mpi_communicator());
}
}
constraints.make_consistent_in_parallel(dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
const auto b = collect_lines(constraints, dof_handler.n_dofs());
b.print(deallog.get_file_stream());
TrilinosWrappers::SparsityPattern &sparsity_pattern)
{
sparsity_pattern.reinit(dof_handler.locally_owned_dofs(),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
template <int dim,
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
DoFTools::extract_locally_relevant_level_dofs(dh, level);
level_vectors[level].reinit(dh.locally_owned_mg_dofs(level),
relevant_dofs,
- tria.get_communicator());
+ tria.get_mpi_communicator());
std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
for (const auto &cell : dh.mg_cell_iterators_on_level(level))
if (cell->level_subdomain_id() != numbers::artificial_subdomain_id)
LinearAlgebra::distributed::Vector<double> vector(
dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
VectorTools::interpolate(dof_handler, fu, vector);
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.refine_global(1);
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.refine_global(1);
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- Utilities::MPI::all_gather(tr.get_communicator(),
+ Utilities::MPI::all_gather(tr.get_mpi_communicator(),
tr.n_locally_owned_active_cells());
if (myid == 0)
for (unsigned int p = 0; p < numproc; ++p)
}
const IndexSet relevant_set = DoFTools::extract_locally_relevant_dofs(dofh);
- TrilinosWrappers::MPI::Vector x_rel(relevant_set, dofh.get_communicator());
+ TrilinosWrappers::MPI::Vector x_rel(relevant_set,
+ dofh.get_mpi_communicator());
{
TrilinosWrappers::MPI::Vector interpolated(dofh.locally_owned_dofs(),
- dofh.get_communicator());
+ dofh.get_mpi_communicator());
VectorTools::interpolate(dofh, LinearFunction<dim>(), interpolated);
x_rel = interpolated;
}
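For orientation, the ghosted-interpolation pattern this hunk patches can be written out as a small self-contained helper. This is only a hedged sketch, not part of the patch: the helper name interpolate_to_ghosted_vector is made up, the test-local LinearFunction is replaced by a generic Function<dim>, and the usual deal.II headers (dofs/dof_tools.h, lac/trilinos_vector.h, numerics/vector_tools.h) plus using namespace dealii are assumed.

template <int dim>
TrilinosWrappers::MPI::Vector
interpolate_to_ghosted_vector(const DoFHandler<dim> &dofh,
                              const Function<dim>   &fu)
{
  const IndexSet owned_dofs    = dofh.locally_owned_dofs();
  const IndexSet relevant_dofs = DoFTools::extract_locally_relevant_dofs(dofh);

  // Ghosted vector; the communicator now comes from get_mpi_communicator().
  TrilinosWrappers::MPI::Vector x_rel(relevant_dofs,
                                      dofh.get_mpi_communicator());
  {
    // Interpolate into a vector holding only locally owned entries ...
    TrilinosWrappers::MPI::Vector interpolated(owned_dofs,
                                               dofh.get_mpi_communicator());
    VectorTools::interpolate(dofh, fu, interpolated);
    // ... and copy it over; the assignment also imports the ghost entries.
    x_rel = interpolated;
  }
  return x_rel;
}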
// ------ verify -----
std::vector<IndexSet> locally_owned_dofs_per_processor =
- Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+ Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+ dh.locally_owned_dofs());
const IndexSet locally_active_dofs =
DoFTools::extract_locally_active_dofs(dh);
// ------ verify -----
std::vector<IndexSet> locally_owned_dofs_per_processor =
- Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+ Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+ dh.locally_owned_dofs());
const IndexSet locally_active_dofs =
DoFTools::extract_locally_active_dofs(dh);
// ------ verify -----
std::vector<IndexSet> locally_owned_dofs_per_processor =
- Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+ Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+ dh.locally_owned_dofs());
const IndexSet locally_active_dofs =
DoFTools::extract_locally_active_dofs(dh);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
for (const auto &cell :
dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
count[cell->active_fe_index()]++;
- Utilities::MPI::sum(count, tria.get_communicator(), count);
+ Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
deallog << "fe count:" << count << std::endl;
#ifdef DEBUG
for (const auto &cell :
dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
count[cell->active_fe_index()]++;
- Utilities::MPI::sum(count, tria.get_communicator(), count);
+ Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
deallog << "cycle:" << i << ", fe count:" << count << std::endl;
}
1,
MPI_INT,
MPI_SUM,
- triangulation.get_communicator());
+ triangulation.get_mpi_communicator());
Assert(sum_of_pairs_global > 0, ExcInternalError());
for (it = face_map.begin(); it != face_map.end(); ++it)
{
// We have not dealt with chains of constraints on ghost cells yet.
// Thus, we are content with verifying their consistency for now.
std::vector<IndexSet> locally_owned_dofs_per_processor =
- Utilities::MPI::all_gather(dof_handler.get_communicator(),
+ Utilities::MPI::all_gather(dof_handler.get_mpi_communicator(),
dof_handler.locally_owned_dofs());
const IndexSet locally_active_dofs =
LinearAlgebra::distributed::Vector<double> solution;
solution.reinit(locally_owned_dofs,
locally_relevant_dofs,
- dofh.get_communicator());
+ dofh.get_mpi_communicator());
for (unsigned int i = 0; i < solution.size(); ++i)
if (locally_owned_dofs.is_element(i))
solution.update_ghost_values();
double l1_norm = solution.l1_norm();
- if (Utilities::MPI::this_mpi_process(dofh.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(dofh.get_mpi_communicator()) == 0)
deallog << "pre refinement l1=" << l1_norm << std::endl;
// set refine/coarsen flags manually
solution.reinit(locally_owned_dofs,
locally_relevant_dofs,
- dofh.get_communicator());
+ dofh.get_mpi_communicator());
soltrans.interpolate(solution);
l1_norm = solution.l1_norm();
- if (Utilities::MPI::this_mpi_process(dofh.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(dofh.get_mpi_communicator()) == 0)
deallog << "post refinement l1=" << l1_norm << std::endl;
// make sure no processor is hanging
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
<< " ";
const auto n_locally_owned_active_cells_per_processor =
Utilities::MPI::all_gather(
- triangulation.get_communicator(),
+ triangulation.get_mpi_communicator(),
triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
// We did not think about hp-constraints on ghost cells yet.
// Thus, we are content with verifying their consistency for now.
std::vector<IndexSet> locally_owned_dofs_per_processor =
- Utilities::MPI::all_gather(dof_handler.get_communicator(),
+ Utilities::MPI::all_gather(dof_handler.get_mpi_communicator(),
dof_handler.locally_owned_dofs());
const IndexSet locally_active_dofs =
#if 0
data_out.write_vtu_with_pvtu_record(
- "./", "result", counter++, tria.get_communicator(), 3, 1);
+ "./", "result", counter++, tria.get_mpi_communicator(), 3, 1);
#else
deallog << std::endl;
data_out.write_vtk(deallog.get_file_stream());
return std::make_shared<Utilities::MPI::Partitioner>(
dof_handler.locally_owned_dofs(),
DoFTools::extract_locally_active_dofs(dof_handler),
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
&(dof_handler.get_triangulation()));
MPI_Comm comm =
- dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ dist_tria != nullptr ? dist_tria->get_mpi_communicator() : MPI_COMM_SELF;
vec.reinit(level == numbers::invalid_unsigned_int ?
dof_handler.locally_owned_dofs() :
DoFHandler<dim> dof_handler(tria);
dof_handler.distribute_dofs(fe);
- const auto mpi_comm = dof_handler.get_communicator();
+ const auto mpi_comm = dof_handler.get_mpi_communicator();
const IndexSet &owned_dofs = dof_handler.locally_owned_dofs();
const IndexSet relevant_dofs =
deallog << "n_dofs=" << dof_handler.n_dofs() << std::endl;
- const MPI_Comm mpi_communicator = triangulation.get_communicator();
+ const MPI_Comm mpi_communicator = triangulation.get_mpi_communicator();
const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
const IndexSet locally_relevant_dofs =
DoFTools::extract_locally_relevant_dofs(dof_handler);
2,
0);
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
{
auto particle_it = particle_handler.insert_particle(particle1, cell1);
particle_it->set_properties(properties);
Point<spacedim> position;
Point<dim> reference_position;
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
for (unsigned int i = 0; i < dim; ++i)
position[i] = 0.475;
else
Particles::Particle<dim, spacedim> particle(
position,
reference_position,
- Utilities::MPI::this_mpi_process(tr.get_communicator()));
+ Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
// We give a local random cell hint to check that sorting and
// transferring ghost particles works.
deallog << "Before ghost exchange: "
<< particle_handler.n_locally_owned_particles()
<< " locally owned particles on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
deallog << "Before ghost exchange: "
<< particle_handler.get_property_pool().n_registered_slots()
<< " stored particles on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
particle_handler.exchange_ghost_particles();
deallog << "After ghost exchange: "
<< particle_handler.n_locally_owned_particles()
<< " locally owned particles on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
deallog << "After ghost exchange: "
<< particle_handler.get_property_pool().n_registered_slots()
<< " stored particles on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
}
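The particle tests touched here all follow the same recipe: derive the rank from the triangulation's get_mpi_communicator(), place a particle at a rank-dependent position, hand it a locally owned cell as a hint, then let sorting and ghost exchange do the rest. A condensed, hedged sketch of that recipe follows; the helper name insert_rank_dependent_particle is made up, the positions 0.475/0.525 are taken from the hunks above, and the usual deal.II headers (particles/particle_handler.h, distributed/tria_base.h, base/mpi.h) plus using namespace dealii are assumed.

template <int dim, int spacedim>
void
insert_rank_dependent_particle(
  const parallel::TriangulationBase<dim, spacedim> &tr,
  Particles::ParticleHandler<dim, spacedim>        &particle_handler)
{
  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(tr.get_mpi_communicator());

  // Rank-dependent position, as in the tests above.
  Point<spacedim> position;
  Point<dim>      reference_position;
  for (unsigned int i = 0; i < dim; ++i)
    position[i] = (my_rank == 0) ? 0.475 : 0.525;

  Particles::Particle<dim, spacedim> particle(position,
                                              reference_position,
                                              my_rank);

  // Insert on the first locally owned cell as a hint; sorting fixes it up.
  auto cell = tr.begin_active();
  while (!cell->is_locally_owned())
    ++cell;
  particle_handler.insert_particle(particle, cell);

  particle_handler.sort_particles_into_subdomains_and_cells();
  particle_handler.exchange_ghost_particles();
}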
Point<spacedim> position;
Point<dim> reference_position;
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
position[0] = 0.001;
else
position[0] = 0.999;
Particles::Particle<dim, spacedim> particle(
position,
reference_position,
- Utilities::MPI::this_mpi_process(tr.get_communicator()));
+ Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
// We give a local random cell hint to check that sorting and
// transferring ghost particles works.
// particles
Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping);
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
{
std::vector<Point<spacedim>> position(2);
std::vector<Point<dim>> reference_position(2);
deallog << "Before sort particle id " << particle.get_id()
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::flush << std::endl;
}
deallog << "After sort particle id " << particle.get_id()
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::flush << std::endl;
// Move all points up by 0.5. This will change cell for particle 1, and will
deallog << "After shift particle id " << particle.get_id()
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::flush << std::endl;
}
Point<spacedim> position;
Point<dim> reference_position;
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
for (unsigned int i = 0; i < dim; ++i)
position[i] = 0.475;
else
Particles::Particle<dim, spacedim> particle(
position,
reference_position,
- Utilities::MPI::this_mpi_process(tr.get_communicator()));
+ Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
// We give a local random cell hint to check that sorting and
// transferring ghost particles works.
++particle)
deallog << "Particle id " << particle->get_id()
<< " is local particle on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
for (auto particle = particle_handler.begin_ghost();
++particle)
deallog << "Particle id " << particle->get_id()
<< " is ghost particle on process "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
}
Point<spacedim> position;
Point<dim> reference_position;
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
for (unsigned int i = 0; i < dim; ++i)
position[i] = 0.475;
else
Particles::Particle<dim, spacedim> particle(
position,
reference_position,
- Utilities::MPI::this_mpi_process(tr.get_communicator()));
+ Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
typename Triangulation<dim, spacedim>::active_cell_iterator cell =
tr.begin_active();
while (!cell->is_locally_owned())
++particle)
{
particle->get_properties()[0] =
- 10 + Utilities::MPI::this_mpi_process(tr.get_communicator());
+ 10 + Utilities::MPI::this_mpi_process(tr.get_mpi_communicator());
particle->get_properties()[1] =
- 100 + Utilities::MPI::this_mpi_process(tr.get_communicator());
+ 100 + Utilities::MPI::this_mpi_process(tr.get_mpi_communicator());
}
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is local on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
for (auto particle = particle_handler.begin_ghost();
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is ghost on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
deallog << "Modifying particles positions and properties" << std::endl;
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is local on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
for (auto particle = particle_handler.begin_ghost();
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is ghost on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
}
for (unsigned int p = 0; p < n_particles; ++p)
{
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
{
for (unsigned int i = 0; i < dim; ++i)
position[i] = 0.410 + 0.01 * p;
Particles::Particle<dim, spacedim> particle(
position,
reference_position,
- Utilities::MPI::this_mpi_process(tr.get_communicator()) *
+ Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) *
n_particles +
p);
typename Triangulation<dim, spacedim>::active_cell_iterator cell =
++particle)
{
particle->get_properties()[0] =
- 1000 + 100 * Utilities::MPI::this_mpi_process(tr.get_communicator()) +
+ 1000 +
+ 100 * Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) +
10 * particle->get_id();
particle->get_properties()[1] =
- 2000 + 100 * Utilities::MPI::this_mpi_process(tr.get_communicator()) +
+ 2000 +
+ 100 * Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) +
10 * particle->get_id();
counter++;
}
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is local on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
for (auto particle = particle_handler.begin_ghost();
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is ghost on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
deallog << "Modifying particles positions and properties" << std::endl;
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is local on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
for (auto particle = particle_handler.begin_ghost();
<< " location : " << particle->get_location()
<< " property : " << particle->get_properties()[0] << " and "
<< particle->get_properties()[1] << " is ghost on process : "
- << Utilities::MPI::this_mpi_process(tr.get_communicator())
+ << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
<< std::endl;
}
Particles::ParticleHandler<dim, spacedim> particle_handler(tria_pft,
mapping);
- if (Utilities::MPI::this_mpi_process(tria_pft.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tria_pft.get_mpi_communicator()) == 0)
{
std::vector<Point<spacedim>> position(2);
std::vector<Point<dim>> reference_position(2);
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
<< Utilities::MPI::this_mpi_process(
- tria_pft.get_communicator())
+ tria_pft.get_mpi_communicator())
<< std::flush << std::endl;
}
deallog << "After sort particle id " << particle.get_id()
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
- << Utilities::MPI::this_mpi_process(tria_pft.get_communicator())
+ << Utilities::MPI::this_mpi_process(
+ tria_pft.get_mpi_communicator())
<< std::flush << std::endl;
// Move all points up by 0.5. This will change cell for particle 1 and will
deallog << "After shift particle id " << particle.get_id()
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
- << Utilities::MPI::this_mpi_process(tria_pft.get_communicator())
+ << Utilities::MPI::this_mpi_process(
+ tria_pft.get_mpi_communicator())
<< std::flush << std::endl;
}
Particles::ParticleHandler<dim, spacedim> particle_handler(tria_shared,
mapping);
- if (Utilities::MPI::this_mpi_process(tria_shared.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tria_shared.get_mpi_communicator()) ==
+ 0)
{
std::vector<Point<spacedim>> position(2);
std::vector<Point<dim>> reference_position(2);
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
<< Utilities::MPI::this_mpi_process(
- tria_shared.get_communicator())
+ tria_shared.get_mpi_communicator())
<< std::flush << std::endl;
}
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
<< Utilities::MPI::this_mpi_process(
- tria_shared.get_communicator())
+ tria_shared.get_mpi_communicator())
<< std::flush << std::endl;
// Move all points up by 0.5. This will change cell for particle 1 and will
<< " is in cell " << particle.get_surrounding_cell()
<< " on process "
<< Utilities::MPI::this_mpi_process(
- tria_shared.get_communicator())
+ tria_shared.get_mpi_communicator())
<< std::flush << std::endl;
}
std::vector<Point<spacedim>> position(1);
- if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+ if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
for (unsigned int i = 0; i < dim; ++i)
position[0][i] = 2;
GridTools::compute_mesh_predicate_bounding_box(
triangulation, IteratorFilters::LocallyOwnedCell());
const auto global_bounding_boxes =
- Utilities::MPI::all_gather(triangulation.get_communicator(),
+ Utilities::MPI::all_gather(triangulation.get_mpi_communicator(),
local_bounding_box);
Particles::Generators::quadrature_points(triangulation,
return std::make_shared<const Utilities::MPI::Partitioner>(
dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
return std::make_shared<const Utilities::MPI::Partitioner>(
dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
// gather bounding boxes of other processes
const auto global_bboxes =
- Utilities::MPI::all_gather(tria.get_communicator(), local_reduced_box);
+ Utilities::MPI::all_gather(tria.get_mpi_communicator(), local_reduced_box);
const GridTools::Cache<dim> cache(tria, mapping);
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
return std::make_shared<const Utilities::MPI::Partitioner>(
dof_handler.locally_owned_dofs(),
locally_relevant_dofs,
- dof_handler.get_communicator());
+ dof_handler.get_mpi_communicator());
}
void
MeshType::space_dimension> *>(
&(mesh.get_triangulation()));
- return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+ return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
MPI_COMM_SELF;
}
for (const auto &cell :
dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
count[cell->active_fe_index()]++;
- Utilities::MPI::sum(count, tria.get_communicator(), count);
+ Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
deallog << "fe count:" << count << std::endl;
#ifdef DEBUG
for (const auto &cell :
dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
count[cell->active_fe_index()]++;
- Utilities::MPI::sum(count, tria.get_communicator(), count);
+ Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
deallog << "cycle:" << i << ", fe count:" << count << std::endl;
}
{
if (auto tria_ =
dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(&tria))
- return tria_->get_communicator();
+ return tria_->get_mpi_communicator();
return MPI_COMM_SELF;
}
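The hunk just above, like the MeshType overloads further up, reduces to one pattern: try to cast the triangulation to a parallel one and fall back to MPI_COMM_SELF for a serial mesh. Spelled out as a stand-alone helper, purely as a hedged illustration (the function name get_mpi_comm_or_self is made up; the cast and the fallback mirror the code above):

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria_base.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

// Return the triangulation's MPI communicator, or MPI_COMM_SELF for a
// serial Triangulation that does not carry one.
template <int dim, int spacedim = dim>
MPI_Comm
get_mpi_comm_or_self(const Triangulation<dim, spacedim> &tria)
{
  if (const auto *parallel_tria =
        dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
          &tria))
    return parallel_tria->get_mpi_communicator();

  return MPI_COMM_SELF;
}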
template <typename VectorType,
std::enable_if_t<is_serial_vector<VectorType>::value, int> = 0>
void
-test_get_communicator()
+test_get_mpi_communicator()
{
auto vector = create_test_vector<VectorType>();
auto n_vector = make_nvector_view(vector
template <typename VectorType,
std::enable_if_t<!is_serial_vector<VectorType>::value, int> = 0>
void
-test_get_communicator()
+test_get_mpi_communicator()
{
auto vector = create_test_vector<VectorType>();
auto n_vector = make_nvector_view(vector
// test vector operations
test_clone<VectorType>();
test_destroy<VectorType>();
- test_get_communicator<VectorType>();
+ test_get_mpi_communicator<VectorType>();
test_length<VectorType>();
test_linear_sum<VectorType>();
test_linear_combination<VectorType>();
for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
{
v[level].reinit(mg_dof.locally_owned_mg_dofs(level),
- tria->get_communicator());
+ tria->get_mpi_communicator());
}
}
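Taken together, every hunk in this section is the same mechanical substitution: the communicator is obtained through get_mpi_communicator() and then passed unchanged to the familiar MPI utilities. A short hedged sketch of the typical call sites after the rename, assuming a DoFHandler dof_handler whose dofs have already been distributed, the usual deal.II headers (base/mpi.h, dofs/dof_tools.h, lac/la_parallel_vector.h), and using namespace dealii; the function name report_ownership is made up.

template <int dim>
void
report_ownership(const DoFHandler<dim> &dof_handler)
{
  const MPI_Comm comm = dof_handler.get_mpi_communicator();

  // Rank/size queries behave exactly as before the rename.
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Gather the locally owned index sets of all processes ...
  const std::vector<IndexSet> owned_per_process =
    Utilities::MPI::all_gather(comm, dof_handler.locally_owned_dofs());

  // ... and set up a ghosted vector on the same communicator.
  LinearAlgebra::distributed::Vector<double> solution(
    dof_handler.locally_owned_dofs(),
    DoFTools::extract_locally_relevant_dofs(dof_handler),
    comm);

  if (my_rank == 0)
    deallog << "processes: " << n_ranks
            << ", global dofs: " << solution.size()
            << ", owned by rank 0: " << owned_per_process[0].n_elements()
            << std::endl;
}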