write_hdf5_parallel(const std::vector<Patch<dim, spacedim>> &patches,
const DataOutFilter & data_filter,
const std::string & filename,
- MPI_Comm comm);
+ const MPI_Comm & comm);
/**
* Write the data in @p data_filter to HDF5 file(s). If @p write_mesh_file is
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string &solution_filename,
- MPI_Comm comm);
+ const MPI_Comm & comm);
/**
* DataOutFilter is an intermediate data format that reduces the amount of
* DataOutInterface::write_vtu().
*/
void
- write_vtu_in_parallel(const std::string &filename, MPI_Comm comm) const;
+ write_vtu_in_parallel(const std::string &filename,
+ const MPI_Comm & comm) const;
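// A minimal usage sketch of the declaration above: `data_out` stands for any
// DataOutInterface-derived object (e.g. a DataOut on which build_patches() has
// already been called), and the file name is a placeholder.
template <int dim, int spacedim>
void
output_vtu_sketch(const DataOutInterface<dim, spacedim> &data_out,
                  const MPI_Comm &                       comm)
{
  // Collective call: every rank of @p comm participates and the data of all
  // ranks ends up in the single file "solution.vtu".
  data_out.write_vtu_in_parallel("solution.vtu", comm);
}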
/**
* Some visualization programs, such as ParaView, can read several separate
create_xdmf_entry(const DataOutBase::DataOutFilter &data_filter,
const std::string & h5_filename,
const double cur_time,
- MPI_Comm comm) const;
+ const MPI_Comm & comm) const;
/**
* Create an XDMFEntry based on the data in the data_filter. This assumes
const std::string & h5_mesh_filename,
const std::string & h5_solution_filename,
const double cur_time,
- MPI_Comm comm) const;
+ const MPI_Comm & comm) const;
/**
* Write an XDMF file based on the provided vector of XDMFEntry objects.
void
write_xdmf_file(const std::vector<XDMFEntry> &entries,
const std::string & filename,
- MPI_Comm comm) const;
+ const MPI_Comm & comm) const;
/**
* Write the data in @p data_filter to a single HDF5 file containing both the
void
write_hdf5_parallel(const DataOutBase::DataOutFilter &data_filter,
const std::string & filename,
- MPI_Comm comm) const;
+ const MPI_Comm & comm) const;
/**
* Write the data in @p data_filter to HDF5 file(s). If @p write_mesh_file is
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- MPI_Comm comm) const;
+ const MPI_Comm & comm) const;
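// A sketch of how the HDF5/XDMF members above are typically combined. The
// names data_out, xdmf_entries, time, comm and the file names are illustrative
// assumptions; the member functions are the ones declared above.
template <int dim, int spacedim>
void
write_hdf5_xdmf_sketch(const DataOutInterface<dim, spacedim> &data_out,
                       std::vector<XDMFEntry> &               xdmf_entries,
                       const double                           time,
                       const MPI_Comm &                       comm)
{
  // Filter duplicate vertices and request HDF5/XDMF-compatible data.
  DataOutBase::DataOutFilterFlags flags(true, true);
  DataOutBase::DataOutFilter      data_filter(flags);
  data_out.write_filtered_data(data_filter);

  // Write mesh and solution collectively into a single HDF5 file.
  data_out.write_hdf5_parallel(data_filter, "solution.h5", comm);

  // Record a light-weight entry for this time step and rewrite the XDMF
  // index file that ParaView and VisIt read.
  xdmf_entries.push_back(
    data_out.create_xdmf_entry(data_filter, "solution.h5", time, comm));
  data_out.write_xdmf_file(xdmf_entries, "solution.xdmf", comm);
}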
/**
* DataOutFilter is an intermediate data format that reduces the amount of
* MPI support (several processes access the same HDF5 file).
* File::File(const std::string &, const FileAccessMode)
* opens/creates an HDF5 file for serial operations.
- * File::File(const std::string &, const FileAccessMode, const MPI_Comm)
+ * File::File(const std::string &, const FileAccessMode, const MPI_Comm &)
* creates or opens an HDF5 file in parallel using MPI. The HDF5 calls that
* modify the structure of the file are always collective, whereas writing
* and reading raw data in a dataset can be done independently or collectively.
*/
File(const std::string & name,
const FileAccessMode mode,
- const MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
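// A minimal usage sketch of the parallel constructor above, using the
// unqualified names visible here; the file name, the helper name, and the
// communicator are illustrative assumptions.
void
parallel_file_sketch(const MPI_Comm &comm)
{
  // Every process of @p comm creates/opens the same file. Calls that modify
  // the file structure are collective; raw dataset reads and writes need not
  // be.
  File data_file("data.h5", FileAccessMode::create, comm);
}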
private:
/**
* Delegating internal constructor. The public constructors
- * File(const std::string &, const MPI_Comm, const Mode);
+ * File(const std::string &, const FileAccessMode, const MPI_Comm &);
* and
* File(const std::string &, const FileAccessMode)
* should be used to open or create HDF5 files.
File(const std::string & name,
const FileAccessMode mode,
const bool mpi,
- const MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
};
namespace internal
* in the communicator.
*/
void
- lock(MPI_Comm comm);
+ lock(const MPI_Comm &comm);
/**
* Release the lock.
* in the communicator.
*/
void
- unlock(MPI_Comm comm);
+ unlock(const MPI_Comm &comm);
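// A minimal usage sketch of lock()/unlock(); the mutex object, the
// communicator, and the guarded communication are illustrative assumptions.
// Every process of the communicator must execute both calls.
void
critical_section_sketch(CollectiveMutex &mutex, const MPI_Comm &comm)
{
  mutex.lock(comm);
  // ... MPI communication on @p comm that must not interleave with other
  // critical sections guarded by the same mutex ...
  mutex.unlock(comm);
}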
private:
/**
*/
Partitioner(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm communicator);
+ const MPI_Comm & communicator);
/**
* Constructor with index set arguments. This constructor creates a
*/
Partitioner(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices_in,
- const MPI_Comm communicator_in);
+ const MPI_Comm &communicator_in);
/**
* Constructor with one index set argument. This constructor creates a
* constructor with two index sets.
*/
Partitioner(const IndexSet &locally_owned_indices,
- const MPI_Comm communicator_in);
+ const MPI_Comm &communicator_in);
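// A minimal construction sketch using the two-IndexSet constructor above; the
// index sets, the communicator, and the helper name are illustrative
// assumptions.
void
partitioner_sketch(const IndexSet &locally_owned,
                   const IndexSet &ghost_indices,
                   const MPI_Comm &comm)
{
  // The resulting object describes which global indices this process owns and
  // which ghost indices it imports from other processes of @p comm.
  Partitioner partitioner(locally_owned, ghost_indices, comm);
}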
/**
* Reinitialize the communication pattern. The first argument
* number of cores
* in the @p mpi_communicator.
*/
- ProcessGrid(MPI_Comm mpi_communicator,
+ ProcessGrid(const MPI_Comm & mpi_communicator,
const unsigned int n_rows,
const unsigned int n_columns);
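// A minimal construction sketch of the constructor above; the 2x2 grid shape,
// the communicator, and the helper name are illustrative assumptions.
void
process_grid_sketch(const MPI_Comm &comm)
{
  // Request a 2x2 process grid explicitly, so @p comm needs at least four
  // ranks; ScaLAPACK-backed matrices are then distributed over this grid.
  ProcessGrid grid(comm, 2, 2);
}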
* and the @p mpi_communicator with 11 cores will result in a $3\times 3$
* process grid.
*/
- ProcessGrid(MPI_Comm mpi_communicator,
+ ProcessGrid(const MPI_Comm & mpi_communicator,
const unsigned int n_rows_matrix,
const unsigned int n_columns_matrix,
const unsigned int row_block_size,
* A private constructor which takes grid dimensions as a
* <code>std::pair</code>.
*/
- ProcessGrid(MPI_Comm mpi_communicator,
+ ProcessGrid(const MPI_Comm & mpi_communicator,
const std::pair<unsigned int, unsigned int> &grid_dimensions);
/**
* communicator occurs; the extra cost of the synchronization is not
* measured.
*/
- Timer(MPI_Comm mpi_communicator, const bool sync_lap_times = false);
+ Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times = false);
/**
* Return a reference to the data structure containing basic statistics on
* <code>MPI_Barrier</code> call before starting and stopping the timer for
* each section.
*/
- TimerOutput(MPI_Comm mpi_comm,
+ TimerOutput(const MPI_Comm & mpi_comm,
std::ostream & stream,
const OutputFrequency output_frequency,
const OutputType output_type);
* <code>MPI_Barrier</code> call before starting and stopping the timer for
* each section.)
*/
- TimerOutput(MPI_Comm mpi_comm,
+ TimerOutput(const MPI_Comm & mpi_comm,
ConditionalOStream & stream,
const OutputFrequency output_frequency,
const OutputType output_type);
* median is given).
*/
void
- print_wall_time_statistics(const MPI_Comm mpi_comm,
- const double print_quantile = 0.) const;
+ print_wall_time_statistics(const MPI_Comm &mpi_comm,
+ const double print_quantile = 0.) const;
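// A usage sketch combining the constructor and print_wall_time_statistics()
// declared above; the output stream, the section name, and the 10% quantile
// are illustrative assumptions (and <iostream> is assumed to be available).
void
timing_sketch(const MPI_Comm &comm)
{
  TimerOutput timer(comm,
                    std::cout,
                    TimerOutput::summary,
                    TimerOutput::wall_times);
  {
    TimerOutput::Scope scope(timer, "assembly");
    // ... timed work ...
  }
  // Print min/max/average wall times over the ranks of @p comm, plus the
  // 10% and 90% quantiles of each section.
  timer.print_wall_time_statistics(comm, 0.1);
}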
/**
* By calling this function, all output can be disabled. This function
* @param mpi_communicator The MPI communicator to be used for the
* triangulation.
*/
- explicit Triangulation(MPI_Comm mpi_communicator);
+ explicit Triangulation(const MPI_Comm &mpi_communicator);
/**
* Destructor.
std::pair<number, number>
compute_global_min_and_max_at_root(
const dealii::Vector<number> &criteria,
- MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
namespace RefineAndCoarsenFixedNumber
{
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const types::global_cell_index n_target_cells,
- MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
} // namespace RefineAndCoarsenFixedNumber
namespace RefineAndCoarsenFixedFraction
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const double target_error,
- MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
} // namespace RefineAndCoarsenFixedFraction
} // namespace GridRefinement
} // namespace distributed
* Otherwise all non-locally owned cells are considered ghost.
*/
Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing =
(dealii::Triangulation<dim, spacedim>::none),
const bool allow_artificial_cells = false,
* triangulation is partitioned.
*/
explicit Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const Settings settings = default_setting);
class DataTransfer
{
public:
- DataTransfer(MPI_Comm mpi_communicator);
+ DataTransfer(const MPI_Comm &mpi_communicator);
/**
* Prepare data transfer by calling the pack callback functions on each
* the triangulation.
*/
Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<1, spacedim>::none),
const Settings settings = default_setting);
* Constructor.
*/
TriangulationBase(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const bool check_for_distorted_cells = false);
* Constructor.
*/
DistributedTriangulationBase(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const bool check_for_distorted_cells = false);
*/
std::vector<types::global_dof_index>
get_n_locally_owned_dofs_per_processor(
- const MPI_Comm mpi_communicator) const;
+ const MPI_Comm &mpi_communicator) const;
/**
* Return a representation of @p locally_owned_dofs_per_processor both
*/
std::vector<IndexSet>
get_locally_owned_dofs_per_processor(
- const MPI_Comm mpi_communicator) const;
+ const MPI_Comm &mpi_communicator) const;
/**
* Total number of dofs, accumulated over all processors that may
std::vector<std::vector<BoundingBox<spacedim>>>
exchange_local_bounding_boxes(
const std::vector<BoundingBox<spacedim>> &local_bboxes,
- MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
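// A minimal usage sketch of the function above; the local boxes, the
// communicator, and the helper name are illustrative assumptions.
template <int spacedim>
void
bounding_box_exchange_sketch(
  const std::vector<BoundingBox<spacedim>> &my_boxes,
  const MPI_Comm &                          comm)
{
  // global_boxes[p] holds the boxes contributed by rank p of @p comm.
  const std::vector<std::vector<BoundingBox<spacedim>>> global_boxes =
    exchange_local_bounding_boxes(my_boxes, comm);
  (void)global_boxes;
}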
/**
* In this collective operation each process provides a vector
RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
build_global_description_tree(
const std::vector<BoundingBox<spacedim>> &local_description,
- MPI_Comm mpi_communicator);
+ const MPI_Comm & mpi_communicator);
/**
* Collect for a given triangulation all locally relevant vertices that
Description<dim, spacedim>
create_description_from_triangulation(
const dealii::Triangulation<dim, spacedim> &tria,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const TriangulationDescription::Settings settings =
TriangulationDescription::Settings::default_setting,
const unsigned int my_rank_in = numbers::invalid_unsigned_int);
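// A sketch of the typical consumer of the function above: build a description
// from an existing (already partitioned) triangulation and feed it into a
// parallel::fullydistributed::Triangulation. The source triangulation, the
// communicator, and the helper name are illustrative assumptions, and the
// declaration of the fully distributed triangulation class is assumed to be
// available.
template <int dim, int spacedim>
void
fullydistributed_setup_sketch(
  const dealii::Triangulation<dim, spacedim> &source_tria,
  const MPI_Comm &                            comm)
{
  const Description<dim, spacedim> description =
    create_description_from_triangulation(source_tria, comm);

  parallel::fullydistributed::Triangulation<dim, spacedim> tria(comm);
  tria.create_triangulation(description);
}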
const std::function<void(dealii::Triangulation<dim, spacedim> &)>
& serial_grid_generator,
const std::function<void(dealii::Triangulation<dim, spacedim> &,
- const MPI_Comm,
+ const MPI_Comm &,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const int group_size = 1,
const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing =
dealii::Triangulation<dim, spacedim>::none,
bool
is_consistent_in_parallel(const std::vector<IndexSet> &locally_owned_dofs,
const IndexSet & locally_active_dofs,
- const MPI_Comm mpi_communicator,
+ const MPI_Comm & mpi_communicator,
const bool verbose = false) const;
/**
AffineConstraints<number>::is_consistent_in_parallel(
const std::vector<IndexSet> &locally_owned_dofs,
const IndexSet & locally_active_dofs,
- const MPI_Comm mpi_communicator,
+ const MPI_Comm & mpi_communicator,
const bool verbose) const
{
// Helper to return a ConstraintLine object that belongs to row @p row.
*/
BlockVector(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm communicator);
+ const MPI_Comm & communicator);
/**
* Same as above but the ghost indices are assumed to be empty.
*/
BlockVector(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm communicator);
+ const MPI_Comm & communicator);
/**
* Destructor.
template <typename Number>
BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm communicator)
+ const MPI_Comm & communicator)
{
std::vector<size_type> sizes(local_ranges.size());
for (unsigned int i = 0; i < local_ranges.size(); ++i)
template <typename Number>
BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm communicator)
+ const MPI_Comm & communicator)
{
std::vector<size_type> sizes(local_ranges.size());
for (unsigned int i = 0; i < local_ranges.size(); ++i)
*/
Vector(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm communicator);
+ const MPI_Comm &communicator);
/**
* Same constructor as above but without any ghost indices.
*/
- Vector(const IndexSet &local_range, const MPI_Comm communicator);
+ Vector(const IndexSet &local_range, const MPI_Comm &communicator);
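// A minimal construction sketch using the ghosted constructor above; the
// index sets, the communicator, and the helper name are illustrative
// assumptions.
void
ghosted_vector_sketch(const IndexSet &locally_owned,
                      const IndexSet &ghost_indices,
                      const MPI_Comm &comm)
{
  Vector<double> v(locally_owned, ghost_indices, comm);
  // Locally owned entries can be read and written directly; the ghost entries
  // become readable after an update_ghost_values() exchange.
  v.update_ghost_values();
}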
/**
* Create the vector based on the parallel partitioning described in @p
void
reinit(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm communicator);
+ const MPI_Comm &communicator);
/**
* Same as above, but without ghost entries.
*/
void
- reinit(const IndexSet &local_range, const MPI_Comm communicator);
+ reinit(const IndexSet &local_range, const MPI_Comm &communicator);
/**
* Initialize the vector according to the parallel partitioning described in
Vector<Number, MemorySpaceType>::reinit(
const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm communicator)
+ const MPI_Comm &communicator)
{
// set up parallel partitioner with index sets and communicator
reinit(std::make_shared<Utilities::MPI::Partitioner>(
void
Vector<Number, MemorySpaceType>::reinit(
const IndexSet &locally_owned_indices,
- const MPI_Comm communicator)
+ const MPI_Comm &communicator)
{
// set up parallel partitioner with index sets and communicator
reinit(
template <typename Number, typename MemorySpaceType>
Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm communicator)
+ const MPI_Comm &communicator)
: allocated_size(0)
, vector_is_ghosted(false)
, comm_sm(MPI_COMM_SELF)
template <typename Number, typename MemorySpaceType>
Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
- const MPI_Comm communicator)
+ const MPI_Comm &communicator)
: allocated_size(0)
, vector_is_ghosted(false)
, comm_sm(MPI_COMM_SELF)
* Intended to be used with SLEPc objects.
*/
PreconditionJacobi(
- const MPI_Comm communicator,
+ const MPI_Comm & communicator,
const AdditionalData &additional_data = AdditionalData());
/**
* Intended to be used with SLEPc objects.
*/
PreconditionBlockJacobi(
- const MPI_Comm communicator,
+ const MPI_Comm & communicator,
const AdditionalData &additional_data = AdditionalData());
* Intended to be used with SLEPc objects.
*/
PreconditionBoomerAMG(
- const MPI_Comm communicator,
+ const MPI_Comm & communicator,
const AdditionalData &additional_data = AdditionalData());
* @param mpi_comm MPI communicator
*/
ARKode(const AdditionalData &data = AdditionalData(),
- const MPI_Comm mpi_comm = MPI_COMM_WORLD);
+ const MPI_Comm & mpi_comm = MPI_COMM_WORLD);
/**
* Destructor.
* @param mpi_comm MPI communicator
*/
IDA(const AdditionalData &data = AdditionalData(),
- const MPI_Comm mpi_comm = MPI_COMM_WORLD);
+ const MPI_Comm & mpi_comm = MPI_COMM_WORLD);
/**
* Destructor.
* @param mpi_comm MPI communicator
*/
KINSOL(const AdditionalData &data = AdditionalData(),
- const MPI_Comm mpi_comm = MPI_COMM_WORLD);
+ const MPI_Comm & mpi_comm = MPI_COMM_WORLD);
/**
* Destructor.
void
DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
const std::string &filename,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
#ifndef DEAL_II_WITH_MPI
// without MPI fall back to the normal way to write a vtu file:
const DataOutBase::DataOutFilter &data_filter,
const std::string & h5_filename,
const double cur_time,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
return create_xdmf_entry(
data_filter, h5_filename, h5_filename, cur_time, comm);
const std::string & h5_mesh_filename,
const std::string & h5_solution_filename,
const double cur_time,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
unsigned int local_node_cell_count[2], global_node_cell_count[2];
DataOutInterface<dim, spacedim>::write_xdmf_file(
const std::vector<XDMFEntry> &entries,
const std::string & filename,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
#ifdef DEAL_II_WITH_MPI
const int myrank = Utilities::MPI::this_mpi_process(comm);
DataOutInterface<dim, spacedim>::write_hdf5_parallel(
const DataOutBase::DataOutFilter &data_filter,
const std::string & filename,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
DataOutBase::write_hdf5_parallel(get_patches(), data_filter, filename, comm);
}
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- MPI_Comm comm) const
+ const MPI_Comm & comm) const
{
DataOutBase::write_hdf5_parallel(get_patches(),
data_filter,
const std::vector<Patch<dim, spacedim>> &patches,
const DataOutBase::DataOutFilter & data_filter,
const std::string & filename,
- MPI_Comm comm)
+ const MPI_Comm & comm)
{
write_hdf5_parallel(patches, data_filter, true, filename, filename, comm);
}
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- MPI_Comm comm)
+ const MPI_Comm & comm)
{
AssertThrow(
spacedim >= 2,
& patches,
const DataOutFilter &data_filter,
const std::string & filename,
- MPI_Comm comm);
+ const MPI_Comm & comm);
template void
write_filtered_data(
File::File(const std::string & name,
const FileAccessMode mode,
- const MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
: File(name, mode, true, mpi_communicator)
{}
File::File(const std::string & name,
const FileAccessMode mode,
const bool mpi,
- const MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
: Group(name, mpi)
{
hdf5_reference = std::shared_ptr<hid_t>(new hid_t, [](hid_t *pointer) {
void
- CollectiveMutex::lock(MPI_Comm comm)
+ CollectiveMutex::lock(const MPI_Comm &comm)
{
(void)comm;
void
- CollectiveMutex::unlock(MPI_Comm comm)
+ CollectiveMutex::unlock(const MPI_Comm &comm)
{
(void)comm;
Partitioner::Partitioner(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm communicator)
+ const MPI_Comm & communicator)
: global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
communicator))
, locally_owned_range_data(global_size)
Partitioner::Partitioner(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices_in,
- const MPI_Comm communicator_in)
+ const MPI_Comm &communicator_in)
: global_size(
static_cast<types::global_dof_index>(locally_owned_indices.size()))
, n_ghost_indices_data(0)
Partitioner::Partitioner(const IndexSet &locally_owned_indices,
- const MPI_Comm communicator_in)
+ const MPI_Comm &communicator_in)
: global_size(
static_cast<types::global_dof_index>(locally_owned_indices.size()))
, n_ghost_indices_data(0)
* https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
*/
inline std::pair<int, int>
- compute_processor_grid_sizes(MPI_Comm mpi_comm,
+ compute_processor_grid_sizes(const MPI_Comm & mpi_comm,
const unsigned int m,
const unsigned int n,
const unsigned int block_size_m,
namespace MPI
{
ProcessGrid::ProcessGrid(
- MPI_Comm mpi_comm,
+ const MPI_Comm & mpi_comm,
const std::pair<unsigned int, unsigned int> &grid_dimensions)
: mpi_communicator(mpi_comm)
, this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
- ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm,
const unsigned int n_rows_matrix,
const unsigned int n_columns_matrix,
const unsigned int row_block_size,
- ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm,
const unsigned int n_rows,
const unsigned int n_columns)
: ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
-Timer::Timer(MPI_Comm mpi_communicator, const bool sync_lap_times_)
+Timer::Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times_)
: running(false)
, mpi_communicator(mpi_communicator)
, sync_lap_times(sync_lap_times_)
-TimerOutput::TimerOutput(MPI_Comm mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator,
std::ostream & stream,
const OutputFrequency output_frequency,
const OutputType output_type)
-TimerOutput::TimerOutput(MPI_Comm mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator,
ConditionalOStream & stream,
const OutputFrequency output_frequency,
const OutputType output_type)
void
-TimerOutput::print_wall_time_statistics(const MPI_Comm mpi_comm,
- const double quantile) const
+TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
+ const double quantile) const
{
// we are going to change the precision and width of output below. store the
// old values so they get restored when exiting this function
namespace fullydistributed
{
template <int dim, int spacedim>
- Triangulation<dim, spacedim>::Triangulation(MPI_Comm mpi_communicator)
+ Triangulation<dim, spacedim>::Triangulation(
+ const MPI_Comm &mpi_communicator)
: parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
, settings(TriangulationDescription::Settings::default_setting)
, partitioner([](dealii::Triangulation<dim, spacedim> &tria,
template <typename number>
double
compute_global_sum(const dealii::Vector<number> &criteria,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
double my_sum =
std::accumulate(criteria.begin(),
std::pair<number, number>
compute_global_min_and_max_at_root(
const dealii::Vector<number> &criteria,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
// we'd like to compute the global max and min from the local ones in
// one MPI communication. we can do that by taking the elementwise
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const types::global_cell_index n_target_cells,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
double interesting_range[2] = {global_min_and_max.first,
global_min_and_max.second};
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const double target_error,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
double interesting_range[2] = {global_min_and_max.first,
global_min_and_max.second};
\{
template std::pair<S, S>
compute_global_min_and_max_at_root<S>(const dealii::Vector<S> &,
- MPI_Comm);
+ const MPI_Comm &);
namespace RefineAndCoarsenFixedNumber
\{
compute_threshold<S>(const dealii::Vector<S> &,
const std::pair<double, double> &,
const types::global_cell_index,
- MPI_Comm);
+ const MPI_Comm &);
\}
namespace RefineAndCoarsenFixedFraction
\{
compute_threshold<S>(const dealii::Vector<S> &,
const std::pair<double, double> &,
const double,
- MPI_Comm);
+ const MPI_Comm &);
\}
\}
\}
{
template <int dim, int spacedim>
Triangulation<dim, spacedim>::Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool allow_artificial_cells,
template <int dim, int spacedim>
Triangulation<dim, spacedim>::DataTransfer::DataTransfer(
- MPI_Comm mpi_communicator)
+ const MPI_Comm &mpi_communicator)
: mpi_communicator(mpi_communicator)
, variable_size_data_stored(false)
{}
template <int dim, int spacedim>
Triangulation<dim, spacedim>::Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const Settings settings)
template <int spacedim>
Triangulation<1, spacedim>::Triangulation(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
smooth_grid,
const Settings /*settings*/)
{
template <int dim, int spacedim>
TriangulationBase<dim, spacedim>::TriangulationBase(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool check_for_distorted_cells)
template <int dim, int spacedim>
DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
- MPI_Comm mpi_communicator,
+ const MPI_Comm &mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool check_for_distorted_cells)
std::vector<types::global_dof_index>
NumberCache::get_n_locally_owned_dofs_per_processor(
- const MPI_Comm mpi_communicator) const
+ const MPI_Comm &mpi_communicator) const
{
if (n_global_dofs == 0)
return std::vector<types::global_dof_index>();
std::vector<IndexSet>
NumberCache::get_locally_owned_dofs_per_processor(
- const MPI_Comm mpi_communicator) const
+ const MPI_Comm &mpi_communicator) const
{
AssertDimension(locally_owned_dofs.size(), n_global_dofs);
if (n_global_dofs == 0)
std::vector<std::vector<BoundingBox<spacedim>>>
exchange_local_bounding_boxes(
const std::vector<BoundingBox<spacedim>> &local_bboxes,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
#ifndef DEAL_II_WITH_MPI
(void)local_bboxes;
RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
build_global_description_tree(
const std::vector<BoundingBox<spacedim>> &local_description,
- MPI_Comm mpi_communicator)
+ const MPI_Comm & mpi_communicator)
{
#ifndef DEAL_II_WITH_MPI
(void)mpi_communicator;
template std::vector<std::vector<BoundingBox<deal_II_space_dimension>>>
GridTools::exchange_local_bounding_boxes(
- const std::vector<BoundingBox<deal_II_space_dimension>> &, MPI_Comm);
+ const std::vector<BoundingBox<deal_II_space_dimension>> &,
+ const MPI_Comm &);
template std::tuple<std::vector<std::vector<unsigned int>>,
std::map<unsigned int, unsigned int>,
template RTree<
std::pair<BoundingBox<deal_II_space_dimension>, unsigned int>>
GridTools::build_global_description_tree(
- const std::vector<BoundingBox<deal_II_space_dimension>> &, MPI_Comm);
+ const std::vector<BoundingBox<deal_II_space_dimension>> &,
+ const MPI_Comm &);
template Vector<double> GridTools::compute_aspect_ratio_of_cells(
const Mapping<deal_II_space_dimension> &,
Description<dim, spacedim>
create_description_from_triangulation(
const dealii::Triangulation<dim, spacedim> &tria,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const TriangulationDescription::Settings settings,
const unsigned int my_rank_in)
{
const std::function<void(dealii::Triangulation<dim, spacedim> &,
- const MPI_Comm,
+ const MPI_Comm &,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const int group_size,
const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing,
const TriangulationDescription::Settings settings)
create_description_from_triangulation(
const dealii::Triangulation<deal_II_dimension,
deal_II_space_dimension> &tria,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const TriangulationDescription::Settings settings,
const unsigned int my_rank_in);
dealii::Triangulation<deal_II_dimension, deal_II_space_dimension> &,
- const MPI_Comm,
+ const MPI_Comm &,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const int group_size,
const typename Triangulation<deal_II_dimension,
deal_II_space_dimension>::MeshSmoothing
/* ----------------- PreconditionJacobi -------------------- */
- PreconditionJacobi::PreconditionJacobi(const MPI_Comm comm,
+ PreconditionJacobi::PreconditionJacobi(const MPI_Comm & comm,
const AdditionalData &additional_data_)
{
additional_data = additional_data_;
/* ----------------- PreconditionBlockJacobi -------------------- */
PreconditionBlockJacobi::PreconditionBlockJacobi(
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const AdditionalData &additional_data_)
{
additional_data = additional_data_;
PreconditionBoomerAMG::PreconditionBoomerAMG(
- const MPI_Comm comm,
+ const MPI_Comm & comm,
const AdditionalData &additional_data_)
{
additional_data = additional_data_;
const MGConstrainedDoFs,
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
mg_constrained_dofs,
- const MPI_Comm mpi_communicator,
+ const MPI_Comm & mpi_communicator,
const bool transfer_solution_vectors,
std::vector<Table<2, unsigned int>> & copy_indices,
std::vector<Table<2, unsigned int>> & copy_indices_global_mine,
template <typename VectorType>
ARKode<VectorType>::ARKode(const AdditionalData &data,
- const MPI_Comm mpi_comm)
+ const MPI_Comm & mpi_comm)
: data(data)
, arkode_mem(nullptr)
, yy(nullptr)
} // namespace
template <typename VectorType>
- IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm mpi_comm)
+ IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm &mpi_comm)
: data(data)
, ida_mem(nullptr)
, yy(nullptr)
template <typename VectorType>
KINSOL<VectorType>::KINSOL(const AdditionalData &data,
- const MPI_Comm mpi_comm)
+ const MPI_Comm & mpi_comm)
: data(data)
, kinsol_mem(nullptr)
, solution(nullptr)