InverseMatrix(const MatrixType & m,
const PreconditionerType &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
void vmult(TrilinosWrappers::MPI::Vector & dst,
const TrilinosWrappers::MPI::Vector &src) const;
const MatrixType & m,
const PreconditionerType &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: matrix(&m)
, preconditioner(&preconditioner)
, mpi_communicator(&mpi_communicator)
const InverseMatrix<TrilinosWrappers::SparseMatrix,
PreconditionerType> & A_inverse,
const IndexSet & owned_pres,
- const MPI_Comm &mpi_communicator);
+ const MPI_Comm mpi_communicator);
void vmult(TrilinosWrappers::MPI::Vector & dst,
const TrilinosWrappers::MPI::Vector &src) const;
const InverseMatrix<TrilinosWrappers::SparseMatrix, PreconditionerType>
& A_inverse,
const IndexSet &owned_vel,
- const MPI_Comm &mpi_communicator)
+ const MPI_Comm mpi_communicator)
: system_matrix(&system_matrix)
, A_inverse(&A_inverse)
, tmp1(owned_vel, mpi_communicator)
*/
template <int dim, typename Number>
DistributedTree(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::vector<BoundingBox<dim, Number>> &bounding_boxes);
/**
* in @p points are local to the MPI process.
*/
template <int dim, typename Number>
- DistributedTree(const MPI_Comm & comm,
+ DistributedTree(const MPI_Comm comm,
const std::vector<Point<dim, Number>> &points);
/**
template <int dim, typename Number>
DistributedTree::DistributedTree(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::vector<BoundingBox<dim, Number>> &bounding_boxes)
: distributed_tree(comm,
Kokkos::DefaultHostExecutionSpace{},
template <int dim, typename Number>
DistributedTree::DistributedTree(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::vector<Point<dim, Number>> &points)
: distributed_tree(comm, Kokkos::DefaultHostExecutionSpace{}, points)
{}
* the destructor is called.
*/
void
- replicate_across_communicator(const MPI_Comm & communicator,
+ replicate_across_communicator(const MPI_Comm communicator,
const unsigned int root_process);
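For illustration, a minimal caller-side sketch of the member declared above (a sketch only; it assumes MPI has been initialized and that only the root rank has filled the vector beforehand):

  #include <deal.II/base/aligned_vector.h>
  #include <deal.II/base/mpi.h>

  void share_values(const MPI_Comm communicator)
  {
    dealii::AlignedVector<double> values;
    if (dealii::Utilities::MPI::this_mpi_process(communicator) == 0)
      values.resize(100, 3.14); // contents initially exist only on rank 0
    // Afterwards every rank in 'communicator' holds a copy of rank 0's data:
    values.replicate_across_communicator(communicator, /*root_process=*/0);
  }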
/**
template <class T>
inline void
-AlignedVector<T>::replicate_across_communicator(const MPI_Comm & communicator,
+AlignedVector<T>::replicate_across_communicator(const MPI_Comm communicator,
const unsigned int root_process)
{
# ifdef DEAL_II_WITH_MPI
virtual void
reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator) = 0;
+ const MPI_Comm communicator) = 0;
/**
* Return the underlying MPI communicator.
& nonscalar_data_ranges,
const Deal_II_IntermediateFlags &flags,
const std::string & filename,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const CompressionLevel compression);
/**
const DataOutFilter & data_filter,
const DataOutBase::Hdf5Flags & flags,
const std::string & filename,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* Write the data in @p data_filter to HDF5 file(s). If @p write_mesh_file is
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string &solution_filename,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* DataOutFilter is an intermediate data format that reduces the amount of
* DataOutInterface::write_vtu().
*/
void
- write_vtu_in_parallel(const std::string &filename,
- const MPI_Comm & comm) const;
+ write_vtu_in_parallel(const std::string &filename, const MPI_Comm comm) const;
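A caller-side sketch of the signature above (assuming a DataOut object that has already been set up via attach_dof_handler(), add_data_vector(), and build_patches(); the file name is made up):

  #include <deal.II/numerics/data_out.h>

  template <int dim>
  void write_results(const dealii::DataOut<dim> &data_out, const MPI_Comm comm)
  {
    // All ranks in 'comm' write collectively into a single file.
    data_out.write_vtu_in_parallel("solution.vtu", comm);
  }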
/**
* Some visualization programs, such as ParaView, can read several separate
const std::string &directory,
const std::string &filename_without_extension,
const unsigned int counter,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const unsigned int n_digits_for_counter = numbers::invalid_unsigned_int,
const unsigned int n_groups = 0) const;
void
write_deal_II_intermediate_in_parallel(
const std::string & filename,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const DataOutBase::CompressionLevel compression) const;
/**
create_xdmf_entry(const DataOutBase::DataOutFilter &data_filter,
const std::string & h5_filename,
const double cur_time,
- const MPI_Comm & comm) const;
+ const MPI_Comm comm) const;
/**
* Create an XDMFEntry based on the data in the data_filter. This assumes
const std::string & h5_mesh_filename,
const std::string & h5_solution_filename,
const double cur_time,
- const MPI_Comm & comm) const;
+ const MPI_Comm comm) const;
/**
* Write an XDMF file based on the provided vector of XDMFEntry objects.
void
write_xdmf_file(const std::vector<XDMFEntry> &entries,
const std::string & filename,
- const MPI_Comm & comm) const;
+ const MPI_Comm comm) const;
/**
* Write the data in @p data_filter to a single HDF5 file containing both the
void
write_hdf5_parallel(const DataOutBase::DataOutFilter &data_filter,
const std::string & filename,
- const MPI_Comm & comm) const;
+ const MPI_Comm comm) const;
/**
* Write the data in data_filter to HDF5 file(s). If write_mesh_file is
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- const MPI_Comm & comm) const;
+ const MPI_Comm comm) const;
/**
* DataOutFilter is an intermediate data format that reduces the amount of
* MPI support (several processes access the same HDF5 file).
* File::File(const std::string &, const FileAccessMode)
* opens/creates an HDF5 file for serial operations.
- * File::File(const std::string &, const FileAccessMode, const MPI_Comm &)
+ * File::File(const std::string &, const FileAccessMode, const MPI_Comm)
* creates or opens an HDF5 file in parallel using MPI. The HDF5 calls that
* modify the structure of the file are always collective, whereas writing
* and reading raw data in a dataset can be done independently or collectively.
*/
File(const std::string & name,
const FileAccessMode mode,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
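A hedged sketch of the parallel constructor described above (assuming deal.II was configured with HDF5 and MPI; the file name is hypothetical, and FileAccessMode::create is assumed to be the mode used when creating a new file):

  #include <deal.II/base/hdf5.h>

  void create_shared_file(const MPI_Comm comm)
  {
    // Collective call: every rank in 'comm' participates in creating the file.
    dealii::HDF5::File file("data.h5",
                            dealii::HDF5::File::FileAccessMode::create,
                            comm);
  }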
private:
/**
* Delegation internal constructor.
- * File(const std::string &, const MPI_Comm &, const Mode);
+ * File(const std::string &, const MPI_Comm, const Mode);
* and
* File(const std::string &, const Mode)
* should be used to open or create HDF5 files.
File(const std::string & name,
const FileAccessMode mode,
const bool mpi,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
};
namespace internal
* is complete.
*/
bool
- is_ascending_and_one_to_one(const MPI_Comm &communicator) const;
+ is_ascending_and_one_to_one(const MPI_Comm communicator) const;
/**
* Return the number of elements stored in this index set.
* vector, e.g. for extracting only certain solution components.
*/
Epetra_Map
- make_trilinos_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool overlapping = false) const;
+ make_trilinos_map(const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool overlapping = false) const;
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
Tpetra::Map<int, types::signed_global_dof_index>
- make_tpetra_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
- const bool overlapping = false) const;
+ make_tpetra_map(const MPI_Comm communicator = MPI_COMM_WORLD,
+ const bool overlapping = false) const;
# endif
#endif
#ifdef DEAL_II_WITH_PETSC
IS
- make_petsc_is(const MPI_Comm &communicator = MPI_COMM_WORLD) const;
+ make_petsc_is(const MPI_Comm communicator = MPI_COMM_WORLD) const;
#endif
* only one process and the function returns 1.
*/
unsigned int
- n_mpi_processes(const MPI_Comm &mpi_communicator);
+ n_mpi_processes(const MPI_Comm mpi_communicator);
/**
* Return the
* than) the number of all processes (given by get_n_mpi_processes()).
*/
unsigned int
- this_mpi_process(const MPI_Comm &mpi_communicator);
+ this_mpi_process(const MPI_Comm mpi_communicator);
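The two query functions above are typically used together; a minimal sketch:

  #include <deal.II/base/mpi.h>

  #include <iostream>

  void report(const MPI_Comm comm)
  {
    const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);
    const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
    if (my_rank == 0)
      std::cout << "Running with " << n_ranks << " MPI processes" << std::endl;
  }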
/**
* Return a vector of the ranks (within @p comm_large) of a subset of
* processes specified by @p comm_small.
*/
const std::vector<unsigned int>
- mpi_processes_within_communicator(const MPI_Comm &comm_large,
- const MPI_Comm &comm_small);
+ mpi_processes_within_communicator(const MPI_Comm comm_large,
+ const MPI_Comm comm_small);
/**
* Consider an unstructured communication pattern where every process in
*/
std::vector<unsigned int>
compute_point_to_point_communication_pattern(
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::vector<unsigned int> &destinations);
/**
*/
unsigned int
compute_n_point_to_point_communications(
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::vector<unsigned int> &destinations);
/**
* <code>MPI_Comm_dup(mpi_communicator, &return_value);</code>.
*/
MPI_Comm
- duplicate_communicator(const MPI_Comm &mpi_communicator);
+ duplicate_communicator(const MPI_Comm mpi_communicator);
/**
* Free the given
* <code>MPI_Comm_free(&mpi_communicator);</code>.
*/
void
- free_communicator(MPI_Comm &mpi_communicator);
+ free_communicator(MPI_Comm mpi_communicator);
/**
* Helper class to automatically duplicate and free an MPI
/**
* Create a duplicate of the given @p communicator.
*/
- explicit DuplicatedCommunicator(const MPI_Comm &communicator)
+ explicit DuplicatedCommunicator(const MPI_Comm communicator)
: comm(duplicate_communicator(communicator))
{}
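A minimal sketch of this RAII helper (assuming that dereferencing the object yields the duplicated communicator, which is freed again in the destructor):

  #include <deal.II/base/mpi.h>

  void use_private_channel(const MPI_Comm comm)
  {
    dealii::Utilities::MPI::DuplicatedCommunicator duplicated(comm);
    const MPI_Comm private_comm = *duplicated;
    (void)private_comm; // use for communication that must not interfere
                        // with messages on the original communicator
  } // the duplicate is freed here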
/**
* Constructor. Blocks until it can acquire the lock.
*/
- explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
+ explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
: mutex(mutex)
, comm(comm)
{
* in the communicator.
*/
void
- lock(const MPI_Comm &comm);
+ lock(const MPI_Comm comm);
/**
* Release the lock.
* in the communicator.
*/
void
- unlock(const MPI_Comm &comm);
+ unlock(const MPI_Comm comm);
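Usage sketch for the scoped lock declared above (a sketch only; all ranks in the communicator must reach the critical section collectively):

  #include <deal.II/base/mpi.h>

  void guarded_exchange(const MPI_Comm comm)
  {
    static dealii::Utilities::MPI::CollectiveMutex mutex;
    {
      dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
      // ... point-to-point communication that must not interleave with
      //     other critical sections on 'comm' goes here ...
    } // unlock() is called in the destructor of 'lock'
  }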
private:
/**
*/
#ifdef DEAL_II_WITH_MPI
DEAL_II_DEPRECATED int
- create_group(const MPI_Comm & comm,
+ create_group(const MPI_Comm comm,
const MPI_Group &group,
const int tag,
MPI_Comm * new_comm);
*/
std::vector<IndexSet>
create_ascending_partitioning(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const types::global_dof_index locally_owned_size);
/**
*/
IndexSet
create_evenly_distributed_partitioning(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const types::global_dof_index total_size);
#ifdef DEAL_II_WITH_MPI
*/
template <class Iterator, typename Number = long double>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
- mean_and_standard_deviation(const Iterator begin,
- const Iterator end,
- const MPI_Comm &comm);
+ mean_and_standard_deviation(const Iterator begin,
+ const Iterator end,
+ const MPI_Comm comm);
#endif
*/
template <typename T>
T
- sum(const T &t, const MPI_Comm &mpi_communicator);
+ sum(const T &t, const MPI_Comm mpi_communicator);
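Caller-side sketch of the scalar reduction above:

  #include <deal.II/base/mpi.h>

  double accumulate(const double local_contribution, const MPI_Comm comm)
  {
    // Every rank passes its local value; all ranks receive the global sum.
    return dealii::Utilities::MPI::sum(local_contribution, comm);
  }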
/**
* Like the previous function, but take the sums over the elements of an
*/
template <typename T, typename U>
void
- sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
+ sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
/**
* Like the previous function, but take the sums over the elements of an
template <typename T>
void
sum(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & sums);
/**
template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
* Perform an MPI sum of the entries of a tensor.
template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
sum(const Tensor<rank, dim, Number> &local,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
* Perform an MPI sum of the entries of a SparseMatrix.
template <typename Number>
void
sum(const SparseMatrix<Number> &local,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
SparseMatrix<Number> & global);
/**
*/
template <typename T>
T
- max(const T &t, const MPI_Comm &mpi_communicator);
+ max(const T &t, const MPI_Comm mpi_communicator);
/**
* Like the previous function, but take the maximum over the elements of an
*/
template <typename T, typename U>
void
- max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
+ max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
/**
* Like the previous function, but take the maximum over the elements of an
template <typename T>
void
max(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & maxima);
/**
*/
template <typename T>
T
- min(const T &t, const MPI_Comm &mpi_communicator);
+ min(const T &t, const MPI_Comm mpi_communicator);
/**
* Like the previous function, but take the minima over the elements of an
*/
template <typename T, typename U>
void
- min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
+ min(const T &values, const MPI_Comm mpi_communicator, U &minima);
/**
* Like the previous function, but take the minimum over the elements of an
template <typename T>
void
min(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & minima);
/**
*/
template <typename T>
T
- logical_or(const T &t, const MPI_Comm &mpi_communicator);
+ logical_or(const T &t, const MPI_Comm mpi_communicator);
/**
* Like the previous function, but performs the <i>logical or</i> operation
*/
template <typename T, typename U>
void
- logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
+ logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
/**
* Like the previous function, but performs the <i>logical or</i> operation
template <typename T>
void
logical_or(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & results);
/**
* everywhere.
*/
MinMaxAvg
- min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
+ min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
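A small sketch of how the returned statistics object is typically consumed (the choice of timing data is made up):

  #include <deal.II/base/mpi.h>

  #include <iostream>

  void print_statistics(const double local_wall_time, const MPI_Comm comm)
  {
    const dealii::Utilities::MPI::MinMaxAvg stats =
      dealii::Utilities::MPI::min_max_avg(local_wall_time, comm);
    if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
      std::cout << "min=" << stats.min << " max=" << stats.max
                << " avg=" << stats.avg << std::endl;
  }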
/**
* Same as above but returning the sum, average, minimum, maximum,
*/
std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_value,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
void
min_max_avg(const ArrayView<const double> &my_values,
const ArrayView<MinMaxAvg> & result,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
*/
template <typename T>
std::map<unsigned int, T>
- some_to_some(const MPI_Comm & comm,
+ some_to_some(const MPI_Comm comm,
const std::map<unsigned int, T> &objects_to_send);
/**
*/
template <typename T>
std::vector<T>
- all_gather(const MPI_Comm &comm, const T &object_to_send);
+ all_gather(const MPI_Comm comm, const T &object_to_send);
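Sketch of gathering one serializable object per rank with the function above (the string content is made up):

  #include <deal.II/base/mpi.h>

  #include <string>
  #include <vector>

  std::vector<std::string> gather_names(const MPI_Comm comm)
  {
    const std::string my_name =
      "rank-" + std::to_string(dealii::Utilities::MPI::this_mpi_process(comm));
    // Every rank receives the same vector, ordered by rank number.
    return dealii::Utilities::MPI::all_gather(comm, my_name);
  }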
/**
* A generalization of the classic MPI_Gather function, that accepts
*/
template <typename T>
std::vector<T>
- gather(const MPI_Comm & comm,
+ gather(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process = 0);
*/
template <typename T>
T
- scatter(const MPI_Comm & comm,
+ scatter(const MPI_Comm comm,
const std::vector<T> &objects_to_send,
const unsigned int root_process = 0);
*/
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
- broadcast(const MPI_Comm & comm,
+ broadcast(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process = 0);
*/
template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
- broadcast(const MPI_Comm & comm,
+ broadcast(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process = 0);
broadcast(T * buffer,
const size_t count,
const unsigned int root,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* A function that combines values @p local_value from all processes
template <typename T>
T
reduce(const T & local_value,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<T(const T &, const T &)> &combiner,
const unsigned int root_process = 0);
template <typename T>
T
all_reduce(const T & local_value,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<T(const T &, const T &)> &combiner);
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
const IndexSet &indices_to_look_up,
- const MPI_Comm &comm);
+ const MPI_Comm comm);
/**
* Compute the union of the input vectors @p vec of all processes in the
*/
template <typename T>
std::vector<T>
- compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
+ compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
/**
* The same as above but for std::set.
*/
template <typename T>
std::set<T>
- compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
+ compute_set_union(const std::set<T> &set, const MPI_Comm comm);
void
all_reduce(const MPI_Op & mpi_op,
const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & output);
} // namespace internal
template <typename T, unsigned int N>
void
- sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
+ sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
{
internal::all_reduce(MPI_SUM,
ArrayView<const T>(values, N),
template <typename T, unsigned int N>
void
- max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
+ max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
{
internal::all_reduce(MPI_MAX,
ArrayView<const T>(values, N),
template <typename T, unsigned int N>
void
- min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
+ min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
{
internal::all_reduce(MPI_MIN,
ArrayView<const T>(values, N),
template <typename T, unsigned int N>
void
logical_or(const T (&values)[N],
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
T (&results)[N])
{
static_assert(std::is_integral<T>::value,
template <typename T>
std::map<unsigned int, T>
- some_to_some(const MPI_Comm & comm,
+ some_to_some(const MPI_Comm comm,
const std::map<unsigned int, T> &objects_to_send)
{
# ifndef DEAL_II_WITH_MPI
template <typename T>
std::vector<T>
- all_gather(const MPI_Comm &comm, const T &object)
+ all_gather(const MPI_Comm comm, const T &object)
{
if (job_supports_mpi() == false)
return {object};
template <typename T>
std::vector<T>
- gather(const MPI_Comm & comm,
+ gather(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process)
{
template <typename T>
T
- scatter(const MPI_Comm & comm,
+ scatter(const MPI_Comm comm,
const std::vector<T> &objects_to_send,
const unsigned int root_process)
{
broadcast(T * buffer,
const size_t count,
const unsigned int root,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
# ifndef DEAL_II_WITH_MPI
(void)buffer;
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
- broadcast(const MPI_Comm & comm,
+ broadcast(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process)
{
template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
- broadcast(const MPI_Comm & comm,
+ broadcast(const MPI_Comm comm,
const T & object_to_send,
const unsigned int root_process)
{
# ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
- mean_and_standard_deviation(const Iterator begin,
- const Iterator end,
- const MPI_Comm &comm)
+ mean_and_standard_deviation(const Iterator begin,
+ const Iterator end,
+ const MPI_Comm comm)
{
// below we do simple and straight-forward implementation. More elaborate
// options are:
void
all_reduce(const MPI_Op & mpi_op,
const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & output)
{
AssertDimension(values.size(), output.size());
void
all_reduce(const MPI_Op & mpi_op,
const ArrayView<const std::complex<T>> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<std::complex<T>> & output)
{
AssertDimension(values.size(), output.size());
template <typename T>
T
- sum(const T &t, const MPI_Comm &mpi_communicator)
+ sum(const T &t, const MPI_Comm mpi_communicator)
{
T return_value{};
internal::all_reduce(MPI_SUM,
template <typename T, typename U>
void
- sum(const T &values, const MPI_Comm &mpi_communicator, U &sums)
+ sum(const T &values, const MPI_Comm mpi_communicator, U &sums)
{
static_assert(std::is_same<typename std::decay<T>::type,
typename std::decay<U>::type>::value,
template <typename T>
void
sum(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & sums)
{
internal::all_reduce(MPI_SUM, values, mpi_communicator, sums);
template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
- sum(const Tensor<rank, dim, Number> &t, const MPI_Comm &mpi_communicator)
+ sum(const Tensor<rank, dim, Number> &t, const MPI_Comm mpi_communicator)
{
// Copy the tensor into a C-style array with which we can then
// call the other sum() function.
template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
// Copy the tensor into a C-style array with which we can then
// call the other sum() function.
template <typename Number>
void
sum(const SparseMatrix<Number> &local,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
SparseMatrix<Number> & global)
{
Assert(
template <typename T>
T
- max(const T &t, const MPI_Comm &mpi_communicator)
+ max(const T &t, const MPI_Comm mpi_communicator)
{
T return_value{};
internal::all_reduce(MPI_MAX,
template <typename T, typename U>
void
- max(const T &values, const MPI_Comm &mpi_communicator, U &maxima)
+ max(const T &values, const MPI_Comm mpi_communicator, U &maxima)
{
static_assert(std::is_same<typename std::decay<T>::type,
typename std::decay<U>::type>::value,
template <typename T>
void
max(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & maxima)
{
internal::all_reduce(MPI_MAX, values, mpi_communicator, maxima);
template <typename T>
T
- min(const T &t, const MPI_Comm &mpi_communicator)
+ min(const T &t, const MPI_Comm mpi_communicator)
{
T return_value{};
internal::all_reduce(MPI_MIN,
template <typename T, typename U>
void
- min(const T &values, const MPI_Comm &mpi_communicator, U &minima)
+ min(const T &values, const MPI_Comm mpi_communicator, U &minima)
{
static_assert(std::is_same<typename std::decay<T>::type,
typename std::decay<U>::type>::value,
template <typename T>
void
min(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & minima)
{
internal::all_reduce(MPI_MIN, values, mpi_communicator, minima);
template <typename T>
T
- logical_or(const T &t, const MPI_Comm &mpi_communicator)
+ logical_or(const T &t, const MPI_Comm mpi_communicator)
{
static_assert(std::is_integral<T>::value,
"The MPI_LOR operation only allows integral data types.");
template <typename T, typename U>
void
- logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results)
+ logical_or(const T &values, const MPI_Comm mpi_communicator, U &results)
{
static_assert(std::is_same<typename std::decay<T>::type,
typename std::decay<U>::type>::value,
template <typename T>
void
logical_or(const ArrayView<const T> &values,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const ArrayView<T> & results)
{
static_assert(std::is_integral<T>::value,
template <typename T>
T
reduce(const T & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<T(const T &, const T &)> &combiner,
const unsigned int root_process)
{
template <typename T>
T
all_reduce(const T & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<T(const T &, const T &)> &combiner)
{
if (job_supports_mpi() && n_mpi_processes(comm) > 1)
template <typename T>
std::vector<T>
- compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm)
+ compute_set_union(const std::vector<T> &vec, const MPI_Comm comm)
{
return Utilities::MPI::all_reduce<std::vector<T>>(
vec, comm, [](const auto &set_1, const auto &set_2) {
template <typename T>
std::set<T>
- compute_set_union(const std::set<T> &set_in, const MPI_Comm &comm)
+ compute_set_union(const std::set<T> &set_in, const MPI_Comm comm)
{
// convert vector to set
std::vector<T> vector_in(set_in.begin(), set_in.end());
* ranges to the owner of the dictionary part.
*/
void
- reinit(const IndexSet &owned_indices, const MPI_Comm &comm);
+ reinit(const IndexSet &owned_indices, const MPI_Comm comm);
/**
* Translate a global dof index to the MPI rank in the dictionary
* the number of ranks.
*/
void
- partition(const IndexSet &owned_indices, const MPI_Comm &comm);
+ partition(const IndexSet &owned_indices, const MPI_Comm comm);
};
*/
ConsensusAlgorithmsPayload(const IndexSet &owned_indices,
const IndexSet &indices_to_look_up,
- const MPI_Comm &comm,
+ const MPI_Comm comm,
std::vector<unsigned int> &owning_ranks,
const bool track_index_requests = false);
*/
DEAL_II_DEPRECATED
Interface(Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* Destructor. Made `virtual` to ensure that one can work with
* that takes a number of `std::function` arguments.
*/
std::vector<unsigned int>
- run(Process<RequestType, AnswerType> &process, const MPI_Comm &comm);
+ run(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
/**
* Run the consensus algorithm and return a vector of process ranks
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm) = 0;
+ & process_answer,
+ const MPI_Comm comm) = 0;
private:
/**
* function that takes an argument.
*/
DEAL_II_DEPRECATED
- NBX(Process<RequestType, AnswerType> &process, const MPI_Comm &comm);
+ NBX(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
/**
* Destructor.
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm) override;
+ & process_answer,
+ const MPI_Comm comm) override;
private:
#ifdef DEAL_II_WITH_MPI
bool
all_locally_originated_receives_are_completed(
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* Signal to all other ranks that this rank has received all request
* answers via entering IBarrier.
*/
void
- signal_finish(const MPI_Comm &comm);
+ signal_finish(const MPI_Comm comm);
/**
* Check whether all of the requests for answers that were created by
maybe_answer_one_request(
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* Start to send all requests via ISend and post IRecvs for the incoming
start_communication(
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* After all rank has received all answers, the MPI data structures can
* be freed and the received answers can be processed.
*/
void
- clean_up_and_end_communication(const MPI_Comm &comm);
+ clean_up_and_end_communication(const MPI_Comm comm);
};
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* This function provides a specialization of the one above for
nbx(const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm);
+ & process_request,
+ const MPI_Comm comm);
/**
* This class implements a concrete algorithm for the
* function that takes an argument.
*/
DEAL_II_DEPRECATED
- PEX(Process<RequestType, AnswerType> &process, const MPI_Comm &comm);
+ PEX(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
/**
* Destructor.
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm) override;
+ & process_answer,
+ const MPI_Comm comm) override;
private:
#ifdef DEAL_II_WITH_MPI
start_communication(
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* The `index`th request message from another rank has been received:
const unsigned int index,
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* Receive and process all of the incoming responses to the
process_incoming_answers(
const unsigned int n_targets,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* After all answers have been exchanged, the MPI data structures can be
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* This function provides a specialization of the one above for
pex(const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm);
+ & process_request,
+ const MPI_Comm comm);
/**
* function that takes an argument.
*/
DEAL_II_DEPRECATED
- Serial(Process<RequestType, AnswerType> &process, const MPI_Comm &comm);
+ Serial(Process<RequestType, AnswerType> &process, const MPI_Comm comm);
// Import the declarations from the base class.
using Interface<RequestType, AnswerType>::run;
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm) override;
+ & process_answer,
+ const MPI_Comm comm) override;
};
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* This function provides a specialization of the one above for
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm);
+ & process_request,
+ const MPI_Comm comm);
*/
DEAL_II_DEPRECATED
Selector(Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
/**
* Destructor.
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm) override;
+ & process_answer,
+ const MPI_Comm comm) override;
private:
// Pointer to the actual ConsensusAlgorithms::Interface implementation.
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm);
+ & process_answer,
+ const MPI_Comm comm);
/**
* This function provides a specialization of the one above for
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm);
+ & process_request,
+ const MPI_Comm comm);
/**
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
return NBX<RequestType, AnswerType>().run(
targets, create_request, answer_request, process_answer, comm);
nbx(const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm)
+ & process_request,
+ const MPI_Comm comm)
{
// TODO: For the moment, simply implement this special case by
// forwarding to the other function with rewritten function
const std::function<AnswerType(const unsigned int,
const RequestType &)> &answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
return PEX<RequestType, AnswerType>().run(
targets, create_request, answer_request, process_answer, comm);
pex(const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm)
+ & process_request,
+ const MPI_Comm comm)
{
// TODO: For the moment, simply implement this special case by
// forwarding to the other function with rewritten function
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
return Serial<RequestType, AnswerType>().run(
targets, create_request, answer_request, process_answer, comm);
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm)
+ & process_request,
+ const MPI_Comm comm)
{
// TODO: For the moment, simply implement this special case by
// forwarding to the other function with rewritten function
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
return Selector<RequestType, AnswerType>().run(
targets, create_request, answer_request, process_answer, comm);
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
const std::function<void(const unsigned int, const RequestType &)>
- & process_request,
- const MPI_Comm &comm)
+ & process_request,
+ const MPI_Comm comm)
{
// TODO: For the moment, simply implement this special case by
// forwarding to the other function with rewritten function
* Handle exceptions inside the ConsensusAlgorithm::run() functions.
*/
inline void
- handle_exception(std::exception_ptr &&exception, const MPI_Comm &comm)
+ handle_exception(std::exception_ptr &&exception, const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
// an exception within a ConsensusAlgorithm likely causes an
template <typename RequestType, typename AnswerType>
Interface<RequestType, AnswerType>::Interface(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
: process(&process)
, comm(comm)
{}
std::vector<unsigned int>
Interface<RequestType, AnswerType>::run(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
// Unpack the 'process' object and call the function that takes
// function objects for all operations.
template <typename RequestType, typename AnswerType>
NBX<RequestType, AnswerType>::NBX(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
: Interface<RequestType, AnswerType>(process, comm)
{}
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
Assert(has_unique_elements(targets),
ExcMessage("The consensus algorithms expect that each process "
NBX<RequestType, AnswerType>::start_communication(
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
// 1)
NBX<RequestType, AnswerType>::
all_locally_originated_receives_are_completed(
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
// We know that all requests have come in when we have pending
void
NBX<RequestType, AnswerType>::maybe_answer_one_request(
const std::function<AnswerType(const unsigned int, const RequestType &)>
- & answer_request,
- const MPI_Comm &comm)
+ & answer_request,
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
template <typename RequestType, typename AnswerType>
void
- NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm &comm)
+ NBX<RequestType, AnswerType>::signal_finish(const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
const auto ierr = MPI_Ibarrier(comm, &barrier_request);
template <typename RequestType, typename AnswerType>
void
NBX<RequestType, AnswerType>::clean_up_and_end_communication(
- const MPI_Comm &comm)
+ const MPI_Comm comm)
{
(void)comm;
# ifdef DEAL_II_WITH_MPI
template <typename RequestType, typename AnswerType>
PEX<RequestType, AnswerType>::PEX(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
: Interface<RequestType, AnswerType>(process, comm)
{}
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
Assert(has_unique_elements(targets),
ExcMessage("The consensus algorithms expect that each process "
PEX<RequestType, AnswerType>::start_communication(
const std::vector<unsigned int> & targets,
const std::function<RequestType(const unsigned int)> &create_request,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
const int tag_request = Utilities::MPI::internal::Tags::
PEX<RequestType, AnswerType>::answer_one_request(
const unsigned int index,
const std::function<AnswerType(const unsigned int, const RequestType &)>
- & answer_request,
- const MPI_Comm &comm)
+ & answer_request,
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
const int tag_request = Utilities::MPI::internal::Tags::
PEX<RequestType, AnswerType>::process_incoming_answers(
const unsigned int n_targets,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
# ifdef DEAL_II_WITH_MPI
const int tag_deliver = Utilities::MPI::internal::Tags::
template <typename RequestType, typename AnswerType>
Serial<RequestType, AnswerType>::Serial(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
: Interface<RequestType, AnswerType>(process, comm)
{}
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
(void)comm;
Assert((Utilities::MPI::job_supports_mpi() == false) ||
template <typename RequestType, typename AnswerType>
Selector<RequestType, AnswerType>::Selector(
Process<RequestType, AnswerType> &process,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
: Interface<RequestType, AnswerType>(process, comm)
{}
const std::function<AnswerType(const unsigned int, const RequestType &)>
&answer_request,
const std::function<void(const unsigned int, const AnswerType &)>
- & process_answer,
- const MPI_Comm &comm)
+ & process_answer,
+ const MPI_Comm comm)
{
// Depending on the number of processes we switch between
// implementations. We reduce the threshold for debug mode to be
*/
NoncontiguousPartitioner(const IndexSet &indexset_locally_owned,
const IndexSet &indexset_ghost,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Constructor. Same as above but for vectors of indices @p indices_locally_owned
NoncontiguousPartitioner(
const std::vector<types::global_dof_index> &indices_locally_owned,
const std::vector<types::global_dof_index> &indices_ghost,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Fill the vector @p ghost_array according to the precomputed communication
void
reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Initialize the inner data structures using explicit sets of
void
reinit(const std::vector<types::global_dof_index> &locally_owned_indices,
const std::vector<types::global_dof_index> &ghost_indices,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
private:
/**
*/
Partitioner(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Constructor with index set arguments. This constructor creates a
*/
Partitioner(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices_in,
- const MPI_Comm &communicator_in);
+ const MPI_Comm communicator_in);
/**
* Constructor with one index set argument. This constructor creates a
* constructor with two index sets.
*/
Partitioner(const IndexSet &locally_owned_indices,
- const MPI_Comm &communicator_in);
+ const MPI_Comm communicator_in);
/**
* Reinitialize the communication pattern. The first argument
virtual void
reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Set the locally owned indices. Used in the constructor.
* number of cores
* in the @p mpi_communicator.
*/
- ProcessGrid(const MPI_Comm & mpi_communicator,
+ ProcessGrid(const MPI_Comm mpi_communicator,
const unsigned int n_rows,
const unsigned int n_columns);
* and the @p mpi_communicator with 11 cores will result in the $3x3$
* process grid.
*/
- ProcessGrid(const MPI_Comm & mpi_communicator,
+ ProcessGrid(const MPI_Comm mpi_communicator,
const unsigned int n_rows_matrix,
const unsigned int n_columns_matrix,
const unsigned int row_block_size,
* A private constructor which takes grid dimensions as an
* <code>std::pair</code>.
*/
- ProcessGrid(const MPI_Comm & mpi_communicator,
+ ProcessGrid(const MPI_Comm mpi_communicator,
const std::pair<unsigned int, unsigned int> &grid_dimensions);
/**
* the destructor is called.
*/
void
- replicate_across_communicator(const MPI_Comm & communicator,
+ replicate_across_communicator(const MPI_Comm communicator,
const unsigned int root_process);
/**
template <int N, typename T>
inline void
-TableBase<N, T>::replicate_across_communicator(const MPI_Comm & communicator,
+TableBase<N, T>::replicate_across_communicator(const MPI_Comm communicator,
const unsigned int root_process)
{
// Replicate first the actual data, then also exchange the
* communicator occurs; the extra cost of the synchronization is not
* measured.
*/
- Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times = false);
+ Timer(const MPI_Comm mpi_communicator, const bool sync_lap_times = false);
/**
* Return a reference to the data structure containing basic statistics on
* <code>MPI_Barrier</code> call before starting and stopping the timer for
* each section.
*/
- TimerOutput(const MPI_Comm & mpi_comm,
+ TimerOutput(const MPI_Comm mpi_comm,
std::ostream & stream,
const OutputFrequency output_frequency,
const OutputType output_type);
* <code>MPI_Barrier</code> call before starting and stopping the timer for
* each section.)
*/
- TimerOutput(const MPI_Comm & mpi_comm,
+ TimerOutput(const MPI_Comm mpi_comm,
ConditionalOStream & stream,
const OutputFrequency output_frequency,
const OutputType output_type);
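A usage sketch of the MPI-aware constructor above (the section name is made up; the summary is printed when the object goes out of scope):

  #include <deal.II/base/timer.h>

  #include <iostream>

  void time_assembly(const MPI_Comm comm)
  {
    dealii::TimerOutput timer(comm,
                              std::cout,
                              dealii::TimerOutput::summary,
                              dealii::TimerOutput::wall_times);
    {
      dealii::TimerOutput::Scope scope(timer, "assembly");
      // ... timed work ...
    }
  }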
* median is given).
*/
void
- print_wall_time_statistics(const MPI_Comm &mpi_comm,
- const double print_quantile = 0.) const;
+ print_wall_time_statistics(const MPI_Comm mpi_comm,
+ const double print_quantile = 0.) const;
/**
* By calling this function, all output can be disabled. This function
* @param mpi_communicator The MPI communicator to be used for the
* triangulation.
*/
- explicit Triangulation(const MPI_Comm &mpi_communicator);
+ explicit Triangulation(const MPI_Comm mpi_communicator);
/**
* Destructor.
std::pair<number, number>
compute_global_min_and_max_at_root(
const dealii::Vector<number> &criteria,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
namespace RefineAndCoarsenFixedNumber
{
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const types::global_cell_index n_target_cells,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
} // namespace RefineAndCoarsenFixedNumber
namespace RefineAndCoarsenFixedFraction
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const double target_error,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
} // namespace RefineAndCoarsenFixedFraction
} // namespace GridRefinement
} // namespace distributed
* consider enabling artificial cells.
*/
Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing =
(dealii::Triangulation<dim, spacedim>::none),
const bool allow_artificial_cells = false,
* triangulation is partitioned.
*/
explicit Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const Settings settings = default_setting);
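Assuming this is the constructor of parallel::distributed::Triangulation (and that deal.II was configured with p4est), a minimal construction sketch:

  #include <deal.II/distributed/tria.h>
  #include <deal.II/grid/grid_generator.h>

  void make_mesh(const MPI_Comm comm)
  {
    dealii::parallel::distributed::Triangulation<2> triangulation(comm);
    dealii::GridGenerator::hyper_cube(triangulation);
    triangulation.refine_global(3); // cells are partitioned among the ranks
  }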
* the triangulation.
*/
Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<1, spacedim>::none),
const Settings settings = default_setting);
* constructed (see also the class documentation).
*/
explicit Triangulation(
- const MPI_Comm & /*mpi_communicator*/,
+ const MPI_Comm /*mpi_communicator*/,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
/*smooth_grid*/
= (dealii::Triangulation<dim, spacedim>::none),
* Constructor.
*/
TriangulationBase(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const bool check_for_distorted_cells = false);
* Constructor.
*/
DistributedTriangulationBase(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
const bool check_for_distorted_cells = false);
class DataTransfer
{
public:
- DataTransfer(const MPI_Comm &mpi_communicator);
+ DataTransfer(const MPI_Comm mpi_communicator);
/**
* Prepare data transfer by calling the pack callback functions on each
*/
std::vector<types::global_dof_index>
get_n_locally_owned_dofs_per_processor(
- const MPI_Comm &mpi_communicator) const;
+ const MPI_Comm mpi_communicator) const;
/**
* Return a representation of @p locally_owned_dofs_per_processor both
*/
std::vector<IndexSet>
get_locally_owned_dofs_per_processor(
- const MPI_Comm &mpi_communicator) const;
+ const MPI_Comm mpi_communicator) const;
/**
* Total number of dofs, accumulated over all processors that may
{
if (u1.n_blocks() == 0)
return;
- const MPI_Comm &mpi_communicator = u1.block(0).get_mpi_communicator();
+ const MPI_Comm mpi_communicator = u1.block(0).get_mpi_communicator();
const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs();
IndexSet dof2_locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dof2, dof2_locally_relevant_dofs);
std::vector<std::vector<BoundingBox<spacedim>>>
exchange_local_bounding_boxes(
const std::vector<BoundingBox<spacedim>> &local_bboxes,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
* In this collective operation each process provides a vector
RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
build_global_description_tree(
const std::vector<BoundingBox<spacedim>> &local_description,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
/**
* Collect for a given triangulation all locally relevant vertices that
Description<dim, spacedim>
create_description_from_triangulation(
const dealii::Triangulation<dim, spacedim> &tria,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const TriangulationDescription::Settings settings =
TriangulationDescription::Settings::default_setting,
const unsigned int my_rank_in = numbers::invalid_unsigned_int);
const std::function<void(dealii::Triangulation<dim, spacedim> &)>
& serial_grid_generator,
const std::function<void(dealii::Triangulation<dim, spacedim> &,
- const MPI_Comm &,
+ const MPI_Comm,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const int group_size = 1,
const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing =
dealii::Triangulation<dim, spacedim>::none,
* AffineConstraints was created for the DG case.
*/
bool
- is_closed(const MPI_Comm &comm) const;
+ is_closed(const MPI_Comm comm) const;
/**
* Merge the constraints represented by the object given as argument into
bool
is_consistent_in_parallel(const std::vector<IndexSet> &locally_owned_dofs,
const IndexSet & locally_active_dofs,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const bool verbose = false) const;
/**
AffineConstraints<number>::is_consistent_in_parallel(
const std::vector<IndexSet> &locally_owned_dofs,
const IndexSet & locally_active_dofs,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const bool verbose) const
{
// Helper to return a ConstraintLine object that belongs to row @p row.
template <typename number>
bool
-AffineConstraints<number>::is_closed(const MPI_Comm &comm) const
+AffineConstraints<number>::is_closed(const MPI_Comm comm) const
{
return Utilities::MPI::min(static_cast<unsigned int>(is_closed()), comm) == 1;
}
* to be saved in each block.
*/
BlockSparsityPattern(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Initialize the pattern with two arrays of index sets that specify rows
const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &column_parallel_partitioning,
const std::vector<IndexSet> &writeable_rows,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Resize the matrix to a tensor product of matrices with dimensions
*/
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Resize the matrix to a rectangular block matrices. This method allows
void
reinit(const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &column_parallel_partitioning,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Resize the matrix to a rectangular block matrices that furthermore
reinit(const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &column_parallel_partitioning,
const std::vector<IndexSet> &writeable_rows,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Allow the use of the reinit functions of the base class as well.
*/
BlockVector(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Same as above but the ghost indices are assumed to be empty.
*/
BlockVector(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Destructor.
void
reinit(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Same as above, but without ghost entries.
*/
void
reinit(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* This function copies the data that has accumulated in the data buffer
template <typename Number>
BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
reinit(local_ranges, ghost_indices, communicator);
}
template <typename Number>
BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
reinit(local_ranges, communicator);
}
void
BlockVector<Number>::reinit(const std::vector<IndexSet> &local_ranges,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
AssertDimension(local_ranges.size(), ghost_indices.size());
template <typename Number>
void
BlockVector<Number>::reinit(const std::vector<IndexSet> &local_ranges,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// update the number of blocks
this->block_indices.reinit(local_ranges.size(), 0);
*/
Vector(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Same constructor as above but without any ghost indices.
*/
- Vector(const IndexSet &local_range, const MPI_Comm &communicator);
+ Vector(const IndexSet &local_range, const MPI_Comm communicator);
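A construction sketch for the ghosted constructor above (assuming the index sets come from a DoFHandler, e.g. locally owned and locally relevant DoFs):

  #include <deal.II/base/index_set.h>
  #include <deal.II/lac/la_parallel_vector.h>

  void make_vector(const dealii::IndexSet &locally_owned_dofs,
                   const dealii::IndexSet &locally_relevant_dofs,
                   const MPI_Comm          comm)
  {
    dealii::LinearAlgebra::distributed::Vector<double> ghosted_solution(
      locally_owned_dofs, locally_relevant_dofs, comm);
    ghosted_solution = 0.;
  }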
/**
* Create the vector based on the parallel partitioning described in @p
void
reinit(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Same as above, but without ghost entries.
*/
void
- reinit(const IndexSet &local_range, const MPI_Comm &communicator);
+ reinit(const IndexSet &local_range, const MPI_Comm communicator);
/**
* Initialize the vector given to the parallel partitioning described in
void
reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
- const MPI_Comm &comm_sm = MPI_COMM_SELF);
+ const MPI_Comm comm_sm = MPI_COMM_SELF);
/**
* Initialize vector with @p local_size locally-owned and @p ghost_size
void
reinit(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm & comm,
- const MPI_Comm & comm_sm = MPI_COMM_SELF);
+ const MPI_Comm comm,
+ const MPI_Comm comm_sm = MPI_COMM_SELF);
/**
* Swap the contents of this vector and the other vector @p v. One could
*/
void
resize_val(const size_type new_allocated_size,
- const MPI_Comm &comm_sm = MPI_COMM_SELF);
+ const MPI_Comm comm_sm = MPI_COMM_SELF);
// Make all other vector types friends.
template <typename Number2, typename MemorySpace2>
types::global_dof_index & /*allocated_size*/,
::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
& /*data*/,
- const MPI_Comm & /*comm_sm*/)
+ const MPI_Comm /*comm_sm*/)
{}
static void
types::global_dof_index & allocated_size,
::dealii::MemorySpace::
MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data,
- const MPI_Comm &comm_shared)
+ const MPI_Comm comm_shared)
{
if (comm_shared == MPI_COMM_SELF)
{
types::global_dof_index & allocated_size,
::dealii::MemorySpace::MemorySpaceData<Number,
::dealii::MemorySpace::Default>
- & data,
- const MPI_Comm &comm_sm)
+ & data,
+ const MPI_Comm comm_sm)
{
(void)comm_sm;
template <typename Number, typename MemorySpaceType>
void
Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size,
- const MPI_Comm &comm_sm)
+ const MPI_Comm comm_sm)
{
internal::la_parallel_vector_templates_functions<
Number,
Vector<Number, MemorySpaceType>::reinit(
const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm & comm,
- const MPI_Comm & comm_sm)
+ const MPI_Comm comm,
+ const MPI_Comm comm_sm)
{
clear_mpi_requests();
Vector<Number, MemorySpaceType>::reinit(
const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
// set up parallel partitioner with index sets and communicator
reinit(std::make_shared<Utilities::MPI::Partitioner>(
void
Vector<Number, MemorySpaceType>::reinit(
const IndexSet &locally_owned_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
// set up parallel partitioner with index sets and communicator
reinit(
void
Vector<Number, MemorySpaceType>::reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in,
- const MPI_Comm & comm_sm)
+ const MPI_Comm comm_sm)
{
clear_mpi_requests();
template <typename Number, typename MemorySpaceType>
Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: allocated_size(0)
, vector_is_ghosted(false)
, comm_sm(MPI_COMM_SELF)
template <typename Number, typename MemorySpaceType>
Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: allocated_size(0)
, vector_is_ghosted(false)
, comm_sm(MPI_COMM_SELF)
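As a usage sketch of the by-value interface for LinearAlgebra::distributed::Vector (the partitioning below is hypothetical and not part of this patch):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/la_parallel_vector.h>

using namespace dealii;

void make_ghosted_vector()
{
  // Hypothetical layout: every rank owns 100 consecutive entries.
  const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  IndexSet owned(100 * n_ranks);
  owned.add_range(100 * rank, 100 * (rank + 1));
  IndexSet ghosts(100 * n_ranks); // left empty here for brevity

  // The communicator is an opaque handle and is now copied by value.
  LinearAlgebra::distributed::Vector<double> vec(owned, ghosts, MPI_COMM_WORLD);
  vec = 1.0;
}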
* Constructor.
*/
PArpackSolver(SolverControl & control,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
/**
template <typename VectorType>
PArpackSolver<VectorType>::PArpackSolver(SolverControl & control,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: solver_control(control)
, additional_data(data)
reinit(const std::vector<IndexSet> & rows,
const std::vector<IndexSet> & cols,
const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com);
+ const MPI_Comm com);
/**
void
reinit(const std::vector<IndexSet> & sizes,
const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com);
+ const MPI_Comm com);
/**
* present process.
*/
explicit BlockVector(const unsigned int n_blocks,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const size_type block_size,
const size_type locally_owned_size);
* process.
*/
BlockVector(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &local_elements);
/**
* initialized with the given IndexSet.
*/
explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Same as above, but include ghost elements
*/
BlockVector(const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Create a BlockVector with a PETSc Vec
*/
void
reinit(const unsigned int n_blocks,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const size_type block_size,
const size_type locally_owned_size,
const bool omit_zeroing_entries = false);
*/
void
reinit(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &locally_owned_sizes,
const bool omit_zeroing_entries = false);
*/
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Same as above but include ghost entries.
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_entries,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* This function collects the sizes of the sub-objects and stores them
inline BlockVector::BlockVector(const unsigned int n_blocks,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const size_type block_size,
const size_type locally_owned_size)
: BlockVector()
inline BlockVector::BlockVector(
const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &local_elements)
: BlockVector()
{
inline BlockVector::BlockVector(
const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
: BlockVector()
{
reinit(parallel_partitioning, communicator);
inline BlockVector::BlockVector(
const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_indices,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
: BlockVector()
{
reinit(parallel_partitioning, ghost_indices, communicator);
inline void
BlockVector::reinit(const unsigned int n_blocks,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const size_type block_size,
const size_type locally_owned_size,
const bool omit_zeroing_entries)
inline void
BlockVector::reinit(const std::vector<size_type> &block_sizes,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &locally_owned_sizes,
const bool omit_zeroing_entries)
{
inline void
BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// update the number of blocks
this->block_indices.reinit(parallel_partitioning.size(), 0);
inline void
BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_entries,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
AssertDimension(parallel_partitioning.size(), ghost_entries.size());
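A minimal sketch of the corresponding PETScWrappers::MPI::BlockVector usage (requires a PETSc-enabled build); the two-block layout is an assumption made only for illustration:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/petsc_block_vector.h>

#include <vector>

using namespace dealii;

void make_petsc_block_vector(const unsigned int rank, const unsigned int n_ranks)
{
  // Two blocks, each rank owning ten entries per block (hypothetical layout).
  std::vector<IndexSet> partitioning(2, IndexSet(10 * n_ranks));
  for (IndexSet &block : partitioning)
    block.add_range(10 * rank, 10 * (rank + 1));

  // The communicator is now passed by value and defaults to MPI_COMM_WORLD.
  PETScWrappers::MPI::BlockVector bv(partitioning, MPI_COMM_WORLD);
  bv = 0.;
}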
virtual void
reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Reinitialize the communication pattern. The argument @p indices_locally_owned
void
reinit(const std::vector<types::global_dof_index> &indices_locally_owned,
const std::vector<types::global_dof_index> &indices_want,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Reinitialization that takes the number of locally-owned degrees of
void
reinit(const types::global_dof_index local_size,
const IndexSet & ghost_indices,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Fill the vector @p ghost_array according to the precomputed communication
const std::vector<PetscInt> &inloc,
const std::vector<PetscInt> &outidx,
const std::vector<PetscInt> &outloc,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
};
/**
virtual void
reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Reinitialize the partitioner. As for the Utilities::MPI::Partitioner,
reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
const IndexSet &larger_ghost_indices,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Return the actual number of ghost indices.
* any estimation of non_zero entries and has no option
* <tt>is_symmetric</tt>.
*/
- MatrixFree(const MPI_Comm & communicator,
+ MatrixFree(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
* any estimation of non_zero entries and has no option
* <tt>is_symmetric</tt>.
*/
- MatrixFree(const MPI_Comm & communicator,
+ MatrixFree(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const std::vector<unsigned int> &local_rows_per_process,
* the same argument list as the present function.
*/
void
- reinit(const MPI_Comm & communicator,
+ reinit(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
* the same argument list as the present function.
*/
void
- reinit(const MPI_Comm & communicator,
+ reinit(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const std::vector<unsigned int> &local_rows_per_process,
* previous matrix is left to the caller.
*/
void
- do_reinit(const MPI_Comm & comm,
+ do_reinit(const MPI_Comm comm,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
/**
* Constructor.
*/
- explicit PreconditionBase(const MPI_Comm &mpi_communicator);
+ explicit PreconditionBase(const MPI_Comm mpi_communicator);
/**
* Constructor.
* Internal function to create the PETSc preconditioner object.
*/
void
- create_pc_with_comm(const MPI_Comm &);
+ create_pc_with_comm(const MPI_Comm);
};
* Intended to be used with SLEPc objects.
*/
PreconditionJacobi(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const AdditionalData &additional_data = AdditionalData());
/**
* Intended to be used with SLEPc objects.
*/
PreconditionBlockJacobi(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const AdditionalData &additional_data = AdditionalData());
* Intended to be used with SLEPc objects.
*/
PreconditionBoomerAMG(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const AdditionalData &additional_data = AdditionalData());
/**
* Same as above but without setting a matrix to form the preconditioner.
*/
- PreconditionShell(const MPI_Comm &communicator);
+ PreconditionShell(const MPI_Comm communicator);
/**
* The callback for the application of the preconditioner.
* matrix. This function sets up the PCSHELL preconditioner
*/
void
- initialize(const MPI_Comm &comm);
+ initialize(const MPI_Comm comm);
/**
* Initialize the preconditioner object with a particular
* Constructor.
*/
NonlinearSolver(const NonlinearSolverData &data = NonlinearSolverData(),
- const MPI_Comm & mpi_comm = PETSC_COMM_WORLD);
+ const MPI_Comm mpi_comm = PETSC_COMM_WORLD);
/**
* Destructor.
std::constructible_from<VectorType, Vec>))
NonlinearSolver<VectorType, PMatrixType, AMatrixType>::NonlinearSolver(
const NonlinearSolverData &data,
- const MPI_Comm & mpi_comm)
+ const MPI_Comm mpi_comm)
{
AssertPETSc(SNESCreate(mpi_comm, &snes));
AssertPETSc(SNESSetApplicationContext(snes, this));
* Utility to create the KSP object and attach convergence test.
*/
void
- initialize_ksp_with_comm(const MPI_Comm &comm);
+ initialize_ksp_with_comm(const MPI_Comm comm);
/**
* %Function that takes a Krylov Subspace Solver context object, and sets
*/
DEAL_II_DEPRECATED_EARLY
SolverRichardson(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverChebychev(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverCG(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverBiCG(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverGMRES(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverBicgstab(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverCGS(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverTFQMR(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverTCQMR(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverCR(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverLSQR(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SolverPreOnly(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
*/
DEAL_II_DEPRECATED_EARLY
SparseDirectMUMPS(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
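For context, a sketch of how these (deprecated) PETSc solver constructors are used; the matrix, vectors, and tolerances are placeholders and a PETSc-enabled build is assumed:

#include <deal.II/lac/petsc_precondition.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver_control.h>

using namespace dealii;

void solve(const PETScWrappers::MPI::SparseMatrix &A,
           PETScWrappers::MPI::Vector &            x,
           const PETScWrappers::MPI::Vector &      b)
{
  SolverControl control(1000, 1e-10);

  // Deprecated constructor from the hunks above: the communicator is now
  // taken by value.
  PETScWrappers::SolverCG cg(control, MPI_COMM_WORLD);

  PETScWrappers::PreconditionJacobi preconditioner(A);
  cg.solve(A, x, b, preconditioner);
}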
/**
* efficient to get memory allocation right from the start.
*/
template <typename SparsityPatternType>
- SparseMatrix(const MPI_Comm & communicator,
+ SparseMatrix(const MPI_Comm communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
*/
template <typename SparsityPatternType>
void
- reinit(const MPI_Comm & communicator,
+ reinit(const MPI_Comm communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
void
reinit(const IndexSet & local_partitioning,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Create a matrix where the size() of the IndexSets determine the
reinit(const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* Initialize this matrix to have the same structure as @p other. This
const IndexSet & local_columns,
const IndexSet & local_active_columns,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator);
+ const MPI_Comm communicator);
/**
* @addtogroup Exceptions
*/
template <typename SparsityPatternType>
void
- do_reinit(const MPI_Comm & comm,
+ do_reinit(const MPI_Comm comm,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
*/
template <typename SparsityPatternType>
void
- do_reinit(const MPI_Comm & comm,
+ do_reinit(const MPI_Comm comm,
const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern);
*/
template <typename SparsityPatternType>
void
- do_reinit(const MPI_Comm & comm,
+ do_reinit(const MPI_Comm comm,
const IndexSet & local_rows,
const IndexSet & local_active_rows,
const IndexSet & local_columns,
* Constructor.
*/
TimeStepper(const TimeStepperData &data = TimeStepperData(),
- const MPI_Comm & mpi_comm = PETSC_COMM_WORLD);
+ const MPI_Comm mpi_comm = PETSC_COMM_WORLD);
/**
* Destructor.
std::constructible_from<VectorType, Vec>))
TimeStepper<VectorType, PMatrixType, AMatrixType>::TimeStepper(
const TimeStepperData &data,
- const MPI_Comm & mpi_comm)
+ const MPI_Comm mpi_comm)
{
AssertPETSc(TSCreate(mpi_comm, &ts));
AssertPETSc(TSSetApplicationContext(ts, this));
* <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one
* of length zero.
*/
- explicit Vector(const MPI_Comm &communicator,
+ explicit Vector(const MPI_Comm communicator,
const size_type n,
const size_type locally_owned_size);
* different parts of the vector shall communicate
*/
template <typename Number>
- explicit Vector(const MPI_Comm & communicator,
+ explicit Vector(const MPI_Comm communicator,
const dealii::Vector<Number> &v,
const size_type locally_owned_size);
*/
Vector(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Construct a new parallel PETSc vector without ghost elements from an
* not reordered by component (use a PETScWrappers::BlockVector
* otherwise).
*/
- explicit Vector(const IndexSet &local, const MPI_Comm &communicator);
+ explicit Vector(const IndexSet &local, const MPI_Comm communicator);
/**
* Copy constructor.
* Otherwise, the elements are left an unspecified state.
*/
void
- reinit(const MPI_Comm &communicator,
+ reinit(const MPI_Comm communicator,
const size_type N,
const size_type locally_owned_size,
const bool omit_zeroing_entries = false);
void
reinit(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Reinit as a vector without ghost elements. See constructor with same
* @ref GlossGhostedVector "vectors with ghost elements"
*/
void
- reinit(const IndexSet &local, const MPI_Comm &communicator);
+ reinit(const IndexSet &local, const MPI_Comm communicator);
/**
* Initialize the vector given to the parallel partitioning described in
* locally.
*/
virtual void
- create_vector(const MPI_Comm &comm,
+ create_vector(const MPI_Comm comm,
const size_type n,
const size_type locally_owned_size);
* you need to call update_ghost_values() before accessing those.
*/
virtual void
- create_vector(const MPI_Comm &comm,
+ create_vector(const MPI_Comm comm,
const size_type n,
const size_type locally_owned_size,
const IndexSet &ghostnodes);
# ifndef DOXYGEN
template <typename number>
- Vector::Vector(const MPI_Comm & communicator,
+ Vector::Vector(const MPI_Comm communicator,
const dealii::Vector<number> &v,
const size_type locally_owned_size)
{
& tpetra_vector,
const IndexSet & locally_owned_elements,
VectorOperation::values operation,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern);
# endif
import(const Epetra_MultiVector &multivector,
const IndexSet & locally_owned_elements,
VectorOperation::values operation,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern);
#endif
*/
TpetraWrappers::CommunicationPattern
create_tpetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm);
+ const MPI_Comm mpi_comm);
# endif
/**
*/
EpetraWrappers::CommunicationPattern
create_epetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm);
+ const MPI_Comm mpi_comm);
#endif
/**
const Tpetra::Vector<Number, int, types::signed_global_dof_index> &vector,
const IndexSet & source_elements,
VectorOperation::values operation,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern)
{
const Epetra_MultiVector &multivector,
const IndexSet & source_elements,
VectorOperation::values operation,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern)
{
TpetraWrappers::CommunicationPattern
ReadWriteVector<Number>::create_tpetra_comm_pattern(
const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm)
+ const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
TpetraWrappers::CommunicationPattern epetra_comm_pattern(
EpetraWrappers::CommunicationPattern
ReadWriteVector<Number>::create_epetra_comm_pattern(
const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm)
+ const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
EpetraWrappers::CommunicationPattern epetra_comm_pattern(
* Constructor. Takes the MPI communicator over which parallel
* computations are to happen.
*/
- SolverBase(SolverControl &cn, const MPI_Comm &mpi_communicator);
+ SolverBase(SolverControl &cn, const MPI_Comm mpi_communicator);
/**
* Destructor.
* behavior as the PETScWrappers, but you can change that.
*/
SolverKrylovSchur(SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
const AdditionalData &data = AdditionalData());
protected:
* behavior as the PETScWrappers, but you can change that.
*/
SolverArnoldi(SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
const AdditionalData &data = AdditionalData());
protected:
* behavior as the PETScWrappers, but you can change that.
*/
SolverLanczos(SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
const AdditionalData &data = AdditionalData());
protected:
* behavior as the PETScWrappers, but you can change that.
*/
SolverPower(SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
const AdditionalData &data = AdditionalData());
protected:
* computations are parallelized. By default, this carries the same
* behavior as the PETScWrappers, but you can change that.
*/
- SolverGeneralizedDavidson(
- SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
- const AdditionalData &data = AdditionalData());
+ SolverGeneralizedDavidson(SolverControl &cn,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
+ const AdditionalData &data = AdditionalData());
protected:
/**
* computations are parallelized. By default, this carries the same
* behavior as the PETScWrappers, but you can change that.
*/
- SolverJacobiDavidson(SolverControl & cn,
- const MPI_Comm &mpi_communicator = PETSC_COMM_SELF,
- const AdditionalData &data = AdditionalData());
+ SolverJacobiDavidson(SolverControl &cn,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
+ const AdditionalData &data = AdditionalData());
protected:
/**
* behavior as the PETScWrappers, but you can change that.
*/
SolverLAPACK(SolverControl & cn,
- const MPI_Comm & mpi_communicator = PETSC_COMM_SELF,
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF,
const AdditionalData &data = AdditionalData());
protected:
/**
* Constructor.
*/
- TransformationBase(const MPI_Comm &mpi_communicator);
+ TransformationBase(const MPI_Comm mpi_communicator);
public:
/**
/**
* Constructor.
*/
- TransformationShift(const MPI_Comm & mpi_communicator,
+ TransformationShift(const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
/**
* Constructor.
*/
- TransformationShiftInvert(const MPI_Comm & mpi_communicator,
+ TransformationShiftInvert(const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
* Constructor.
*/
TransformationSpectrumFolding(
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
/**
* Constructor.
*/
- TransformationCayley(const MPI_Comm & mpi_communicator,
+ TransformationCayley(const MPI_Comm mpi_communicator,
const AdditionalData &data = AdditionalData());
protected:
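A sketch tying the SLEPc solver and spectral transformation constructors above together; the shift value, convergence settings, and pre-sized eigenvector array are illustrative assumptions:

#include <deal.II/lac/petsc_sparse_matrix.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/slepc_solver.h>
#include <deal.II/lac/slepc_spectral_transformation.h>
#include <deal.II/lac/solver_control.h>

using namespace dealii;

void smallest_eigenpairs(const PETScWrappers::MPI::SparseMatrix & A,
                         std::vector<double> &                    eigenvalues,
                         std::vector<PETScWrappers::MPI::Vector> &eigenvectors)
{
  SolverControl control(1000, 1e-9);

  // Both the solver and the spectral transformation now take the
  // communicator by value.
  SLEPcWrappers::SolverKrylovSchur eigensolver(control, MPI_COMM_WORLD);
  SLEPcWrappers::TransformationShiftInvert shift(
    MPI_COMM_WORLD,
    SLEPcWrappers::TransformationShiftInvert::AdditionalData(0.));
  eigensolver.set_transformation(shift);

  // eigenvectors is assumed to be pre-sized and initialized by the caller.
  eigensolver.solve(A, eigenvalues, eigenvectors, eigenvectors.size());
}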
{
template <typename Number>
void
- sum(const SparseMatrix<Number> &, const MPI_Comm &, SparseMatrix<Number> &);
+ sum(const SparseMatrix<Number> &, const MPI_Comm, SparseMatrix<Number> &);
}
} // namespace Utilities
# endif
template <typename Number>
friend void
Utilities::MPI::sum(const SparseMatrix<Number> &,
- const MPI_Comm &,
+ const MPI_Comm,
SparseMatrix<Number> &);
#endif
};
{
template <typename T>
std::tuple<T, T>
- compute_prefix_sum(const T &value, const MPI_Comm &comm)
+ compute_prefix_sum(const T &value, const MPI_Comm comm)
{
# ifndef DEAL_II_WITH_MPI
(void)comm;
extract_remote_rows(const SparseMatrixType & system_matrix,
const SparsityPatternType &sparsity_pattern,
const IndexSet & locally_active_dofs,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
std::vector<unsigned int> dummy(locally_active_dofs.n_elements());
void
distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & locally_relevant_rows);
/**
distribute_sparsity_pattern(
DynamicSparsityPattern & dsp,
const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & myrange);
/**
void
distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet &locally_relevant_rows);
/**
void
distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & myrange);
/**
void
gather_sparsity_pattern(DynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & locally_relevant_rows);
#endif
void
reinit(const std::vector<IndexSet> & input_maps,
const BlockSparsityPatternType &block_sparsity_pattern,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool exchange_data = false);
/**
reinit(
const std::vector<IndexSet> & parallel_partitioning,
const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const double drop_tolerance = 1e-13);
/**
*/
CommunicationPattern(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Reinitialize the object.
virtual void
reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Return the underlying MPI communicator.
* need to generate a %parallel vector.
*/
explicit Vector(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Reinit functionality. This function destroys the old vector content
*/
void
reinit(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool omit_zeroing_entries = false);
/**
*/
void
create_epetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm);
+ const MPI_Comm mpi_comm);
/**
* Pointer to the actual Epetra vector object.
* the MPI processes.
*/
explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Creates a BlockVector with ghost elements. See the respective
*/
BlockVector(const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_values,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool vector_writable = false);
/**
*/
void
reinit(const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool omit_zeroing_entries = false);
/**
void
reinit(const std::vector<IndexSet> &partitioning,
const std::vector<IndexSet> &ghost_values,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool vector_writable = false);
/*-------------------------- Inline functions ---------------------------*/
inline BlockVector::BlockVector(
const std::vector<IndexSet> &parallel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
reinit(parallel_partitioning, communicator, false);
}
inline BlockVector::BlockVector(
const std::vector<IndexSet> &parallel_partitioning,
const std::vector<IndexSet> &ghost_values,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool vector_writable)
{
reinit(parallel_partitioning,
* use (in the compress() step).
*/
SparseMatrix(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const unsigned int n_max_entries_per_row = 0);
/**
* by the respective SparseMatrix::reinit call considerably faster.
*/
SparseMatrix(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row);
/**
*/
SparseMatrix(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_max_entries_per_row = 0);
/**
*/
SparseMatrix(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row);
/**
void
reinit(const IndexSet & parallel_partitioning,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool exchange_data = false);
/**
reinit(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool exchange_data = false);
/**
void
reinit(const IndexSet & parallel_partitioning,
const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const double drop_tolerance = 1e-13,
const bool copy_values = true,
const ::dealii::SparsityPattern * use_this_sparsity = nullptr);
reinit(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const double drop_tolerance = 1e-13,
const bool copy_values = true,
const ::dealii::SparsityPattern * use_this_sparsity = nullptr);
TrilinosPayload(EpetraOpType & op,
const bool supports_inverse_operations,
const bool use_transpose,
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const IndexSet &locally_owned_domain_indices,
const IndexSet &locally_owned_range_indices);
inline void
SparseMatrix::reinit(const IndexSet & parallel_partitioning,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool exchange_data)
{
reinit(parallel_partitioning,
inline void
SparseMatrix::reinit(const IndexSet &parallel_partitioning,
const ::dealii::SparseMatrix<number> &sparse_matrix,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const double drop_tolerance,
const bool copy_values,
const ::dealii::SparsityPattern * use_this_sparsity)
EpetraOpType & op,
const bool supports_inverse_operations,
const bool use_transpose,
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const IndexSet &locally_owned_domain_indices,
const IndexSet &locally_owned_range_indices)
: use_transpose(use_transpose)
* the performance when creating the sparsity pattern.
*/
SparsityPattern(const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
* designed to describe.
*/
SparsityPattern(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row);
/**
*/
SparsityPattern(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
*/
SparsityPattern(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row);
/**
SparsityPattern(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const IndexSet &writable_rows,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
*/
void
reinit(const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
*/
void
reinit(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row);
/**
void
reinit(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
reinit(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const IndexSet &writeable_rows,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const size_type n_entries_per_row = 0);
/**
void
reinit(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row);
/**
reinit(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const SparsityPatternType &nontrilinos_sparsity_pattern,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool exchange_data = false);
/**
void
reinit(const IndexSet & parallel_partitioning,
const SparsityPatternType &nontrilinos_sparsity_pattern,
- const MPI_Comm & communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool exchange_data = false);
/** @} */
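A sketch of the Trilinos sparsity-pattern/matrix path with the new signatures; the diagonal pattern is a made-up example, not taken from the patch:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/vector_operation.h>

using namespace dealii;

void make_trilinos_matrix(const IndexSet &locally_owned_rows)
{
  // Hypothetical pattern: one entry per locally owned row, on the diagonal.
  DynamicSparsityPattern dsp(locally_owned_rows.size(),
                             locally_owned_rows.size());
  for (const auto row : locally_owned_rows)
    dsp.add(row, row);

  TrilinosWrappers::SparseMatrix matrix;
  // The communicator argument is now by value and still defaults to
  // MPI_COMM_WORLD.
  matrix.reinit(locally_owned_rows, dsp, MPI_COMM_WORLD);

  for (const auto row : locally_owned_rows)
    matrix.set(row, row, 1.);
  matrix.compress(VectorOperation::insert);
}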
/**
*/
CommunicationPattern(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Reinitialize the object.
virtual void
reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator) override;
+ const MPI_Comm communicator) override;
/**
* Return the underlying MPI communicator.
* need to generate a %parallel vector.
*/
explicit Vector(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator);
+ const MPI_Comm communicator);
/**
* Reinit functionality. This function destroys the old vector content
*/
void
reinit(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool omit_zeroing_entries = false);
/**
*/
void
create_tpetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm);
+ const MPI_Comm mpi_comm);
/**
* Pointer to the actual Tpetra vector object.
template <typename Number>
Vector<Number>::Vector(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: Subscriptor()
, vector(new Tpetra::Vector<Number, int, types::signed_global_dof_index>(
Teuchos::rcp(new Tpetra::Map<int, types::signed_global_dof_index>(
template <typename Number>
void
Vector<Number>::reinit(const IndexSet &parallel_partitioner,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool omit_zeroing_entries)
{
Tpetra::Map<int, types::signed_global_dof_index> input_map =
template <typename Number>
void
Vector<Number>::create_tpetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm)
+ const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
tpetra_comm_pattern =
* @ref GlossGhostedVector "vectors with ghost elements"
*/
explicit Vector(const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Creates a ghosted parallel vector.
*/
Vector(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Copy constructor from the TrilinosWrappers vector class. Since a
*/
Vector(const IndexSet &parallel_partitioning,
const Vector & v,
- const MPI_Comm &communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Copy-constructor from deal.II vectors. Sets the dimension to that of
template <typename Number>
Vector(const IndexSet & parallel_partitioning,
const dealii::Vector<Number> &v,
- const MPI_Comm & communicator = MPI_COMM_WORLD);
+ const MPI_Comm communicator = MPI_COMM_WORLD);
/**
* Move constructor. Creates a new vector by stealing the internal data
*/
void
reinit(const IndexSet &parallel_partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool omit_zeroing_entries = false);
/**
void
reinit(const IndexSet &locally_owned_entries,
const IndexSet &locally_relevant_or_ghost_entries,
- const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const MPI_Comm communicator = MPI_COMM_WORLD,
const bool vector_writable = false);
/**
template <typename number>
Vector::Vector(const IndexSet & parallel_partitioner,
const dealii::Vector<number> &v,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
*this =
Vector(parallel_partitioner.make_trilinos_map(communicator, true), v);
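And the analogous TrilinosWrappers::MPI::Vector usage, again with placeholder index sets:

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

void make_trilinos_vectors(const IndexSet &locally_owned,
                           const IndexSet &locally_relevant)
{
  // Non-ghosted vector; the communicator defaults to MPI_COMM_WORLD and is
  // now passed by value.
  TrilinosWrappers::MPI::Vector owned_vector(locally_owned, MPI_COMM_WORLD);

  // Ghosted (read-only) vector built from owned plus relevant entries.
  TrilinosWrappers::MPI::Vector ghosted_vector;
  ghosted_vector.reinit(locally_owned, locally_relevant, MPI_COMM_WORLD);
  ghosted_vector = owned_vector;
}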
*/
void
assign_ghosts(const std::vector<unsigned int> &boundary_cells,
- const MPI_Comm & communicator_sm,
+ const MPI_Comm communicator_sm,
const bool use_vector_data_exchanger_full);
/**
const std::vector<FaceToCellTopology<1>> &inner_faces,
const std::vector<FaceToCellTopology<1>> &ghosted_faces,
const bool fill_cell_centric,
- const MPI_Comm & communicator_sm,
+ const MPI_Comm communicator_sm,
const bool use_vector_data_exchanger_full);
/**
public:
Full(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
- const MPI_Comm &communicator_sm);
+ const MPI_Comm communicator_sm);
unsigned int
locally_owned_size() const override;
* @param mpi_comm MPI Communicator over which logging operations are
* computed. Only used in SUNDIALS 6 and newer.
*/
- ARKode(const AdditionalData &data, const MPI_Comm &mpi_comm);
+ ARKode(const AdditionalData &data, const MPI_Comm mpi_comm);
/**
* Destructor.
* @param mpi_comm MPI Communicator over which logging operations are
* computed. Only used in SUNDIALS 6 and newer.
*/
- IDA(const AdditionalData &data, const MPI_Comm &mpi_comm);
+ IDA(const AdditionalData &data, const MPI_Comm mpi_comm);
/**
* Destructor.
* @param mpi_comm MPI Communicator over which logging operations are
* computed. Only used in SUNDIALS 6 and newer.
*/
- KINSOL(const AdditionalData &data, const MPI_Comm &mpi_comm);
+ KINSOL(const AdditionalData &data, const MPI_Comm mpi_comm);
/**
* Destructor.
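A sketch for the SUNDIALS wrappers; the Vector<double> instantiation and the default-constructed settings object are assumptions made for illustration, and the communicator is only used with SUNDIALS 6 and newer, as noted above:

#include <deal.II/lac/vector.h>
#include <deal.II/sundials/kinsol.h>

using namespace dealii;

void setup_kinsol()
{
  using VectorType = Vector<double>;

  SUNDIALS::KINSOL<VectorType>::AdditionalData data;
  // The communicator is now taken by value.
  SUNDIALS::KINSOL<VectorType> solver(data, MPI_COMM_WORLD);

  // solver.residual, solver.solve_with_jacobian, ... would be assigned here
  // before calling solver.solve() on the current solution vector.
}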
& nonscalar_data_ranges,
const Deal_II_IntermediateFlags &flags,
const std::string & filename,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const CompressionLevel compression)
{
#ifndef DEAL_II_WITH_MPI
void
DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
const std::string &filename,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
#ifndef DEAL_II_WITH_MPI
// without MPI fall back to the normal way to write a vtu file:
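As a usage sketch (the file name is made up, and build_patches() is assumed to have been called on the DataOut object beforehand):

#include <deal.II/numerics/data_out.h>

using namespace dealii;

template <int dim>
void write_output(const DataOut<dim> &data_out)
{
  // The communicator is the last argument and is now passed by value.
  data_out.write_vtu_in_parallel("solution.vtu", MPI_COMM_WORLD);
}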
const std::string &directory,
const std::string &filename_without_extension,
const unsigned int counter,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const unsigned int n_digits_for_counter,
const unsigned int n_groups) const
{
void
DataOutInterface<dim, spacedim>::write_deal_II_intermediate_in_parallel(
const std::string & filename,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const DataOutBase::CompressionLevel compression) const
{
DataOutBase::write_deal_II_intermediate_in_parallel(
const DataOutBase::DataOutFilter &data_filter,
const std::string & h5_filename,
const double cur_time,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
return create_xdmf_entry(
data_filter, h5_filename, h5_filename, cur_time, comm);
const std::string & h5_mesh_filename,
const std::string & h5_solution_filename,
const double cur_time,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
AssertThrow(spacedim == 2 || spacedim == 3,
ExcMessage("XDMF only supports 2 or 3 space dimensions."));
DataOutInterface<dim, spacedim>::write_xdmf_file(
const std::vector<XDMFEntry> &entries,
const std::string & filename,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
#ifdef DEAL_II_WITH_MPI
const int myrank = Utilities::MPI::this_mpi_process(comm);
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
hid_t h5_mesh_file_id = -1, h5_solution_file_id, file_plist_id, plist_id;
hid_t node_dataspace, node_dataset, node_file_dataspace,
DataOutInterface<dim, spacedim>::write_hdf5_parallel(
const DataOutBase::DataOutFilter &data_filter,
const std::string & filename,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
DataOutBase::write_hdf5_parallel(
get_patches(), data_filter, hdf5_flags, filename, comm);
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- const MPI_Comm & comm) const
+ const MPI_Comm comm) const
{
DataOutBase::write_hdf5_parallel(get_patches(),
data_filter,
const DataOutBase::DataOutFilter & data_filter,
const DataOutBase::Hdf5Flags & flags,
const std::string & filename,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
write_hdf5_parallel(
patches, data_filter, flags, true, filename, filename, comm);
const bool write_mesh_file,
const std::string & mesh_filename,
const std::string & solution_filename,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
AssertThrow(
spacedim >= 2,
& nonscalar_data_ranges,
const Deal_II_IntermediateFlags &flags,
const std::string & filename,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const CompressionLevel compression);
template void
const DataOutFilter & data_filter,
const DataOutBase::Hdf5Flags &flags,
const std::string & filename,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
template void
write_filtered_data(
File::File(const std::string & name,
const FileAccessMode mode,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: File(name, mode, true, mpi_communicator)
{}
File::File(const std::string & name,
const FileAccessMode mode,
const bool mpi,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: Group(name, mpi)
{
hdf5_reference = std::shared_ptr<hid_t>(new hid_t, [](hid_t *pointer) {
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
Tpetra::Map<int, types::signed_global_dof_index>
-IndexSet::make_tpetra_map(const MPI_Comm &communicator,
- const bool overlapping) const
+IndexSet::make_tpetra_map(const MPI_Comm communicator,
+ const bool overlapping) const
{
compress();
(void)communicator;
Epetra_Map
-IndexSet::make_trilinos_map(const MPI_Comm &communicator,
- const bool overlapping) const
+IndexSet::make_trilinos_map(const MPI_Comm communicator,
+ const bool overlapping) const
{
compress();
(void)communicator;
#ifdef DEAL_II_WITH_PETSC
IS
-IndexSet::make_petsc_is(const MPI_Comm &communicator) const
+IndexSet::make_petsc_is(const MPI_Comm communicator) const
{
std::vector<size_type> indices;
fill_index_vector(indices);
bool
-IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const
+IndexSet::is_ascending_and_one_to_one(const MPI_Comm communicator) const
{
// If the sum of local elements does not add up to the total size,
// the IndexSet can't be complete.
MinMaxAvg
- min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
+ min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
{
MinMaxAvg result;
min_max_avg(ArrayView<const double>(my_value),
std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_values,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
std::vector<MinMaxAvg> results(my_values.size());
min_max_avg(my_values, results, mpi_communicator);
#ifdef DEAL_II_WITH_MPI
unsigned int
- n_mpi_processes(const MPI_Comm &mpi_communicator)
+ n_mpi_processes(const MPI_Comm mpi_communicator)
{
int n_jobs = 1;
const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
unsigned int
- this_mpi_process(const MPI_Comm &mpi_communicator)
+ this_mpi_process(const MPI_Comm mpi_communicator)
{
int rank = 0;
const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
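A complete, minimal program exercising a few of the Utilities::MPI helpers above with the by-value signatures (the printed text is arbitrary):

#include <deal.II/base/mpi.h>

#include <iostream>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);

  // All of these helpers now take the communicator by value.
  const unsigned int rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs =
    dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(1.0 * rank, MPI_COMM_WORLD);

  if (rank == 0)
    std::cout << n_procs << " ranks, max rank id = " << stats.max << std::endl;
}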
const std::vector<unsigned int>
- mpi_processes_within_communicator(const MPI_Comm &comm_large,
- const MPI_Comm &comm_small)
+ mpi_processes_within_communicator(const MPI_Comm comm_large,
+ const MPI_Comm comm_small)
{
if (Utilities::MPI::job_supports_mpi() == false)
return std::vector<unsigned int>{0};
MPI_Comm
- duplicate_communicator(const MPI_Comm &mpi_communicator)
+ duplicate_communicator(const MPI_Comm mpi_communicator)
{
MPI_Comm new_communicator;
const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
void
- free_communicator(MPI_Comm &mpi_communicator)
+ free_communicator(MPI_Comm mpi_communicator)
{
// MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
const int ierr = MPI_Comm_free(&mpi_communicator);
int
- create_group(const MPI_Comm & comm,
+ create_group(const MPI_Comm comm,
const MPI_Group &group,
const int tag,
MPI_Comm * new_comm)
std::vector<IndexSet>
create_ascending_partitioning(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const types::global_dof_index locally_owned_size)
{
static_assert(
IndexSet
create_evenly_distributed_partitioning(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const types::global_dof_index total_size)
{
const unsigned int this_proc = this_mpi_process(comm);
std::vector<unsigned int>
compute_point_to_point_communication_pattern(
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::vector<unsigned int> &destinations)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
unsigned int
compute_n_point_to_point_communications(
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::vector<unsigned int> &destinations)
{
// Have a little function that checks if destinations provided
void
min_max_avg(const ArrayView<const double> &my_values,
const ArrayView<MinMaxAvg> & result,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
// If MPI was not started, we have a serial computation and cannot run
// the other MPI commands
#else
unsigned int
- n_mpi_processes(const MPI_Comm &)
+ n_mpi_processes(const MPI_Comm)
{
return 1;
}
unsigned int
- this_mpi_process(const MPI_Comm &)
+ this_mpi_process(const MPI_Comm)
{
return 0;
}
const std::vector<unsigned int>
- mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
+ mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
{
return std::vector<unsigned int>{0};
}
std::vector<IndexSet>
create_ascending_partitioning(
- const MPI_Comm & /*comm*/,
+ const MPI_Comm /*comm*/,
const types::global_dof_index locally_owned_size)
{
return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
IndexSet
create_evenly_distributed_partitioning(
- const MPI_Comm & /*comm*/,
+ const MPI_Comm /*comm*/,
const types::global_dof_index total_size)
{
return complete_index_set(total_size);
MPI_Comm
- duplicate_communicator(const MPI_Comm &mpi_communicator)
+ duplicate_communicator(const MPI_Comm mpi_communicator)
{
return mpi_communicator;
}
- void
- free_communicator(MPI_Comm & /*mpi_communicator*/)
+ void
+ free_communicator(MPI_Comm /*mpi_communicator*/)
{}
void
min_max_avg(const ArrayView<const double> &my_values,
const ArrayView<MinMaxAvg> & result,
- const MPI_Comm &)
+ const MPI_Comm)
{
AssertDimension(my_values.size(), result.size());
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
const IndexSet &indices_to_look_up,
- const MPI_Comm &comm)
+ const MPI_Comm comm)
{
Assert(owned_indices.size() == indices_to_look_up.size(),
ExcMessage("IndexSets have to have the same sizes."));
void
- CollectiveMutex::lock(const MPI_Comm &comm)
+ CollectiveMutex::lock(const MPI_Comm comm)
{
(void)comm;
void
- CollectiveMutex::unlock(const MPI_Comm &comm)
+ CollectiveMutex::unlock(const MPI_Comm comm)
{
(void)comm;
// booleans aren't in MPI_SCALARS
template bool
reduce(const bool &,
- const MPI_Comm &,
+ const MPI_Comm,
const std::function<bool(const bool &, const bool &)> &,
const unsigned int);
template std::vector<bool>
reduce(const std::vector<bool> &,
- const MPI_Comm &,
+ const MPI_Comm,
const std::function<std::vector<bool>(const std::vector<bool> &,
const std::vector<bool> &)> &,
const unsigned int);
template bool
all_reduce(const bool &,
- const MPI_Comm &,
+ const MPI_Comm,
const std::function<bool(const bool &, const bool &)> &);
template std::vector<bool>
all_reduce(
const std::vector<bool> &,
- const MPI_Comm &,
+ const MPI_Comm,
const std::function<std::vector<bool>(const std::vector<bool> &,
const std::vector<bool> &)> &);
template void
internal::all_reduce<bool>(const MPI_Op &,
const ArrayView<const bool> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<bool> &);
template bool
- logical_or<bool>(const bool &, const MPI_Comm &);
+ logical_or<bool>(const bool &, const MPI_Comm);
template void
logical_or<bool>(const ArrayView<const bool> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<bool> &);
template std::vector<unsigned int>
compute_set_union(const std::vector<unsigned int> &vec,
- const MPI_Comm & comm);
+ const MPI_Comm comm);
template std::set<unsigned int>
- compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
+ compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
#endif
#include "mpi.inst"
for (S : REAL_SCALARS)
{
template void sum<S>(const SparseMatrix<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
SparseMatrix<S> &);
}
for (S : MPI_SCALARS)
{
template void sum<LAPACKFullMatrix<S>>(const LAPACKFullMatrix<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
LAPACKFullMatrix<S> &);
template void sum<Vector<S>>(const Vector<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
Vector<S> &);
template void sum<FullMatrix<S>>(const FullMatrix<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
FullMatrix<S> &);
template void sum<S>(const ArrayView<const S> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<S> &);
- template S sum<S>(const S &, const MPI_Comm &);
+ template S sum<S>(const S &, const MPI_Comm);
template void sum<std::vector<S>>(const std::vector<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
std::vector<S> &);
- template S max<S>(const S &, const MPI_Comm &);
+ template S max<S>(const S &, const MPI_Comm);
template void max<std::vector<S>>(const std::vector<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
std::vector<S> &);
template void max<S>(const ArrayView<const S> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<S> &);
- template S min<S>(const S &, const MPI_Comm &);
+ template S min<S>(const S &, const MPI_Comm);
template void min<std::vector<S>>(const std::vector<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
std::vector<S> &);
template void min<S>(const ArrayView<const S> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<S> &);
template S reduce(const S & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<S(const S &, const S &)> &process,
const unsigned int root_process);
template std::vector<S> reduce(
const std::vector<S> & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<std::vector<S>(const std::vector<S> &,
const std::vector<S> &)> &process,
const unsigned int root_process);
template S all_reduce(
const S & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<S(const S &, const S &)> &process);
template std::vector<S> all_reduce(
const std::vector<S> & vec,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::function<std::vector<S>(const std::vector<S> &,
const std::vector<S> &)> &process);
template void Utilities::MPI::internal::all_reduce<S>(
const MPI_Op &,
const ArrayView<const S> &,
- const MPI_Comm &,
+ const MPI_Comm,
const ArrayView<S> &);
}
for (S : REAL_SCALARS; rank : RANKS; dim : SPACE_DIMENSIONS)
{
template Tensor<rank, dim, S> sum<rank, dim, S>(
- const Tensor<rank, dim, S> &, const MPI_Comm &);
+ const Tensor<rank, dim, S> &, const MPI_Comm);
}
for (S : REAL_SCALARS; dim : SPACE_DIMENSIONS)
{
template SymmetricTensor<2, dim, S> sum<2, dim, S>(
- const SymmetricTensor<2, dim, S> &, const MPI_Comm &);
+ const SymmetricTensor<2, dim, S> &, const MPI_Comm);
template SymmetricTensor<4, dim, S> sum<4, dim, S>(
- const SymmetricTensor<4, dim, S> &, const MPI_Comm &);
+ const SymmetricTensor<4, dim, S> &, const MPI_Comm);
}
void
- Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm &comm)
+ Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
{
// 1) set up the partition
this->partition(owned_indices, comm);
void
Dictionary::partition(const IndexSet &owned_indices,
- const MPI_Comm &comm)
+ const MPI_Comm comm)
{
#ifdef DEAL_II_WITH_MPI
const unsigned int n_procs = n_mpi_processes(comm);
ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
const IndexSet & owned_indices,
const IndexSet & indices_to_look_up,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
std::vector<unsigned int> &owning_ranks,
const bool track_index_requests)
: owned_indices(owned_indices)
NoncontiguousPartitioner::NoncontiguousPartitioner(
const IndexSet &indexset_has,
const IndexSet &indexset_want,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
this->reinit(indexset_has, indexset_want, communicator);
}
NoncontiguousPartitioner::NoncontiguousPartitioner(
const std::vector<types::global_dof_index> &indices_has,
const std::vector<types::global_dof_index> &indices_want,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
this->reinit(indices_has, indices_want, communicator);
}
void
NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
const IndexSet &indexset_want,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
this->communicator = communicator;
NoncontiguousPartitioner::reinit(
const std::vector<types::global_dof_index> &indices_has,
const std::vector<types::global_dof_index> &indices_want,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// step 0) clean vectors from numbers::invalid_dof_index (indicating
// padding)
Partitioner::Partitioner(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
: global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
communicator))
, locally_owned_range_data(global_size)
Partitioner::Partitioner(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices_in,
- const MPI_Comm &communicator_in)
+ const MPI_Comm communicator_in)
: global_size(
static_cast<types::global_dof_index>(locally_owned_indices.size()))
, n_ghost_indices_data(0)
Partitioner::Partitioner(const IndexSet &locally_owned_indices,
- const MPI_Comm &communicator_in)
+ const MPI_Comm communicator_in)
: global_size(
static_cast<types::global_dof_index>(locally_owned_indices.size()))
, n_ghost_indices_data(0)
void
Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator_in)
+ const MPI_Comm communicator_in)
{
have_ghost_indices = false;
communicator = communicator_in;
* https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
*/
inline std::pair<int, int>
- compute_processor_grid_sizes(const MPI_Comm & mpi_comm,
+ compute_processor_grid_sizes(const MPI_Comm mpi_comm,
const unsigned int m,
const unsigned int n,
const unsigned int block_size_m,
namespace MPI
{
ProcessGrid::ProcessGrid(
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const std::pair<unsigned int, unsigned int> &grid_dimensions)
: mpi_communicator(mpi_comm)
, this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
- ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm,
+ ProcessGrid::ProcessGrid(const MPI_Comm mpi_comm,
const unsigned int n_rows_matrix,
const unsigned int n_columns_matrix,
const unsigned int row_block_size,
- ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm,
+ ProcessGrid::ProcessGrid(const MPI_Comm mpi_comm,
const unsigned int n_rows,
const unsigned int n_columns)
: ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
-Timer::Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times_)
+Timer::Timer(const MPI_Comm mpi_communicator, const bool sync_lap_times_)
: running(false)
, mpi_communicator(mpi_communicator)
, sync_lap_times(sync_lap_times_)
-TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm mpi_communicator,
std::ostream & stream,
const OutputFrequency output_frequency,
const OutputType output_type)
-TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm mpi_communicator,
ConditionalOStream & stream,
const OutputFrequency output_frequency,
const OutputType output_type)
void
-TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
- const double quantile) const
+TimerOutput::print_wall_time_statistics(const MPI_Comm mpi_comm,
+ const double quantile) const
{
// we are going to change the precision and width of output below. store the
// old values so they get restored when exiting this function
{
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
- Triangulation<dim, spacedim>::Triangulation(
- const MPI_Comm &mpi_communicator)
+ Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
: parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
, settings(TriangulationDescription::Settings::default_setting)
, partitioner([](dealii::Triangulation<dim, spacedim> &tria,
template <typename number>
double
compute_global_sum(const dealii::Vector<number> &criteria,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
double my_sum =
std::accumulate(criteria.begin(),
std::pair<number, number>
compute_global_min_and_max_at_root(
const dealii::Vector<number> &criteria,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
// we'd like to compute the global max and min from the local ones in
// one MPI communication. we can do that by taking the elementwise
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const types::global_cell_index n_target_cells,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
double interesting_range[2] = {global_min_and_max.first,
global_min_and_max.second};
compute_threshold(const dealii::Vector<number> & criteria,
const std::pair<double, double> &global_min_and_max,
const double target_error,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
double interesting_range[2] = {global_min_and_max.first,
global_min_and_max.second};
\{
template std::pair<S, S>
compute_global_min_and_max_at_root<S>(const dealii::Vector<S> &,
- const MPI_Comm &);
+ const MPI_Comm);
namespace RefineAndCoarsenFixedNumber
\{
compute_threshold<S>(const dealii::Vector<S> &,
const std::pair<double, double> &,
const types::global_cell_index,
- const MPI_Comm &);
+ const MPI_Comm);
\}
namespace RefineAndCoarsenFixedFraction
\{
compute_threshold<S>(const dealii::Vector<S> &,
const std::pair<double, double> &,
const double,
- const MPI_Comm &);
+ const MPI_Comm);
\}
\}
\}
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
Triangulation<dim, spacedim>::Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool allow_artificial_cells,
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
Triangulation<dim, spacedim>::Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const Settings settings)
template <int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<1, spacedim>))
Triangulation<1, spacedim>::Triangulation(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
smooth_grid,
const Settings /*settings*/)
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
TriangulationBase<dim, spacedim>::TriangulationBase(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool check_for_distorted_cells)
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
- const MPI_Comm &mpi_communicator,
+ const MPI_Comm mpi_communicator,
const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
smooth_grid,
const bool check_for_distorted_cells)
template <int dim, int spacedim>
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
DistributedTriangulationBase<dim, spacedim>::DataTransfer::DataTransfer(
- const MPI_Comm &mpi_communicator)
+ const MPI_Comm mpi_communicator)
: variable_size_data_stored(false)
, mpi_communicator(mpi_communicator)
{}
std::vector<types::global_dof_index>
NumberCache::get_n_locally_owned_dofs_per_processor(
- const MPI_Comm &mpi_communicator) const
+ const MPI_Comm mpi_communicator) const
{
if (n_global_dofs == 0)
return std::vector<types::global_dof_index>();
std::vector<IndexSet>
NumberCache::get_locally_owned_dofs_per_processor(
- const MPI_Comm &mpi_communicator) const
+ const MPI_Comm mpi_communicator) const
{
AssertDimension(locally_owned_dofs.size(), n_global_dofs);
if (n_global_dofs == 0)
std::vector<unsigned int>,
std::vector<unsigned int>>
guess_owners_of_entities(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const std::vector<std::vector<BoundingBox<spacedim>>> &global_bboxes,
const std::vector<T> & entities,
const double tolerance)
std::vector<std::vector<BoundingBox<spacedim>>>
exchange_local_bounding_boxes(
const std::vector<BoundingBox<spacedim>> &local_bboxes,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
#ifndef DEAL_II_WITH_MPI
(void)local_bboxes;
RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
build_global_description_tree(
const std::vector<BoundingBox<spacedim>> &local_description,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
{
#ifndef DEAL_II_WITH_MPI
(void)mpi_communicator;
template std::vector<std::vector<BoundingBox<deal_II_space_dimension>>>
GridTools::exchange_local_bounding_boxes(
const std::vector<BoundingBox<deal_II_space_dimension>> &,
- const MPI_Comm &);
+ const MPI_Comm);
template std::tuple<std::vector<std::vector<unsigned int>>,
std::map<unsigned int, unsigned int>,
std::pair<BoundingBox<deal_II_space_dimension>, unsigned int>>
GridTools::build_global_description_tree(
const std::vector<BoundingBox<deal_II_space_dimension>> &,
- const MPI_Comm &);
+ const MPI_Comm);
template Vector<double> GridTools::compute_aspect_ratio_of_cells(
const Mapping<deal_II_space_dimension> &,
collect(
const std::vector<unsigned int> & relevant_processes,
const std::vector<DescriptionTemp<dim, spacedim>> &description_temp,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const bool vertices_have_unique_ids)
{
const auto create_request = [&](const unsigned int other_rank) {
const std::function<types::subdomain_id(
const typename dealii::Triangulation<dim, spacedim>::cell_iterator
&)> & level_subdomain_id_function,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const TriangulationDescription::Settings settings)
: tria(tria)
, subdomain_id_function(subdomain_id_function)
const typename dealii::Triangulation<dim, spacedim>::cell_iterator &)>
level_subdomain_id_function;
- const MPI_Comm & comm;
+ const MPI_Comm comm;
const TriangulationDescription::Settings settings;
const bool construct_multigrid;
Description<dim, spacedim>
create_description_from_triangulation(
const dealii::Triangulation<dim, spacedim> &tria,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const TriangulationDescription::Settings settings,
const unsigned int my_rank_in)
{
const std::function<void(dealii::Triangulation<dim, spacedim> &)>
& serial_grid_generator,
const std::function<void(dealii::Triangulation<dim, spacedim> &,
- const MPI_Comm &,
+ const MPI_Comm,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const int group_size,
const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing,
const TriangulationDescription::Settings settings)
create_description_from_triangulation(
const dealii::Triangulation<deal_II_dimension,
deal_II_space_dimension> &tria,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const TriangulationDescription::Settings settings,
const unsigned int my_rank_in);
&)> & serial_grid_generator,
const std::function<void(
dealii::Triangulation<deal_II_dimension, deal_II_space_dimension> &,
- const MPI_Comm &,
+ const MPI_Comm,
const unsigned int)> &serial_grid_partitioner,
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const int group_size,
const typename Triangulation<deal_II_dimension,
deal_II_space_dimension>::MeshSmoothing
BlockSparsityPattern::BlockSparsityPattern(
const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
: BlockSparsityPatternBase<SparsityPattern>(parallel_partitioning.size(),
parallel_partitioning.size())
{
const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &col_parallel_partitioning,
const std::vector<IndexSet> &writable_rows,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
: BlockSparsityPatternBase<SparsityPattern>(
row_parallel_partitioning.size(),
col_parallel_partitioning.size())
void
BlockSparsityPattern::reinit(
const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
dealii::BlockSparsityPatternBase<SparsityPattern>::reinit(
parallel_partitioning.size(), parallel_partitioning.size());
BlockSparsityPattern::reinit(
const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &col_parallel_partitioning,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
dealii::BlockSparsityPatternBase<SparsityPattern>::reinit(
row_parallel_partitioning.size(), col_parallel_partitioning.size());
const std::vector<IndexSet> &row_parallel_partitioning,
const std::vector<IndexSet> &col_parallel_partitioning,
const std::vector<IndexSet> &writable_rows,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
AssertDimension(writable_rows.size(), row_parallel_partitioning.size());
dealii::BlockSparsityPatternBase<SparsityPattern>::reinit(
void
CommunicationPattern::reinit(const types::global_dof_index local_size,
const IndexSet & ghost_indices,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
clear();
void
CommunicationPattern::reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
std::vector<types::global_dof_index> in_deal;
locally_owned_indices.fill_index_vector(in_deal);
CommunicationPattern::reinit(
const std::vector<types::global_dof_index> &indices_has,
const std::vector<types::global_dof_index> &indices_want,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// Clean vectors from numbers::invalid_dof_index (indicating padding)
std::vector<PetscInt> indices_has_clean, indices_has_loc;
const std::vector<PetscInt> &inloc,
const std::vector<PetscInt> &outidx,
const std::vector<PetscInt> &outloc,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
clear();
void
Partitioner::reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
ghost_indices_data = ghost_indices;
ghost_indices_data.subtract_set(locally_owned_indices);
Partitioner::reinit(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices,
const IndexSet &larger_ghost_indices,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
std::vector<types::global_dof_index> local_indices;
locally_owned_indices.fill_index_vector(local_indices);
- MatrixFree::MatrixFree(const MPI_Comm & communicator,
+ MatrixFree::MatrixFree(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
MatrixFree::MatrixFree(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const std::vector<unsigned int> &local_rows_per_process,
void
- MatrixFree::reinit(const MPI_Comm & communicator,
+ MatrixFree::reinit(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
void
- MatrixFree::reinit(const MPI_Comm & communicator,
+ MatrixFree::reinit(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const std::vector<unsigned int> &local_rows_per_process,
void
- MatrixFree::do_reinit(const MPI_Comm & communicator,
+ MatrixFree::do_reinit(const MPI_Comm communicator,
const unsigned int m,
const unsigned int n,
const unsigned int local_rows,
BlockSparseMatrix::reinit(const std::vector<IndexSet> & rows,
const std::vector<IndexSet> & cols,
const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com)
+ const MPI_Comm com)
{
Assert(rows.size() == bdsp.n_block_rows(), ExcMessage("invalid size"));
Assert(cols.size() == bdsp.n_block_cols(), ExcMessage("invalid size"));
void
BlockSparseMatrix::reinit(const std::vector<IndexSet> & sizes,
const BlockDynamicSparsityPattern &bdsp,
- const MPI_Comm & com)
+ const MPI_Comm com)
{
reinit(sizes, sizes, bdsp, com);
}
template <typename SparsityPatternType>
SparseMatrix::SparseMatrix(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
const IndexSet & local_columns,
const IndexSet & local_active_columns,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// get rid of old matrix and generate a new one
const PetscErrorCode ierr = MatDestroy(&matrix);
template <typename SparsityPatternType>
void
SparseMatrix::reinit(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
void
SparseMatrix::reinit(const IndexSet & local_rows,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
do_reinit(communicator, local_rows, local_rows, sparsity_pattern);
}
SparseMatrix::reinit(const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator)
+ const MPI_Comm communicator)
{
// get rid of old matrix and generate a new one
const PetscErrorCode ierr = MatDestroy(&matrix);
template <typename SparsityPatternType>
void
- SparseMatrix::do_reinit(const MPI_Comm & communicator,
+ SparseMatrix::do_reinit(const MPI_Comm communicator,
const IndexSet & local_rows,
const IndexSet & local_columns,
const SparsityPatternType &sparsity_pattern)
template <typename SparsityPatternType>
void
SparseMatrix::do_reinit(
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const SparsityPatternType & sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
// BDDC
template <typename SparsityPatternType>
void
- SparseMatrix::do_reinit(const MPI_Comm & communicator,
+ SparseMatrix::do_reinit(const MPI_Comm communicator,
const IndexSet & local_rows,
const IndexSet & local_active_rows,
const IndexSet & local_columns,
# ifndef DOXYGEN
// explicit instantiations
//
- template SparseMatrix::SparseMatrix(const MPI_Comm &,
+ template SparseMatrix::SparseMatrix(const MPI_Comm,
const SparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const unsigned int,
const bool);
- template SparseMatrix::SparseMatrix(const MPI_Comm &,
+ template SparseMatrix::SparseMatrix(const MPI_Comm,
const DynamicSparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const bool);
template void
- SparseMatrix::reinit(const MPI_Comm &,
+ SparseMatrix::reinit(const MPI_Comm,
const SparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const unsigned int,
const bool);
template void
- SparseMatrix::reinit(const MPI_Comm &,
+ SparseMatrix::reinit(const MPI_Comm,
const DynamicSparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
template void
SparseMatrix::reinit(const IndexSet &,
const SparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const SparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
SparseMatrix::reinit(const IndexSet &,
const DynamicSparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const DynamicSparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const SparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const unsigned int,
const bool);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const DynamicSparsityPattern &,
const std::vector<size_type> &,
const std::vector<size_type> &,
const bool);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const IndexSet &,
const IndexSet &,
const SparsityPattern &);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const IndexSet &,
const IndexSet &,
const DynamicSparsityPattern &);
const IndexSet &,
const IndexSet &,
const SparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const IndexSet &,
const IndexSet &,
const DynamicSparsityPattern &,
- const MPI_Comm &);
+ const MPI_Comm);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const IndexSet &,
const IndexSet &,
const IndexSet &,
const IndexSet &,
const SparsityPattern &);
template void
- SparseMatrix::do_reinit(const MPI_Comm &,
+ SparseMatrix::do_reinit(const MPI_Comm,
const IndexSet &,
const IndexSet &,
const IndexSet &,
- Vector::Vector(const MPI_Comm &communicator,
+ Vector::Vector(const MPI_Comm communicator,
const size_type n,
const size_type locally_owned_size)
{
Vector::Vector(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
Assert(local.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
- Vector::Vector(const IndexSet &local, const MPI_Comm &communicator)
+ Vector::Vector(const IndexSet &local, const MPI_Comm communicator)
{
Assert(local.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
void
- Vector::reinit(const MPI_Comm &communicator,
+ Vector::reinit(const MPI_Comm communicator,
const size_type n,
const size_type local_sz,
const bool omit_zeroing_entries)
void
Vector::reinit(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &comm)
+ const MPI_Comm comm)
{
const PetscErrorCode ierr = VecDestroy(&vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
}
void
- Vector::reinit(const IndexSet &local, const MPI_Comm &comm)
+ Vector::reinit(const IndexSet &local, const MPI_Comm comm)
{
const PetscErrorCode ierr = VecDestroy(&vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
void
- Vector::create_vector(const MPI_Comm &communicator,
+ Vector::create_vector(const MPI_Comm communicator,
const size_type n,
const size_type locally_owned_size)
{
void
- Vector::create_vector(const MPI_Comm &communicator,
+ Vector::create_vector(const MPI_Comm communicator,
const size_type n,
const size_type locally_owned_size,
const IndexSet &ghostnodes)
namespace PETScWrappers
{
- PreconditionBase::PreconditionBase(const MPI_Comm &comm)
+ PreconditionBase::PreconditionBase(const MPI_Comm comm)
: pc(nullptr)
{
create_pc_with_comm(comm);
}
void
- PreconditionBase::create_pc_with_comm(const MPI_Comm &comm)
+ PreconditionBase::create_pc_with_comm(const MPI_Comm comm)
{
clear();
PetscErrorCode ierr = PCCreate(comm, &pc);
- PreconditionJacobi::PreconditionJacobi(const MPI_Comm & comm,
+ PreconditionJacobi::PreconditionJacobi(const MPI_Comm comm,
const AdditionalData &additional_data_)
: PreconditionBase(comm)
{
{}
PreconditionBlockJacobi::PreconditionBlockJacobi(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const AdditionalData &additional_data_)
: PreconditionBase(comm)
{
PreconditionBoomerAMG::PreconditionBoomerAMG(
- const MPI_Comm & comm,
+ const MPI_Comm comm,
const AdditionalData &additional_data_)
: PreconditionBase(comm)
{
initialize(matrix);
}
- PreconditionShell::PreconditionShell(const MPI_Comm &comm)
+ PreconditionShell::PreconditionShell(const MPI_Comm comm)
{
initialize(comm);
}
void
- PreconditionShell::initialize(const MPI_Comm &comm)
+ PreconditionShell::initialize(const MPI_Comm comm)
{
PetscErrorCode ierr;
if (pc)
void
- SolverBase::initialize_ksp_with_comm(const MPI_Comm &comm)
+ SolverBase::initialize_ksp_with_comm(const MPI_Comm comm)
{
// Create the PETSc KSP object
AssertPETSc(KSPCreate(comm, &ksp));
SolverRichardson::SolverRichardson(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverRichardson(cn, data)
{}
SolverChebychev::SolverChebychev(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverChebychev(cn, data)
{}
SolverCG::SolverCG(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverCG(cn, data)
{}
SolverBiCG::SolverBiCG(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverBiCG(cn, data)
{}
SolverGMRES::SolverGMRES(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverGMRES(cn, data)
{}
SolverBicgstab::SolverBicgstab(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverBicgstab(cn, data)
{}
SolverCGS::SolverCGS(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverCGS(cn, data)
{}
SolverTFQMR::SolverTFQMR(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverTFQMR(cn, data)
{}
SolverTCQMR::SolverTCQMR(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverTCQMR(cn, data)
{}
SolverCR::SolverCR(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverCR(cn, data)
{}
SolverLSQR::SolverLSQR(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverLSQR(cn, data)
{}
SolverPreOnly::SolverPreOnly(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SolverPreOnly(cn, data)
{}
SparseDirectMUMPS::SparseDirectMUMPS(SolverControl &cn,
- const MPI_Comm &,
+ const MPI_Comm,
const AdditionalData &data)
: SparseDirectMUMPS(cn, data)
{}
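
In the PETSc solver hunks above the communicator argument is not used at all: each constructor forwards to its communicator-free overload, so the parameter stays unnamed and the reference is simply dropped. A hypothetical stand-in class (not deal.II code) showing the same delegating-constructor pattern:

#include <mpi.h>

// 'SolverLike' is a made-up type; only the delegation pattern matters.
struct SolverLike
{
  explicit SolverLike(const int control)
    : control(control)
  {}

  // Communicator accepted for interface compatibility but ignored.
  SolverLike(const int control, const MPI_Comm /*unused*/)
    : SolverLike(control)
  {}

  int control;
};

int
main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  SolverLike solver(42, MPI_COMM_WORLD); // the handle is copied, then unused
  MPI_Finalize();
  return solver.control == 42 ? 0 : 1;
}
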
namespace SLEPcWrappers
{
- SolverBase::SolverBase(SolverControl &cn, const MPI_Comm &mpi_communicator)
+ SolverBase::SolverBase(SolverControl &cn, const MPI_Comm mpi_communicator)
: solver_control(cn)
, mpi_communicator(mpi_communicator)
, reason(EPS_CONVERGED_ITERATING)
/* ---------------------- SolverKrylovSchur ------------------------ */
SolverKrylovSchur::SolverKrylovSchur(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
SolverArnoldi::SolverArnoldi(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
SolverLanczos::SolverLanczos(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
/* ----------------------- Power ------------------------- */
SolverPower::SolverPower(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
SolverGeneralizedDavidson::SolverGeneralizedDavidson(
SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
/* ------------------ Jacobi Davidson -------------------- */
- SolverJacobiDavidson::SolverJacobiDavidson(SolverControl & cn,
- const MPI_Comm &mpi_communicator,
+ SolverJacobiDavidson::SolverJacobiDavidson(SolverControl &cn,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
/* ---------------------- LAPACK ------------------------- */
SolverLAPACK::SolverLAPACK(SolverControl & cn,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: SolverBase(cn, mpi_communicator)
, additional_data(data)
namespace SLEPcWrappers
{
- TransformationBase::TransformationBase(const MPI_Comm &mpi_communicator)
+ TransformationBase::TransformationBase(const MPI_Comm mpi_communicator)
{
const PetscErrorCode ierr = STCreate(mpi_communicator, &st);
AssertThrow(ierr == 0, SolverBase::ExcSLEPcError(ierr));
: shift_parameter(shift_parameter)
{}
- TransformationShift::TransformationShift(const MPI_Comm &mpi_communicator,
+ TransformationShift::TransformationShift(const MPI_Comm mpi_communicator,
const AdditionalData &data)
: TransformationBase(mpi_communicator)
, additional_data(data)
{}
TransformationShiftInvert::TransformationShiftInvert(
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: TransformationBase(mpi_communicator)
, additional_data(data)
{}
TransformationSpectrumFolding::TransformationSpectrumFolding(
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const AdditionalData &data)
: TransformationBase(mpi_communicator)
, additional_data(data)
, antishift_parameter(antishift_parameter)
{}
- TransformationCayley::TransformationCayley(const MPI_Comm &mpi_communicator,
+ TransformationCayley::TransformationCayley(const MPI_Comm mpi_communicator,
const AdditionalData &data)
: TransformationBase(mpi_communicator)
, additional_data(data)
void
gather_sparsity_pattern(DynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & locally_relevant_rows)
{
using map_vec_t =
distribute_sparsity_pattern(
DynamicSparsityPattern & dsp,
const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
void
distribute_sparsity_pattern(DynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & locally_relevant_rows)
{
IndexSet requested_rows(locally_relevant_rows);
void
distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet & myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
void
distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
const IndexSet & locally_owned_rows,
- const MPI_Comm & mpi_comm,
+ const MPI_Comm mpi_comm,
const IndexSet &locally_relevant_rows)
{
using map_vec_t =
BlockSparseMatrix::reinit(
const std::vector<IndexSet> & parallel_partitioning,
const BlockSparsityPatternType &block_sparsity_pattern,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool exchange_data)
{
std::vector<Epetra_Map> epetra_maps;
BlockSparseMatrix::reinit(
const std::vector<IndexSet> & parallel_partitioning,
const ::dealii::BlockSparseMatrix<double> &dealii_block_sparse_matrix,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const double drop_tolerance)
{
const size_type n_block_rows = parallel_partitioning.size();
template void
BlockSparseMatrix::reinit(const std::vector<IndexSet> &,
const dealii::BlockDynamicSparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
const bool);
# endif // DOXYGEN
void
BlockVector::reinit(const std::vector<IndexSet> ¶llel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool omit_zeroing_entries)
{
// update the number of blocks
void
BlockVector::reinit(const std::vector<IndexSet> ¶llel_partitioning,
const std::vector<IndexSet> &ghost_values,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool vector_writable)
{
AssertDimension(parallel_partitioning.size(), ghost_values.size());
CommunicationPattern::CommunicationPattern(
const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
// virtual functions called in constructors and destructors never use the
// override in a derived class
void
CommunicationPattern::reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
comm = std::make_shared<const MPI_Comm>(communicator);
Vector::Vector(const IndexSet ¶llel_partitioner,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: Subscriptor()
, vector(new Epetra_FEVector(
parallel_partitioner.make_trilinos_map(communicator, false)))
void
Vector::reinit(const IndexSet ¶llel_partitioner,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool omit_zeroing_entries)
{
Epetra_Map input_map =
void
Vector::create_epetra_comm_pattern(const IndexSet &source_index_set,
- const MPI_Comm &mpi_comm)
+ const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
epetra_comm_pattern =
SparseMatrix::SparseMatrix(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const unsigned int n_max_entries_per_row)
: column_space_map(new Epetra_Map(
parallel_partitioning.make_trilinos_map(communicator, false)))
SparseMatrix::SparseMatrix(const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row)
: column_space_map(new Epetra_Map(
parallel_partitioning.make_trilinos_map(communicator, false)))
SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_max_entries_per_row)
: column_space_map(new Epetra_Map(
col_parallel_partitioning.make_trilinos_map(communicator, false)))
SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const std::vector<unsigned int> &n_entries_per_row)
: column_space_map(new Epetra_Map(
col_parallel_partitioning.make_trilinos_map(communicator, false)))
const IndexSet & column_parallel_partitioning,
const SparsityPatternType & sparsity_pattern,
const bool exchange_data,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
std::unique_ptr<Epetra_Map> &column_space_map,
std::unique_ptr<Epetra_FECrsMatrix> &matrix,
std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
const IndexSet & column_parallel_partitioning,
const DynamicSparsityPattern &sparsity_pattern,
const bool exchange_data,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
std::unique_ptr<Epetra_Map> & column_space_map,
std::unique_ptr<Epetra_FECrsMatrix> &matrix,
std::unique_ptr<Epetra_CrsMatrix> & nonlocal_matrix,
SparseMatrix::reinit(const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const SparsityPatternType &sparsity_pattern,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool exchange_data)
{
reinit_matrix(row_parallel_partitioning,
const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const ::dealii::SparseMatrix<number> &dealii_sparse_matrix,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const double drop_tolerance,
const bool copy_values,
const ::dealii::SparsityPattern * use_this_sparsity)
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const dealii::SparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
const bool);
template void
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const DynamicSparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
const bool);
template void
SparseMatrix::reinit(const IndexSet &,
const IndexSet &,
const dealii::SparseMatrix<S> &,
- const MPI_Comm &,
+ const MPI_Comm,
const double,
const bool,
const dealii::SparsityPattern *);
SparsityPattern::SparsityPattern(const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_entries_per_row)
{
reinit(parallel_partitioning,
SparsityPattern::SparsityPattern(
const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row)
{
reinit(parallel_partitioning,
SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_entries_per_row)
{
reinit(row_parallel_partitioning,
SparsityPattern::SparsityPattern(
const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row)
{
reinit(row_parallel_partitioning,
SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const IndexSet &writable_rows,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_max_entries_per_row)
{
reinit(row_parallel_partitioning,
void
SparsityPattern::reinit(const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_entries_per_row)
{
SparsityPatternBase::resize(parallel_partitioning.size(),
void
SparsityPattern::reinit(const IndexSet & parallel_partitioning,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row)
{
SparsityPatternBase::resize(parallel_partitioning.size(),
void
SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_entries_per_row)
{
SparsityPatternBase::resize(row_parallel_partitioning.size(),
void
SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const std::vector<size_type> &n_entries_per_row)
{
SparsityPatternBase::resize(row_parallel_partitioning.size(),
SparsityPattern::reinit(const IndexSet &row_parallel_partitioning,
const IndexSet &col_parallel_partitioning,
const IndexSet &writable_rows,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const size_type n_entries_per_row)
{
SparsityPatternBase::resize(row_parallel_partitioning.size(),
const IndexSet & row_parallel_partitioning,
const IndexSet & col_parallel_partitioning,
const SparsityPatternType &nontrilinos_sparsity_pattern,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool exchange_data)
{
SparsityPatternBase::resize(row_parallel_partitioning.size(),
SparsityPattern::reinit(
const IndexSet & parallel_partitioning,
const SparsityPatternType &nontrilinos_sparsity_pattern,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
const bool exchange_data)
{
AssertDimension(nontrilinos_sparsity_pattern.n_rows(),
template void
SparsityPattern::reinit(const IndexSet &,
const dealii::SparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
bool);
template void
SparsityPattern::reinit(const IndexSet &,
const dealii::DynamicSparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
bool);
SparsityPattern::reinit(const IndexSet &,
const IndexSet &,
const dealii::SparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
bool);
template void
SparsityPattern::reinit(const IndexSet &,
const IndexSet &,
const dealii::DynamicSparsityPattern &,
- const MPI_Comm &,
+ const MPI_Comm,
bool);
# endif
CommunicationPattern::CommunicationPattern(
const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
// virtual functions called in constructors and destructors never use the
// override in a derived class
void
CommunicationPattern::reinit(const IndexSet &vector_space_vector_index_set,
const IndexSet &read_write_vector_index_set,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
{
comm = std::make_shared<const MPI_Comm>(communicator);
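
Storing the communicator via std::make_shared<const MPI_Comm>(communicator), as in the hunk above, copies the handle; the stored copy and the caller's variable keep referring to the same underlying communicator object, which is exactly why passing by value is safe here. Only MPI_Comm_dup creates a distinct communicator. A small sketch of that distinction in plain MPI, without any deal.II types:

#include <mpi.h>

#include <cassert>

int
main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // Copying the handle, as the by-value parameters above do, does not
  // duplicate the communicator: both handles name the same object.
  const MPI_Comm copy = MPI_COMM_WORLD;
  int size_world = 0, size_copy = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &size_world);
  MPI_Comm_size(copy, &size_copy);
  assert(size_world == size_copy);

  // A genuinely new communicator requires MPI_Comm_dup.
  MPI_Comm duplicate;
  MPI_Comm_dup(MPI_COMM_WORLD, &duplicate);
  MPI_Comm_free(&duplicate);

  MPI_Finalize();
}
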
Vector::Vector(const IndexSet ¶llel_partitioning,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: Vector()
{
reinit(parallel_partitioning, communicator);
Vector::Vector(const IndexSet ¶llel_partitioner,
const Vector & v,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: Vector()
{
AssertThrow(parallel_partitioner.size() ==
Vector::Vector(const IndexSet &local,
const IndexSet &ghost,
- const MPI_Comm &communicator)
+ const MPI_Comm communicator)
: Vector()
{
reinit(local, ghost, communicator, false);
void
Vector::reinit(const IndexSet ¶llel_partitioner,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool /*omit_zeroing_entries*/)
{
nonlocal_vector.reset();
void
Vector::reinit(const IndexSet &locally_owned_entries,
const IndexSet &ghost_entries,
- const MPI_Comm &communicator,
+ const MPI_Comm communicator,
const bool vector_writable)
{
nonlocal_vector.reset();
void
DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
- const MPI_Comm & communicator_sm,
+ const MPI_Comm communicator_sm,
const bool use_vector_data_exchanger_full)
{
Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());
const std::vector<FaceToCellTopology<1>> &inner_faces,
const std::vector<FaceToCellTopology<1>> &ghosted_faces,
const bool fill_cell_centric,
- const MPI_Comm & communicator_sm,
+ const MPI_Comm communicator_sm,
const bool use_vector_data_exchanger_full)
{
const Utilities::MPI::Partitioner &part = *vector_partitioner;
Full::Full(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
- const MPI_Comm &communicator_sm)
+ const MPI_Comm communicator_sm)
: comm(partitioner->get_mpi_communicator())
, comm_sm(communicator_sm)
, n_local_elements(partitioner->locally_owned_range().n_elements())
const MGConstrainedDoFs,
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
mg_constrained_dofs,
- const MPI_Comm & mpi_communicator,
+ const MPI_Comm mpi_communicator,
const bool transfer_solution_vectors,
std::vector<Table<2, unsigned int>> & copy_indices,
std::vector<Table<2, unsigned int>> & copy_indices_global_mine,
std::vector<types::global_dof_index> &ghosted_level_dofs,
const std::shared_ptr<const Utilities::MPI::Partitioner>
& external_partitioner,
- const MPI_Comm & communicator,
+ const MPI_Comm communicator,
std::shared_ptr<const Utilities::MPI::Partitioner> &target_partitioner,
Table<2, unsigned int> ©_indices_global_mine)
{
template <typename VectorType>
ARKode<VectorType>::ARKode(const AdditionalData &data,
- const MPI_Comm & mpi_comm)
+ const MPI_Comm mpi_comm)
: data(data)
, arkode_mem(nullptr)
# if DEAL_II_SUNDIALS_VERSION_GTE(6, 0, 0)
template <typename VectorType>
- IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm &mpi_comm)
+ IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm mpi_comm)
: data(data)
, ida_mem(nullptr)
# if DEAL_II_SUNDIALS_VERSION_GTE(6, 0, 0)
template <typename VectorType>
KINSOL<VectorType>::KINSOL(const AdditionalData &data,
- const MPI_Comm & mpi_comm)
+ const MPI_Comm mpi_comm)
: data(data)
, mpi_communicator(mpi_comm)
, kinsol_mem(nullptr)
public:
PETScInverse(const dealii::PETScWrappers::MatrixBase &A,
dealii::SolverControl & cn,
- const MPI_Comm &mpi_communicator = PETSC_COMM_SELF)
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF)
: solver(cn)
, matrix(A)
, preconditioner(matrix)
public:
PETScInverse(const dealii::PETScWrappers::MatrixBase &A,
dealii::SolverControl & cn,
- const MPI_Comm &mpi_communicator = PETSC_COMM_SELF)
+ const MPI_Comm mpi_communicator = PETSC_COMM_SELF)
: solver(cn)
, matrix(A)
, preconditioner(matrix)
void
-test(const MPI_Comm &comm)
+test(const MPI_Comm comm)
{
const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
const unsigned int n_rank = dealii::Utilities::MPI::n_mpi_processes(comm);
template <int dim>
void
-test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
+test(const MPI_Comm comm, const bool do_revert, const unsigned int dir)
{
const unsigned int degree = 2;
const unsigned int n_refinements = 2;
template <int dim>
void
-test_dim(const MPI_Comm &comm, const bool do_revert)
+test_dim(const MPI_Comm comm, const bool do_revert)
{
for (int dir = 0; dir < dim; ++dir)
test<dim>(comm, do_revert, dir);
// For process i, n_bboxes[i%7] bounding boxes are created
std::vector<unsigned int> n_bboxes = {2, 4, 3, 5, 1, 3, 8};
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
- unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
- unsigned int proc = Utilities::MPI::this_mpi_process(mpi_communicator);
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+ unsigned int proc = Utilities::MPI::this_mpi_process(mpi_communicator);
deallog << "Test for: dimension " << spacedim << std::endl;
deallog << n_procs << " mpi processes" << std::endl;
void
test()
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "dim = " << dim << std::endl;
parallel::distributed::Triangulation<dim> tria(mpi_communicator);
void
test()
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "dim = " << dim << std::endl;
parallel::distributed::Triangulation<dim> tria(mpi_communicator);
void
test()
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "dim = " << dim << std::endl;
parallel::shared::Triangulation<dim> tria(
void
test()
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "dim = " << dim << std::endl;
parallel::distributed::Triangulation<dim> tria(mpi_communicator);
using namespace dealii;
MPI_Comm
-create_sub_comm(const MPI_Comm &comm, const unsigned int size)
+create_sub_comm(const MPI_Comm comm, const unsigned int size)
{
const auto rank = Utilities::MPI::this_mpi_process(comm);
template <int dim, int spacedim>
LinearAlgebra::distributed::Vector<double>
partition_distributed_triangulation(const Triangulation<dim, spacedim> &tria_in,
- const MPI_Comm & comm)
+ const MPI_Comm comm)
{
const auto comm_tria = tria_in.get_communicator();
class MyPolicy : public RepartitioningPolicyTools::Base<dim, spacedim>
{
public:
- MyPolicy(const MPI_Comm &comm, const unsigned int direction)
+ MyPolicy(const MPI_Comm comm, const unsigned int direction)
: comm(comm)
, direction(direction)
{}
}
private:
- const MPI_Comm & comm;
+ const MPI_Comm comm;
const unsigned int direction;
};
Vector()
{}
- Vector(const IndexSet local, const MPI_Comm &comm)
+ Vector(const IndexSet local, const MPI_Comm comm)
{}
- Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm &comm)
+ Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm comm)
{}
void
- reinit(const IndexSet local, const MPI_Comm &comm)
+ reinit(const IndexSet local, const MPI_Comm comm)
{}
void
- reinit(const IndexSet local, const IndexSet &ghost, const MPI_Comm &comm)
+ reinit(const IndexSet local, const IndexSet &ghost, const MPI_Comm comm)
{}
void
template <typename SP>
SparseMatrix(const IndexSet &local,
const IndexSet &,
- SP & sp,
- const MPI_Comm &comm = MPI_COMM_WORLD)
+ SP & sp,
+ const MPI_Comm comm = MPI_COMM_WORLD)
{}
void
void
test(unsigned int ref)
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
parallel::distributed::Triangulation<dim, spacedim> tria(mpi_communicator);
GridGenerator::hyper_ball(tria);
void
test(unsigned int ref)
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
parallel::distributed::Triangulation<dim, spacedim> tria(mpi_communicator);
GridGenerator::hyper_ball(tria);
void
test_hypercube(unsigned int ref, unsigned int max_bbox)
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "Testing hypercube for spacedim = " << spacedim
<< " refinement: " << ref << " max number of boxes: " << max_bbox
<< std::endl;
void
test()
{
- const MPI_Comm &mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << "dim = " << dim << std::endl;
parallel::distributed::Triangulation<dim> tria(mpi_communicator);
template <int dim>
void
-transfer(const MPI_Comm &mpi_communicator)
+transfer(const MPI_Comm mpi_communicator)
{
const unsigned int this_mpi_process =
Utilities::MPI::this_mpi_process(mpi_communicator);
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << " 1D solution transfer" << std::endl;
transfer<1>(mpi_communicator);
template <int dim>
void
-transfer(const MPI_Comm &mpi_communicator)
+transfer(const MPI_Comm mpi_communicator)
{
const unsigned int this_mpi_process =
Utilities::MPI::this_mpi_process(mpi_communicator);
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << " 1D solution transfer" << std::endl;
transfer<1>(mpi_communicator);
InverseMatrix(const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: matrix(&m)
, preconditioner(&preconditioner)
, mpi_communicator(&mpi_communicator)
Preconditioner> & A_inverse,
const IndexSet & owned_pres,
const IndexSet & relevant_pres,
- const MPI_Comm &mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
& A_inverse,
const IndexSet &owned_vel,
const IndexSet &relevant_vel,
- const MPI_Comm &mpi_communicator)
+ const MPI_Comm mpi_communicator)
: system_matrix(&system_matrix)
, A_inverse(&A_inverse)
, tmp1(owned_vel, mpi_communicator)
InverseMatrix(const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: matrix(&m)
, preconditioner(&preconditioner)
, mpi_communicator(&mpi_communicator)
Preconditioner> & A_inverse,
const IndexSet & owned_pres,
const IndexSet & relevant_pres,
- const MPI_Comm &mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
& A_inverse,
const IndexSet &owned_vel,
const IndexSet &relevant_vel,
- const MPI_Comm &mpi_communicator)
+ const MPI_Comm mpi_communicator)
: system_matrix(&system_matrix)
, A_inverse(&A_inverse)
, tmp1(owned_vel, mpi_communicator)
InverseMatrix(const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
const Matrix & m,
const Preconditioner &preconditioner,
const IndexSet & locally_owned,
- const MPI_Comm & mpi_communicator)
+ const MPI_Comm mpi_communicator)
: matrix(&m)
, preconditioner(&preconditioner)
, mpi_communicator(&mpi_communicator)
const InverseMatrix<TrilinosWrappers::SparseMatrix,
Preconditioner> & A_inverse,
const IndexSet & owned_pres,
- const MPI_Comm &mpi_communicator);
+ const MPI_Comm mpi_communicator);
void
vmult(TrilinosWrappers::MPI::Vector & dst,
const InverseMatrix<TrilinosWrappers::SparseMatrix, Preconditioner>
& A_inverse,
const IndexSet &owned_vel,
- const MPI_Comm &mpi_communicator)
+ const MPI_Comm mpi_communicator)
: system_matrix(&system_matrix)
, A_inverse(&A_inverse)
, tmp1(owned_vel, mpi_communicator)
template <int dim>
void
-transfer(const MPI_Comm &mpi_communicator)
+transfer(const MPI_Comm mpi_communicator)
{
const unsigned int this_mpi_process =
Utilities::MPI::this_mpi_process(mpi_communicator);
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << " 1D solution transfer" << std::endl;
transfer<1>(mpi_communicator);
template <int dim>
void
-transfer(const MPI_Comm &mpi_communicator)
+transfer(const MPI_Comm mpi_communicator)
{
const unsigned int this_mpi_process =
Utilities::MPI::this_mpi_process(mpi_communicator);
{
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
deallog << " 1D solution transfer" << std::endl;
transfer<1>(mpi_communicator);
template <int dim>
void
-transfer(const MPI_Comm &comm)
+transfer(const MPI_Comm comm)
{
AssertDimension(Utilities::MPI::n_mpi_processes(comm), 1);
template <int dim, int spacedim>
void
-test(const MPI_Comm &comm)
+test(const MPI_Comm comm)
{
Triangulation<dim> basetria;
GridGenerator::subdivided_hyper_cube(basetria, 4);
deallog << "n_dofs=" << dof_handler.n_dofs() << std::endl;
- const MPI_Comm &mpi_communicator = triangulation.get_communicator();
- const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
- IndexSet locally_relevant_dofs;
+ const MPI_Comm mpi_communicator = triangulation.get_communicator();
+ const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+ IndexSet locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
AffineConstraints<double> constraints;
template <int dim>
void
-test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
+test(const MPI_Comm comm, const bool do_revert, const unsigned int dir)
{
const unsigned int degree = 2;
const unsigned int n_refinements = 2;
template <int dim>
void
-test_dim(const MPI_Comm &comm, const bool do_revert)
+test_dim(const MPI_Comm comm, const bool do_revert)
{
for (int dir = 0; dir < dim; ++dir)
test<dim>(comm, do_revert, dir);
template <int dim, int spacedim = dim>
void
-test_tet(const MPI_Comm &comm, const Parameters<dim> ¶ms)
+test_tet(const MPI_Comm comm, const Parameters<dim> ¶ms)
{
const unsigned int tria_type = 2;
template <int dim, int spacedim = dim>
void
-test_hex(const MPI_Comm &comm, const Parameters<dim> ¶ms)
+test_hex(const MPI_Comm comm, const Parameters<dim> ¶ms)
{
// 1) Create triangulation...
parallel::distributed::Triangulation<dim, spacedim> tria(comm);
template <int dim, int spacedim = dim>
void
-test_wedge(const MPI_Comm &comm, const Parameters<dim> ¶ms)
+test_wedge(const MPI_Comm comm, const Parameters<dim> ¶ms)
{
const unsigned int tria_type = 2;
template <int dim, int spacedim = dim>
void
-test_pyramid(const MPI_Comm &comm, const Parameters<dim> ¶ms)
+test_pyramid(const MPI_Comm comm, const Parameters<dim> ¶ms)
{
const unsigned int tria_type = 2;
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD;
+ const MPI_Comm mpi_communicator = MPI_COMM_WORLD;
const unsigned int this_mpi_process =
Utilities::MPI::this_mpi_process(mpi_communicator);
const unsigned int n_mpi_processes =
template <int dim>
void
-test(const MPI_Comm &mpi_communicator)
+test(const MPI_Comm mpi_communicator)
{
parallel::shared::Triangulation<dim> triangulation(
mpi_communicator, Triangulation<dim>::limit_level_difference_at_vertices);