From e54e9caf68f1cae5eb2db198e7b24b08a4a1e036 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Mon, 24 Apr 2023 16:10:00 -0400
Subject: [PATCH] pass MPI_Comm by value

---
 examples/step-45/step-45.cc | 8 +-
 include/deal.II/arborx/distributed_tree.h | 8 +-
 include/deal.II/base/aligned_vector.h | 4 +-
 .../deal.II/base/communication_pattern_base.h | 2 +-
 include/deal.II/base/data_out_base.h | 23 ++-
 include/deal.II/base/hdf5.h | 8 +-
 include/deal.II/base/index_set.h | 12 +-
 include/deal.II/base/mpi.h | 126 ++++++-------
 include/deal.II/base/mpi.templates.h | 42 ++---
 .../base/mpi_compute_index_owner_internal.h | 6 +-
 .../deal.II/base/mpi_consensus_algorithms.h | 170 +++++++++---------
 .../base/mpi_noncontiguous_partitioner.h | 8 +-
 include/deal.II/base/partitioner.h | 8 +-
 include/deal.II/base/process_grid.h | 6 +-
 include/deal.II/base/table.h | 4 +-
 include/deal.II/base/timer.h | 10 +-
 .../distributed/fully_distributed_tria.h | 2 +-
 include/deal.II/distributed/grid_refinement.h | 6 +-
 include/deal.II/distributed/shared_tria.h | 2 +-
 include/deal.II/distributed/tria.h | 6 +-
 include/deal.II/distributed/tria_base.h | 6 +-
 include/deal.II/dofs/number_cache.h | 4 +-
 .../fe/fe_tools_interpolate.templates.h | 2 +-
 include/deal.II/grid/grid_tools.h | 4 +-
 include/deal.II/grid/tria_description.h | 6 +-
 include/deal.II/lac/affine_constraints.h | 4 +-
 .../lac/affine_constraints.templates.h | 4 +-
 include/deal.II/lac/block_sparsity_pattern.h | 10 +-
 .../deal.II/lac/la_parallel_block_vector.h | 8 +-
 .../lac/la_parallel_block_vector.templates.h | 8 +-
 include/deal.II/lac/la_parallel_vector.h | 16 +-
 .../lac/la_parallel_vector.templates.h | 24 +--
 include/deal.II/lac/parpack_solver.h | 4 +-
 .../deal.II/lac/petsc_block_sparse_matrix.h | 4 +-
 include/deal.II/lac/petsc_block_vector.h | 32 ++--
 .../deal.II/lac/petsc_communication_pattern.h | 12 +-
 include/deal.II/lac/petsc_matrix_free.h | 10 +-
 include/deal.II/lac/petsc_precondition.h | 14 +-
 include/deal.II/lac/petsc_snes.h | 2 +-
 include/deal.II/lac/petsc_snes.templates.h | 2 +-
 include/deal.II/lac/petsc_solver.h | 28 +--
 include/deal.II/lac/petsc_sparse_matrix.h | 16 +-
 include/deal.II/lac/petsc_ts.h | 2 +-
 include/deal.II/lac/petsc_ts.templates.h | 2 +-
 include/deal.II/lac/petsc_vector.h | 20 +--
 include/deal.II/lac/read_write_vector.h | 8 +-
 .../deal.II/lac/read_write_vector.templates.h | 8 +-
 include/deal.II/lac/slepc_solver.h | 25 ++-
 .../lac/slepc_spectral_transformation.h | 10 +-
 include/deal.II/lac/sparse_matrix.h | 4 +-
 include/deal.II/lac/sparse_matrix_tools.h | 4 +-
 include/deal.II/lac/sparsity_tools.h | 10 +-
 .../lac/trilinos_block_sparse_matrix.h | 4 +-
 .../trilinos_epetra_communication_pattern.h | 4 +-
 include/deal.II/lac/trilinos_epetra_vector.h | 6 +-
 .../lac/trilinos_parallel_block_vector.h | 12 +-
 include/deal.II/lac/trilinos_sparse_matrix.h | 24 +--
 .../deal.II/lac/trilinos_sparsity_pattern.h | 24 +--
 .../trilinos_tpetra_communication_pattern.h | 4 +-
 include/deal.II/lac/trilinos_tpetra_vector.h | 6 +-
 .../lac/trilinos_tpetra_vector.templates.h | 6 +-
 include/deal.II/lac/trilinos_vector.h | 14 +-
 include/deal.II/matrix_free/dof_info.h | 4 +-
 .../matrix_free/vector_data_exchange.h | 2 +-
 include/deal.II/sundials/arkode.h | 2 +-
 include/deal.II/sundials/ida.h | 2 +-
 include/deal.II/sundials/kinsol.h | 2 +-
 source/base/data_out_base.cc | 24 +--
 source/base/data_out_base.inst.in | 4 +-
 source/base/hdf5.cc | 4 +-
 source/base/index_set.cc | 12 +-
 source/base/mpi.cc | 69 ++++---
 source/base/mpi.inst.in | 42 ++---
 .../base/mpi_compute_index_owner_internal.cc | 6 +-
 source/base/mpi_noncontiguous_partitioner.cc | 8 +-
 source/base/partitioner.cc | 8 +-
 source/base/process_grid.cc | 8 +-
 source/base/timer.cc | 10 +-
 source/distributed/fully_distributed_tria.cc | 3 +-
 source/distributed/grid_refinement.cc | 8 +-
 source/distributed/grid_refinement.inst.in | 6 +-
 source/distributed/shared_tria.cc | 2 +-
 source/distributed/tria.cc | 4 +-
 source/distributed/tria_base.cc | 6 +-
 source/dofs/number_cache.cc | 4 +-
 source/grid/grid_tools.cc | 6 +-
 source/grid/grid_tools.inst.in | 4 +-
 source/grid/tria_description.cc | 12 +-
 source/grid/tria_description.inst.in | 6 +-
 source/lac/block_sparsity_pattern.cc | 10 +-
 source/lac/petsc_communication_pattern.cc | 12 +-
 source/lac/petsc_matrix_free.cc | 10 +-
 .../lac/petsc_parallel_block_sparse_matrix.cc | 4 +-
 source/lac/petsc_parallel_sparse_matrix.cc | 48 ++---
 source/lac/petsc_parallel_vector.cc | 16 +-
 source/lac/petsc_precondition.cc | 14 +-
 source/lac/petsc_solver.cc | 28 +--
 source/lac/slepc_solver.cc | 18 +-
 source/lac/slepc_spectral_transformation.cc | 10 +-
 source/lac/sparsity_tools.cc | 10 +-
 source/lac/trilinos_block_sparse_matrix.cc | 6 +-
 source/lac/trilinos_block_vector.cc | 4 +-
 .../trilinos_epetra_communication_pattern.cc | 4 +-
 source/lac/trilinos_epetra_vector.cc | 6 +-
 source/lac/trilinos_sparse_matrix.cc | 20 +--
 source/lac/trilinos_sparse_matrix.inst.in | 2 +-
 source/lac/trilinos_sparsity_pattern.cc | 32 ++--
 .../trilinos_tpetra_communication_pattern.cc | 4 +-
 source/lac/trilinos_vector.cc | 10 +-
 source/matrix_free/dof_info.cc | 4 +-
 source/matrix_free/vector_data_exchange.cc | 2 +-
 source/multigrid/mg_level_global_transfer.cc | 2 +-
 source/multigrid/mg_transfer_internal.cc | 2 +-
 source/sundials/arkode.cc | 2 +-
 source/sundials/ida.cc | 2 +-
 source/sundials/kinsol.cc | 2 +-
 .../parpack_advection_diffusion_petsc.cc | 2 +-
 tests/arpack/step-36_parpack.cc | 2 +-
 tests/base/consensus_algorithm_01.cc | 2 +-
 .../base/mpi_noncontiguous_partitioner_02.cc | 4 +-
 .../grid_tools_exchange_bounding_boxes_1.cc | 6 +-
 .../grid_tools_exchange_cell_data_01.cc | 2 +-
 .../grid_tools_exchange_cell_data_02.cc | 2 +-
 .../grid_tools_exchange_cell_data_03.cc | 2 +-
 .../grid_tools_exchange_cell_data_04.cc | 2 +-
 .../repartitioning_05.cc | 4 +-
 .../repartitioning_08.cc | 4 +-
 tests/gla/gla.h | 12 +-
 tests/grid/grid_tools_cache_06.cc | 2 +-
 tests/grid/grid_tools_cache_07.cc | 2 +-
 ...s_compute_mesh_predicate_bounding_box_1.cc | 2 +-
 .../grid/grid_tools_halo_layer_ghost_cells.cc | 2 +-
 tests/hp/solution_transfer_14.cc | 4 +-
 tests/hp/solution_transfer_15.cc | 4 +-
 tests/mpi/muelu_periodicity.cc | 8 +-
 tests/mpi/periodicity_02.cc | 8 +-
 tests/mpi/periodicity_03.cc | 8 +-
 tests/mpi/solution_transfer_02.cc | 4 +-
 tests/mpi/solution_transfer_03.cc | 4 +-
 tests/mpi/solution_transfer_06.cc | 2 +-
 .../global_id_01.cc | 2 +-
 tests/numerics/project_parallel_common.h | 6 +-
 .../petsc_noncontiguous_partitioner_02.cc | 4 +-
 tests/simplex/poisson_01.cc | 8 +-
 .../trilinos/parallel_block_vector_copy_01.cc | 2 +-
 tests/zoltan/tria_zoltan_01.cc | 2 +-
 146 files changed, 772 insertions(+), 776 deletions(-)

diff --git a/examples/step-45/step-45.cc b/examples/step-45/step-45.cc
index e851e2cefb..fc8c544d93 100644
--- a/examples/step-45/step-45.cc
+++ b/examples/step-45/step-45.cc
@@ -193,7 +193,7 @@ namespace Step45
     InverseMatrix(const MatrixType & m,
                   const PreconditionerType &preconditioner,
                   const IndexSet & locally_owned,
-                  const MPI_Comm & mpi_communicator);
+                  const MPI_Comm 
mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, const TrilinosWrappers::MPI::Vector &src) const; @@ -213,7 +213,7 @@ namespace Step45 const MatrixType & m, const PreconditionerType &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : matrix(&m) , preconditioner(&preconditioner) , mpi_communicator(&mpi_communicator) @@ -246,7 +246,7 @@ namespace Step45 const InverseMatrix & A_inverse, const IndexSet & owned_pres, - const MPI_Comm &mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, const TrilinosWrappers::MPI::Vector &src) const; @@ -267,7 +267,7 @@ namespace Step45 const InverseMatrix & A_inverse, const IndexSet &owned_vel, - const MPI_Comm &mpi_communicator) + const MPI_Comm mpi_communicator) : system_matrix(&system_matrix) , A_inverse(&A_inverse) , tmp1(owned_vel, mpi_communicator) diff --git a/include/deal.II/arborx/distributed_tree.h b/include/deal.II/arborx/distributed_tree.h index 2dba72b548..2a22bcc3a5 100644 --- a/include/deal.II/arborx/distributed_tree.h +++ b/include/deal.II/arborx/distributed_tree.h @@ -44,7 +44,7 @@ namespace ArborXWrappers */ template DistributedTree( - const MPI_Comm & comm, + const MPI_Comm comm, const std::vector> &bounding_boxes); /** @@ -52,7 +52,7 @@ namespace ArborXWrappers * in @p points are local to the MPI process. */ template - DistributedTree(const MPI_Comm & comm, + DistributedTree(const MPI_Comm comm, const std::vector> &points); /** @@ -82,7 +82,7 @@ namespace ArborXWrappers template DistributedTree::DistributedTree( - const MPI_Comm & comm, + const MPI_Comm comm, const std::vector> &bounding_boxes) : distributed_tree(comm, Kokkos::DefaultHostExecutionSpace{}, @@ -93,7 +93,7 @@ namespace ArborXWrappers template DistributedTree::DistributedTree( - const MPI_Comm & comm, + const MPI_Comm comm, const std::vector> &points) : distributed_tree(comm, Kokkos::DefaultHostExecutionSpace{}, points) {} diff --git a/include/deal.II/base/aligned_vector.h b/include/deal.II/base/aligned_vector.h index e27b872b3e..f43ba72873 100644 --- a/include/deal.II/base/aligned_vector.h +++ b/include/deal.II/base/aligned_vector.h @@ -346,7 +346,7 @@ public: * the destructor is called. */ void - replicate_across_communicator(const MPI_Comm & communicator, + replicate_across_communicator(const MPI_Comm communicator, const unsigned int root_process); /** @@ -1503,7 +1503,7 @@ AlignedVector::fill(const T &value) template inline void -AlignedVector::replicate_across_communicator(const MPI_Comm & communicator, +AlignedVector::replicate_across_communicator(const MPI_Comm communicator, const unsigned int root_process) { # ifdef DEAL_II_WITH_MPI diff --git a/include/deal.II/base/communication_pattern_base.h b/include/deal.II/base/communication_pattern_base.h index e1f7866618..148ebfbacf 100644 --- a/include/deal.II/base/communication_pattern_base.h +++ b/include/deal.II/base/communication_pattern_base.h @@ -88,7 +88,7 @@ namespace Utilities virtual void reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) = 0; + const MPI_Comm communicator) = 0; /** * Return the underlying MPI communicator. 
diff --git a/include/deal.II/base/data_out_base.h b/include/deal.II/base/data_out_base.h index d486d86f1c..df4c467c4a 100644 --- a/include/deal.II/base/data_out_base.h +++ b/include/deal.II/base/data_out_base.h @@ -2383,7 +2383,7 @@ namespace DataOutBase & nonscalar_data_ranges, const Deal_II_IntermediateFlags &flags, const std::string & filename, - const MPI_Comm & comm, + const MPI_Comm comm, const CompressionLevel compression); /** @@ -2396,7 +2396,7 @@ namespace DataOutBase const DataOutFilter & data_filter, const DataOutBase::Hdf5Flags & flags, const std::string & filename, - const MPI_Comm & comm); + const MPI_Comm comm); /** * Write the data in @p data_filter to HDF5 file(s). If @p write_mesh_file is @@ -2413,7 +2413,7 @@ namespace DataOutBase const bool write_mesh_file, const std::string & mesh_filename, const std::string &solution_filename, - const MPI_Comm & comm); + const MPI_Comm comm); /** * DataOutFilter is an intermediate data format that reduces the amount of @@ -2743,8 +2743,7 @@ public: * DataOutInterface::write_vtu(). */ void - write_vtu_in_parallel(const std::string &filename, - const MPI_Comm & comm) const; + write_vtu_in_parallel(const std::string &filename, const MPI_Comm comm) const; /** * Some visualization programs, such as ParaView, can read several separate @@ -2846,7 +2845,7 @@ public: const std::string &directory, const std::string &filename_without_extension, const unsigned int counter, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const unsigned int n_digits_for_counter = numbers::invalid_unsigned_int, const unsigned int n_groups = 0) const; @@ -2879,7 +2878,7 @@ public: void write_deal_II_intermediate_in_parallel( const std::string & filename, - const MPI_Comm & comm, + const MPI_Comm comm, const DataOutBase::CompressionLevel compression) const; /** @@ -2891,7 +2890,7 @@ public: create_xdmf_entry(const DataOutBase::DataOutFilter &data_filter, const std::string & h5_filename, const double cur_time, - const MPI_Comm & comm) const; + const MPI_Comm comm) const; /** * Create an XDMFEntry based on the data in the data_filter. This assumes @@ -2903,7 +2902,7 @@ public: const std::string & h5_mesh_filename, const std::string & h5_solution_filename, const double cur_time, - const MPI_Comm & comm) const; + const MPI_Comm comm) const; /** * Write an XDMF file based on the provided vector of XDMFEntry objects. @@ -2932,7 +2931,7 @@ public: void write_xdmf_file(const std::vector &entries, const std::string & filename, - const MPI_Comm & comm) const; + const MPI_Comm comm) const; /** * Write the data in @p data_filter to a single HDF5 file containing both the @@ -2951,7 +2950,7 @@ public: void write_hdf5_parallel(const DataOutBase::DataOutFilter &data_filter, const std::string & filename, - const MPI_Comm & comm) const; + const MPI_Comm comm) const; /** * Write the data in data_filter to HDF5 file(s). If write_mesh_file is @@ -2965,7 +2964,7 @@ public: const bool write_mesh_file, const std::string & mesh_filename, const std::string & solution_filename, - const MPI_Comm & comm) const; + const MPI_Comm comm) const; /** * DataOutFilter is an intermediate data format that reduces the amount of diff --git a/include/deal.II/base/hdf5.h b/include/deal.II/base/hdf5.h index c7e15ba4f0..8ebeddfbb1 100644 --- a/include/deal.II/base/hdf5.h +++ b/include/deal.II/base/hdf5.h @@ -73,7 +73,7 @@ DEAL_II_NAMESPACE_OPEN * MPI support (several processes access the same HDF5 file). 
* File::File(const std::string &, const FileAccessMode) * opens/creates an HDF5 file for serial operations. - * File::File(const std::string &, const FileAccessMode, const MPI_Comm &) + * File::File(const std::string &, const FileAccessMode, const MPI_Comm ) * creates or opens an HDF5 file in parallel using MPI. The HDF5 calls that * modify the structure of the file are always collective, whereas writing * and reading raw data in a dataset can be done independently or collectively. @@ -1105,12 +1105,12 @@ namespace HDF5 */ File(const std::string & name, const FileAccessMode mode, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); private: /** * Delegation internal constructor. - * File(const std::string &, const MPI_Comm &, const Mode); + * File(const std::string &, const MPI_Comm , const Mode); * and * File(const std::string &, const Mode) * should be used to open or create HDF5 files. @@ -1118,7 +1118,7 @@ namespace HDF5 File(const std::string & name, const FileAccessMode mode, const bool mpi, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); }; namespace internal diff --git a/include/deal.II/base/index_set.h b/include/deal.II/base/index_set.h index 3ff8b14ce5..6a88125a62 100644 --- a/include/deal.II/base/index_set.h +++ b/include/deal.II/base/index_set.h @@ -252,7 +252,7 @@ public: * is complete. */ bool - is_ascending_and_one_to_one(const MPI_Comm &communicator) const; + is_ascending_and_one_to_one(const MPI_Comm communicator) const; /** * Return the number of elements stored in this index set. @@ -503,19 +503,19 @@ public: * vector, e.g. for extracting only certain solution components. */ Epetra_Map - make_trilinos_map(const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool overlapping = false) const; + make_trilinos_map(const MPI_Comm communicator = MPI_COMM_WORLD, + const bool overlapping = false) const; # ifdef DEAL_II_TRILINOS_WITH_TPETRA Tpetra::Map - make_tpetra_map(const MPI_Comm &communicator = MPI_COMM_WORLD, - const bool overlapping = false) const; + make_tpetra_map(const MPI_Comm communicator = MPI_COMM_WORLD, + const bool overlapping = false) const; # endif #endif #ifdef DEAL_II_WITH_PETSC IS - make_petsc_is(const MPI_Comm &communicator = MPI_COMM_WORLD) const; + make_petsc_is(const MPI_Comm communicator = MPI_COMM_WORLD) const; #endif diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h index 9347e17159..7ed205a9c9 100644 --- a/include/deal.II/base/mpi.h +++ b/include/deal.II/base/mpi.h @@ -137,7 +137,7 @@ namespace Utilities * only one process and the function returns 1. */ unsigned int - n_mpi_processes(const MPI_Comm &mpi_communicator); + n_mpi_processes(const MPI_Comm mpi_communicator); /** * Return the @@ -148,15 +148,15 @@ namespace Utilities * than) the number of all processes (given by get_n_mpi_processes()). */ unsigned int - this_mpi_process(const MPI_Comm &mpi_communicator); + this_mpi_process(const MPI_Comm mpi_communicator); /** * Return a vector of the ranks (within @p comm_large) of a subset of * processes specified by @p comm_small. 
*/ const std::vector - mpi_processes_within_communicator(const MPI_Comm &comm_large, - const MPI_Comm &comm_small); + mpi_processes_within_communicator(const MPI_Comm comm_large, + const MPI_Comm comm_small); /** * Consider an unstructured communication pattern where every process in @@ -181,7 +181,7 @@ namespace Utilities */ std::vector compute_point_to_point_communication_pattern( - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::vector &destinations); /** @@ -205,7 +205,7 @@ namespace Utilities */ unsigned int compute_n_point_to_point_communications( - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::vector &destinations); /** @@ -225,7 +225,7 @@ namespace Utilities * MPI_Comm_dup(mpi_communicator, &return_value);. */ MPI_Comm - duplicate_communicator(const MPI_Comm &mpi_communicator); + duplicate_communicator(const MPI_Comm mpi_communicator); /** * Free the given @@ -237,7 +237,7 @@ namespace Utilities * MPI_Comm_free(&mpi_communicator);. */ void - free_communicator(MPI_Comm &mpi_communicator); + free_communicator(MPI_Comm mpi_communicator); /** * Helper class to automatically duplicate and free an MPI @@ -257,7 +257,7 @@ namespace Utilities /** * Create a duplicate of the given @p communicator. */ - explicit DuplicatedCommunicator(const MPI_Comm &communicator) + explicit DuplicatedCommunicator(const MPI_Comm communicator) : comm(duplicate_communicator(communicator)) {} @@ -342,7 +342,7 @@ namespace Utilities /** * Constructor. Blocks until it can acquire the lock. */ - explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm) + explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm) : mutex(mutex) , comm(comm) { @@ -385,7 +385,7 @@ namespace Utilities * in the communicator. */ void - lock(const MPI_Comm &comm); + lock(const MPI_Comm comm); /** * Release the lock. @@ -394,7 +394,7 @@ namespace Utilities * in the communicator. 
*/ void - unlock(const MPI_Comm &comm); + unlock(const MPI_Comm comm); private: /** @@ -573,7 +573,7 @@ namespace Utilities */ #ifdef DEAL_II_WITH_MPI DEAL_II_DEPRECATED int - create_group(const MPI_Comm & comm, + create_group(const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm * new_comm); @@ -589,7 +589,7 @@ namespace Utilities */ std::vector create_ascending_partitioning( - const MPI_Comm & comm, + const MPI_Comm comm, const types::global_dof_index locally_owned_size); /** @@ -601,7 +601,7 @@ namespace Utilities */ IndexSet create_evenly_distributed_partitioning( - const MPI_Comm & comm, + const MPI_Comm comm, const types::global_dof_index total_size); #ifdef DEAL_II_WITH_MPI @@ -622,9 +622,9 @@ namespace Utilities */ template std::pair::real_type> - mean_and_standard_deviation(const Iterator begin, - const Iterator end, - const MPI_Comm &comm); + mean_and_standard_deviation(const Iterator begin, + const Iterator end, + const MPI_Comm comm); #endif @@ -699,7 +699,7 @@ namespace Utilities */ template T - sum(const T &t, const MPI_Comm &mpi_communicator); + sum(const T &t, const MPI_Comm mpi_communicator); /** * Like the previous function, but take the sums over the elements of an @@ -712,7 +712,7 @@ namespace Utilities */ template void - sum(const T &values, const MPI_Comm &mpi_communicator, U &sums); + sum(const T &values, const MPI_Comm mpi_communicator, U &sums); /** * Like the previous function, but take the sums over the elements of an @@ -726,7 +726,7 @@ namespace Utilities template void sum(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & sums); /** @@ -737,7 +737,7 @@ namespace Utilities template SymmetricTensor sum(const SymmetricTensor &local, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** * Perform an MPI sum of the entries of a tensor. @@ -747,7 +747,7 @@ namespace Utilities template Tensor sum(const Tensor &local, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** * Perform an MPI sum of the entries of a SparseMatrix. 
@@ -760,7 +760,7 @@ namespace Utilities template void sum(const SparseMatrix &local, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, SparseMatrix & global); /** @@ -784,7 +784,7 @@ namespace Utilities */ template T - max(const T &t, const MPI_Comm &mpi_communicator); + max(const T &t, const MPI_Comm mpi_communicator); /** * Like the previous function, but take the maximum over the elements of an @@ -797,7 +797,7 @@ namespace Utilities */ template void - max(const T &values, const MPI_Comm &mpi_communicator, U &maxima); + max(const T &values, const MPI_Comm mpi_communicator, U &maxima); /** * Like the previous function, but take the maximum over the elements of an @@ -811,7 +811,7 @@ namespace Utilities template void max(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & maxima); /** @@ -835,7 +835,7 @@ namespace Utilities */ template T - min(const T &t, const MPI_Comm &mpi_communicator); + min(const T &t, const MPI_Comm mpi_communicator); /** * Like the previous function, but take the minima over the elements of an @@ -848,7 +848,7 @@ namespace Utilities */ template void - min(const T &values, const MPI_Comm &mpi_communicator, U &minima); + min(const T &values, const MPI_Comm mpi_communicator, U &minima); /** * Like the previous function, but take the minimum over the elements of an @@ -862,7 +862,7 @@ namespace Utilities template void min(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & minima); /** @@ -890,7 +890,7 @@ namespace Utilities */ template T - logical_or(const T &t, const MPI_Comm &mpi_communicator); + logical_or(const T &t, const MPI_Comm mpi_communicator); /** * Like the previous function, but performs the logical or operation @@ -908,7 +908,7 @@ namespace Utilities */ template void - logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results); + logical_or(const T &values, const MPI_Comm mpi_communicator, U &results); /** * Like the previous function, but performs the logical or operation @@ -922,7 +922,7 @@ namespace Utilities template void logical_or(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & results); /** @@ -1001,7 +1001,7 @@ namespace Utilities * everywhere. 
*/ MinMaxAvg - min_max_avg(const double my_value, const MPI_Comm &mpi_communicator); + min_max_avg(const double my_value, const MPI_Comm mpi_communicator); /** * Same as above but returning the sum, average, minimum, maximum, @@ -1016,7 +1016,7 @@ namespace Utilities */ std::vector min_max_avg(const std::vector &my_value, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** @@ -1034,7 +1034,7 @@ namespace Utilities void min_max_avg(const ArrayView &my_values, const ArrayView & result, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** @@ -1244,7 +1244,7 @@ namespace Utilities */ template std::map - some_to_some(const MPI_Comm & comm, + some_to_some(const MPI_Comm comm, const std::map &objects_to_send); /** @@ -1262,7 +1262,7 @@ namespace Utilities */ template std::vector - all_gather(const MPI_Comm &comm, const T &object_to_send); + all_gather(const MPI_Comm comm, const T &object_to_send); /** * A generalization of the classic MPI_Gather function, that accepts @@ -1281,7 +1281,7 @@ namespace Utilities */ template std::vector - gather(const MPI_Comm & comm, + gather(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process = 0); @@ -1301,7 +1301,7 @@ namespace Utilities */ template T - scatter(const MPI_Comm & comm, + scatter(const MPI_Comm comm, const std::vector &objects_to_send, const unsigned int root_process = 0); @@ -1342,7 +1342,7 @@ namespace Utilities */ template std::enable_if_t == false, T> - broadcast(const MPI_Comm & comm, + broadcast(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process = 0); @@ -1370,7 +1370,7 @@ namespace Utilities */ template std::enable_if_t == true, T> - broadcast(const MPI_Comm & comm, + broadcast(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process = 0); @@ -1395,7 +1395,7 @@ namespace Utilities broadcast(T * buffer, const size_t count, const unsigned int root, - const MPI_Comm & comm); + const MPI_Comm comm); /** * A function that combines values @p local_value from all processes @@ -1412,7 +1412,7 @@ namespace Utilities template T reduce(const T & local_value, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &combiner, const unsigned int root_process = 0); @@ -1428,7 +1428,7 @@ namespace Utilities template T all_reduce(const T & local_value, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &combiner); @@ -1528,7 +1528,7 @@ namespace Utilities std::vector compute_index_owner(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, - const MPI_Comm &comm); + const MPI_Comm comm); /** * Compute the union of the input vectors @p vec of all processes in the @@ -1539,14 +1539,14 @@ namespace Utilities */ template std::vector - compute_set_union(const std::vector &vec, const MPI_Comm &comm); + compute_set_union(const std::vector &vec, const MPI_Comm comm); /** * The same as above but for std::set. 
*/ template std::set - compute_set_union(const std::set &set, const MPI_Comm &comm); + compute_set_union(const std::set &set, const MPI_Comm comm); @@ -1741,7 +1741,7 @@ namespace Utilities void all_reduce(const MPI_Op & mpi_op, const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & output); } // namespace internal @@ -1804,7 +1804,7 @@ namespace Utilities template void - sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N]) + sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N]) { internal::all_reduce(MPI_SUM, ArrayView(values, N), @@ -1816,7 +1816,7 @@ namespace Utilities template void - max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N]) + max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N]) { internal::all_reduce(MPI_MAX, ArrayView(values, N), @@ -1828,7 +1828,7 @@ namespace Utilities template void - min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N]) + min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N]) { internal::all_reduce(MPI_MIN, ArrayView(values, N), @@ -1841,7 +1841,7 @@ namespace Utilities template void logical_or(const T (&values)[N], - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, T (&results)[N]) { static_assert(std::is_integral::value, @@ -1857,7 +1857,7 @@ namespace Utilities template std::map - some_to_some(const MPI_Comm & comm, + some_to_some(const MPI_Comm comm, const std::map &objects_to_send) { # ifndef DEAL_II_WITH_MPI @@ -1973,7 +1973,7 @@ namespace Utilities template std::vector - all_gather(const MPI_Comm &comm, const T &object) + all_gather(const MPI_Comm comm, const T &object) { if (job_supports_mpi() == false) return {object}; @@ -2036,7 +2036,7 @@ namespace Utilities template std::vector - gather(const MPI_Comm & comm, + gather(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process) { @@ -2119,7 +2119,7 @@ namespace Utilities template T - scatter(const MPI_Comm & comm, + scatter(const MPI_Comm comm, const std::vector &objects_to_send, const unsigned int root_process) { @@ -2197,7 +2197,7 @@ namespace Utilities broadcast(T * buffer, const size_t count, const unsigned int root, - const MPI_Comm & comm) + const MPI_Comm comm) { # ifndef DEAL_II_WITH_MPI (void)buffer; @@ -2233,7 +2233,7 @@ namespace Utilities template std::enable_if_t == false, T> - broadcast(const MPI_Comm & comm, + broadcast(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process) { @@ -2283,7 +2283,7 @@ namespace Utilities template std::enable_if_t == true, T> - broadcast(const MPI_Comm & comm, + broadcast(const MPI_Comm comm, const T & object_to_send, const unsigned int root_process) { @@ -2441,9 +2441,9 @@ namespace Utilities # ifdef DEAL_II_WITH_MPI template std::pair::real_type> - mean_and_standard_deviation(const Iterator begin, - const Iterator end, - const MPI_Comm &comm) + mean_and_standard_deviation(const Iterator begin, + const Iterator end, + const MPI_Comm comm) { // below we do simple and straight-forward implementation. 
More elaborate // options are: diff --git a/include/deal.II/base/mpi.templates.h b/include/deal.II/base/mpi.templates.h index 794c7c9a33..947837cdc8 100644 --- a/include/deal.II/base/mpi.templates.h +++ b/include/deal.II/base/mpi.templates.h @@ -41,7 +41,7 @@ namespace Utilities void all_reduce(const MPI_Op & mpi_op, const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & output) { AssertDimension(values.size(), output.size()); @@ -102,7 +102,7 @@ namespace Utilities void all_reduce(const MPI_Op & mpi_op, const ArrayView> &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView> & output) { AssertDimension(values.size(), output.size()); @@ -141,7 +141,7 @@ namespace Utilities template T - sum(const T &t, const MPI_Comm &mpi_communicator) + sum(const T &t, const MPI_Comm mpi_communicator) { T return_value{}; internal::all_reduce(MPI_SUM, @@ -155,7 +155,7 @@ namespace Utilities template void - sum(const T &values, const MPI_Comm &mpi_communicator, U &sums) + sum(const T &values, const MPI_Comm mpi_communicator, U &sums) { static_assert(std::is_same::type, typename std::decay::type>::value, @@ -173,7 +173,7 @@ namespace Utilities template void sum(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & sums) { internal::all_reduce(MPI_SUM, values, mpi_communicator, sums); @@ -183,7 +183,7 @@ namespace Utilities template Tensor - sum(const Tensor &t, const MPI_Comm &mpi_communicator) + sum(const Tensor &t, const MPI_Comm mpi_communicator) { // Copy the tensor into a C-style array with which we can then // call the other sum() function. @@ -204,7 +204,7 @@ namespace Utilities template SymmetricTensor sum(const SymmetricTensor &local, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { // Copy the tensor into a C-style array with which we can then // call the other sum() function. 
@@ -232,7 +232,7 @@ namespace Utilities template void sum(const SparseMatrix &local, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, SparseMatrix & global) { Assert( @@ -256,7 +256,7 @@ namespace Utilities template T - max(const T &t, const MPI_Comm &mpi_communicator) + max(const T &t, const MPI_Comm mpi_communicator) { T return_value{}; internal::all_reduce(MPI_MAX, @@ -270,7 +270,7 @@ namespace Utilities template void - max(const T &values, const MPI_Comm &mpi_communicator, U &maxima) + max(const T &values, const MPI_Comm mpi_communicator, U &maxima) { static_assert(std::is_same::type, typename std::decay::type>::value, @@ -288,7 +288,7 @@ namespace Utilities template void max(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & maxima) { internal::all_reduce(MPI_MAX, values, mpi_communicator, maxima); @@ -298,7 +298,7 @@ namespace Utilities template T - min(const T &t, const MPI_Comm &mpi_communicator) + min(const T &t, const MPI_Comm mpi_communicator) { T return_value{}; internal::all_reduce(MPI_MIN, @@ -312,7 +312,7 @@ namespace Utilities template void - min(const T &values, const MPI_Comm &mpi_communicator, U &minima) + min(const T &values, const MPI_Comm mpi_communicator, U &minima) { static_assert(std::is_same::type, typename std::decay::type>::value, @@ -330,7 +330,7 @@ namespace Utilities template void min(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & minima) { internal::all_reduce(MPI_MIN, values, mpi_communicator, minima); @@ -340,7 +340,7 @@ namespace Utilities template T - logical_or(const T &t, const MPI_Comm &mpi_communicator) + logical_or(const T &t, const MPI_Comm mpi_communicator) { static_assert(std::is_integral::value, "The MPI_LOR operation only allows integral data types."); @@ -357,7 +357,7 @@ namespace Utilities template void - logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results) + logical_or(const T &values, const MPI_Comm mpi_communicator, U &results) { static_assert(std::is_same::type, typename std::decay::type>::value, @@ -387,7 +387,7 @@ namespace Utilities template void logical_or(const ArrayView &values, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const ArrayView & results) { static_assert(std::is_integral::value, @@ -401,7 +401,7 @@ namespace Utilities template T reduce(const T & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &combiner, const unsigned int root_process) { @@ -485,7 +485,7 @@ namespace Utilities template T all_reduce(const T & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &combiner) { if (job_supports_mpi() && n_mpi_processes(comm) > 1) @@ -503,7 +503,7 @@ namespace Utilities template std::vector - compute_set_union(const std::vector &vec, const MPI_Comm &comm) + compute_set_union(const std::vector &vec, const MPI_Comm comm) { return Utilities::MPI::all_reduce>( vec, comm, [](const auto &set_1, const auto &set_2) { @@ -520,7 +520,7 @@ namespace Utilities template std::set - compute_set_union(const std::set &set_in, const MPI_Comm &comm) + compute_set_union(const std::set &set_in, const MPI_Comm comm) { // convert vector to set std::vector vector_in(set_in.begin(), set_in.end()); diff --git a/include/deal.II/base/mpi_compute_index_owner_internal.h b/include/deal.II/base/mpi_compute_index_owner_internal.h index ecba6c38b4..feab6d8a43 100644 --- 
a/include/deal.II/base/mpi_compute_index_owner_internal.h +++ b/include/deal.II/base/mpi_compute_index_owner_internal.h @@ -171,7 +171,7 @@ namespace Utilities * ranges to the owner of the dictionary part. */ void - reinit(const IndexSet &owned_indices, const MPI_Comm &comm); + reinit(const IndexSet &owned_indices, const MPI_Comm comm); /** * Translate a global dof index to the MPI rank in the dictionary @@ -203,7 +203,7 @@ namespace Utilities * the number of ranks. */ void - partition(const IndexSet &owned_indices, const MPI_Comm &comm); + partition(const IndexSet &owned_indices, const MPI_Comm comm); }; @@ -226,7 +226,7 @@ namespace Utilities */ ConsensusAlgorithmsPayload(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, - const MPI_Comm &comm, + const MPI_Comm comm, std::vector &owning_ranks, const bool track_index_requests = false); diff --git a/include/deal.II/base/mpi_consensus_algorithms.h b/include/deal.II/base/mpi_consensus_algorithms.h index 6667de2826..c2b7fdd253 100644 --- a/include/deal.II/base/mpi_consensus_algorithms.h +++ b/include/deal.II/base/mpi_consensus_algorithms.h @@ -253,7 +253,7 @@ namespace Utilities */ DEAL_II_DEPRECATED Interface(Process &process, - const MPI_Comm & comm); + const MPI_Comm comm); /** * Destructor. Made `virtual` to ensure that one can work with @@ -283,7 +283,7 @@ namespace Utilities * that takes a number of `std::function` arguments. */ std::vector - run(Process &process, const MPI_Comm &comm); + run(Process &process, const MPI_Comm comm); /** * Run the consensus algorithm and return a vector of process ranks @@ -315,8 +315,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) = 0; + & process_answer, + const MPI_Comm comm) = 0; private: /** @@ -375,7 +375,7 @@ namespace Utilities * function that takes an argument. */ DEAL_II_DEPRECATED - NBX(Process &process, const MPI_Comm &comm); + NBX(Process &process, const MPI_Comm comm); /** * Destructor. @@ -395,8 +395,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) override; + & process_answer, + const MPI_Comm comm) override; private: #ifdef DEAL_II_WITH_MPI @@ -446,15 +446,15 @@ namespace Utilities bool all_locally_originated_receives_are_completed( const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * Signal to all other ranks that this rank has received all request * answers via entering IBarrier. */ void - signal_finish(const MPI_Comm &comm); + signal_finish(const MPI_Comm comm); /** * Check whether all of the requests for answers that were created by @@ -473,7 +473,7 @@ namespace Utilities maybe_answer_one_request( const std::function &answer_request, - const MPI_Comm & comm); + const MPI_Comm comm); /** * Start to send all requests via ISend and post IRecvs for the incoming @@ -483,14 +483,14 @@ namespace Utilities start_communication( const std::vector & targets, const std::function &create_request, - const MPI_Comm & comm); + const MPI_Comm comm); /** * After all rank has received all answers, the MPI data structures can * be freed and the received answers can be processed. 
*/ void - clean_up_and_end_communication(const MPI_Comm &comm); + clean_up_and_end_communication(const MPI_Comm comm); }; @@ -545,8 +545,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * This function provides a specialization of the one above for @@ -590,8 +590,8 @@ namespace Utilities nbx(const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm); + & process_request, + const MPI_Comm comm); /** * This class implements a concrete algorithm for the @@ -641,7 +641,7 @@ namespace Utilities * function that takes an argument. */ DEAL_II_DEPRECATED - PEX(Process &process, const MPI_Comm &comm); + PEX(Process &process, const MPI_Comm comm); /** * Destructor. @@ -661,8 +661,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) override; + & process_answer, + const MPI_Comm comm) override; private: #ifdef DEAL_II_WITH_MPI @@ -704,7 +704,7 @@ namespace Utilities start_communication( const std::vector & targets, const std::function &create_request, - const MPI_Comm & comm); + const MPI_Comm comm); /** * The `index`th request message from another rank has been received: @@ -715,7 +715,7 @@ namespace Utilities const unsigned int index, const std::function &answer_request, - const MPI_Comm & comm); + const MPI_Comm comm); /** * Receive and process all of the incoming responses to the @@ -725,8 +725,8 @@ namespace Utilities process_incoming_answers( const unsigned int n_targets, const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * After all answers have been exchanged, the MPI data structures can be @@ -801,8 +801,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * This function provides a specialization of the one above for @@ -846,8 +846,8 @@ namespace Utilities pex(const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm); + & process_request, + const MPI_Comm comm); /** @@ -876,7 +876,7 @@ namespace Utilities * function that takes an argument. */ DEAL_II_DEPRECATED - Serial(Process &process, const MPI_Comm &comm); + Serial(Process &process, const MPI_Comm comm); // Import the declarations from the base class. using Interface::run; @@ -891,8 +891,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) override; + & process_answer, + const MPI_Comm comm) override; }; @@ -937,8 +937,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * This function provides a specialization of the one above for @@ -975,8 +975,8 @@ namespace Utilities const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm); + & process_request, + const MPI_Comm comm); @@ -1015,7 +1015,7 @@ namespace Utilities */ DEAL_II_DEPRECATED Selector(Process &process, - const MPI_Comm & comm); + const MPI_Comm comm); /** * Destructor. 
@@ -1037,8 +1037,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) override; + & process_answer, + const MPI_Comm comm) override; private: // Pointer to the actual ConsensusAlgorithms::Interface implementation. @@ -1099,8 +1099,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm); + & process_answer, + const MPI_Comm comm); /** * This function provides a specialization of the one above for @@ -1145,8 +1145,8 @@ namespace Utilities const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm); + & process_request, + const MPI_Comm comm); /** @@ -1231,8 +1231,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { return NBX().run( targets, create_request, answer_request, process_answer, comm); @@ -1245,8 +1245,8 @@ namespace Utilities nbx(const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm) + & process_request, + const MPI_Comm comm) { // TODO: For the moment, simply implement this special case by // forwarding to the other function with rewritten function @@ -1283,8 +1283,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { return PEX().run( targets, create_request, answer_request, process_answer, comm); @@ -1297,8 +1297,8 @@ namespace Utilities pex(const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm) + & process_request, + const MPI_Comm comm) { // TODO: For the moment, simply implement this special case by // forwarding to the other function with rewritten function @@ -1336,8 +1336,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { return Serial().run( targets, create_request, answer_request, process_answer, comm); @@ -1351,8 +1351,8 @@ namespace Utilities const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm) + & process_request, + const MPI_Comm comm) { // TODO: For the moment, simply implement this special case by // forwarding to the other function with rewritten function @@ -1390,8 +1390,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { return Selector().run( targets, create_request, answer_request, process_answer, comm); @@ -1405,8 +1405,8 @@ namespace Utilities const std::vector & targets, const std::function &create_request, const std::function - & process_request, - const MPI_Comm &comm) + & process_request, + const MPI_Comm comm) { // TODO: For the moment, simply implement this special case by // forwarding to the other function with rewritten function @@ -1554,7 +1554,7 @@ namespace Utilities * Handle exceptions inside the ConsensusAlgorithm::run() functions. 
*/ inline void - handle_exception(std::exception_ptr &&exception, const MPI_Comm &comm) + handle_exception(std::exception_ptr &&exception, const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI // an exception within a ConsensusAlgorithm likely causes an @@ -1664,7 +1664,7 @@ namespace Utilities template Interface::Interface( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) : process(&process) , comm(comm) {} @@ -1697,7 +1697,7 @@ namespace Utilities std::vector Interface::run( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) { // Unpack the 'process' object and call the function that takes // function objects for all operations. @@ -1727,7 +1727,7 @@ namespace Utilities template NBX::NBX( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) : Interface(process, comm) {} @@ -1741,8 +1741,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { Assert(has_unique_elements(targets), ExcMessage("The consensus algorithms expect that each process " @@ -1805,7 +1805,7 @@ namespace Utilities NBX::start_communication( const std::vector & targets, const std::function &create_request, - const MPI_Comm & comm) + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI // 1) @@ -1859,8 +1859,8 @@ namespace Utilities NBX:: all_locally_originated_receives_are_completed( const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI // We know that all requests have come in when we have pending @@ -1953,8 +1953,8 @@ namespace Utilities void NBX::maybe_answer_one_request( const std::function - & answer_request, - const MPI_Comm &comm) + & answer_request, + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI @@ -2035,7 +2035,7 @@ namespace Utilities template void - NBX::signal_finish(const MPI_Comm &comm) + NBX::signal_finish(const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI const auto ierr = MPI_Ibarrier(comm, &barrier_request); @@ -2069,7 +2069,7 @@ namespace Utilities template void NBX::clean_up_and_end_communication( - const MPI_Comm &comm) + const MPI_Comm comm) { (void)comm; # ifdef DEAL_II_WITH_MPI @@ -2107,7 +2107,7 @@ namespace Utilities template PEX::PEX( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) : Interface(process, comm) {} @@ -2121,8 +2121,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { Assert(has_unique_elements(targets), ExcMessage("The consensus algorithms expect that each process " @@ -2166,7 +2166,7 @@ namespace Utilities PEX::start_communication( const std::vector & targets, const std::function &create_request, - const MPI_Comm & comm) + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI const int tag_request = Utilities::MPI::internal::Tags:: @@ -2226,8 +2226,8 @@ namespace Utilities PEX::answer_one_request( const unsigned int index, const std::function - & answer_request, - const MPI_Comm &comm) + & answer_request, + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI const int tag_request = Utilities::MPI::internal::Tags:: @@ -2300,8 +2300,8 @@ namespace Utilities PEX::process_incoming_answers( const unsigned int n_targets, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { # ifdef DEAL_II_WITH_MPI const int tag_deliver = Utilities::MPI::internal::Tags:: @@ -2387,7 +2387,7 @@ namespace 
Utilities template Serial::Serial( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) : Interface(process, comm) {} @@ -2401,8 +2401,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { (void)comm; Assert((Utilities::MPI::job_supports_mpi() == false) || @@ -2441,7 +2441,7 @@ namespace Utilities template Selector::Selector( Process &process, - const MPI_Comm & comm) + const MPI_Comm comm) : Interface(process, comm) {} @@ -2455,8 +2455,8 @@ namespace Utilities const std::function &answer_request, const std::function - & process_answer, - const MPI_Comm &comm) + & process_answer, + const MPI_Comm comm) { // Depending on the number of processes we switch between // implementations. We reduce the threshold for debug mode to be diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.h b/include/deal.II/base/mpi_noncontiguous_partitioner.h index 8b62a5f9b3..9bbea5888c 100644 --- a/include/deal.II/base/mpi_noncontiguous_partitioner.h +++ b/include/deal.II/base/mpi_noncontiguous_partitioner.h @@ -61,7 +61,7 @@ namespace Utilities */ NoncontiguousPartitioner(const IndexSet &indexset_locally_owned, const IndexSet &indexset_ghost, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Constructor. Same as above but for vectors of indices @p indices_locally_owned @@ -75,7 +75,7 @@ namespace Utilities NoncontiguousPartitioner( const std::vector &indices_locally_owned, const std::vector &indices_ghost, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Fill the vector @p ghost_array according to the precomputed communication @@ -197,7 +197,7 @@ namespace Utilities void reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Initialize the inner data structures using explicit sets of @@ -207,7 +207,7 @@ namespace Utilities void reinit(const std::vector &locally_owned_indices, const std::vector &ghost_indices, - const MPI_Comm & communicator); + const MPI_Comm communicator); private: /** diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h index e5bb2703e7..23d1b1bd82 100644 --- a/include/deal.II/base/partitioner.h +++ b/include/deal.II/base/partitioner.h @@ -224,7 +224,7 @@ namespace Utilities */ Partitioner(const types::global_dof_index local_size, const types::global_dof_index ghost_size, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Constructor with index set arguments. This constructor creates a @@ -235,7 +235,7 @@ namespace Utilities */ Partitioner(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices_in, - const MPI_Comm &communicator_in); + const MPI_Comm communicator_in); /** * Constructor with one index set argument. This constructor creates a @@ -245,7 +245,7 @@ namespace Utilities * constructor with two index sets. */ Partitioner(const IndexSet &locally_owned_indices, - const MPI_Comm &communicator_in); + const MPI_Comm communicator_in); /** * Reinitialize the communication pattern. The first argument @@ -257,7 +257,7 @@ namespace Utilities virtual void reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Set the locally owned indices. Used in the constructor. 
diff --git a/include/deal.II/base/process_grid.h b/include/deal.II/base/process_grid.h index 62af8f341d..fe8a289af2 100644 --- a/include/deal.II/base/process_grid.h +++ b/include/deal.II/base/process_grid.h @@ -71,7 +71,7 @@ namespace Utilities * number of cores * in the @p mpi_communicator. */ - ProcessGrid(const MPI_Comm & mpi_communicator, + ProcessGrid(const MPI_Comm mpi_communicator, const unsigned int n_rows, const unsigned int n_columns); @@ -92,7 +92,7 @@ namespace Utilities * and the @p mpi_communicator with 11 cores will result in the $3x3$ * process grid. */ - ProcessGrid(const MPI_Comm & mpi_communicator, + ProcessGrid(const MPI_Comm mpi_communicator, const unsigned int n_rows_matrix, const unsigned int n_columns_matrix, const unsigned int row_block_size, @@ -151,7 +151,7 @@ namespace Utilities * A private constructor which takes grid dimensions as an * std::pair. */ - ProcessGrid(const MPI_Comm & mpi_communicator, + ProcessGrid(const MPI_Comm mpi_communicator, const std::pair &grid_dimensions); /** diff --git a/include/deal.II/base/table.h b/include/deal.II/base/table.h index 1d117f669d..50d1611b87 100644 --- a/include/deal.II/base/table.h +++ b/include/deal.II/base/table.h @@ -725,7 +725,7 @@ public: * the destructor is called. */ void - replicate_across_communicator(const MPI_Comm & communicator, + replicate_across_communicator(const MPI_Comm communicator, const unsigned int root_process); /** @@ -2385,7 +2385,7 @@ TableBase::fill(const T &value) template inline void -TableBase::replicate_across_communicator(const MPI_Comm & communicator, +TableBase::replicate_across_communicator(const MPI_Comm communicator, const unsigned int root_process) { // Replicate first the actual data, then also exchange the diff --git a/include/deal.II/base/timer.h b/include/deal.II/base/timer.h index 30c9aa50e0..a2eb0fefbc 100644 --- a/include/deal.II/base/timer.h +++ b/include/deal.II/base/timer.h @@ -136,7 +136,7 @@ public: * communicator occurs; the extra cost of the synchronization is not * measured. */ - Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times = false); + Timer(const MPI_Comm mpi_communicator, const bool sync_lap_times = false); /** * Return a reference to the data structure containing basic statistics on @@ -711,7 +711,7 @@ public: * MPI_Barrier call before starting and stopping the timer for * each section. */ - TimerOutput(const MPI_Comm & mpi_comm, + TimerOutput(const MPI_Comm mpi_comm, std::ostream & stream, const OutputFrequency output_frequency, const OutputType output_type); @@ -739,7 +739,7 @@ public: * MPI_Barrier call before starting and stopping the timer for * each section.) */ - TimerOutput(const MPI_Comm & mpi_comm, + TimerOutput(const MPI_Comm mpi_comm, ConditionalOStream & stream, const OutputFrequency output_frequency, const OutputType output_type); @@ -795,8 +795,8 @@ public: * median is given). */ void - print_wall_time_statistics(const MPI_Comm &mpi_comm, - const double print_quantile = 0.) const; + print_wall_time_statistics(const MPI_Comm mpi_comm, + const double print_quantile = 0.) const; /** * By calling this function, all output can be disabled. 
This function diff --git a/include/deal.II/distributed/fully_distributed_tria.h b/include/deal.II/distributed/fully_distributed_tria.h index c156eeabf7..518417dab4 100644 --- a/include/deal.II/distributed/fully_distributed_tria.h +++ b/include/deal.II/distributed/fully_distributed_tria.h @@ -129,7 +129,7 @@ namespace parallel * @param mpi_communicator The MPI communicator to be used for the * triangulation. */ - explicit Triangulation(const MPI_Comm &mpi_communicator); + explicit Triangulation(const MPI_Comm mpi_communicator); /** * Destructor. diff --git a/include/deal.II/distributed/grid_refinement.h b/include/deal.II/distributed/grid_refinement.h index 1aef1a106b..b27341cc9b 100644 --- a/include/deal.II/distributed/grid_refinement.h +++ b/include/deal.II/distributed/grid_refinement.h @@ -47,7 +47,7 @@ namespace internal std::pair compute_global_min_and_max_at_root( const dealii::Vector &criteria, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); namespace RefineAndCoarsenFixedNumber { @@ -60,7 +60,7 @@ namespace internal compute_threshold(const dealii::Vector & criteria, const std::pair &global_min_and_max, const types::global_cell_index n_target_cells, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); } // namespace RefineAndCoarsenFixedNumber namespace RefineAndCoarsenFixedFraction @@ -78,7 +78,7 @@ namespace internal compute_threshold(const dealii::Vector & criteria, const std::pair &global_min_and_max, const double target_error, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); } // namespace RefineAndCoarsenFixedFraction } // namespace GridRefinement } // namespace distributed diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h index a08381b1e7..a1ecc929ef 100644 --- a/include/deal.II/distributed/shared_tria.h +++ b/include/deal.II/distributed/shared_tria.h @@ -257,7 +257,7 @@ namespace parallel * consider enabling artificial cells. */ Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing = (dealii::Triangulation::none), const bool allow_artificial_cells = false, diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index 09cb4b760c..1437fd526a 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -367,7 +367,7 @@ namespace parallel * triangulation is partitioned. */ explicit Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid = (dealii::Triangulation::none), const Settings settings = default_setting); @@ -879,7 +879,7 @@ namespace parallel * the triangulation. */ Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation<1, spacedim>::MeshSmoothing smooth_grid = (dealii::Triangulation<1, spacedim>::none), const Settings settings = default_setting); @@ -1022,7 +1022,7 @@ namespace parallel * constructed (see also the class documentation). 
*/ explicit Triangulation( - const MPI_Comm & /*mpi_communicator*/, + const MPI_Comm /*mpi_communicator*/, const typename dealii::Triangulation::MeshSmoothing /*smooth_grid*/ = (dealii::Triangulation::none), diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h index 3377c2983e..08a8ee9c76 100644 --- a/include/deal.II/distributed/tria_base.h +++ b/include/deal.II/distributed/tria_base.h @@ -83,7 +83,7 @@ namespace parallel * Constructor. */ TriangulationBase( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid = (dealii::Triangulation::none), const bool check_for_distorted_cells = false); @@ -441,7 +441,7 @@ namespace parallel * Constructor. */ DistributedTriangulationBase( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid = (dealii::Triangulation::none), const bool check_for_distorted_cells = false); @@ -776,7 +776,7 @@ namespace parallel class DataTransfer { public: - DataTransfer(const MPI_Comm &mpi_communicator); + DataTransfer(const MPI_Comm mpi_communicator); /** * Prepare data transfer by calling the pack callback functions on each diff --git a/include/deal.II/dofs/number_cache.h b/include/deal.II/dofs/number_cache.h index 8bd8fc4604..4c3166e3a3 100644 --- a/include/deal.II/dofs/number_cache.h +++ b/include/deal.II/dofs/number_cache.h @@ -117,7 +117,7 @@ namespace internal */ std::vector get_n_locally_owned_dofs_per_processor( - const MPI_Comm &mpi_communicator) const; + const MPI_Comm mpi_communicator) const; /** * Return a representation of @p locally_owned_dofs_per_processor both @@ -128,7 +128,7 @@ namespace internal */ std::vector get_locally_owned_dofs_per_processor( - const MPI_Comm &mpi_communicator) const; + const MPI_Comm mpi_communicator) const; /** * Total number of dofs, accumulated over all processors that may diff --git a/include/deal.II/fe/fe_tools_interpolate.templates.h b/include/deal.II/fe/fe_tools_interpolate.templates.h index 47f312a32e..394b44467a 100644 --- a/include/deal.II/fe/fe_tools_interpolate.templates.h +++ b/include/deal.II/fe/fe_tools_interpolate.templates.h @@ -461,7 +461,7 @@ namespace FETools { if (u1.n_blocks() == 0) return; - const MPI_Comm &mpi_communicator = u1.block(0).get_mpi_communicator(); + const MPI_Comm mpi_communicator = u1.block(0).get_mpi_communicator(); const IndexSet &dof2_locally_owned_dofs = dof2.locally_owned_dofs(); IndexSet dof2_locally_relevant_dofs; DoFTools::extract_locally_relevant_dofs(dof2, dof2_locally_relevant_dofs); diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h index c4924af3f1..45a1ee3c97 100644 --- a/include/deal.II/grid/grid_tools.h +++ b/include/deal.II/grid/grid_tools.h @@ -3417,7 +3417,7 @@ namespace GridTools std::vector>> exchange_local_bounding_boxes( const std::vector> &local_bboxes, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** * In this collective operation each process provides a vector @@ -3455,7 +3455,7 @@ namespace GridTools RTree, unsigned int>> build_global_description_tree( const std::vector> &local_description, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); /** * Collect for a given triangulation all locally relevant vertices that diff --git a/include/deal.II/grid/tria_description.h b/include/deal.II/grid/tria_description.h index c1c4d344a0..47a57bd95b 100644 --- 
a/include/deal.II/grid/tria_description.h +++ b/include/deal.II/grid/tria_description.h @@ -528,7 +528,7 @@ namespace TriangulationDescription Description create_description_from_triangulation( const dealii::Triangulation &tria, - const MPI_Comm & comm, + const MPI_Comm comm, const TriangulationDescription::Settings settings = TriangulationDescription::Settings::default_setting, const unsigned int my_rank_in = numbers::invalid_unsigned_int); @@ -641,9 +641,9 @@ namespace TriangulationDescription const std::function &)> & serial_grid_generator, const std::function &, - const MPI_Comm &, + const MPI_Comm, const unsigned int)> &serial_grid_partitioner, - const MPI_Comm & comm, + const MPI_Comm comm, const int group_size = 1, const typename Triangulation::MeshSmoothing smoothing = dealii::Triangulation::none, diff --git a/include/deal.II/lac/affine_constraints.h b/include/deal.II/lac/affine_constraints.h index 4e23d2bcb6..f295f425f6 100644 --- a/include/deal.II/lac/affine_constraints.h +++ b/include/deal.II/lac/affine_constraints.h @@ -795,7 +795,7 @@ public: * AffineConstraints was created for the DG case. */ bool - is_closed(const MPI_Comm &comm) const; + is_closed(const MPI_Comm comm) const; /** * Merge the constraints represented by the object given as argument into @@ -1758,7 +1758,7 @@ public: bool is_consistent_in_parallel(const std::vector &locally_owned_dofs, const IndexSet & locally_active_dofs, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const bool verbose = false) const; /** diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index 340bfdc64d..6fca2a675b 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -107,7 +107,7 @@ bool AffineConstraints::is_consistent_in_parallel( const std::vector &locally_owned_dofs, const IndexSet & locally_active_dofs, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const bool verbose) const { // Helper to return a ConstraintLine object that belongs to row @p row. @@ -939,7 +939,7 @@ AffineConstraints::is_closed() const template bool -AffineConstraints::is_closed(const MPI_Comm &comm) const +AffineConstraints::is_closed(const MPI_Comm comm) const { return Utilities::MPI::min(static_cast(is_closed()), comm) == 1; } diff --git a/include/deal.II/lac/block_sparsity_pattern.h b/include/deal.II/lac/block_sparsity_pattern.h index 156dc64b76..748835362b 100644 --- a/include/deal.II/lac/block_sparsity_pattern.h +++ b/include/deal.II/lac/block_sparsity_pattern.h @@ -696,7 +696,7 @@ namespace TrilinosWrappers * to be saved in each block. */ BlockSparsityPattern(const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Initialize the pattern with two arrays of index sets that specify rows @@ -713,7 +713,7 @@ namespace TrilinosWrappers const std::vector &row_parallel_partitioning, const std::vector &column_parallel_partitioning, const std::vector &writeable_rows, - const MPI_Comm & communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Resize the matrix to a tensor product of matrices with dimensions @@ -734,7 +734,7 @@ namespace TrilinosWrappers */ void reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Resize the matrix to a rectangular block matrices. 
This method allows @@ -744,7 +744,7 @@ namespace TrilinosWrappers void reinit(const std::vector &row_parallel_partitioning, const std::vector &column_parallel_partitioning, - const MPI_Comm & communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Resize the matrix to a rectangular block matrices that furthermore @@ -758,7 +758,7 @@ namespace TrilinosWrappers reinit(const std::vector &row_parallel_partitioning, const std::vector &column_parallel_partitioning, const std::vector &writeable_rows, - const MPI_Comm & communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Allow the use of the reinit functions of the base class as well. diff --git a/include/deal.II/lac/la_parallel_block_vector.h b/include/deal.II/lac/la_parallel_block_vector.h index a0d1fdb129..659a22a7f3 100644 --- a/include/deal.II/lac/la_parallel_block_vector.h +++ b/include/deal.II/lac/la_parallel_block_vector.h @@ -168,13 +168,13 @@ namespace LinearAlgebra */ BlockVector(const std::vector &local_ranges, const std::vector &ghost_indices, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Same as above but the ghost indices are assumed to be empty. */ BlockVector(const std::vector &local_ranges, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Destructor. @@ -319,14 +319,14 @@ namespace LinearAlgebra void reinit(const std::vector &local_ranges, const std::vector &ghost_indices, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Same as above, but without ghost entries. */ void reinit(const std::vector &local_ranges, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * This function copies the data that has accumulated in the data buffer diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h index bdb96d022b..14edd312df 100644 --- a/include/deal.II/lac/la_parallel_block_vector.templates.h +++ b/include/deal.II/lac/la_parallel_block_vector.templates.h @@ -60,7 +60,7 @@ namespace LinearAlgebra template BlockVector::BlockVector(const std::vector &local_ranges, const std::vector &ghost_indices, - const MPI_Comm & communicator) + const MPI_Comm communicator) { reinit(local_ranges, ghost_indices, communicator); } @@ -68,7 +68,7 @@ namespace LinearAlgebra template BlockVector::BlockVector(const std::vector &local_ranges, - const MPI_Comm & communicator) + const MPI_Comm communicator) { reinit(local_ranges, communicator); } @@ -149,7 +149,7 @@ namespace LinearAlgebra void BlockVector::reinit(const std::vector &local_ranges, const std::vector &ghost_indices, - const MPI_Comm & communicator) + const MPI_Comm communicator) { AssertDimension(local_ranges.size(), ghost_indices.size()); @@ -172,7 +172,7 @@ namespace LinearAlgebra template void BlockVector::reinit(const std::vector &local_ranges, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // update the number of blocks this->block_indices.reinit(local_ranges.size(), 0); diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h index 8ceb65c44f..d03461cf5d 100644 --- a/include/deal.II/lac/la_parallel_vector.h +++ b/include/deal.II/lac/la_parallel_vector.h @@ -316,12 +316,12 @@ namespace LinearAlgebra */ Vector(const IndexSet &local_range, const IndexSet &ghost_indices, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Same constructor as above but without any ghost indices. 
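A minimal usage sketch for the ghosted vector constructor documented above (illustrative only; locally_owned and locally_relevant stand in for IndexSets that a real program would obtain, for example from a DoFHandler):

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/la_parallel_vector.h>

    // Owned and ghost index sets plus the communicator, now passed by value.
    LinearAlgebra::distributed::Vector<double> ghosted_vector(locally_owned,
                                                              locally_relevant,
                                                              MPI_COMM_WORLD);
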
*/ - Vector(const IndexSet &local_range, const MPI_Comm &communicator); + Vector(const IndexSet &local_range, const MPI_Comm communicator); /** * Create the vector based on the parallel partitioning described in @p @@ -378,13 +378,13 @@ namespace LinearAlgebra void reinit(const IndexSet &local_range, const IndexSet &ghost_indices, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Same as above, but without ghost entries. */ void - reinit(const IndexSet &local_range, const MPI_Comm &communicator); + reinit(const IndexSet &local_range, const MPI_Comm communicator); /** * Initialize the vector given to the parallel partitioning described in @@ -401,7 +401,7 @@ namespace LinearAlgebra void reinit( const std::shared_ptr &partitioner, - const MPI_Comm &comm_sm = MPI_COMM_SELF); + const MPI_Comm comm_sm = MPI_COMM_SELF); /** * Initialize vector with @p local_size locally-owned and @p ghost_size @@ -425,8 +425,8 @@ namespace LinearAlgebra void reinit(const types::global_dof_index local_size, const types::global_dof_index ghost_size, - const MPI_Comm & comm, - const MPI_Comm & comm_sm = MPI_COMM_SELF); + const MPI_Comm comm, + const MPI_Comm comm_sm = MPI_COMM_SELF); /** * Swap the contents of this vector and the other vector @p v. One could @@ -1419,7 +1419,7 @@ namespace LinearAlgebra */ void resize_val(const size_type new_allocated_size, - const MPI_Comm &comm_sm = MPI_COMM_SELF); + const MPI_Comm comm_sm = MPI_COMM_SELF); // Make all other vector types friends. template diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h index a0de1a320e..fe89e965f5 100644 --- a/include/deal.II/lac/la_parallel_vector.templates.h +++ b/include/deal.II/lac/la_parallel_vector.templates.h @@ -95,7 +95,7 @@ namespace LinearAlgebra types::global_dof_index & /*allocated_size*/, ::dealii::MemorySpace::MemorySpaceData & /*data*/, - const MPI_Comm & /*comm_sm*/) + const MPI_Comm /*comm_sm*/) {} static void @@ -130,7 +130,7 @@ namespace LinearAlgebra types::global_dof_index & allocated_size, ::dealii::MemorySpace:: MemorySpaceData &data, - const MPI_Comm &comm_shared) + const MPI_Comm comm_shared) { if (comm_shared == MPI_COMM_SELF) { @@ -328,8 +328,8 @@ namespace LinearAlgebra types::global_dof_index & allocated_size, ::dealii::MemorySpace::MemorySpaceData - & data, - const MPI_Comm &comm_sm) + & data, + const MPI_Comm comm_sm) { (void)comm_sm; @@ -521,7 +521,7 @@ namespace LinearAlgebra template void Vector::resize_val(const size_type new_alloc_size, - const MPI_Comm &comm_sm) + const MPI_Comm comm_sm) { internal::la_parallel_vector_templates_functions< Number, @@ -567,8 +567,8 @@ namespace LinearAlgebra Vector::reinit( const types::global_dof_index local_size, const types::global_dof_index ghost_size, - const MPI_Comm & comm, - const MPI_Comm & comm_sm) + const MPI_Comm comm, + const MPI_Comm comm_sm) { clear_mpi_requests(); @@ -637,7 +637,7 @@ namespace LinearAlgebra Vector::reinit( const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) { // set up parallel partitioner with index sets and communicator reinit(std::make_shared( @@ -650,7 +650,7 @@ namespace LinearAlgebra void Vector::reinit( const IndexSet &locally_owned_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) { // set up parallel partitioner with index sets and communicator reinit( @@ -664,7 +664,7 @@ namespace LinearAlgebra void Vector::reinit( const std::shared_ptr 
&partitioner_in, - const MPI_Comm & comm_sm) + const MPI_Comm comm_sm) { clear_mpi_requests(); @@ -737,7 +737,7 @@ namespace LinearAlgebra template Vector::Vector(const IndexSet &local_range, const IndexSet &ghost_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) : allocated_size(0) , vector_is_ghosted(false) , comm_sm(MPI_COMM_SELF) @@ -749,7 +749,7 @@ namespace LinearAlgebra template Vector::Vector(const IndexSet &local_range, - const MPI_Comm &communicator) + const MPI_Comm communicator) : allocated_size(0) , vector_is_ghosted(false) , comm_sm(MPI_COMM_SELF) diff --git a/include/deal.II/lac/parpack_solver.h b/include/deal.II/lac/parpack_solver.h index 99ef5fb0ad..d4fdacc611 100644 --- a/include/deal.II/lac/parpack_solver.h +++ b/include/deal.II/lac/parpack_solver.h @@ -295,7 +295,7 @@ public: * Constructor. */ PArpackSolver(SolverControl & control, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); /** @@ -638,7 +638,7 @@ PArpackSolver::AdditionalData::AdditionalData( template PArpackSolver::PArpackSolver(SolverControl & control, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : solver_control(control) , additional_data(data) diff --git a/include/deal.II/lac/petsc_block_sparse_matrix.h b/include/deal.II/lac/petsc_block_sparse_matrix.h index cf785e3b47..3bb81f9189 100644 --- a/include/deal.II/lac/petsc_block_sparse_matrix.h +++ b/include/deal.II/lac/petsc_block_sparse_matrix.h @@ -172,7 +172,7 @@ namespace PETScWrappers reinit(const std::vector & rows, const std::vector & cols, const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com); + const MPI_Comm com); /** @@ -181,7 +181,7 @@ namespace PETScWrappers void reinit(const std::vector & sizes, const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com); + const MPI_Comm com); /** diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h index 9bec0c860d..94e4a1aa76 100644 --- a/include/deal.II/lac/petsc_block_vector.h +++ b/include/deal.II/lac/petsc_block_vector.h @@ -95,7 +95,7 @@ namespace PETScWrappers * present process. */ explicit BlockVector(const unsigned int n_blocks, - const MPI_Comm & communicator, + const MPI_Comm communicator, const size_type block_size, const size_type locally_owned_size); @@ -113,7 +113,7 @@ namespace PETScWrappers * process. */ BlockVector(const std::vector &block_sizes, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &local_elements); /** @@ -121,14 +121,14 @@ namespace PETScWrappers * initialized with the given IndexSet. 
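A minimal usage sketch for the PETSc block vector constructor documented above (illustrative only; owned_partitioning stands in for a std::vector<IndexSet> with one entry per block):

    #include <deal.II/lac/petsc_block_vector.h>

    // One IndexSet per block; the communicator defaults to MPI_COMM_WORLD
    // and is now taken by value.
    PETScWrappers::MPI::BlockVector block_vector(owned_partitioning,
                                                 MPI_COMM_WORLD);
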
*/ explicit BlockVector(const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Same as above, but include ghost elements */ BlockVector(const std::vector ¶llel_partitioning, const std::vector &ghost_indices, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Create a BlockVector with a PETSc Vec @@ -181,7 +181,7 @@ namespace PETScWrappers */ void reinit(const unsigned int n_blocks, - const MPI_Comm & communicator, + const MPI_Comm communicator, const size_type block_size, const size_type locally_owned_size, const bool omit_zeroing_entries = false); @@ -208,7 +208,7 @@ namespace PETScWrappers */ void reinit(const std::vector &block_sizes, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &locally_owned_sizes, const bool omit_zeroing_entries = false); @@ -235,7 +235,7 @@ namespace PETScWrappers */ void reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Same as above but include ghost entries. @@ -243,7 +243,7 @@ namespace PETScWrappers void reinit(const std::vector ¶llel_partitioning, const std::vector &ghost_entries, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * This function collects the sizes of the sub-objects and stores them @@ -370,7 +370,7 @@ namespace PETScWrappers inline BlockVector::BlockVector(const unsigned int n_blocks, - const MPI_Comm & communicator, + const MPI_Comm communicator, const size_type block_size, const size_type locally_owned_size) : BlockVector() @@ -382,7 +382,7 @@ namespace PETScWrappers inline BlockVector::BlockVector( const std::vector &block_sizes, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &local_elements) : BlockVector() { @@ -407,7 +407,7 @@ namespace PETScWrappers inline BlockVector::BlockVector( const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) : BlockVector() { reinit(parallel_partitioning, communicator); @@ -418,7 +418,7 @@ namespace PETScWrappers inline BlockVector::BlockVector( const std::vector ¶llel_partitioning, const std::vector &ghost_indices, - const MPI_Comm & communicator) + const MPI_Comm communicator) : BlockVector() { reinit(parallel_partitioning, ghost_indices, communicator); @@ -481,7 +481,7 @@ namespace PETScWrappers inline void BlockVector::reinit(const unsigned int n_blocks, - const MPI_Comm & communicator, + const MPI_Comm communicator, const size_type block_size, const size_type locally_owned_size, const bool omit_zeroing_entries) @@ -496,7 +496,7 @@ namespace PETScWrappers inline void BlockVector::reinit(const std::vector &block_sizes, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &locally_owned_sizes, const bool omit_zeroing_entries) { @@ -530,7 +530,7 @@ namespace PETScWrappers inline void BlockVector::reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // update the number of blocks this->block_indices.reinit(parallel_partitioning.size(), 0); @@ -549,7 +549,7 @@ namespace PETScWrappers inline void BlockVector::reinit(const std::vector ¶llel_partitioning, const std::vector &ghost_entries, - const MPI_Comm & communicator) + const MPI_Comm communicator) { AssertDimension(parallel_partitioning.size(), ghost_entries.size()); diff --git a/include/deal.II/lac/petsc_communication_pattern.h 
b/include/deal.II/lac/petsc_communication_pattern.h index 619801e2c6..532e89b136 100644 --- a/include/deal.II/lac/petsc_communication_pattern.h +++ b/include/deal.II/lac/petsc_communication_pattern.h @@ -62,7 +62,7 @@ namespace PETScWrappers virtual void reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Reinitialize the communication pattern. The argument @p indices_locally_owned @@ -80,7 +80,7 @@ namespace PETScWrappers void reinit(const std::vector &indices_locally_owned, const std::vector &indices_want, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Reinitialization that takes the number of locally-owned degrees of @@ -98,7 +98,7 @@ namespace PETScWrappers void reinit(const types::global_dof_index local_size, const IndexSet & ghost_indices, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Fill the vector @p ghost_array according to the precomputed communication @@ -202,7 +202,7 @@ namespace PETScWrappers const std::vector &inloc, const std::vector &outidx, const std::vector &outloc, - const MPI_Comm & communicator); + const MPI_Comm communicator); }; /** @@ -238,7 +238,7 @@ namespace PETScWrappers virtual void reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Reinitialize the partitioner. As for the Utilities::MPI::Partitioner, @@ -251,7 +251,7 @@ namespace PETScWrappers reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, const IndexSet &larger_ghost_indices, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Return the actual number of ghost indices. diff --git a/include/deal.II/lac/petsc_matrix_free.h b/include/deal.II/lac/petsc_matrix_free.h index 3b5b723f10..41e332cd6a 100644 --- a/include/deal.II/lac/petsc_matrix_free.h +++ b/include/deal.II/lac/petsc_matrix_free.h @@ -78,7 +78,7 @@ namespace PETScWrappers * any estimation of non_zero entries and has no option * is_symmetric. */ - MatrixFree(const MPI_Comm & communicator, + MatrixFree(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const unsigned int local_rows, @@ -95,7 +95,7 @@ namespace PETScWrappers * any estimation of non_zero entries and has no option * is_symmetric. */ - MatrixFree(const MPI_Comm & communicator, + MatrixFree(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const std::vector &local_rows_per_process, @@ -129,7 +129,7 @@ namespace PETScWrappers * the same argument list as the present function. */ void - reinit(const MPI_Comm & communicator, + reinit(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const unsigned int local_rows, @@ -141,7 +141,7 @@ namespace PETScWrappers * the same argument list as the present function. */ void - reinit(const MPI_Comm & communicator, + reinit(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const std::vector &local_rows_per_process, @@ -266,7 +266,7 @@ namespace PETScWrappers * previous matrix is left to the caller. 
*/ void - do_reinit(const MPI_Comm & comm, + do_reinit(const MPI_Comm comm, const unsigned int m, const unsigned int n, const unsigned int local_rows, diff --git a/include/deal.II/lac/petsc_precondition.h b/include/deal.II/lac/petsc_precondition.h index a6b590b971..b7e68a47d3 100644 --- a/include/deal.II/lac/petsc_precondition.h +++ b/include/deal.II/lac/petsc_precondition.h @@ -63,7 +63,7 @@ namespace PETScWrappers /** * Constructor. */ - explicit PreconditionBase(const MPI_Comm &mpi_communicator); + explicit PreconditionBase(const MPI_Comm mpi_communicator); /** * Constructor. @@ -131,7 +131,7 @@ namespace PETScWrappers * Internal function to create the PETSc preconditioner object. */ void - create_pc_with_comm(const MPI_Comm &); + create_pc_with_comm(const MPI_Comm); }; @@ -175,7 +175,7 @@ namespace PETScWrappers * Intended to be used with SLEPc objects. */ PreconditionJacobi( - const MPI_Comm & communicator, + const MPI_Comm communicator, const AdditionalData &additional_data = AdditionalData()); /** @@ -257,7 +257,7 @@ namespace PETScWrappers * Intended to be used with SLEPc objects. */ PreconditionBlockJacobi( - const MPI_Comm & communicator, + const MPI_Comm communicator, const AdditionalData &additional_data = AdditionalData()); @@ -764,7 +764,7 @@ namespace PETScWrappers * Intended to be used with SLEPc objects. */ PreconditionBoomerAMG( - const MPI_Comm & communicator, + const MPI_Comm communicator, const AdditionalData &additional_data = AdditionalData()); @@ -1097,7 +1097,7 @@ namespace PETScWrappers /** * Same as above but without setting a matrix to form the preconditioner. */ - PreconditionShell(const MPI_Comm &communicator); + PreconditionShell(const MPI_Comm communicator); /** * The callback for the application of the preconditioner. @@ -1115,7 +1115,7 @@ namespace PETScWrappers * matrix. This function sets up the PCSHELL preconditioner */ void - initialize(const MPI_Comm &comm); + initialize(const MPI_Comm comm); /** * Initialize the preconditioner object with a particular diff --git a/include/deal.II/lac/petsc_snes.h b/include/deal.II/lac/petsc_snes.h index 19750041e5..a0e6c73841 100644 --- a/include/deal.II/lac/petsc_snes.h +++ b/include/deal.II/lac/petsc_snes.h @@ -247,7 +247,7 @@ namespace PETScWrappers * Constructor. */ NonlinearSolver(const NonlinearSolverData &data = NonlinearSolverData(), - const MPI_Comm & mpi_comm = PETSC_COMM_WORLD); + const MPI_Comm mpi_comm = PETSC_COMM_WORLD); /** * Destructor. diff --git a/include/deal.II/lac/petsc_snes.templates.h b/include/deal.II/lac/petsc_snes.templates.h index 9d272094b9..adac937aec 100644 --- a/include/deal.II/lac/petsc_snes.templates.h +++ b/include/deal.II/lac/petsc_snes.templates.h @@ -55,7 +55,7 @@ namespace PETScWrappers std::constructible_from)) NonlinearSolver::NonlinearSolver( const NonlinearSolverData &data, - const MPI_Comm & mpi_comm) + const MPI_Comm mpi_comm) { AssertPETSc(SNESCreate(mpi_comm, &snes)); AssertPETSc(SNESSetApplicationContext(snes, this)); diff --git a/include/deal.II/lac/petsc_solver.h b/include/deal.II/lac/petsc_solver.h index eaee6fca71..120f23eb90 100644 --- a/include/deal.II/lac/petsc_solver.h +++ b/include/deal.II/lac/petsc_solver.h @@ -179,7 +179,7 @@ namespace PETScWrappers * Utility to create the KSP object and attach convergence test. 
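A minimal usage sketch for the deprecated communicator-taking PETSc solver constructors in the hunks above (illustrative only; iteration count and tolerance are placeholders):

    #include <deal.II/lac/petsc_solver.h>
    #include <deal.II/lac/solver_control.h>

    SolverControl solver_control(1000, 1e-12);
    // Deprecated constructor variant; only the passing convention of the
    // communicator argument changes with this patch.
    PETScWrappers::SolverCG solver(solver_control, MPI_COMM_WORLD);
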
*/ void - initialize_ksp_with_comm(const MPI_Comm &comm); + initialize_ksp_with_comm(const MPI_Comm comm); /** * %Function that takes a Krylov Subspace Solver context object, and sets @@ -272,7 +272,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverRichardson(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -324,7 +324,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverChebychev(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -374,7 +374,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverCG(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -425,7 +425,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverBiCG(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -493,7 +493,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverGMRES(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -545,7 +545,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverBicgstab(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -596,7 +596,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverCGS(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -647,7 +647,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverTFQMR(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -703,7 +703,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverTCQMR(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -753,7 +753,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverCR(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -805,7 +805,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverLSQR(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -861,7 +861,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SolverPreOnly(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -924,7 +924,7 @@ namespace PETScWrappers */ DEAL_II_DEPRECATED_EARLY SparseDirectMUMPS(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); /** diff --git a/include/deal.II/lac/petsc_sparse_matrix.h b/include/deal.II/lac/petsc_sparse_matrix.h index e1a96ffa7a..884b0bb18f 100644 --- a/include/deal.II/lac/petsc_sparse_matrix.h +++ b/include/deal.II/lac/petsc_sparse_matrix.h @@ -432,7 +432,7 @@ namespace PETScWrappers * efficient to get memory allocation 
right from the start. */ template - SparseMatrix(const MPI_Comm & communicator, + SparseMatrix(const MPI_Comm communicator, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -480,7 +480,7 @@ namespace PETScWrappers */ template void - reinit(const MPI_Comm & communicator, + reinit(const MPI_Comm communicator, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -497,7 +497,7 @@ namespace PETScWrappers void reinit(const IndexSet & local_partitioning, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Create a matrix where the size() of the IndexSets determine the @@ -510,7 +510,7 @@ namespace PETScWrappers reinit(const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * Initialize this matrix to have the same structure as @p other. This @@ -536,7 +536,7 @@ namespace PETScWrappers const IndexSet & local_columns, const IndexSet & local_active_columns, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator); + const MPI_Comm communicator); /** * @addtogroup Exceptions @@ -626,7 +626,7 @@ namespace PETScWrappers */ template void - do_reinit(const MPI_Comm & comm, + do_reinit(const MPI_Comm comm, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -638,7 +638,7 @@ namespace PETScWrappers */ template void - do_reinit(const MPI_Comm & comm, + do_reinit(const MPI_Comm comm, const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern); @@ -649,7 +649,7 @@ namespace PETScWrappers */ template void - do_reinit(const MPI_Comm & comm, + do_reinit(const MPI_Comm comm, const IndexSet & local_rows, const IndexSet & local_active_rows, const IndexSet & local_columns, diff --git a/include/deal.II/lac/petsc_ts.h b/include/deal.II/lac/petsc_ts.h index 12461e8ad1..9a45ffc5be 100644 --- a/include/deal.II/lac/petsc_ts.h +++ b/include/deal.II/lac/petsc_ts.h @@ -311,7 +311,7 @@ namespace PETScWrappers * Constructor. */ TimeStepper(const TimeStepperData &data = TimeStepperData(), - const MPI_Comm & mpi_comm = PETSC_COMM_WORLD); + const MPI_Comm mpi_comm = PETSC_COMM_WORLD); /** * Destructor. diff --git a/include/deal.II/lac/petsc_ts.templates.h b/include/deal.II/lac/petsc_ts.templates.h index d75b81eafa..0a3645e0a5 100644 --- a/include/deal.II/lac/petsc_ts.templates.h +++ b/include/deal.II/lac/petsc_ts.templates.h @@ -55,7 +55,7 @@ namespace PETScWrappers std::constructible_from)) TimeStepper::TimeStepper( const TimeStepperData &data, - const MPI_Comm & mpi_comm) + const MPI_Comm mpi_comm) { AssertPETSc(TSCreate(mpi_comm, &ts)); AssertPETSc(TSSetApplicationContext(ts, this)); diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h index 00199b16e0..3347055df1 100644 --- a/include/deal.II/lac/petsc_vector.h +++ b/include/deal.II/lac/petsc_vector.h @@ -189,7 +189,7 @@ namespace PETScWrappers * v=Vector@(0);, i.e. the vector is replaced by one * of length zero. 
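A minimal usage sketch for the parallel PETSc vector constructors changed above (illustrative only; locally_owned stands in for an IndexSet describing this rank's elements):

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/petsc_vector.h>

    // Non-ghosted parallel vector; the communicator is now passed by value.
    PETScWrappers::MPI::Vector petsc_vector(locally_owned, MPI_COMM_WORLD);
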
*/ - explicit Vector(const MPI_Comm &communicator, + explicit Vector(const MPI_Comm communicator, const size_type n, const size_type locally_owned_size); @@ -204,7 +204,7 @@ namespace PETScWrappers * different parts of the vector shall communicate */ template - explicit Vector(const MPI_Comm & communicator, + explicit Vector(const MPI_Comm communicator, const dealii::Vector &v, const size_type locally_owned_size); @@ -233,7 +233,7 @@ namespace PETScWrappers */ Vector(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Construct a new parallel PETSc vector without ghost elements from an @@ -246,7 +246,7 @@ namespace PETScWrappers * not reordered by component (use a PETScWrappers::BlockVector * otherwise). */ - explicit Vector(const IndexSet &local, const MPI_Comm &communicator); + explicit Vector(const IndexSet &local, const MPI_Comm communicator); /** * Copy constructor. @@ -308,7 +308,7 @@ namespace PETScWrappers * Otherwise, the elements are left an unspecified state. */ void - reinit(const MPI_Comm &communicator, + reinit(const MPI_Comm communicator, const size_type N, const size_type locally_owned_size, const bool omit_zeroing_entries = false); @@ -335,7 +335,7 @@ namespace PETScWrappers void reinit(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Reinit as a vector without ghost elements. See constructor with same @@ -345,7 +345,7 @@ namespace PETScWrappers * @ref GlossGhostedVector "vectors with ghost elements" */ void - reinit(const IndexSet &local, const MPI_Comm &communicator); + reinit(const IndexSet &local, const MPI_Comm communicator); /** * Initialize the vector given to the parallel partitioning described in @@ -389,7 +389,7 @@ namespace PETScWrappers * locally. */ virtual void - create_vector(const MPI_Comm &comm, + create_vector(const MPI_Comm comm, const size_type n, const size_type locally_owned_size); @@ -401,7 +401,7 @@ namespace PETScWrappers * you need to call update_ghost_values() before accessing those. 
*/ virtual void - create_vector(const MPI_Comm &comm, + create_vector(const MPI_Comm comm, const size_type n, const size_type locally_owned_size, const IndexSet &ghostnodes); @@ -428,7 +428,7 @@ namespace PETScWrappers # ifndef DOXYGEN template - Vector::Vector(const MPI_Comm & communicator, + Vector::Vector(const MPI_Comm communicator, const dealii::Vector &v, const size_type locally_owned_size) { diff --git a/include/deal.II/lac/read_write_vector.h b/include/deal.II/lac/read_write_vector.h index c3e9d54496..90e536388f 100644 --- a/include/deal.II/lac/read_write_vector.h +++ b/include/deal.II/lac/read_write_vector.h @@ -665,7 +665,7 @@ namespace LinearAlgebra & tpetra_vector, const IndexSet & locally_owned_elements, VectorOperation::values operation, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::shared_ptr &communication_pattern); # endif @@ -679,7 +679,7 @@ namespace LinearAlgebra import(const Epetra_MultiVector &multivector, const IndexSet & locally_owned_elements, VectorOperation::values operation, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::shared_ptr &communication_pattern); #endif @@ -704,7 +704,7 @@ namespace LinearAlgebra */ TpetraWrappers::CommunicationPattern create_tpetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm); + const MPI_Comm mpi_comm); # endif /** @@ -713,7 +713,7 @@ namespace LinearAlgebra */ EpetraWrappers::CommunicationPattern create_epetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm); + const MPI_Comm mpi_comm); #endif /** diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h index ef1d092a1e..6019ad8f59 100644 --- a/include/deal.II/lac/read_write_vector.templates.h +++ b/include/deal.II/lac/read_write_vector.templates.h @@ -582,7 +582,7 @@ namespace LinearAlgebra const Tpetra::Vector &vector, const IndexSet & source_elements, VectorOperation::values operation, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::shared_ptr &communication_pattern) { @@ -699,7 +699,7 @@ namespace LinearAlgebra const Epetra_MultiVector &multivector, const IndexSet & source_elements, VectorOperation::values operation, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::shared_ptr &communication_pattern) { @@ -1046,7 +1046,7 @@ namespace LinearAlgebra TpetraWrappers::CommunicationPattern ReadWriteVector::create_tpetra_comm_pattern( const IndexSet &source_index_set, - const MPI_Comm &mpi_comm) + const MPI_Comm mpi_comm) { source_stored_elements = source_index_set; TpetraWrappers::CommunicationPattern epetra_comm_pattern( @@ -1064,7 +1064,7 @@ namespace LinearAlgebra EpetraWrappers::CommunicationPattern ReadWriteVector::create_epetra_comm_pattern( const IndexSet &source_index_set, - const MPI_Comm &mpi_comm) + const MPI_Comm mpi_comm) { source_stored_elements = source_index_set; EpetraWrappers::CommunicationPattern epetra_comm_pattern( diff --git a/include/deal.II/lac/slepc_solver.h b/include/deal.II/lac/slepc_solver.h index e8665d9f6b..92b074becf 100644 --- a/include/deal.II/lac/slepc_solver.h +++ b/include/deal.II/lac/slepc_solver.h @@ -150,7 +150,7 @@ namespace SLEPcWrappers * Constructor. Takes the MPI communicator over which parallel * computations are to happen. */ - SolverBase(SolverControl &cn, const MPI_Comm &mpi_communicator); + SolverBase(SolverControl &cn, const MPI_Comm mpi_communicator); /** * Destructor. 
@@ -405,7 +405,7 @@ namespace SLEPcWrappers * behavior as the PETScWrappers, but you can change that. */ SolverKrylovSchur(SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, const AdditionalData &data = AdditionalData()); protected: @@ -456,7 +456,7 @@ namespace SLEPcWrappers * behavior as the PETScWrappers, but you can change that. */ SolverArnoldi(SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, const AdditionalData &data = AdditionalData()); protected: @@ -508,7 +508,7 @@ namespace SLEPcWrappers * behavior as the PETScWrappers, but you can change that. */ SolverLanczos(SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, const AdditionalData &data = AdditionalData()); protected: @@ -548,7 +548,7 @@ namespace SLEPcWrappers * behavior as the PETScWrappers, but you can change that. */ SolverPower(SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, const AdditionalData &data = AdditionalData()); protected: @@ -597,10 +597,9 @@ namespace SLEPcWrappers * computations are parallelized. By default, this carries the same * behavior as the PETScWrappers, but you can change that. */ - SolverGeneralizedDavidson( - SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, - const AdditionalData &data = AdditionalData()); + SolverGeneralizedDavidson(SolverControl &cn, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, + const AdditionalData &data = AdditionalData()); protected: /** @@ -638,9 +637,9 @@ namespace SLEPcWrappers * computations are parallelized. By default, this carries the same * behavior as the PETScWrappers, but you can change that. */ - SolverJacobiDavidson(SolverControl & cn, - const MPI_Comm &mpi_communicator = PETSC_COMM_SELF, - const AdditionalData &data = AdditionalData()); + SolverJacobiDavidson(SolverControl &cn, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, + const AdditionalData &data = AdditionalData()); protected: /** @@ -679,7 +678,7 @@ namespace SLEPcWrappers * behavior as the PETScWrappers, but you can change that. */ SolverLAPACK(SolverControl & cn, - const MPI_Comm & mpi_communicator = PETSC_COMM_SELF, + const MPI_Comm mpi_communicator = PETSC_COMM_SELF, const AdditionalData &data = AdditionalData()); protected: diff --git a/include/deal.II/lac/slepc_spectral_transformation.h b/include/deal.II/lac/slepc_spectral_transformation.h index 4031ff83c0..a7f685f5bb 100644 --- a/include/deal.II/lac/slepc_spectral_transformation.h +++ b/include/deal.II/lac/slepc_spectral_transformation.h @@ -79,7 +79,7 @@ namespace SLEPcWrappers /** * Constructor. */ - TransformationBase(const MPI_Comm &mpi_communicator); + TransformationBase(const MPI_Comm mpi_communicator); public: /** @@ -144,7 +144,7 @@ namespace SLEPcWrappers /** * Constructor. */ - TransformationShift(const MPI_Comm & mpi_communicator, + TransformationShift(const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); @@ -184,7 +184,7 @@ namespace SLEPcWrappers /** * Constructor. */ - TransformationShiftInvert(const MPI_Comm & mpi_communicator, + TransformationShiftInvert(const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -233,7 +233,7 @@ namespace SLEPcWrappers * Constructor. 
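A minimal usage sketch for the SLEPc solver and spectral transformation constructors above (illustrative only; iteration count and tolerance are placeholders):

    #include <deal.II/lac/slepc_solver.h>
    #include <deal.II/lac/slepc_spectral_transformation.h>
    #include <deal.II/lac/solver_control.h>

    SolverControl solver_control(100, 1e-9);
    // Both objects now receive the communicator by value.
    SLEPcWrappers::SolverKrylovSchur         eigensolver(solver_control,
                                                         MPI_COMM_WORLD);
    SLEPcWrappers::TransformationShiftInvert shift(MPI_COMM_WORLD);
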
*/ TransformationSpectrumFolding( - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: @@ -277,7 +277,7 @@ namespace SLEPcWrappers /** * Constructor. */ - TransformationCayley(const MPI_Comm & mpi_communicator, + TransformationCayley(const MPI_Comm mpi_communicator, const AdditionalData &data = AdditionalData()); protected: diff --git a/include/deal.II/lac/sparse_matrix.h b/include/deal.II/lac/sparse_matrix.h index 53dcb16cb7..e5038ec7e5 100644 --- a/include/deal.II/lac/sparse_matrix.h +++ b/include/deal.II/lac/sparse_matrix.h @@ -50,7 +50,7 @@ namespace Utilities { template void - sum(const SparseMatrix &, const MPI_Comm &, SparseMatrix &); + sum(const SparseMatrix &, const MPI_Comm, SparseMatrix &); } } // namespace Utilities # endif @@ -1784,7 +1784,7 @@ private: template friend void Utilities::MPI::sum(const SparseMatrix &, - const MPI_Comm &, + const MPI_Comm, SparseMatrix &); #endif }; diff --git a/include/deal.II/lac/sparse_matrix_tools.h b/include/deal.II/lac/sparse_matrix_tools.h index 758b7b37aa..b21a4d1f95 100644 --- a/include/deal.II/lac/sparse_matrix_tools.h +++ b/include/deal.II/lac/sparse_matrix_tools.h @@ -145,7 +145,7 @@ namespace SparseMatrixTools { template std::tuple - compute_prefix_sum(const T &value, const MPI_Comm &comm) + compute_prefix_sum(const T &value, const MPI_Comm comm) { # ifndef DEAL_II_WITH_MPI (void)comm; @@ -231,7 +231,7 @@ namespace SparseMatrixTools extract_remote_rows(const SparseMatrixType & system_matrix, const SparsityPatternType &sparsity_pattern, const IndexSet & locally_active_dofs, - const MPI_Comm & comm) + const MPI_Comm comm) { std::vector dummy(locally_active_dofs.n_elements()); diff --git a/include/deal.II/lac/sparsity_tools.h b/include/deal.II/lac/sparsity_tools.h index 454654e659..0d8ff572b1 100644 --- a/include/deal.II/lac/sparsity_tools.h +++ b/include/deal.II/lac/sparsity_tools.h @@ -267,7 +267,7 @@ namespace SparsityTools void distribute_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & locally_relevant_rows); /** @@ -284,7 +284,7 @@ namespace SparsityTools distribute_sparsity_pattern( DynamicSparsityPattern & dsp, const std::vector &rows_per_cpu, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & myrange); /** @@ -304,7 +304,7 @@ namespace SparsityTools void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet &locally_relevant_rows); /** @@ -314,7 +314,7 @@ namespace SparsityTools void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp, const std::vector &owned_set_per_cpu, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & myrange); /** @@ -343,7 +343,7 @@ namespace SparsityTools void gather_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & locally_relevant_rows); #endif diff --git a/include/deal.II/lac/trilinos_block_sparse_matrix.h b/include/deal.II/lac/trilinos_block_sparse_matrix.h index 26dc8eb32e..9a39fdfc0d 100644 --- a/include/deal.II/lac/trilinos_block_sparse_matrix.h +++ b/include/deal.II/lac/trilinos_block_sparse_matrix.h @@ -156,7 +156,7 @@ namespace TrilinosWrappers void reinit(const std::vector & input_maps, const BlockSparsityPatternType &block_sparsity_pattern, 
- const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool exchange_data = false); /** @@ -178,7 +178,7 @@ namespace TrilinosWrappers reinit( const std::vector & parallel_partitioning, const ::dealii::BlockSparseMatrix &dealii_block_sparse_matrix, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const double drop_tolerance = 1e-13); /** diff --git a/include/deal.II/lac/trilinos_epetra_communication_pattern.h b/include/deal.II/lac/trilinos_epetra_communication_pattern.h index b6e8a47068..d993789b89 100644 --- a/include/deal.II/lac/trilinos_epetra_communication_pattern.h +++ b/include/deal.II/lac/trilinos_epetra_communication_pattern.h @@ -50,7 +50,7 @@ namespace LinearAlgebra */ CommunicationPattern(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Reinitialize the object. @@ -58,7 +58,7 @@ namespace LinearAlgebra virtual void reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Return the underlying MPI communicator. diff --git a/include/deal.II/lac/trilinos_epetra_vector.h b/include/deal.II/lac/trilinos_epetra_vector.h index 8be058b6a6..d142c15e5e 100644 --- a/include/deal.II/lac/trilinos_epetra_vector.h +++ b/include/deal.II/lac/trilinos_epetra_vector.h @@ -82,7 +82,7 @@ namespace LinearAlgebra * need to generate a %parallel vector. */ explicit Vector(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Reinit functionality. This function destroys the old vector content @@ -92,7 +92,7 @@ namespace LinearAlgebra */ void reinit(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool omit_zeroing_entries = false); /** @@ -370,7 +370,7 @@ namespace LinearAlgebra */ void create_epetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm); + const MPI_Comm mpi_comm); /** * Pointer to the actual Epetra vector object. diff --git a/include/deal.II/lac/trilinos_parallel_block_vector.h b/include/deal.II/lac/trilinos_parallel_block_vector.h index fe42d5a113..160792b7e6 100644 --- a/include/deal.II/lac/trilinos_parallel_block_vector.h +++ b/include/deal.II/lac/trilinos_parallel_block_vector.h @@ -109,7 +109,7 @@ namespace TrilinosWrappers * the MPI processes. */ explicit BlockVector(const std::vector ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Creates a BlockVector with ghost elements. 
See the respective @@ -118,7 +118,7 @@ namespace TrilinosWrappers */ BlockVector(const std::vector ¶llel_partitioning, const std::vector &ghost_values, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool vector_writable = false); /** @@ -189,7 +189,7 @@ namespace TrilinosWrappers */ void reinit(const std::vector ¶llel_partitioning, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool omit_zeroing_entries = false); /** @@ -212,7 +212,7 @@ namespace TrilinosWrappers void reinit(const std::vector &partitioning, const std::vector &ghost_values, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool vector_writable = false); @@ -317,7 +317,7 @@ namespace TrilinosWrappers /*-------------------------- Inline functions ---------------------------*/ inline BlockVector::BlockVector( const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) { reinit(parallel_partitioning, communicator, false); } @@ -327,7 +327,7 @@ namespace TrilinosWrappers inline BlockVector::BlockVector( const std::vector ¶llel_partitioning, const std::vector &ghost_values, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool vector_writable) { reinit(parallel_partitioning, diff --git a/include/deal.II/lac/trilinos_sparse_matrix.h b/include/deal.II/lac/trilinos_sparse_matrix.h index b28d0b9d25..004991c8e1 100644 --- a/include/deal.II/lac/trilinos_sparse_matrix.h +++ b/include/deal.II/lac/trilinos_sparse_matrix.h @@ -748,7 +748,7 @@ namespace TrilinosWrappers * use (in the compress() step). */ SparseMatrix(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const unsigned int n_max_entries_per_row = 0); /** @@ -759,7 +759,7 @@ namespace TrilinosWrappers * by the respective SparseMatrix::reinit call considerably faster. 
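A minimal usage sketch for the Trilinos sparse matrix reinit() overloads above (illustrative only; locally_owned and dsp stand in for an IndexSet and a DynamicSparsityPattern built elsewhere):

    #include <deal.II/lac/dynamic_sparsity_pattern.h>
    #include <deal.II/lac/trilinos_sparse_matrix.h>

    TrilinosWrappers::SparseMatrix system_matrix;
    // The communicator argument defaults to MPI_COMM_WORLD and is now a value.
    system_matrix.reinit(locally_owned, dsp, MPI_COMM_WORLD);
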
*/ SparseMatrix(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -778,7 +778,7 @@ namespace TrilinosWrappers */ SparseMatrix(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_max_entries_per_row = 0); /** @@ -797,7 +797,7 @@ namespace TrilinosWrappers */ SparseMatrix(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -824,7 +824,7 @@ namespace TrilinosWrappers void reinit(const IndexSet & parallel_partitioning, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool exchange_data = false); /** @@ -845,7 +845,7 @@ namespace TrilinosWrappers reinit(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool exchange_data = false); /** @@ -868,7 +868,7 @@ namespace TrilinosWrappers void reinit(const IndexSet & parallel_partitioning, const ::dealii::SparseMatrix &dealii_sparse_matrix, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const double drop_tolerance = 1e-13, const bool copy_values = true, const ::dealii::SparsityPattern * use_this_sparsity = nullptr); @@ -891,7 +891,7 @@ namespace TrilinosWrappers reinit(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const ::dealii::SparseMatrix &dealii_sparse_matrix, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const double drop_tolerance = 1e-13, const bool copy_values = true, const ::dealii::SparsityPattern * use_this_sparsity = nullptr); @@ -2419,7 +2419,7 @@ namespace TrilinosWrappers TrilinosPayload(EpetraOpType & op, const bool supports_inverse_operations, const bool use_transpose, - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const IndexSet &locally_owned_domain_indices, const IndexSet &locally_owned_range_indices); @@ -3026,7 +3026,7 @@ namespace TrilinosWrappers inline void SparseMatrix::reinit(const IndexSet & parallel_partitioning, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool exchange_data) { reinit(parallel_partitioning, @@ -3042,7 +3042,7 @@ namespace TrilinosWrappers inline void SparseMatrix::reinit(const IndexSet ¶llel_partitioning, const ::dealii::SparseMatrix &sparse_matrix, - const MPI_Comm & communicator, + const MPI_Comm communicator, const double drop_tolerance, const bool copy_values, const ::dealii::SparsityPattern * use_this_sparsity) @@ -3115,7 +3115,7 @@ namespace TrilinosWrappers EpetraOpType & op, const bool supports_inverse_operations, const bool use_transpose, - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const IndexSet &locally_owned_domain_indices, const IndexSet &locally_owned_range_indices) : use_transpose(use_transpose) diff --git a/include/deal.II/lac/trilinos_sparsity_pattern.h b/include/deal.II/lac/trilinos_sparsity_pattern.h index 86af299349..1da9a2fff2 100644 --- 
a/include/deal.II/lac/trilinos_sparsity_pattern.h +++ b/include/deal.II/lac/trilinos_sparsity_pattern.h @@ -434,7 +434,7 @@ namespace TrilinosWrappers * the performance when creating the sparsity pattern. */ SparsityPattern(const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -448,7 +448,7 @@ namespace TrilinosWrappers * designed to describe. */ SparsityPattern(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -467,7 +467,7 @@ namespace TrilinosWrappers */ SparsityPattern(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -483,7 +483,7 @@ namespace TrilinosWrappers */ SparsityPattern(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -515,7 +515,7 @@ namespace TrilinosWrappers SparsityPattern(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const IndexSet &writable_rows, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -535,7 +535,7 @@ namespace TrilinosWrappers */ void reinit(const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -550,7 +550,7 @@ namespace TrilinosWrappers */ void reinit(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -572,7 +572,7 @@ namespace TrilinosWrappers void reinit(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -604,7 +604,7 @@ namespace TrilinosWrappers reinit(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const IndexSet &writeable_rows, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const size_type n_entries_per_row = 0); /** @@ -614,7 +614,7 @@ namespace TrilinosWrappers void reinit(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row); /** @@ -631,7 +631,7 @@ namespace TrilinosWrappers reinit(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const SparsityPatternType &nontrilinos_sparsity_pattern, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool exchange_data = false); /** @@ -646,7 +646,7 @@ namespace TrilinosWrappers void reinit(const IndexSet & parallel_partitioning, const SparsityPatternType &nontrilinos_sparsity_pattern, - const MPI_Comm & communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool exchange_data = false); /** @} */ /** diff --git a/include/deal.II/lac/trilinos_tpetra_communication_pattern.h 
b/include/deal.II/lac/trilinos_tpetra_communication_pattern.h index 8dd027b754..e2a50fa2cc 100644 --- a/include/deal.II/lac/trilinos_tpetra_communication_pattern.h +++ b/include/deal.II/lac/trilinos_tpetra_communication_pattern.h @@ -49,7 +49,7 @@ namespace LinearAlgebra */ CommunicationPattern(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Reinitialize the object. @@ -57,7 +57,7 @@ namespace LinearAlgebra virtual void reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) override; + const MPI_Comm communicator) override; /** * Return the underlying MPI communicator. diff --git a/include/deal.II/lac/trilinos_tpetra_vector.h b/include/deal.II/lac/trilinos_tpetra_vector.h index bb2094485d..977935216f 100644 --- a/include/deal.II/lac/trilinos_tpetra_vector.h +++ b/include/deal.II/lac/trilinos_tpetra_vector.h @@ -138,7 +138,7 @@ namespace LinearAlgebra * need to generate a %parallel vector. */ explicit Vector(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator); + const MPI_Comm communicator); /** * Reinit functionality. This function destroys the old vector content @@ -148,7 +148,7 @@ namespace LinearAlgebra */ void reinit(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool omit_zeroing_entries = false); /** @@ -426,7 +426,7 @@ namespace LinearAlgebra */ void create_tpetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm); + const MPI_Comm mpi_comm); /** * Pointer to the actual Tpetra vector object. diff --git a/include/deal.II/lac/trilinos_tpetra_vector.templates.h b/include/deal.II/lac/trilinos_tpetra_vector.templates.h index c937f9292a..6658937559 100644 --- a/include/deal.II/lac/trilinos_tpetra_vector.templates.h +++ b/include/deal.II/lac/trilinos_tpetra_vector.templates.h @@ -69,7 +69,7 @@ namespace LinearAlgebra template Vector::Vector(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator) + const MPI_Comm communicator) : Subscriptor() , vector(new Tpetra::Vector( Teuchos::rcp(new Tpetra::Map( @@ -81,7 +81,7 @@ namespace LinearAlgebra template void Vector::reinit(const IndexSet ¶llel_partitioner, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool omit_zeroing_entries) { Tpetra::Map input_map = @@ -677,7 +677,7 @@ namespace LinearAlgebra template void Vector::create_tpetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm) + const MPI_Comm mpi_comm) { source_stored_elements = source_index_set; tpetra_comm_pattern = diff --git a/include/deal.II/lac/trilinos_vector.h b/include/deal.II/lac/trilinos_vector.h index c03d0a17b6..4024d14f33 100644 --- a/include/deal.II/lac/trilinos_vector.h +++ b/include/deal.II/lac/trilinos_vector.h @@ -445,7 +445,7 @@ namespace TrilinosWrappers * @ref GlossGhostedVector "vectors with ghost elements" */ explicit Vector(const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Creates a ghosted parallel vector. @@ -460,7 +460,7 @@ namespace TrilinosWrappers */ Vector(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Copy constructor from the TrilinosWrappers vector class. 
Since a @@ -478,7 +478,7 @@ namespace TrilinosWrappers */ Vector(const IndexSet ¶llel_partitioning, const Vector & v, - const MPI_Comm &communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Copy-constructor from deal.II vectors. Sets the dimension to that of @@ -495,7 +495,7 @@ namespace TrilinosWrappers template Vector(const IndexSet & parallel_partitioning, const dealii::Vector &v, - const MPI_Comm & communicator = MPI_COMM_WORLD); + const MPI_Comm communicator = MPI_COMM_WORLD); /** * Move constructor. Creates a new vector by stealing the internal data @@ -571,7 +571,7 @@ namespace TrilinosWrappers */ void reinit(const IndexSet ¶llel_partitioning, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool omit_zeroing_entries = false); /** @@ -611,7 +611,7 @@ namespace TrilinosWrappers void reinit(const IndexSet &locally_owned_entries, const IndexSet &locally_relevant_or_ghost_entries, - const MPI_Comm &communicator = MPI_COMM_WORLD, + const MPI_Comm communicator = MPI_COMM_WORLD, const bool vector_writable = false); /** @@ -2196,7 +2196,7 @@ namespace TrilinosWrappers template Vector::Vector(const IndexSet & parallel_partitioner, const dealii::Vector &v, - const MPI_Comm & communicator) + const MPI_Comm communicator) { *this = Vector(parallel_partitioner.make_trilinos_map(communicator, true), v); diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h index 938dc99aff..046cb8f933 100644 --- a/include/deal.II/matrix_free/dof_info.h +++ b/include/deal.II/matrix_free/dof_info.h @@ -229,7 +229,7 @@ namespace internal */ void assign_ghosts(const std::vector &boundary_cells, - const MPI_Comm & communicator_sm, + const MPI_Comm communicator_sm, const bool use_vector_data_exchanger_full); /** @@ -284,7 +284,7 @@ namespace internal const std::vector> &inner_faces, const std::vector> &ghosted_faces, const bool fill_cell_centric, - const MPI_Comm & communicator_sm, + const MPI_Comm communicator_sm, const bool use_vector_data_exchanger_full); /** diff --git a/include/deal.II/matrix_free/vector_data_exchange.h b/include/deal.II/matrix_free/vector_data_exchange.h index 9c2f9cc040..a574aef326 100644 --- a/include/deal.II/matrix_free/vector_data_exchange.h +++ b/include/deal.II/matrix_free/vector_data_exchange.h @@ -264,7 +264,7 @@ namespace internal public: Full( const std::shared_ptr &partitioner, - const MPI_Comm &communicator_sm); + const MPI_Comm communicator_sm); unsigned int locally_owned_size() const override; diff --git a/include/deal.II/sundials/arkode.h b/include/deal.II/sundials/arkode.h index 0926775319..f69622f06e 100644 --- a/include/deal.II/sundials/arkode.h +++ b/include/deal.II/sundials/arkode.h @@ -486,7 +486,7 @@ namespace SUNDIALS * @param mpi_comm MPI Communicator over which logging operations are * computed. Only used in SUNDIALS 6 and newer. */ - ARKode(const AdditionalData &data, const MPI_Comm &mpi_comm); + ARKode(const AdditionalData &data, const MPI_Comm mpi_comm); /** * Destructor. diff --git a/include/deal.II/sundials/ida.h b/include/deal.II/sundials/ida.h index 81f6d0d87a..99757a21bc 100644 --- a/include/deal.II/sundials/ida.h +++ b/include/deal.II/sundials/ida.h @@ -605,7 +605,7 @@ namespace SUNDIALS * @param mpi_comm MPI Communicator over which logging operations are * computed. Only used in SUNDIALS 6 and newer. */ - IDA(const AdditionalData &data, const MPI_Comm &mpi_comm); + IDA(const AdditionalData &data, const MPI_Comm mpi_comm); /** * Destructor. 
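The SUNDIALS wrappers above (ARKode, IDA) now take the communicator by value rather than by const reference. The stand-alone sketch below, which uses only plain MPI and deliberately avoids any deal.II API, illustrates why this is a safe and cheap interface: MPI_Comm is an opaque handle (an int under MPICH-derived implementations, a pointer under Open MPI), so copying it costs no more than copying a pointer, and a by-value parameter also binds directly to a temporary. The helper make_comm() is purely illustrative and not part of any library.

#include <mpi.h>

// Illustrative helper (an assumption of this sketch only): returns a
// communicator by value, here simply the world communicator.
static MPI_Comm make_comm()
{
  return MPI_COMM_WORLD;
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // Copying the handle is trivially cheap, and a by-value parameter of the
  // kind introduced in this patch accepts the temporary returned here without
  // binding a reference to it.
  const MPI_Comm comm = make_comm();

  int rank = 0;
  MPI_Comm_rank(comm, &rank);
  (void)rank;

  MPI_Finalize();
  return 0;
}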
diff --git a/include/deal.II/sundials/kinsol.h b/include/deal.II/sundials/kinsol.h index ccdd39fbb4..92a0da43b5 100644 --- a/include/deal.II/sundials/kinsol.h +++ b/include/deal.II/sundials/kinsol.h @@ -393,7 +393,7 @@ namespace SUNDIALS * @param mpi_comm MPI Communicator over which logging operations are * computed. Only used in SUNDIALS 6 and newer. */ - KINSOL(const AdditionalData &data, const MPI_Comm &mpi_comm); + KINSOL(const AdditionalData &data, const MPI_Comm mpi_comm); /** * Destructor. diff --git a/source/base/data_out_base.cc b/source/base/data_out_base.cc index 294987f1d9..51319b8c72 100644 --- a/source/base/data_out_base.cc +++ b/source/base/data_out_base.cc @@ -7410,7 +7410,7 @@ namespace DataOutBase & nonscalar_data_ranges, const Deal_II_IntermediateFlags &flags, const std::string & filename, - const MPI_Comm & comm, + const MPI_Comm comm, const CompressionLevel compression) { #ifndef DEAL_II_WITH_MPI @@ -7716,7 +7716,7 @@ template void DataOutInterface::write_vtu_in_parallel( const std::string &filename, - const MPI_Comm & comm) const + const MPI_Comm comm) const { #ifndef DEAL_II_WITH_MPI // without MPI fall back to the normal way to write a vtu file: @@ -7857,7 +7857,7 @@ DataOutInterface::write_vtu_with_pvtu_record( const std::string &directory, const std::string &filename_without_extension, const unsigned int counter, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const unsigned int n_digits_for_counter, const unsigned int n_groups) const { @@ -7950,7 +7950,7 @@ template void DataOutInterface::write_deal_II_intermediate_in_parallel( const std::string & filename, - const MPI_Comm & comm, + const MPI_Comm comm, const DataOutBase::CompressionLevel compression) const { DataOutBase::write_deal_II_intermediate_in_parallel( @@ -7971,7 +7971,7 @@ DataOutInterface::create_xdmf_entry( const DataOutBase::DataOutFilter &data_filter, const std::string & h5_filename, const double cur_time, - const MPI_Comm & comm) const + const MPI_Comm comm) const { return create_xdmf_entry( data_filter, h5_filename, h5_filename, cur_time, comm); @@ -7986,7 +7986,7 @@ DataOutInterface::create_xdmf_entry( const std::string & h5_mesh_filename, const std::string & h5_solution_filename, const double cur_time, - const MPI_Comm & comm) const + const MPI_Comm comm) const { AssertThrow(spacedim == 2 || spacedim == 3, ExcMessage("XDMF only supports 2 or 3 space dimensions.")); @@ -8123,7 +8123,7 @@ void DataOutInterface::write_xdmf_file( const std::vector &entries, const std::string & filename, - const MPI_Comm & comm) const + const MPI_Comm comm) const { #ifdef DEAL_II_WITH_MPI const int myrank = Utilities::MPI::this_mpi_process(comm); @@ -8189,7 +8189,7 @@ namespace const bool write_mesh_file, const std::string & mesh_filename, const std::string & solution_filename, - const MPI_Comm & comm) + const MPI_Comm comm) { hid_t h5_mesh_file_id = -1, h5_solution_file_id, file_plist_id, plist_id; hid_t node_dataspace, node_dataset, node_file_dataspace, @@ -8658,7 +8658,7 @@ void DataOutInterface::write_hdf5_parallel( const DataOutBase::DataOutFilter &data_filter, const std::string & filename, - const MPI_Comm & comm) const + const MPI_Comm comm) const { DataOutBase::write_hdf5_parallel( get_patches(), data_filter, hdf5_flags, filename, comm); @@ -8673,7 +8673,7 @@ DataOutInterface::write_hdf5_parallel( const bool write_mesh_file, const std::string & mesh_filename, const std::string & solution_filename, - const MPI_Comm & comm) const + const MPI_Comm comm) const { 
DataOutBase::write_hdf5_parallel(get_patches(), data_filter, @@ -8693,7 +8693,7 @@ DataOutBase::write_hdf5_parallel( const DataOutBase::DataOutFilter & data_filter, const DataOutBase::Hdf5Flags & flags, const std::string & filename, - const MPI_Comm & comm) + const MPI_Comm comm) { write_hdf5_parallel( patches, data_filter, flags, true, filename, filename, comm); @@ -8710,7 +8710,7 @@ DataOutBase::write_hdf5_parallel( const bool write_mesh_file, const std::string & mesh_filename, const std::string & solution_filename, - const MPI_Comm & comm) + const MPI_Comm comm) { AssertThrow( spacedim >= 2, diff --git a/source/base/data_out_base.inst.in b/source/base/data_out_base.inst.in index a6ffff5d73..112a29037a 100644 --- a/source/base/data_out_base.inst.in +++ b/source/base/data_out_base.inst.in @@ -202,7 +202,7 @@ for (deal_II_dimension : OUTPUT_DIMENSIONS; & nonscalar_data_ranges, const Deal_II_IntermediateFlags &flags, const std::string & filename, - const MPI_Comm & comm, + const MPI_Comm comm, const CompressionLevel compression); template void @@ -212,7 +212,7 @@ for (deal_II_dimension : OUTPUT_DIMENSIONS; const DataOutFilter & data_filter, const DataOutBase::Hdf5Flags &flags, const std::string & filename, - const MPI_Comm & comm); + const MPI_Comm comm); template void write_filtered_data( diff --git a/source/base/hdf5.cc b/source/base/hdf5.cc index 6aab668ac2..8f951d8f4e 100644 --- a/source/base/hdf5.cc +++ b/source/base/hdf5.cc @@ -409,7 +409,7 @@ namespace HDF5 File::File(const std::string & name, const FileAccessMode mode, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : File(name, mode, true, mpi_communicator) {} @@ -418,7 +418,7 @@ namespace HDF5 File::File(const std::string & name, const FileAccessMode mode, const bool mpi, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : Group(name, mpi) { hdf5_reference = std::shared_ptr(new hid_t, [](hid_t *pointer) { diff --git a/source/base/index_set.cc b/source/base/index_set.cc index 630d8ba445..b950d5fca5 100644 --- a/source/base/index_set.cc +++ b/source/base/index_set.cc @@ -717,8 +717,8 @@ IndexSet::fill_index_vector(std::vector &indices) const # ifdef DEAL_II_TRILINOS_WITH_TPETRA Tpetra::Map -IndexSet::make_tpetra_map(const MPI_Comm &communicator, - const bool overlapping) const +IndexSet::make_tpetra_map(const MPI_Comm communicator, + const bool overlapping) const { compress(); (void)communicator; @@ -786,8 +786,8 @@ IndexSet::make_tpetra_map(const MPI_Comm &communicator, Epetra_Map -IndexSet::make_trilinos_map(const MPI_Comm &communicator, - const bool overlapping) const +IndexSet::make_trilinos_map(const MPI_Comm communicator, + const bool overlapping) const { compress(); (void)communicator; @@ -855,7 +855,7 @@ IndexSet::make_trilinos_map(const MPI_Comm &communicator, #ifdef DEAL_II_WITH_PETSC IS -IndexSet::make_petsc_is(const MPI_Comm &communicator) const +IndexSet::make_petsc_is(const MPI_Comm communicator) const { std::vector indices; fill_index_vector(indices); @@ -875,7 +875,7 @@ IndexSet::make_petsc_is(const MPI_Comm &communicator) const bool -IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const +IndexSet::is_ascending_and_one_to_one(const MPI_Comm communicator) const { // If the sum of local elements does not add up to the total size, // the IndexSet can't be complete. 
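In the changes to source/base/mpi.cc that follow, wrapper functions such as n_mpi_processes() and this_mpi_process() switch their parameter from const MPI_Comm & to const MPI_Comm. A minimal stand-alone sketch (plain MPI; the function name n_ranks is chosen only for illustration) shows that call sites are unaffected and that the top-level const on the by-value parameter is an implementation detail of the definition only, not part of the function's type:

#include <mpi.h>

// Old and new styles accept exactly the same arguments at the call site;
// top-level const on a by-value parameter is not part of the function type,
// so this declaration and the const-qualified definition below refer to the
// same function.
unsigned int n_ranks(MPI_Comm comm);

unsigned int n_ranks(const MPI_Comm comm)
{
  int n = 1;
  MPI_Comm_size(comm, &n);
  return static_cast<unsigned int>(n);
}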
diff --git a/source/base/mpi.cc b/source/base/mpi.cc index a837eecde4..04e072ade3 100644 --- a/source/base/mpi.cc +++ b/source/base/mpi.cc @@ -121,7 +121,7 @@ namespace Utilities MinMaxAvg - min_max_avg(const double my_value, const MPI_Comm &mpi_communicator) + min_max_avg(const double my_value, const MPI_Comm mpi_communicator) { MinMaxAvg result; min_max_avg(ArrayView(my_value), @@ -135,7 +135,7 @@ namespace Utilities std::vector min_max_avg(const std::vector &my_values, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { std::vector results(my_values.size()); min_max_avg(my_values, results, mpi_communicator); @@ -147,7 +147,7 @@ namespace Utilities #ifdef DEAL_II_WITH_MPI unsigned int - n_mpi_processes(const MPI_Comm &mpi_communicator) + n_mpi_processes(const MPI_Comm mpi_communicator) { int n_jobs = 1; const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs); @@ -158,7 +158,7 @@ namespace Utilities unsigned int - this_mpi_process(const MPI_Comm &mpi_communicator) + this_mpi_process(const MPI_Comm mpi_communicator) { int rank = 0; const int ierr = MPI_Comm_rank(mpi_communicator, &rank); @@ -170,8 +170,8 @@ namespace Utilities const std::vector - mpi_processes_within_communicator(const MPI_Comm &comm_large, - const MPI_Comm &comm_small) + mpi_processes_within_communicator(const MPI_Comm comm_large, + const MPI_Comm comm_small) { if (Utilities::MPI::job_supports_mpi() == false) return std::vector{0}; @@ -190,7 +190,7 @@ namespace Utilities MPI_Comm - duplicate_communicator(const MPI_Comm &mpi_communicator) + duplicate_communicator(const MPI_Comm mpi_communicator) { MPI_Comm new_communicator; const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator); @@ -201,7 +201,7 @@ namespace Utilities void - free_communicator(MPI_Comm &mpi_communicator) + free_communicator(MPI_Comm mpi_communicator) { // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically. 
const int ierr = MPI_Comm_free(&mpi_communicator); @@ -211,7 +211,7 @@ namespace Utilities int - create_group(const MPI_Comm & comm, + create_group(const MPI_Comm comm, const MPI_Group &group, const int tag, MPI_Comm * new_comm) @@ -225,7 +225,7 @@ namespace Utilities std::vector create_ascending_partitioning( - const MPI_Comm & comm, + const MPI_Comm comm, const types::global_dof_index locally_owned_size) { static_assert( @@ -254,7 +254,7 @@ namespace Utilities IndexSet create_evenly_distributed_partitioning( - const MPI_Comm & comm, + const MPI_Comm comm, const types::global_dof_index total_size) { const unsigned int this_proc = this_mpi_process(comm); @@ -312,7 +312,7 @@ namespace Utilities std::vector compute_point_to_point_communication_pattern( - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::vector &destinations) { const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm); @@ -427,7 +427,7 @@ namespace Utilities unsigned int compute_n_point_to_point_communications( - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::vector &destinations) { // Have a little function that checks if destinations provided @@ -537,7 +537,7 @@ namespace Utilities void min_max_avg(const ArrayView &my_values, const ArrayView & result, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { // If MPI was not started, we have a serial computation and cannot run // the other MPI commands @@ -655,7 +655,7 @@ namespace Utilities #else unsigned int - n_mpi_processes(const MPI_Comm &) + n_mpi_processes(const MPI_Comm) { return 1; } @@ -663,7 +663,7 @@ namespace Utilities unsigned int - this_mpi_process(const MPI_Comm &) + this_mpi_process(const MPI_Comm) { return 0; } @@ -671,7 +671,7 @@ namespace Utilities const std::vector - mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &) + mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm) { return std::vector{0}; } @@ -680,7 +680,7 @@ namespace Utilities std::vector create_ascending_partitioning( - const MPI_Comm & /*comm*/, + const MPI_Comm /*comm*/, const types::global_dof_index locally_owned_size) { return std::vector(1, complete_index_set(locally_owned_size)); @@ -688,7 +688,7 @@ namespace Utilities IndexSet create_evenly_distributed_partitioning( - const MPI_Comm & /*comm*/, + const MPI_Comm /*comm*/, const types::global_dof_index total_size) { return complete_index_set(total_size); @@ -697,15 +697,14 @@ namespace Utilities MPI_Comm - duplicate_communicator(const MPI_Comm &mpi_communicator) + duplicate_communicator(const MPI_Comm mpi_communicator) { return mpi_communicator; } - void - free_communicator(MPI_Comm & /*mpi_communicator*/) + void free_communicator(MPI_Comm /*mpi_communicator*/) {} @@ -713,7 +712,7 @@ namespace Utilities void min_max_avg(const ArrayView &my_values, const ArrayView & result, - const MPI_Comm &) + const MPI_Comm) { AssertDimension(my_values.size(), result.size()); @@ -1064,7 +1063,7 @@ namespace Utilities std::vector compute_index_owner(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, - const MPI_Comm &comm) + const MPI_Comm comm) { Assert(owned_indices.size() == indices_to_look_up.size(), ExcMessage("IndexSets have to have the same sizes.")); @@ -1165,7 +1164,7 @@ namespace Utilities void - CollectiveMutex::lock(const MPI_Comm &comm) + CollectiveMutex::lock(const MPI_Comm comm) { (void)comm; @@ -1199,7 +1198,7 @@ namespace Utilities void - CollectiveMutex::unlock(const MPI_Comm &comm) + CollectiveMutex::unlock(const MPI_Comm comm) 
{ (void)comm; @@ -1236,26 +1235,26 @@ namespace Utilities // booleans aren't in MPI_SCALARS template bool reduce(const bool &, - const MPI_Comm &, + const MPI_Comm, const std::function &, const unsigned int); template std::vector reduce(const std::vector &, - const MPI_Comm &, + const MPI_Comm, const std::function(const std::vector &, const std::vector &)> &, const unsigned int); template bool all_reduce(const bool &, - const MPI_Comm &, + const MPI_Comm, const std::function &); template std::vector all_reduce( const std::vector &, - const MPI_Comm &, + const MPI_Comm, const std::function(const std::vector &, const std::vector &)> &); @@ -1264,27 +1263,27 @@ namespace Utilities template void internal::all_reduce(const MPI_Op &, const ArrayView &, - const MPI_Comm &, + const MPI_Comm, const ArrayView &); template bool - logical_or(const bool &, const MPI_Comm &); + logical_or(const bool &, const MPI_Comm); template void logical_or(const ArrayView &, - const MPI_Comm &, + const MPI_Comm, const ArrayView &); template std::vector compute_set_union(const std::vector &vec, - const MPI_Comm & comm); + const MPI_Comm comm); template std::set - compute_set_union(const std::set &set, const MPI_Comm &comm); + compute_set_union(const std::set &set, const MPI_Comm comm); #endif #include "mpi.inst" diff --git a/source/base/mpi.inst.in b/source/base/mpi.inst.in index 0ba1696696..5c4bc1409a 100644 --- a/source/base/mpi.inst.in +++ b/source/base/mpi.inst.in @@ -19,74 +19,74 @@ for (S : REAL_SCALARS) { template void sum(const SparseMatrix &, - const MPI_Comm &, + const MPI_Comm, SparseMatrix &); } for (S : MPI_SCALARS) { template void sum>(const LAPACKFullMatrix &, - const MPI_Comm &, + const MPI_Comm, LAPACKFullMatrix &); template void sum>(const Vector &, - const MPI_Comm &, + const MPI_Comm, Vector &); template void sum>(const FullMatrix &, - const MPI_Comm &, + const MPI_Comm, FullMatrix &); template void sum(const ArrayView &, - const MPI_Comm &, + const MPI_Comm, const ArrayView &); - template S sum(const S &, const MPI_Comm &); + template S sum(const S &, const MPI_Comm); template void sum>(const std::vector &, - const MPI_Comm &, + const MPI_Comm, std::vector &); - template S max(const S &, const MPI_Comm &); + template S max(const S &, const MPI_Comm); template void max>(const std::vector &, - const MPI_Comm &, + const MPI_Comm, std::vector &); template void max(const ArrayView &, - const MPI_Comm &, + const MPI_Comm, const ArrayView &); - template S min(const S &, const MPI_Comm &); + template S min(const S &, const MPI_Comm); template void min>(const std::vector &, - const MPI_Comm &, + const MPI_Comm, std::vector &); template void min(const ArrayView &, - const MPI_Comm &, + const MPI_Comm, const ArrayView &); template S reduce(const S & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &process, const unsigned int root_process); template std::vector reduce( const std::vector & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function(const std::vector &, const std::vector &)> &process, const unsigned int root_process); template S all_reduce( const S & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function &process); template std::vector all_reduce( const std::vector & vec, - const MPI_Comm & comm, + const MPI_Comm comm, const std::function(const std::vector &, const std::vector &)> &process); @@ -107,7 +107,7 @@ for (S : MPI_SCALARS) template void Utilities::MPI::internal::all_reduce( const MPI_Op &, const ArrayView &, - const MPI_Comm &, 
+ const MPI_Comm, const ArrayView &); } @@ -115,7 +115,7 @@ for (S : MPI_SCALARS) for (S : REAL_SCALARS; rank : RANKS; dim : SPACE_DIMENSIONS) { template Tensor sum( - const Tensor &, const MPI_Comm &); + const Tensor &, const MPI_Comm); } @@ -123,10 +123,10 @@ for (S : REAL_SCALARS; rank : RANKS; dim : SPACE_DIMENSIONS) for (S : REAL_SCALARS; dim : SPACE_DIMENSIONS) { template SymmetricTensor<2, dim, S> sum<2, dim, S>( - const SymmetricTensor<2, dim, S> &, const MPI_Comm &); + const SymmetricTensor<2, dim, S> &, const MPI_Comm); template SymmetricTensor<4, dim, S> sum<4, dim, S>( - const SymmetricTensor<4, dim, S> &, const MPI_Comm &); + const SymmetricTensor<4, dim, S> &, const MPI_Comm); } diff --git a/source/base/mpi_compute_index_owner_internal.cc b/source/base/mpi_compute_index_owner_internal.cc index 3beb029a2d..afb1a0de58 100644 --- a/source/base/mpi_compute_index_owner_internal.cc +++ b/source/base/mpi_compute_index_owner_internal.cc @@ -165,7 +165,7 @@ namespace Utilities void - Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm &comm) + Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm) { // 1) set up the partition this->partition(owned_indices, comm); @@ -430,7 +430,7 @@ namespace Utilities void Dictionary::partition(const IndexSet &owned_indices, - const MPI_Comm &comm) + const MPI_Comm comm) { #ifdef DEAL_II_WITH_MPI const unsigned int n_procs = n_mpi_processes(comm); @@ -462,7 +462,7 @@ namespace Utilities ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload( const IndexSet & owned_indices, const IndexSet & indices_to_look_up, - const MPI_Comm & comm, + const MPI_Comm comm, std::vector &owning_ranks, const bool track_index_requests) : owned_indices(owned_indices) diff --git a/source/base/mpi_noncontiguous_partitioner.cc b/source/base/mpi_noncontiguous_partitioner.cc index 9920482537..bfdcf4353c 100644 --- a/source/base/mpi_noncontiguous_partitioner.cc +++ b/source/base/mpi_noncontiguous_partitioner.cc @@ -32,7 +32,7 @@ namespace Utilities NoncontiguousPartitioner::NoncontiguousPartitioner( const IndexSet &indexset_has, const IndexSet &indexset_want, - const MPI_Comm &communicator) + const MPI_Comm communicator) { this->reinit(indexset_has, indexset_want, communicator); } @@ -42,7 +42,7 @@ namespace Utilities NoncontiguousPartitioner::NoncontiguousPartitioner( const std::vector &indices_has, const std::vector &indices_want, - const MPI_Comm & communicator) + const MPI_Comm communicator) { this->reinit(indices_has, indices_want, communicator); } @@ -91,7 +91,7 @@ namespace Utilities void NoncontiguousPartitioner::reinit(const IndexSet &indexset_has, const IndexSet &indexset_want, - const MPI_Comm &communicator) + const MPI_Comm communicator) { this->communicator = communicator; @@ -169,7 +169,7 @@ namespace Utilities NoncontiguousPartitioner::reinit( const std::vector &indices_has, const std::vector &indices_want, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // step 0) clean vectors from numbers::invalid_dof_index (indicating // padding) diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc index 0de096dd4a..69b554daf4 100644 --- a/source/base/partitioner.cc +++ b/source/base/partitioner.cc @@ -64,7 +64,7 @@ namespace Utilities Partitioner::Partitioner(const types::global_dof_index local_size, const types::global_dof_index ghost_size, - const MPI_Comm & communicator) + const MPI_Comm communicator) : global_size(Utilities::MPI::sum(local_size, communicator)) , locally_owned_range_data(global_size) @@ -100,7 
+100,7 @@ namespace Utilities Partitioner::Partitioner(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices_in, - const MPI_Comm &communicator_in) + const MPI_Comm communicator_in) : global_size( static_cast(locally_owned_indices.size())) , n_ghost_indices_data(0) @@ -118,7 +118,7 @@ namespace Utilities Partitioner::Partitioner(const IndexSet &locally_owned_indices, - const MPI_Comm &communicator_in) + const MPI_Comm communicator_in) : global_size( static_cast(locally_owned_indices.size())) , n_ghost_indices_data(0) @@ -137,7 +137,7 @@ namespace Utilities void Partitioner::reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator_in) + const MPI_Comm communicator_in) { have_ghost_indices = false; communicator = communicator_in; diff --git a/source/base/process_grid.cc b/source/base/process_grid.cc index 7c340b6028..7d31e0421f 100644 --- a/source/base/process_grid.cc +++ b/source/base/process_grid.cc @@ -34,7 +34,7 @@ namespace * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91 */ inline std::pair - compute_processor_grid_sizes(const MPI_Comm & mpi_comm, + compute_processor_grid_sizes(const MPI_Comm mpi_comm, const unsigned int m, const unsigned int n, const unsigned int block_size_m, @@ -101,7 +101,7 @@ namespace Utilities namespace MPI { ProcessGrid::ProcessGrid( - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const std::pair &grid_dimensions) : mpi_communicator(mpi_comm) , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)) @@ -206,7 +206,7 @@ namespace Utilities - ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm, + ProcessGrid::ProcessGrid(const MPI_Comm mpi_comm, const unsigned int n_rows_matrix, const unsigned int n_columns_matrix, const unsigned int row_block_size, @@ -221,7 +221,7 @@ namespace Utilities - ProcessGrid::ProcessGrid(const MPI_Comm & mpi_comm, + ProcessGrid::ProcessGrid(const MPI_Comm mpi_comm, const unsigned int n_rows, const unsigned int n_columns) : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns)) diff --git a/source/base/timer.cc b/source/base/timer.cc index e7ebe68ce6..17c3441706 100644 --- a/source/base/timer.cc +++ b/source/base/timer.cc @@ -162,7 +162,7 @@ Timer::Timer() -Timer::Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times_) +Timer::Timer(const MPI_Comm mpi_communicator, const bool sync_lap_times_) : running(false) , mpi_communicator(mpi_communicator) , sync_lap_times(sync_lap_times_) @@ -322,7 +322,7 @@ TimerOutput::TimerOutput(ConditionalOStream & stream, -TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator, +TimerOutput::TimerOutput(const MPI_Comm mpi_communicator, std::ostream & stream, const OutputFrequency output_frequency, const OutputType output_type) @@ -335,7 +335,7 @@ TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator, -TimerOutput::TimerOutput(const MPI_Comm & mpi_communicator, +TimerOutput::TimerOutput(const MPI_Comm mpi_communicator, ConditionalOStream & stream, const OutputFrequency output_frequency, const OutputType output_type) @@ -839,8 +839,8 @@ TimerOutput::print_summary() const void -TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm, - const double quantile) const +TimerOutput::print_wall_time_statistics(const MPI_Comm mpi_comm, + const double quantile) const { // we are going to change the precision and width of output below. 
store the // old values so the get restored when exiting this function diff --git a/source/distributed/fully_distributed_tria.cc b/source/distributed/fully_distributed_tria.cc index 5f158e268e..2f9fd5cb1b 100644 --- a/source/distributed/fully_distributed_tria.cc +++ b/source/distributed/fully_distributed_tria.cc @@ -45,8 +45,7 @@ namespace parallel { template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) - Triangulation::Triangulation( - const MPI_Comm &mpi_communicator) + Triangulation::Triangulation(const MPI_Comm mpi_communicator) : parallel::DistributedTriangulationBase(mpi_communicator) , settings(TriangulationDescription::Settings::default_setting) , partitioner([](dealii::Triangulation &tria, diff --git a/source/distributed/grid_refinement.cc b/source/distributed/grid_refinement.cc index 2fb60f9433..46daf6e9a0 100644 --- a/source/distributed/grid_refinement.cc +++ b/source/distributed/grid_refinement.cc @@ -84,7 +84,7 @@ namespace template double compute_global_sum(const dealii::Vector &criteria, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { double my_sum = std::accumulate(criteria.begin(), @@ -274,7 +274,7 @@ namespace internal std::pair compute_global_min_and_max_at_root( const dealii::Vector &criteria, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { // we'd like to compute the global max and min from the local ones in // one MPI communication. we can do that by taking the elementwise @@ -307,7 +307,7 @@ namespace internal compute_threshold(const dealii::Vector & criteria, const std::pair &global_min_and_max, const types::global_cell_index n_target_cells, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { double interesting_range[2] = {global_min_and_max.first, global_min_and_max.second}; @@ -390,7 +390,7 @@ namespace internal compute_threshold(const dealii::Vector & criteria, const std::pair &global_min_and_max, const double target_error, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { double interesting_range[2] = {global_min_and_max.first, global_min_and_max.second}; diff --git a/source/distributed/grid_refinement.inst.in b/source/distributed/grid_refinement.inst.in index 7ff532ba9a..3baed17c9e 100644 --- a/source/distributed/grid_refinement.inst.in +++ b/source/distributed/grid_refinement.inst.in @@ -27,7 +27,7 @@ for (S : REAL_SCALARS) \{ template std::pair compute_global_min_and_max_at_root(const dealii::Vector &, - const MPI_Comm &); + const MPI_Comm); namespace RefineAndCoarsenFixedNumber \{ @@ -35,7 +35,7 @@ for (S : REAL_SCALARS) compute_threshold(const dealii::Vector &, const std::pair &, const types::global_cell_index, - const MPI_Comm &); + const MPI_Comm); \} namespace RefineAndCoarsenFixedFraction \{ @@ -43,7 +43,7 @@ for (S : REAL_SCALARS) compute_threshold(const dealii::Vector &, const std::pair &, const double, - const MPI_Comm &); + const MPI_Comm); \} \} \} diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc index e4982a5adb..6edfe9617b 100644 --- a/source/distributed/shared_tria.cc +++ b/source/distributed/shared_tria.cc @@ -41,7 +41,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) Triangulation::Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid, const bool allow_artificial_cells, diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index e1d4e0f6c2..d3b6069cbc 
100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1698,7 +1698,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) Triangulation::Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid, const Settings settings) @@ -4073,7 +4073,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<1, spacedim>)) Triangulation<1, spacedim>::Triangulation( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation<1, spacedim>::MeshSmoothing smooth_grid, const Settings /*settings*/) diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc index aed0cc2e10..698892adee 100644 --- a/source/distributed/tria_base.cc +++ b/source/distributed/tria_base.cc @@ -46,7 +46,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) TriangulationBase::TriangulationBase( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid, const bool check_for_distorted_cells) @@ -671,7 +671,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) DistributedTriangulationBase::DistributedTriangulationBase( - const MPI_Comm &mpi_communicator, + const MPI_Comm mpi_communicator, const typename dealii::Triangulation::MeshSmoothing smooth_grid, const bool check_for_distorted_cells) @@ -877,7 +877,7 @@ namespace parallel template DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim)) DistributedTriangulationBase::DataTransfer::DataTransfer( - const MPI_Comm &mpi_communicator) + const MPI_Comm mpi_communicator) : variable_size_data_stored(false) , mpi_communicator(mpi_communicator) {} diff --git a/source/dofs/number_cache.cc b/source/dofs/number_cache.cc index 8bd52c44ef..e726198f8d 100644 --- a/source/dofs/number_cache.cc +++ b/source/dofs/number_cache.cc @@ -84,7 +84,7 @@ namespace internal std::vector NumberCache::get_n_locally_owned_dofs_per_processor( - const MPI_Comm &mpi_communicator) const + const MPI_Comm mpi_communicator) const { if (n_global_dofs == 0) return std::vector(); @@ -107,7 +107,7 @@ namespace internal std::vector NumberCache::get_locally_owned_dofs_per_processor( - const MPI_Comm &mpi_communicator) const + const MPI_Comm mpi_communicator) const { AssertDimension(locally_owned_dofs.size(), n_global_dofs); if (n_global_dofs == 0) diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc index 8afe92502e..f41f2df0d3 100644 --- a/source/grid/grid_tools.cc +++ b/source/grid/grid_tools.cc @@ -5997,7 +5997,7 @@ namespace GridTools std::vector, std::vector> guess_owners_of_entities( - const MPI_Comm & comm, + const MPI_Comm comm, const std::vector>> &global_bboxes, const std::vector & entities, const double tolerance) @@ -6524,7 +6524,7 @@ namespace GridTools std::vector>> exchange_local_bounding_boxes( const std::vector> &local_bboxes, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { #ifndef DEAL_II_WITH_MPI (void)local_bboxes; @@ -6620,7 +6620,7 @@ namespace GridTools RTree, unsigned int>> build_global_description_tree( const std::vector> &local_description, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) { #ifndef DEAL_II_WITH_MPI (void)mpi_communicator; diff --git a/source/grid/grid_tools.inst.in b/source/grid/grid_tools.inst.in index 9e5d8fe338..c8a95ed9da 
100644 --- a/source/grid/grid_tools.inst.in +++ b/source/grid/grid_tools.inst.in @@ -181,7 +181,7 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS) template std::vector>> GridTools::exchange_local_bounding_boxes( const std::vector> &, - const MPI_Comm &); + const MPI_Comm); template std::tuple>, std::map, @@ -202,7 +202,7 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS) std::pair, unsigned int>> GridTools::build_global_description_tree( const std::vector> &, - const MPI_Comm &); + const MPI_Comm); template Vector GridTools::compute_aspect_ratio_of_cells( const Mapping &, diff --git a/source/grid/tria_description.cc b/source/grid/tria_description.cc index 1484356c5f..828eee4687 100644 --- a/source/grid/tria_description.cc +++ b/source/grid/tria_description.cc @@ -111,7 +111,7 @@ namespace TriangulationDescription collect( const std::vector & relevant_processes, const std::vector> &description_temp, - const MPI_Comm & comm, + const MPI_Comm comm, const bool vertices_have_unique_ids) { const auto create_request = [&](const unsigned int other_rank) { @@ -404,7 +404,7 @@ namespace TriangulationDescription const std::function::cell_iterator &)> & level_subdomain_id_function, - const MPI_Comm & comm, + const MPI_Comm comm, const TriangulationDescription::Settings settings) : tria(tria) , subdomain_id_function(subdomain_id_function) @@ -709,7 +709,7 @@ namespace TriangulationDescription const typename dealii::Triangulation::cell_iterator &)> level_subdomain_id_function; - const MPI_Comm & comm; + const MPI_Comm comm; const TriangulationDescription::Settings settings; const bool construct_multigrid; @@ -725,7 +725,7 @@ namespace TriangulationDescription Description create_description_from_triangulation( const dealii::Triangulation &tria, - const MPI_Comm & comm, + const MPI_Comm comm, const TriangulationDescription::Settings settings, const unsigned int my_rank_in) { @@ -791,9 +791,9 @@ namespace TriangulationDescription const std::function &)> & serial_grid_generator, const std::function &, - const MPI_Comm &, + const MPI_Comm, const unsigned int)> &serial_grid_partitioner, - const MPI_Comm & comm, + const MPI_Comm comm, const int group_size, const typename Triangulation::MeshSmoothing smoothing, const TriangulationDescription::Settings settings) diff --git a/source/grid/tria_description.inst.in b/source/grid/tria_description.inst.in index 6afbf036f6..2a5be49792 100644 --- a/source/grid/tria_description.inst.in +++ b/source/grid/tria_description.inst.in @@ -26,7 +26,7 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : DIMENSIONS) create_description_from_triangulation( const dealii::Triangulation &tria, - const MPI_Comm & comm, + const MPI_Comm comm, const TriangulationDescription::Settings settings, const unsigned int my_rank_in); @@ -37,9 +37,9 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : DIMENSIONS) &)> & serial_grid_generator, const std::function &, - const MPI_Comm &, + const MPI_Comm, const unsigned int)> &serial_grid_partitioner, - const MPI_Comm & comm, + const MPI_Comm comm, const int group_size, const typename Triangulation::MeshSmoothing diff --git a/source/lac/block_sparsity_pattern.cc b/source/lac/block_sparsity_pattern.cc index d0f785f7b8..9ea75b2a07 100644 --- a/source/lac/block_sparsity_pattern.cc +++ b/source/lac/block_sparsity_pattern.cc @@ -556,7 +556,7 @@ namespace TrilinosWrappers BlockSparsityPattern::BlockSparsityPattern( const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) : 
BlockSparsityPatternBase(parallel_partitioning.size(), parallel_partitioning.size()) { @@ -574,7 +574,7 @@ namespace TrilinosWrappers const std::vector &row_parallel_partitioning, const std::vector &col_parallel_partitioning, const std::vector &writable_rows, - const MPI_Comm & communicator) + const MPI_Comm communicator) : BlockSparsityPatternBase( row_parallel_partitioning.size(), col_parallel_partitioning.size()) @@ -607,7 +607,7 @@ namespace TrilinosWrappers void BlockSparsityPattern::reinit( const std::vector ¶llel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) { dealii::BlockSparsityPatternBase::reinit( parallel_partitioning.size(), parallel_partitioning.size()); @@ -625,7 +625,7 @@ namespace TrilinosWrappers BlockSparsityPattern::reinit( const std::vector &row_parallel_partitioning, const std::vector &col_parallel_partitioning, - const MPI_Comm & communicator) + const MPI_Comm communicator) { dealii::BlockSparsityPatternBase::reinit( row_parallel_partitioning.size(), col_parallel_partitioning.size()); @@ -644,7 +644,7 @@ namespace TrilinosWrappers const std::vector &row_parallel_partitioning, const std::vector &col_parallel_partitioning, const std::vector &writable_rows, - const MPI_Comm & communicator) + const MPI_Comm communicator) { AssertDimension(writable_rows.size(), row_parallel_partitioning.size()); dealii::BlockSparsityPatternBase::reinit( diff --git a/source/lac/petsc_communication_pattern.cc b/source/lac/petsc_communication_pattern.cc index 884019eefa..78d60402a3 100644 --- a/source/lac/petsc_communication_pattern.cc +++ b/source/lac/petsc_communication_pattern.cc @@ -45,7 +45,7 @@ namespace PETScWrappers void CommunicationPattern::reinit(const types::global_dof_index local_size, const IndexSet & ghost_indices, - const MPI_Comm & communicator) + const MPI_Comm communicator) { clear(); @@ -81,7 +81,7 @@ namespace PETScWrappers void CommunicationPattern::reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) { std::vector in_deal; locally_owned_indices.fill_index_vector(in_deal); @@ -100,7 +100,7 @@ namespace PETScWrappers CommunicationPattern::reinit( const std::vector &indices_has, const std::vector &indices_want, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // Clean vectors from numbers::invalid_dof_index (indicating padding) std::vector indices_has_clean, indices_has_loc; @@ -154,7 +154,7 @@ namespace PETScWrappers const std::vector &inloc, const std::vector &outidx, const std::vector &outloc, - const MPI_Comm & communicator) + const MPI_Comm communicator) { clear(); @@ -348,7 +348,7 @@ namespace PETScWrappers void Partitioner::reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) { ghost_indices_data = ghost_indices; ghost_indices_data.subtract_set(locally_owned_indices); @@ -365,7 +365,7 @@ namespace PETScWrappers Partitioner::reinit(const IndexSet &locally_owned_indices, const IndexSet &ghost_indices, const IndexSet &larger_ghost_indices, - const MPI_Comm &communicator) + const MPI_Comm communicator) { std::vector local_indices; locally_owned_indices.fill_index_vector(local_indices); diff --git a/source/lac/petsc_matrix_free.cc b/source/lac/petsc_matrix_free.cc index bcbde6697d..a234d72aca 100644 --- a/source/lac/petsc_matrix_free.cc +++ b/source/lac/petsc_matrix_free.cc @@ -33,7 +33,7 @@ namespace PETScWrappers - MatrixFree::MatrixFree(const 
MPI_Comm & communicator, + MatrixFree::MatrixFree(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const unsigned int local_rows, @@ -45,7 +45,7 @@ namespace PETScWrappers MatrixFree::MatrixFree( - const MPI_Comm & communicator, + const MPI_Comm communicator, const unsigned int m, const unsigned int n, const std::vector &local_rows_per_process, @@ -98,7 +98,7 @@ namespace PETScWrappers void - MatrixFree::reinit(const MPI_Comm & communicator, + MatrixFree::reinit(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const unsigned int local_rows, @@ -114,7 +114,7 @@ namespace PETScWrappers void - MatrixFree::reinit(const MPI_Comm & communicator, + MatrixFree::reinit(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const std::vector &local_rows_per_process, @@ -211,7 +211,7 @@ namespace PETScWrappers void - MatrixFree::do_reinit(const MPI_Comm & communicator, + MatrixFree::do_reinit(const MPI_Comm communicator, const unsigned int m, const unsigned int n, const unsigned int local_rows, diff --git a/source/lac/petsc_parallel_block_sparse_matrix.cc b/source/lac/petsc_parallel_block_sparse_matrix.cc index b91d4cfb7e..421ff54ba1 100644 --- a/source/lac/petsc_parallel_block_sparse_matrix.cc +++ b/source/lac/petsc_parallel_block_sparse_matrix.cc @@ -111,7 +111,7 @@ namespace PETScWrappers BlockSparseMatrix::reinit(const std::vector & rows, const std::vector & cols, const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com) + const MPI_Comm com) { Assert(rows.size() == bdsp.n_block_rows(), ExcMessage("invalid size")); Assert(cols.size() == bdsp.n_block_cols(), ExcMessage("invalid size")); @@ -149,7 +149,7 @@ namespace PETScWrappers void BlockSparseMatrix::reinit(const std::vector & sizes, const BlockDynamicSparsityPattern &bdsp, - const MPI_Comm & com) + const MPI_Comm com) { reinit(sizes, sizes, bdsp, com); } diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc index 8d1469c866..b3e8b85727 100644 --- a/source/lac/petsc_parallel_sparse_matrix.cc +++ b/source/lac/petsc_parallel_sparse_matrix.cc @@ -57,7 +57,7 @@ namespace PETScWrappers template SparseMatrix::SparseMatrix( - const MPI_Comm & communicator, + const MPI_Comm communicator, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -94,7 +94,7 @@ namespace PETScWrappers const IndexSet & local_columns, const IndexSet & local_active_columns, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // get rid of old matrix and generate a new one const PetscErrorCode ierr = MatDestroy(&matrix); @@ -132,7 +132,7 @@ namespace PETScWrappers template void SparseMatrix::reinit( - const MPI_Comm & communicator, + const MPI_Comm communicator, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -158,7 +158,7 @@ namespace PETScWrappers void SparseMatrix::reinit(const IndexSet & local_rows, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator) + const MPI_Comm communicator) { do_reinit(communicator, local_rows, local_rows, sparsity_pattern); } @@ -168,7 +168,7 @@ namespace PETScWrappers SparseMatrix::reinit(const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator) + const MPI_Comm communicator) { // get rid of old 
matrix and generate a new one const PetscErrorCode ierr = MatDestroy(&matrix); @@ -181,7 +181,7 @@ namespace PETScWrappers template void - SparseMatrix::do_reinit(const MPI_Comm & communicator, + SparseMatrix::do_reinit(const MPI_Comm communicator, const IndexSet & local_rows, const IndexSet & local_columns, const SparsityPatternType &sparsity_pattern) @@ -340,7 +340,7 @@ namespace PETScWrappers template void SparseMatrix::do_reinit( - const MPI_Comm & communicator, + const MPI_Comm communicator, const SparsityPatternType & sparsity_pattern, const std::vector &local_rows_per_process, const std::vector &local_columns_per_process, @@ -465,7 +465,7 @@ namespace PETScWrappers // BDDC template void - SparseMatrix::do_reinit(const MPI_Comm & communicator, + SparseMatrix::do_reinit(const MPI_Comm communicator, const IndexSet & local_rows, const IndexSet & local_active_rows, const IndexSet & local_columns, @@ -693,13 +693,13 @@ namespace PETScWrappers # ifndef DOXYGEN // explicit instantiations // - template SparseMatrix::SparseMatrix(const MPI_Comm &, + template SparseMatrix::SparseMatrix(const MPI_Comm, const SparsityPattern &, const std::vector &, const std::vector &, const unsigned int, const bool); - template SparseMatrix::SparseMatrix(const MPI_Comm &, + template SparseMatrix::SparseMatrix(const MPI_Comm, const DynamicSparsityPattern &, const std::vector &, const std::vector &, @@ -707,14 +707,14 @@ namespace PETScWrappers const bool); template void - SparseMatrix::reinit(const MPI_Comm &, + SparseMatrix::reinit(const MPI_Comm, const SparsityPattern &, const std::vector &, const std::vector &, const unsigned int, const bool); template void - SparseMatrix::reinit(const MPI_Comm &, + SparseMatrix::reinit(const MPI_Comm, const DynamicSparsityPattern &, const std::vector &, const std::vector &, @@ -724,34 +724,34 @@ namespace PETScWrappers template void SparseMatrix::reinit(const IndexSet &, const SparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void SparseMatrix::reinit(const IndexSet &, const IndexSet &, const SparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void SparseMatrix::reinit(const IndexSet &, const DynamicSparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void SparseMatrix::reinit(const IndexSet &, const IndexSet &, const DynamicSparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void - SparseMatrix::do_reinit(const MPI_Comm &, + SparseMatrix::do_reinit(const MPI_Comm, const SparsityPattern &, const std::vector &, const std::vector &, const unsigned int, const bool); template void - SparseMatrix::do_reinit(const MPI_Comm &, + SparseMatrix::do_reinit(const MPI_Comm, const DynamicSparsityPattern &, const std::vector &, const std::vector &, @@ -759,13 +759,13 @@ namespace PETScWrappers const bool); template void - SparseMatrix::do_reinit(const MPI_Comm &, + SparseMatrix::do_reinit(const MPI_Comm, const IndexSet &, const IndexSet &, const SparsityPattern &); template void - SparseMatrix::do_reinit(const MPI_Comm &, + SparseMatrix::do_reinit(const MPI_Comm, const IndexSet &, const IndexSet &, const DynamicSparsityPattern &); @@ -776,24 +776,24 @@ namespace PETScWrappers const IndexSet &, const IndexSet &, const SparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void SparseMatrix::reinit(const IndexSet &, const IndexSet &, const IndexSet &, const IndexSet &, const DynamicSparsityPattern &, - const MPI_Comm &); + const MPI_Comm); template void - SparseMatrix::do_reinit(const MPI_Comm &, + 
SparseMatrix::do_reinit(const MPI_Comm, const IndexSet &, const IndexSet &, const IndexSet &, const IndexSet &, const SparsityPattern &); template void - SparseMatrix::do_reinit(const MPI_Comm &, + SparseMatrix::do_reinit(const MPI_Comm, const IndexSet &, const IndexSet &, const IndexSet &, diff --git a/source/lac/petsc_parallel_vector.cc b/source/lac/petsc_parallel_vector.cc index fddd6cbba8..85ee9bbf9b 100644 --- a/source/lac/petsc_parallel_vector.cc +++ b/source/lac/petsc_parallel_vector.cc @@ -38,7 +38,7 @@ namespace PETScWrappers - Vector::Vector(const MPI_Comm &communicator, + Vector::Vector(const MPI_Comm communicator, const size_type n, const size_type locally_owned_size) { @@ -49,7 +49,7 @@ namespace PETScWrappers Vector::Vector(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &communicator) + const MPI_Comm communicator) { Assert(local.is_ascending_and_one_to_one(communicator), ExcNotImplemented()); @@ -83,7 +83,7 @@ namespace PETScWrappers - Vector::Vector(const IndexSet &local, const MPI_Comm &communicator) + Vector::Vector(const IndexSet &local, const MPI_Comm communicator) { Assert(local.is_ascending_and_one_to_one(communicator), ExcNotImplemented()); @@ -145,7 +145,7 @@ namespace PETScWrappers void - Vector::reinit(const MPI_Comm &communicator, + Vector::reinit(const MPI_Comm communicator, const size_type n, const size_type local_sz, const bool omit_zeroing_entries) @@ -211,7 +211,7 @@ namespace PETScWrappers void Vector::reinit(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &comm) + const MPI_Comm comm) { const PetscErrorCode ierr = VecDestroy(&vector); AssertThrow(ierr == 0, ExcPETScError(ierr)); @@ -225,7 +225,7 @@ namespace PETScWrappers } void - Vector::reinit(const IndexSet &local, const MPI_Comm &comm) + Vector::reinit(const IndexSet &local, const MPI_Comm comm) { const PetscErrorCode ierr = VecDestroy(&vector); AssertThrow(ierr == 0, ExcPETScError(ierr)); @@ -246,7 +246,7 @@ namespace PETScWrappers void - Vector::create_vector(const MPI_Comm &communicator, + Vector::create_vector(const MPI_Comm communicator, const size_type n, const size_type locally_owned_size) { @@ -266,7 +266,7 @@ namespace PETScWrappers void - Vector::create_vector(const MPI_Comm &communicator, + Vector::create_vector(const MPI_Comm communicator, const size_type n, const size_type locally_owned_size, const IndexSet &ghostnodes) diff --git a/source/lac/petsc_precondition.cc b/source/lac/petsc_precondition.cc index 55ba96edd2..595050632b 100644 --- a/source/lac/petsc_precondition.cc +++ b/source/lac/petsc_precondition.cc @@ -33,7 +33,7 @@ DEAL_II_NAMESPACE_OPEN namespace PETScWrappers { - PreconditionBase::PreconditionBase(const MPI_Comm &comm) + PreconditionBase::PreconditionBase(const MPI_Comm comm) : pc(nullptr) { create_pc_with_comm(comm); @@ -115,7 +115,7 @@ namespace PETScWrappers } void - PreconditionBase::create_pc_with_comm(const MPI_Comm &comm) + PreconditionBase::create_pc_with_comm(const MPI_Comm comm) { clear(); PetscErrorCode ierr = PCCreate(comm, &pc); @@ -137,7 +137,7 @@ namespace PETScWrappers - PreconditionJacobi::PreconditionJacobi(const MPI_Comm & comm, + PreconditionJacobi::PreconditionJacobi(const MPI_Comm comm, const AdditionalData &additional_data_) : PreconditionBase(comm) { @@ -194,7 +194,7 @@ namespace PETScWrappers {} PreconditionBlockJacobi::PreconditionBlockJacobi( - const MPI_Comm & comm, + const MPI_Comm comm, const AdditionalData &additional_data_) : PreconditionBase(comm) { @@ -540,7 +540,7 @@ namespace PETScWrappers 
PreconditionBoomerAMG::PreconditionBoomerAMG( - const MPI_Comm & comm, + const MPI_Comm comm, const AdditionalData &additional_data_) : PreconditionBase(comm) { @@ -1057,13 +1057,13 @@ namespace PETScWrappers initialize(matrix); } - PreconditionShell::PreconditionShell(const MPI_Comm &comm) + PreconditionShell::PreconditionShell(const MPI_Comm comm) { initialize(comm); } void - PreconditionShell::initialize(const MPI_Comm &comm) + PreconditionShell::initialize(const MPI_Comm comm) { PetscErrorCode ierr; if (pc) diff --git a/source/lac/petsc_solver.cc b/source/lac/petsc_solver.cc index 060525f3e0..16b7739b53 100644 --- a/source/lac/petsc_solver.cc +++ b/source/lac/petsc_solver.cc @@ -204,7 +204,7 @@ namespace PETScWrappers void - SolverBase::initialize_ksp_with_comm(const MPI_Comm &comm) + SolverBase::initialize_ksp_with_comm(const MPI_Comm comm) { // Create the PETSc KSP object AssertPETSc(KSPCreate(comm, &ksp)); @@ -263,7 +263,7 @@ namespace PETScWrappers SolverRichardson::SolverRichardson(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverRichardson(cn, data) {} @@ -313,7 +313,7 @@ namespace PETScWrappers SolverChebychev::SolverChebychev(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverChebychev(cn, data) {} @@ -340,7 +340,7 @@ namespace PETScWrappers SolverCG::SolverCG(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverCG(cn, data) {} @@ -367,7 +367,7 @@ namespace PETScWrappers SolverBiCG::SolverBiCG(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverBiCG(cn, data) {} @@ -403,7 +403,7 @@ namespace PETScWrappers SolverGMRES::SolverGMRES(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverGMRES(cn, data) {} @@ -438,7 +438,7 @@ namespace PETScWrappers SolverBicgstab::SolverBicgstab(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverBicgstab(cn, data) {} @@ -465,7 +465,7 @@ namespace PETScWrappers SolverCGS::SolverCGS(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverCGS(cn, data) {} @@ -492,7 +492,7 @@ namespace PETScWrappers SolverTFQMR::SolverTFQMR(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverTFQMR(cn, data) {} @@ -519,7 +519,7 @@ namespace PETScWrappers SolverTCQMR::SolverTCQMR(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverTCQMR(cn, data) {} @@ -546,7 +546,7 @@ namespace PETScWrappers SolverCR::SolverCR(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverCR(cn, data) {} @@ -574,7 +574,7 @@ namespace PETScWrappers SolverLSQR::SolverLSQR(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverLSQR(cn, data) {} @@ -609,7 +609,7 @@ namespace PETScWrappers SolverPreOnly::SolverPreOnly(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SolverPreOnly(cn, data) {} @@ -649,7 +649,7 @@ namespace PETScWrappers SparseDirectMUMPS::SparseDirectMUMPS(SolverControl &cn, - const MPI_Comm &, + const MPI_Comm, const AdditionalData &data) : SparseDirectMUMPS(cn, data) {} diff --git a/source/lac/slepc_solver.cc b/source/lac/slepc_solver.cc index d9fa4ff91c..5af5583a53 100644 --- a/source/lac/slepc_solver.cc +++ b/source/lac/slepc_solver.cc @@ -32,7 +32,7 @@ DEAL_II_NAMESPACE_OPEN namespace SLEPcWrappers { - 
SolverBase::SolverBase(SolverControl &cn, const MPI_Comm &mpi_communicator) + SolverBase::SolverBase(SolverControl &cn, const MPI_Comm mpi_communicator) : solver_control(cn) , mpi_communicator(mpi_communicator) , reason(EPS_CONVERGED_ITERATING) @@ -360,7 +360,7 @@ namespace SLEPcWrappers /* ---------------------- SolverKrylovSchur ------------------------ */ SolverKrylovSchur::SolverKrylovSchur(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -381,7 +381,7 @@ namespace SLEPcWrappers SolverArnoldi::SolverArnoldi(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -408,7 +408,7 @@ namespace SLEPcWrappers SolverLanczos::SolverLanczos(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -424,7 +424,7 @@ namespace SLEPcWrappers /* ----------------------- Power ------------------------- */ SolverPower::SolverPower(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -445,7 +445,7 @@ namespace SLEPcWrappers SolverGeneralizedDavidson::SolverGeneralizedDavidson( SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -463,8 +463,8 @@ namespace SLEPcWrappers /* ------------------ Jacobi Davidson -------------------- */ - SolverJacobiDavidson::SolverJacobiDavidson(SolverControl & cn, - const MPI_Comm &mpi_communicator, + SolverJacobiDavidson::SolverJacobiDavidson(SolverControl &cn, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) @@ -477,7 +477,7 @@ namespace SLEPcWrappers /* ---------------------- LAPACK ------------------------- */ SolverLAPACK::SolverLAPACK(SolverControl & cn, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : SolverBase(cn, mpi_communicator) , additional_data(data) diff --git a/source/lac/slepc_spectral_transformation.cc b/source/lac/slepc_spectral_transformation.cc index 3a8cf0321b..545746a22e 100644 --- a/source/lac/slepc_spectral_transformation.cc +++ b/source/lac/slepc_spectral_transformation.cc @@ -29,7 +29,7 @@ DEAL_II_NAMESPACE_OPEN namespace SLEPcWrappers { - TransformationBase::TransformationBase(const MPI_Comm &mpi_communicator) + TransformationBase::TransformationBase(const MPI_Comm mpi_communicator) { const PetscErrorCode ierr = STCreate(mpi_communicator, &st); AssertThrow(ierr == 0, SolverBase::ExcSLEPcError(ierr)); @@ -66,7 +66,7 @@ namespace SLEPcWrappers : shift_parameter(shift_parameter) {} - TransformationShift::TransformationShift(const MPI_Comm &mpi_communicator, + TransformationShift::TransformationShift(const MPI_Comm mpi_communicator, const AdditionalData &data) : TransformationBase(mpi_communicator) , additional_data(data) @@ -86,7 +86,7 @@ namespace SLEPcWrappers {} TransformationShiftInvert::TransformationShiftInvert( - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : TransformationBase(mpi_communicator) , additional_data(data) @@ -106,7 +106,7 @@ 
namespace SLEPcWrappers {} TransformationSpectrumFolding::TransformationSpectrumFolding( - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const AdditionalData &data) : TransformationBase(mpi_communicator) , additional_data(data) @@ -128,7 +128,7 @@ namespace SLEPcWrappers , antishift_parameter(antishift_parameter) {} - TransformationCayley::TransformationCayley(const MPI_Comm &mpi_communicator, + TransformationCayley::TransformationCayley(const MPI_Comm mpi_communicator, const AdditionalData &data) : TransformationBase(mpi_communicator) , additional_data(data) diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc index 0b45cbfb96..ecb126abfe 100644 --- a/source/lac/sparsity_tools.cc +++ b/source/lac/sparsity_tools.cc @@ -914,7 +914,7 @@ namespace SparsityTools void gather_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & locally_relevant_rows) { using map_vec_t = @@ -1007,7 +1007,7 @@ namespace SparsityTools distribute_sparsity_pattern( DynamicSparsityPattern & dsp, const std::vector &rows_per_cpu, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & myrange) { const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm); @@ -1028,7 +1028,7 @@ namespace SparsityTools void distribute_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & locally_relevant_rows) { IndexSet requested_rows(locally_relevant_rows); @@ -1095,7 +1095,7 @@ namespace SparsityTools void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp, const std::vector &owned_set_per_cpu, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet & myrange) { const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm); @@ -1110,7 +1110,7 @@ namespace SparsityTools void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp, const IndexSet & locally_owned_rows, - const MPI_Comm & mpi_comm, + const MPI_Comm mpi_comm, const IndexSet &locally_relevant_rows) { using map_vec_t = diff --git a/source/lac/trilinos_block_sparse_matrix.cc b/source/lac/trilinos_block_sparse_matrix.cc index fa43815a87..d4e4f300e1 100644 --- a/source/lac/trilinos_block_sparse_matrix.cc +++ b/source/lac/trilinos_block_sparse_matrix.cc @@ -73,7 +73,7 @@ namespace TrilinosWrappers BlockSparseMatrix::reinit( const std::vector & parallel_partitioning, const BlockSparsityPatternType &block_sparsity_pattern, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool exchange_data) { std::vector epetra_maps; @@ -164,7 +164,7 @@ namespace TrilinosWrappers BlockSparseMatrix::reinit( const std::vector & parallel_partitioning, const ::dealii::BlockSparseMatrix &dealii_block_sparse_matrix, - const MPI_Comm & communicator, + const MPI_Comm communicator, const double drop_tolerance) { const size_type n_block_rows = parallel_partitioning.size(); @@ -326,7 +326,7 @@ namespace TrilinosWrappers template void BlockSparseMatrix::reinit(const std::vector &, const dealii::BlockDynamicSparsityPattern &, - const MPI_Comm &, + const MPI_Comm, const bool); # endif // DOXYGEN diff --git a/source/lac/trilinos_block_vector.cc b/source/lac/trilinos_block_vector.cc index 486fa0c418..d72464c69c 100644 --- a/source/lac/trilinos_block_vector.cc +++ b/source/lac/trilinos_block_vector.cc @@ -69,7 +69,7 @@ namespace TrilinosWrappers void BlockVector::reinit(const std::vector 
&parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool omit_zeroing_entries) { // update the number of blocks @@ -91,7 +91,7 @@ namespace TrilinosWrappers void BlockVector::reinit(const std::vector &parallel_partitioning, const std::vector &ghost_values, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool vector_writable) { AssertDimension(parallel_partitioning.size(), ghost_values.size()); diff --git a/source/lac/trilinos_epetra_communication_pattern.cc b/source/lac/trilinos_epetra_communication_pattern.cc index a5d010bb11..d5ec45d64c 100644 --- a/source/lac/trilinos_epetra_communication_pattern.cc +++ b/source/lac/trilinos_epetra_communication_pattern.cc @@ -32,7 +32,7 @@ namespace LinearAlgebra CommunicationPattern::CommunicationPattern( const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) + const MPI_Comm communicator) { // virtual functions called in constructors and destructors never use the // override in a derived class @@ -47,7 +47,7 @@ namespace LinearAlgebra void CommunicationPattern::reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) + const MPI_Comm communicator) { comm = std::make_shared(communicator); diff --git a/source/lac/trilinos_epetra_vector.cc b/source/lac/trilinos_epetra_vector.cc index 39c71c5ff4..a6cecfa583 100644 --- a/source/lac/trilinos_epetra_vector.cc +++ b/source/lac/trilinos_epetra_vector.cc @@ -54,7 +54,7 @@ namespace LinearAlgebra Vector::Vector(const IndexSet &parallel_partitioner, - const MPI_Comm &communicator) + const MPI_Comm communicator) : Subscriptor() , vector(new Epetra_FEVector( parallel_partitioner.make_trilinos_map(communicator, false))) @@ -64,7 +64,7 @@ namespace LinearAlgebra void Vector::reinit(const IndexSet &parallel_partitioner, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool omit_zeroing_entries) { Epetra_Map input_map = @@ -661,7 +661,7 @@ namespace LinearAlgebra void Vector::create_epetra_comm_pattern(const IndexSet &source_index_set, - const MPI_Comm &mpi_comm) + const MPI_Comm mpi_comm) { source_stored_elements = source_index_set; epetra_comm_pattern = diff --git a/source/lac/trilinos_sparse_matrix.cc b/source/lac/trilinos_sparse_matrix.cc index 374fb94920..6ba2a61422 100644 --- a/source/lac/trilinos_sparse_matrix.cc +++ b/source/lac/trilinos_sparse_matrix.cc @@ -269,7 +269,7 @@ namespace TrilinosWrappers SparseMatrix::SparseMatrix(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const unsigned int n_max_entries_per_row) : column_space_map(new Epetra_Map( parallel_partitioning.make_trilinos_map(communicator, false))) @@ -284,7 +284,7 @@ namespace TrilinosWrappers SparseMatrix::SparseMatrix(const IndexSet &parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) : column_space_map(new Epetra_Map( parallel_partitioning.make_trilinos_map(communicator, false))) @@ -302,7 +302,7 @@ namespace TrilinosWrappers SparseMatrix::SparseMatrix(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_max_entries_per_row) : column_space_map(new Epetra_Map( col_parallel_partitioning.make_trilinos_map(communicator, false))) @@ -319,7 +319,7 @@ namespace TrilinosWrappers SparseMatrix::SparseMatrix(const
IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) : column_space_map(new Epetra_Map( col_parallel_partitioning.make_trilinos_map(communicator, false))) @@ -452,7 +452,7 @@ namespace TrilinosWrappers const IndexSet & column_parallel_partitioning, const SparsityPatternType & sparsity_pattern, const bool exchange_data, - const MPI_Comm & communicator, + const MPI_Comm communicator, std::unique_ptr &column_space_map, std::unique_ptr &matrix, std::unique_ptr & nonlocal_matrix, @@ -605,7 +605,7 @@ namespace TrilinosWrappers const IndexSet & column_parallel_partitioning, const DynamicSparsityPattern &sparsity_pattern, const bool exchange_data, - const MPI_Comm & communicator, + const MPI_Comm communicator, std::unique_ptr & column_space_map, std::unique_ptr &matrix, std::unique_ptr & nonlocal_matrix, @@ -800,7 +800,7 @@ namespace TrilinosWrappers SparseMatrix::reinit(const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const SparsityPatternType &sparsity_pattern, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool exchange_data) { reinit_matrix(row_parallel_partitioning, @@ -877,7 +877,7 @@ namespace TrilinosWrappers const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const ::dealii::SparseMatrix &dealii_sparse_matrix, - const MPI_Comm & communicator, + const MPI_Comm communicator, const double drop_tolerance, const bool copy_values, const ::dealii::SparsityPattern * use_this_sparsity) @@ -3021,14 +3021,14 @@ namespace TrilinosWrappers SparseMatrix::reinit(const IndexSet &, const IndexSet &, const dealii::SparsityPattern &, - const MPI_Comm &, + const MPI_Comm, const bool); template void SparseMatrix::reinit(const IndexSet &, const IndexSet &, const DynamicSparsityPattern &, - const MPI_Comm &, + const MPI_Comm, const bool); template void diff --git a/source/lac/trilinos_sparse_matrix.inst.in b/source/lac/trilinos_sparse_matrix.inst.in index b694953bb0..7b23817993 100644 --- a/source/lac/trilinos_sparse_matrix.inst.in +++ b/source/lac/trilinos_sparse_matrix.inst.in @@ -28,7 +28,7 @@ for (S : REAL_SCALARS) SparseMatrix::reinit(const IndexSet &, const IndexSet &, const dealii::SparseMatrix &, - const MPI_Comm &, + const MPI_Comm, const double, const bool, const dealii::SparsityPattern *); diff --git a/source/lac/trilinos_sparsity_pattern.cc b/source/lac/trilinos_sparsity_pattern.cc index 6e22d44d56..0bab423924 100644 --- a/source/lac/trilinos_sparsity_pattern.cc +++ b/source/lac/trilinos_sparsity_pattern.cc @@ -144,7 +144,7 @@ namespace TrilinosWrappers SparsityPattern::SparsityPattern(const IndexSet &parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_entries_per_row) { reinit(parallel_partitioning, @@ -157,7 +157,7 @@ namespace TrilinosWrappers SparsityPattern::SparsityPattern( const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) { reinit(parallel_partitioning, @@ -170,7 +170,7 @@ namespace TrilinosWrappers SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_entries_per_row) { reinit(row_parallel_partitioning, @@ -184,7 +184,7 @@ namespace TrilinosWrappers SparsityPattern::SparsityPattern( const
IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) { reinit(row_parallel_partitioning, @@ -198,7 +198,7 @@ namespace TrilinosWrappers SparsityPattern::SparsityPattern(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const IndexSet &writable_rows, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_max_entries_per_row) { reinit(row_parallel_partitioning, @@ -480,7 +480,7 @@ namespace TrilinosWrappers void SparsityPattern::reinit(const IndexSet &parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_entries_per_row) { SparsityPatternBase::resize(parallel_partitioning.size(), @@ -495,7 +495,7 @@ namespace TrilinosWrappers void SparsityPattern::reinit(const IndexSet & parallel_partitioning, - const MPI_Comm & communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) { SparsityPatternBase::resize(parallel_partitioning.size(), @@ -511,7 +511,7 @@ namespace TrilinosWrappers void SparsityPattern::reinit(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_entries_per_row) { SparsityPatternBase::resize(row_parallel_partitioning.size(), @@ -533,7 +533,7 @@ namespace TrilinosWrappers void SparsityPattern::reinit(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, - const MPI_Comm &communicator, + const MPI_Comm communicator, const std::vector &n_entries_per_row) { SparsityPatternBase::resize(row_parallel_partitioning.size(), @@ -556,7 +556,7 @@ namespace TrilinosWrappers SparsityPattern::reinit(const IndexSet &row_parallel_partitioning, const IndexSet &col_parallel_partitioning, const IndexSet &writable_rows, - const MPI_Comm &communicator, + const MPI_Comm communicator, const size_type n_entries_per_row) { SparsityPatternBase::resize(row_parallel_partitioning.size(), @@ -604,7 +604,7 @@ namespace TrilinosWrappers const IndexSet & row_parallel_partitioning, const IndexSet & col_parallel_partitioning, const SparsityPatternType &nontrilinos_sparsity_pattern, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool exchange_data) { SparsityPatternBase::resize(row_parallel_partitioning.size(), @@ -629,7 +629,7 @@ namespace TrilinosWrappers SparsityPattern::reinit( const IndexSet & parallel_partitioning, const SparsityPatternType &nontrilinos_sparsity_pattern, - const MPI_Comm & communicator, + const MPI_Comm communicator, const bool exchange_data) { AssertDimension(nontrilinos_sparsity_pattern.n_rows(), @@ -1111,12 +1111,12 @@ namespace TrilinosWrappers template void SparsityPattern::reinit(const IndexSet &, const dealii::SparsityPattern &, - const MPI_Comm &, + const MPI_Comm, bool); template void SparsityPattern::reinit(const IndexSet &, const dealii::DynamicSparsityPattern &, - const MPI_Comm &, + const MPI_Comm, bool); @@ -1124,13 +1124,13 @@ namespace TrilinosWrappers SparsityPattern::reinit(const IndexSet &, const IndexSet &, const dealii::SparsityPattern &, - const MPI_Comm &, + const MPI_Comm, bool); template void SparsityPattern::reinit(const IndexSet &, const IndexSet &, const dealii::DynamicSparsityPattern &, - const MPI_Comm &, + const MPI_Comm, bool); # endif diff --git a/source/lac/trilinos_tpetra_communication_pattern.cc
b/source/lac/trilinos_tpetra_communication_pattern.cc index 303ab93521..567df4a76e 100644 --- a/source/lac/trilinos_tpetra_communication_pattern.cc +++ b/source/lac/trilinos_tpetra_communication_pattern.cc @@ -32,7 +32,7 @@ namespace LinearAlgebra CommunicationPattern::CommunicationPattern( const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) + const MPI_Comm communicator) { // virtual functions called in constructors and destructors never use the // override in a derived class @@ -47,7 +47,7 @@ namespace LinearAlgebra void CommunicationPattern::reinit(const IndexSet &vector_space_vector_index_set, const IndexSet &read_write_vector_index_set, - const MPI_Comm &communicator) + const MPI_Comm communicator) { comm = std::make_shared(communicator); diff --git a/source/lac/trilinos_vector.cc b/source/lac/trilinos_vector.cc index 6ad4335026..dfa44187b3 100644 --- a/source/lac/trilinos_vector.cc +++ b/source/lac/trilinos_vector.cc @@ -80,7 +80,7 @@ namespace TrilinosWrappers Vector::Vector(const IndexSet &parallel_partitioning, - const MPI_Comm &communicator) + const MPI_Comm communicator) : Vector() { reinit(parallel_partitioning, communicator); @@ -110,7 +110,7 @@ namespace TrilinosWrappers Vector::Vector(const IndexSet &parallel_partitioner, const Vector & v, - const MPI_Comm &communicator) + const MPI_Comm communicator) : Vector() { AssertThrow(parallel_partitioner.size() == @@ -129,7 +129,7 @@ namespace TrilinosWrappers Vector::Vector(const IndexSet &local, const IndexSet &ghost, - const MPI_Comm &communicator) + const MPI_Comm communicator) : Vector() { reinit(local, ghost, communicator, false); @@ -153,7 +153,7 @@ namespace TrilinosWrappers void Vector::reinit(const IndexSet &parallel_partitioner, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool /*omit_zeroing_entries*/) { nonlocal_vector.reset(); @@ -353,7 +353,7 @@ namespace TrilinosWrappers void Vector::reinit(const IndexSet &locally_owned_entries, const IndexSet &ghost_entries, - const MPI_Comm &communicator, + const MPI_Comm communicator, const bool vector_writable) { nonlocal_vector.reset(); diff --git a/source/matrix_free/dof_info.cc b/source/matrix_free/dof_info.cc index c455ad48d1..f45e4e8513 100644 --- a/source/matrix_free/dof_info.cc +++ b/source/matrix_free/dof_info.cc @@ -153,7 +153,7 @@ namespace internal void DoFInfo::assign_ghosts(const std::vector &boundary_cells, - const MPI_Comm & communicator_sm, + const MPI_Comm communicator_sm, const bool use_vector_data_exchanger_full) { Assert(boundary_cells.size() < row_starts.size(), ExcInternalError()); @@ -892,7 +892,7 @@ namespace internal const std::vector> &inner_faces, const std::vector> &ghosted_faces, const bool fill_cell_centric, - const MPI_Comm & communicator_sm, + const MPI_Comm communicator_sm, const bool use_vector_data_exchanger_full) { const Utilities::MPI::Partitioner &part = *vector_partitioner; diff --git a/source/matrix_free/vector_data_exchange.cc b/source/matrix_free/vector_data_exchange.cc index 4484dff5b3..a9c7d8bb6d 100644 --- a/source/matrix_free/vector_data_exchange.cc +++ b/source/matrix_free/vector_data_exchange.cc @@ -371,7 +371,7 @@ namespace internal Full::Full( const std::shared_ptr &partitioner, - const MPI_Comm &communicator_sm) + const MPI_Comm communicator_sm) : comm(partitioner->get_mpi_communicator()) , comm_sm(communicator_sm) , n_local_elements(partitioner->locally_owned_range().n_elements()) diff --git a/source/multigrid/mg_level_global_transfer.cc
b/source/multigrid/mg_level_global_transfer.cc index 6592169300..49d2530d03 100644 --- a/source/multigrid/mg_level_global_transfer.cc +++ b/source/multigrid/mg_level_global_transfer.cc @@ -165,7 +165,7 @@ namespace const MGConstrainedDoFs, MGLevelGlobalTransfer>> mg_constrained_dofs, - const MPI_Comm & mpi_communicator, + const MPI_Comm mpi_communicator, const bool transfer_solution_vectors, std::vector> & copy_indices, std::vector> & copy_indices_global_mine, diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc index 92180ed276..9105ddd59b 100644 --- a/source/multigrid/mg_transfer_internal.cc +++ b/source/multigrid/mg_transfer_internal.cc @@ -400,7 +400,7 @@ namespace internal std::vector &ghosted_level_dofs, const std::shared_ptr & external_partitioner, - const MPI_Comm & communicator, + const MPI_Comm communicator, std::shared_ptr &target_partitioner, Table<2, unsigned int> ©_indices_global_mine) { diff --git a/source/sundials/arkode.cc b/source/sundials/arkode.cc index 9efcc895d6..8554e7d3ff 100644 --- a/source/sundials/arkode.cc +++ b/source/sundials/arkode.cc @@ -258,7 +258,7 @@ namespace SUNDIALS template ARKode::ARKode(const AdditionalData &data, - const MPI_Comm & mpi_comm) + const MPI_Comm mpi_comm) : data(data) , arkode_mem(nullptr) # if DEAL_II_SUNDIALS_VERSION_GTE(6, 0, 0) diff --git a/source/sundials/ida.cc b/source/sundials/ida.cc index 09307e172a..39d36ca214 100644 --- a/source/sundials/ida.cc +++ b/source/sundials/ida.cc @@ -128,7 +128,7 @@ namespace SUNDIALS template - IDA::IDA(const AdditionalData &data, const MPI_Comm &mpi_comm) + IDA::IDA(const AdditionalData &data, const MPI_Comm mpi_comm) : data(data) , ida_mem(nullptr) # if DEAL_II_SUNDIALS_VERSION_GTE(6, 0, 0) diff --git a/source/sundials/kinsol.cc b/source/sundials/kinsol.cc index fc1d9adbd5..574bd2a14d 100644 --- a/source/sundials/kinsol.cc +++ b/source/sundials/kinsol.cc @@ -303,7 +303,7 @@ namespace SUNDIALS template KINSOL::KINSOL(const AdditionalData &data, - const MPI_Comm & mpi_comm) + const MPI_Comm mpi_comm) : data(data) , mpi_communicator(mpi_comm) , kinsol_mem(nullptr) diff --git a/tests/arpack/parpack_advection_diffusion_petsc.cc b/tests/arpack/parpack_advection_diffusion_petsc.cc index 2d902f9cb4..792e06aa60 100644 --- a/tests/arpack/parpack_advection_diffusion_petsc.cc +++ b/tests/arpack/parpack_advection_diffusion_petsc.cc @@ -113,7 +113,7 @@ class PETScInverse public: PETScInverse(const dealii::PETScWrappers::MatrixBase &A, dealii::SolverControl & cn, - const MPI_Comm &mpi_communicator = PETSC_COMM_SELF) + const MPI_Comm mpi_communicator = PETSC_COMM_SELF) : solver(cn) , matrix(A) , preconditioner(matrix) diff --git a/tests/arpack/step-36_parpack.cc b/tests/arpack/step-36_parpack.cc index f079f2ad2a..002d5c6fe2 100644 --- a/tests/arpack/step-36_parpack.cc +++ b/tests/arpack/step-36_parpack.cc @@ -116,7 +116,7 @@ class PETScInverse public: PETScInverse(const dealii::PETScWrappers::MatrixBase &A, dealii::SolverControl & cn, - const MPI_Comm &mpi_communicator = PETSC_COMM_SELF) + const MPI_Comm mpi_communicator = PETSC_COMM_SELF) : solver(cn) , matrix(A) , preconditioner(matrix) diff --git a/tests/base/consensus_algorithm_01.cc b/tests/base/consensus_algorithm_01.cc index 347e62bf80..bac7d0f2e2 100644 --- a/tests/base/consensus_algorithm_01.cc +++ b/tests/base/consensus_algorithm_01.cc @@ -22,7 +22,7 @@ void -test(const MPI_Comm &comm) +test(const MPI_Comm comm) { const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm); const unsigned int 
n_rank = dealii::Utilities::MPI::n_mpi_processes(comm); diff --git a/tests/base/mpi_noncontiguous_partitioner_02.cc b/tests/base/mpi_noncontiguous_partitioner_02.cc index cb95c55932..8a702e23e9 100644 --- a/tests/base/mpi_noncontiguous_partitioner_02.cc +++ b/tests/base/mpi_noncontiguous_partitioner_02.cc @@ -33,7 +33,7 @@ template void -test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir) +test(const MPI_Comm comm, const bool do_revert, const unsigned int dir) { const unsigned int degree = 2; const unsigned int n_refinements = 2; @@ -143,7 +143,7 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir) template void -test_dim(const MPI_Comm &comm, const bool do_revert) +test_dim(const MPI_Comm comm, const bool do_revert) { for (int dir = 0; dir < dim; ++dir) test(comm, do_revert, dir); diff --git a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc index 7e360b1775..214f24c738 100644 --- a/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc +++ b/tests/distributed_grids/grid_tools_exchange_bounding_boxes_1.cc @@ -31,9 +31,9 @@ test_exchange_bbox() // For process i the number of boxes n_bboxes[i%7] is created std::vector n_bboxes = {2, 4, 3, 5, 1, 3, 8}; - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; - unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator); - unsigned int proc = Utilities::MPI::this_mpi_process(mpi_communicator); + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; + unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator); + unsigned int proc = Utilities::MPI::this_mpi_process(mpi_communicator); deallog << "Test for: dimension " << spacedim << std::endl; deallog << n_procs << " mpi processes" << std::endl; diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc index c2b52f9a4e..b726011a28 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_01.cc @@ -34,7 +34,7 @@ template void test() { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "dim = " << dim << std::endl; parallel::distributed::Triangulation tria(mpi_communicator); diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc index 5491198bcc..07f099cd97 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_02.cc @@ -38,7 +38,7 @@ template void test() { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "dim = " << dim << std::endl; parallel::distributed::Triangulation tria(mpi_communicator); diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc index 41bdd4956e..e289bebb37 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_03.cc @@ -35,7 +35,7 @@ template void test() { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "dim = " << dim << std::endl; parallel::shared::Triangulation tria( diff --git a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc 
b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc index 089ef59b10..ce6da09425 100644 --- a/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc +++ b/tests/distributed_grids/grid_tools_exchange_cell_data_04.cc @@ -38,7 +38,7 @@ template void test() { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "dim = " << dim << std::endl; parallel::distributed::Triangulation tria(mpi_communicator); diff --git a/tests/fullydistributed_grids/repartitioning_05.cc b/tests/fullydistributed_grids/repartitioning_05.cc index 896a5aa0bf..e9cdff1d0b 100644 --- a/tests/fullydistributed_grids/repartitioning_05.cc +++ b/tests/fullydistributed_grids/repartitioning_05.cc @@ -39,7 +39,7 @@ using namespace dealii; MPI_Comm -create_sub_comm(const MPI_Comm &comm, const unsigned int size) +create_sub_comm(const MPI_Comm comm, const unsigned int size) { const auto rank = Utilities::MPI::this_mpi_process(comm); @@ -64,7 +64,7 @@ create_sub_comm(const MPI_Comm &comm, const unsigned int size) template LinearAlgebra::distributed::Vector partition_distributed_triangulation(const Triangulation &tria_in, - const MPI_Comm & comm) + const MPI_Comm comm) { const auto comm_tria = tria_in.get_communicator(); diff --git a/tests/fullydistributed_grids/repartitioning_08.cc b/tests/fullydistributed_grids/repartitioning_08.cc index 5905486ef5..f115b11ffe 100644 --- a/tests/fullydistributed_grids/repartitioning_08.cc +++ b/tests/fullydistributed_grids/repartitioning_08.cc @@ -44,7 +44,7 @@ template class MyPolicy : public RepartitioningPolicyTools::Base { public: - MyPolicy(const MPI_Comm &comm, const unsigned int direction) + MyPolicy(const MPI_Comm comm, const unsigned int direction) : comm(comm) , direction(direction) {} @@ -74,7 +74,7 @@ public: } private: - const MPI_Comm & comm; + const MPI_Comm comm; const unsigned int direction; }; diff --git a/tests/gla/gla.h b/tests/gla/gla.h index 07e989acd2..f2ab07ee70 100644 --- a/tests/gla/gla.h +++ b/tests/gla/gla.h @@ -53,18 +53,18 @@ public: Vector() {} - Vector(const IndexSet local, const MPI_Comm &comm) + Vector(const IndexSet local, const MPI_Comm comm) {} - Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm &comm) + Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm comm) {} void - reinit(const IndexSet local, const MPI_Comm &comm) + reinit(const IndexSet local, const MPI_Comm comm) {} void - reinit(const IndexSet local, const IndexSet &ghost, const MPI_Comm &comm) + reinit(const IndexSet local, const IndexSet &ghost, const MPI_Comm comm) {} void @@ -124,8 +124,8 @@ public: template SparseMatrix(const IndexSet &local, const IndexSet &, - SP & sp, - const MPI_Comm &comm = MPI_COMM_WORLD) + SP & sp, + const MPI_Comm comm = MPI_COMM_WORLD) {} void diff --git a/tests/grid/grid_tools_cache_06.cc b/tests/grid/grid_tools_cache_06.cc index 73772689d9..b0c9a407ef 100644 --- a/tests/grid/grid_tools_cache_06.cc +++ b/tests/grid/grid_tools_cache_06.cc @@ -40,7 +40,7 @@ template void test(unsigned int ref) { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; parallel::distributed::Triangulation tria(mpi_communicator); GridGenerator::hyper_ball(tria); diff --git a/tests/grid/grid_tools_cache_07.cc b/tests/grid/grid_tools_cache_07.cc index b8544974c8..01ab1187c1 100644 --- a/tests/grid/grid_tools_cache_07.cc +++ b/tests/grid/grid_tools_cache_07.cc @@ -40,7 +40,7 @@ template void test(unsigned int ref) { - const MPI_Comm 
&mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; parallel::distributed::Triangulation tria(mpi_communicator); GridGenerator::hyper_ball(tria); diff --git a/tests/grid/grid_tools_compute_mesh_predicate_bounding_box_1.cc b/tests/grid/grid_tools_compute_mesh_predicate_bounding_box_1.cc index 74d965e1b3..d840bb1f0e 100644 --- a/tests/grid/grid_tools_compute_mesh_predicate_bounding_box_1.cc +++ b/tests/grid/grid_tools_compute_mesh_predicate_bounding_box_1.cc @@ -47,7 +47,7 @@ template void test_hypercube(unsigned int ref, unsigned int max_bbox) { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "Testing hypercube for spacedim = " << spacedim << " refinement: " << ref << " max number of boxes: " << max_bbox << std::endl; diff --git a/tests/grid/grid_tools_halo_layer_ghost_cells.cc b/tests/grid/grid_tools_halo_layer_ghost_cells.cc index 2415024b28..7a2b61d28b 100644 --- a/tests/grid/grid_tools_halo_layer_ghost_cells.cc +++ b/tests/grid/grid_tools_halo_layer_ghost_cells.cc @@ -30,7 +30,7 @@ template void test() { - const MPI_Comm &mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << "dim = " << dim << std::endl; parallel::distributed::Triangulation tria(mpi_communicator); diff --git a/tests/hp/solution_transfer_14.cc b/tests/hp/solution_transfer_14.cc index b9a7be358d..d7a8b33785 100644 --- a/tests/hp/solution_transfer_14.cc +++ b/tests/hp/solution_transfer_14.cc @@ -44,7 +44,7 @@ template void -transfer(const MPI_Comm &mpi_communicator) +transfer(const MPI_Comm mpi_communicator) { const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); @@ -111,7 +111,7 @@ main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - const MPI_Comm & mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << " 1D solution transfer" << std::endl; transfer<1>(mpi_communicator); diff --git a/tests/hp/solution_transfer_15.cc b/tests/hp/solution_transfer_15.cc index 4db0c44be8..d53eb1ed1c 100644 --- a/tests/hp/solution_transfer_15.cc +++ b/tests/hp/solution_transfer_15.cc @@ -89,7 +89,7 @@ initialize_indexsets(IndexSet & locally_owned_dofs, template void -transfer(const MPI_Comm &mpi_communicator) +transfer(const MPI_Comm mpi_communicator) { const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); @@ -175,7 +175,7 @@ main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - const MPI_Comm & mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << " 1D solution transfer" << std::endl; transfer<1>(mpi_communicator); diff --git a/tests/mpi/muelu_periodicity.cc b/tests/mpi/muelu_periodicity.cc index 168bd45070..73771b32d0 100644 --- a/tests/mpi/muelu_periodicity.cc +++ b/tests/mpi/muelu_periodicity.cc @@ -187,7 +187,7 @@ namespace Step22 InverseMatrix(const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -207,7 +207,7 @@ namespace Step22 const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : matrix(&m) , preconditioner(&preconditioner) , 
mpi_communicator(&mpi_communicator) @@ -245,7 +245,7 @@ namespace Step22 Preconditioner> & A_inverse, const IndexSet & owned_pres, const IndexSet & relevant_pres, - const MPI_Comm &mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -268,7 +268,7 @@ namespace Step22 & A_inverse, const IndexSet &owned_vel, const IndexSet &relevant_vel, - const MPI_Comm &mpi_communicator) + const MPI_Comm mpi_communicator) : system_matrix(&system_matrix) , A_inverse(&A_inverse) , tmp1(owned_vel, mpi_communicator) diff --git a/tests/mpi/periodicity_02.cc b/tests/mpi/periodicity_02.cc index 99b3c0dae2..03a4223e4c 100644 --- a/tests/mpi/periodicity_02.cc +++ b/tests/mpi/periodicity_02.cc @@ -189,7 +189,7 @@ namespace Step22 InverseMatrix(const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -209,7 +209,7 @@ namespace Step22 const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : matrix(&m) , preconditioner(&preconditioner) , mpi_communicator(&mpi_communicator) @@ -247,7 +247,7 @@ namespace Step22 Preconditioner> & A_inverse, const IndexSet & owned_pres, const IndexSet & relevant_pres, - const MPI_Comm &mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -270,7 +270,7 @@ namespace Step22 & A_inverse, const IndexSet &owned_vel, const IndexSet &relevant_vel, - const MPI_Comm &mpi_communicator) + const MPI_Comm mpi_communicator) : system_matrix(&system_matrix) , A_inverse(&A_inverse) , tmp1(owned_vel, mpi_communicator) diff --git a/tests/mpi/periodicity_03.cc b/tests/mpi/periodicity_03.cc index ba78ca4f42..eb1f55a1fc 100644 --- a/tests/mpi/periodicity_03.cc +++ b/tests/mpi/periodicity_03.cc @@ -112,7 +112,7 @@ namespace Step22 InverseMatrix(const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -132,7 +132,7 @@ namespace Step22 const Matrix & m, const Preconditioner &preconditioner, const IndexSet & locally_owned, - const MPI_Comm & mpi_communicator) + const MPI_Comm mpi_communicator) : matrix(&m) , preconditioner(&preconditioner) , mpi_communicator(&mpi_communicator) @@ -168,7 +168,7 @@ namespace Step22 const InverseMatrix & A_inverse, const IndexSet & owned_pres, - const MPI_Comm &mpi_communicator); + const MPI_Comm mpi_communicator); void vmult(TrilinosWrappers::MPI::Vector & dst, @@ -190,7 +190,7 @@ namespace Step22 const InverseMatrix & A_inverse, const IndexSet &owned_vel, - const MPI_Comm &mpi_communicator) + const MPI_Comm mpi_communicator) : system_matrix(&system_matrix) , A_inverse(&A_inverse) , tmp1(owned_vel, mpi_communicator) diff --git a/tests/mpi/solution_transfer_02.cc b/tests/mpi/solution_transfer_02.cc index b44c577a49..0976994c78 100644 --- a/tests/mpi/solution_transfer_02.cc +++ b/tests/mpi/solution_transfer_02.cc @@ -44,7 +44,7 @@ template void -transfer(const MPI_Comm &mpi_communicator) +transfer(const MPI_Comm mpi_communicator) { const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); @@ -108,7 +108,7 @@ main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - const MPI_Comm & 
mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << " 1D solution transfer" << std::endl; transfer<1>(mpi_communicator); diff --git a/tests/mpi/solution_transfer_03.cc b/tests/mpi/solution_transfer_03.cc index a1d49632b3..cbf21c59f8 100644 --- a/tests/mpi/solution_transfer_03.cc +++ b/tests/mpi/solution_transfer_03.cc @@ -88,7 +88,7 @@ initialize_indexsets(IndexSet & locally_owned_dofs, template void -transfer(const MPI_Comm &mpi_communicator) +transfer(const MPI_Comm mpi_communicator) { const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); @@ -169,7 +169,7 @@ main(int argc, char *argv[]) { Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - const MPI_Comm & mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; deallog << " 1D solution transfer" << std::endl; transfer<1>(mpi_communicator); diff --git a/tests/mpi/solution_transfer_06.cc b/tests/mpi/solution_transfer_06.cc index 2269b5ec88..f704787c37 100644 --- a/tests/mpi/solution_transfer_06.cc +++ b/tests/mpi/solution_transfer_06.cc @@ -46,7 +46,7 @@ template void -transfer(const MPI_Comm &comm) +transfer(const MPI_Comm comm) { AssertDimension(Utilities::MPI::n_mpi_processes(comm), 1); diff --git a/tests/multigrid-global-coarsening/global_id_01.cc b/tests/multigrid-global-coarsening/global_id_01.cc index f113b400b3..624b042eca 100644 --- a/tests/multigrid-global-coarsening/global_id_01.cc +++ b/tests/multigrid-global-coarsening/global_id_01.cc @@ -37,7 +37,7 @@ template void -test(const MPI_Comm &comm) +test(const MPI_Comm comm) { Triangulation basetria; GridGenerator::subdivided_hyper_cube(basetria, 4); diff --git a/tests/numerics/project_parallel_common.h b/tests/numerics/project_parallel_common.h index 8f7a7a87cc..3f3977ef87 100644 --- a/tests/numerics/project_parallel_common.h +++ b/tests/numerics/project_parallel_common.h @@ -112,9 +112,9 @@ do_project(const parallel::distributed::Triangulation &triangulation, deallog << "n_dofs=" << dof_handler.n_dofs() << std::endl; - const MPI_Comm &mpi_communicator = triangulation.get_communicator(); - const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs(); - IndexSet locally_relevant_dofs; + const MPI_Comm mpi_communicator = triangulation.get_communicator(); + const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs(); + IndexSet locally_relevant_dofs; DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs); AffineConstraints constraints; diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.cc b/tests/petsc/petsc_noncontiguous_partitioner_02.cc index ff54429753..4ba2e41cb8 100644 --- a/tests/petsc/petsc_noncontiguous_partitioner_02.cc +++ b/tests/petsc/petsc_noncontiguous_partitioner_02.cc @@ -35,7 +35,7 @@ template void -test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir) +test(const MPI_Comm comm, const bool do_revert, const unsigned int dir) { const unsigned int degree = 2; const unsigned int n_refinements = 2; @@ -144,7 +144,7 @@ test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir) template void -test_dim(const MPI_Comm &comm, const bool do_revert) +test_dim(const MPI_Comm comm, const bool do_revert) { for (int dir = 0; dir < dim; ++dir) test(comm, do_revert, dir); diff --git a/tests/simplex/poisson_01.cc b/tests/simplex/poisson_01.cc index e7bffa733e..5dfa148a8c 100644 --- a/tests/simplex/poisson_01.cc +++ b/tests/simplex/poisson_01.cc @@ -338,7 
+338,7 @@ test(const Triangulation &tria, template void -test_tet(const MPI_Comm &comm, const Parameters &params) +test_tet(const MPI_Comm comm, const Parameters &params) { const unsigned int tria_type = 2; @@ -425,7 +425,7 @@ test_tet(const MPI_Comm &comm, const Parameters &params) template void -test_hex(const MPI_Comm &comm, const Parameters &params) +test_hex(const MPI_Comm comm, const Parameters &params) { // 1) Create triangulation... parallel::distributed::Triangulation tria(comm); @@ -467,7 +467,7 @@ test_hex(const MPI_Comm &comm, const Parameters &params) template void -test_wedge(const MPI_Comm &comm, const Parameters &params) +test_wedge(const MPI_Comm comm, const Parameters &params) { const unsigned int tria_type = 2; @@ -558,7 +558,7 @@ test_wedge(const MPI_Comm &comm, const Parameters &params) template void -test_pyramid(const MPI_Comm &comm, const Parameters &params) +test_pyramid(const MPI_Comm comm, const Parameters &params) { const unsigned int tria_type = 2; diff --git a/tests/trilinos/parallel_block_vector_copy_01.cc b/tests/trilinos/parallel_block_vector_copy_01.cc index df6430157e..606169967e 100644 --- a/tests/trilinos/parallel_block_vector_copy_01.cc +++ b/tests/trilinos/parallel_block_vector_copy_01.cc @@ -36,7 +36,7 @@ main(int argc, char **argv) Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - const MPI_Comm & mpi_communicator = MPI_COMM_WORLD; + const MPI_Comm mpi_communicator = MPI_COMM_WORLD; const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator); const unsigned int n_mpi_processes = diff --git a/tests/zoltan/tria_zoltan_01.cc b/tests/zoltan/tria_zoltan_01.cc index 4a66cfc91b..601c3ea366 100644 --- a/tests/zoltan/tria_zoltan_01.cc +++ b/tests/zoltan/tria_zoltan_01.cc @@ -29,7 +29,7 @@ template void -test(const MPI_Comm &mpi_communicator) +test(const MPI_Comm mpi_communicator) { parallel::shared::Triangulation triangulation( mpi_communicator, Triangulation::limit_level_difference_at_vertices); -- 2.39.5
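Note (not part of the patch itself, placed after the signature so git am ignores it): a minimal standalone sketch of the by-value calling convention the patch adopts. MPI_Comm is an opaque handle, so copying it is as cheap as passing a reference and avoids binding a reference to a temporary. report_rank is a hypothetical helper, not a deal.II function.

#include <mpi.h>
#include <cstdio>

// Hypothetical helper: takes the communicator by value, mirroring the
// signatures introduced above.
void report_rank(const MPI_Comm communicator)
{
  int rank    = 0;
  int n_ranks = 0;
  MPI_Comm_rank(communicator, &rank);
  MPI_Comm_size(communicator, &n_ranks);
  std::printf("process %d of %d\n", rank, n_ranks);
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  report_rank(MPI_COMM_WORLD); // copies the handle; no reference required
  MPI_Finalize();
  return 0;
}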