https://gitweb.dealii.org/ - dealii.git/commitdiff
pass MPI_Comm by const ref 11109/head
author Timo Heister <timo.heister@gmail.com>
Mon, 26 Oct 2020 13:27:13 +0000 (09:27 -0400)
committer Timo Heister <timo.heister@gmail.com>
Fri, 30 Oct 2020 18:13:26 +0000 (14:13 -0400)
fixes #11107
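
The change is purely mechanical: every interface that previously took an MPI_Comm by value now takes it by const reference. Below is a minimal sketch of the pattern; the class and function names are hypothetical and only illustrate the signature change, they are not taken from the deal.II sources.

#include <mpi.h>

class ExampleSolver
{
public:
  // Before this commit the communicator was taken by value:
  //   explicit ExampleSolver(MPI_Comm mpi_communicator);
  // After this commit it is taken by const reference:
  explicit ExampleSolver(const MPI_Comm &mpi_communicator)
    : mpi_communicator(mpi_communicator)
  {}

  unsigned int
  n_mpi_processes() const
  {
    int size = 0;
    MPI_Comm_size(mpi_communicator, &size);
    return static_cast<unsigned int>(size);
  }

private:
  MPI_Comm mpi_communicator; // the member itself is still stored by value
};

Since MPI_Comm is an opaque handle (an int under MPICH, a pointer under Open MPI), the const reference is not about copy cost; presumably it mainly keeps these signatures consistent with the other const-reference parameters in the same interfaces.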

47 files changed:
include/deal.II/base/data_out_base.h
include/deal.II/base/hdf5.h
include/deal.II/base/mpi.h
include/deal.II/base/partitioner.h
include/deal.II/base/process_grid.h
include/deal.II/base/timer.h
include/deal.II/distributed/fully_distributed_tria.h
include/deal.II/distributed/grid_refinement.h
include/deal.II/distributed/shared_tria.h
include/deal.II/distributed/tria.h
include/deal.II/distributed/tria_base.h
include/deal.II/dofs/number_cache.h
include/deal.II/grid/grid_tools.h
include/deal.II/grid/tria_description.h
include/deal.II/lac/affine_constraints.h
include/deal.II/lac/affine_constraints.templates.h
include/deal.II/lac/la_parallel_block_vector.h
include/deal.II/lac/la_parallel_block_vector.templates.h
include/deal.II/lac/la_parallel_vector.h
include/deal.II/lac/la_parallel_vector.templates.h
include/deal.II/lac/petsc_precondition.h
include/deal.II/sundials/arkode.h
include/deal.II/sundials/ida.h
include/deal.II/sundials/kinsol.h
source/base/data_out_base.cc
source/base/data_out_base.inst.in
source/base/hdf5.cc
source/base/mpi.cc
source/base/partitioner.cc
source/base/process_grid.cc
source/base/timer.cc
source/distributed/fully_distributed_tria.cc
source/distributed/grid_refinement.cc
source/distributed/grid_refinement.inst.in
source/distributed/shared_tria.cc
source/distributed/tria.cc
source/distributed/tria_base.cc
source/dofs/number_cache.cc
source/grid/grid_tools.cc
source/grid/grid_tools.inst.in
source/grid/tria_description.cc
source/grid/tria_description.inst.in
source/lac/petsc_precondition.cc
source/multigrid/mg_level_global_transfer.cc
source/sundials/arkode.cc
source/sundials/ida.cc
source/sundials/kinsol.cc

index 2549a5df390d21eb31fc42e6d8d1adade4c50a1d..bbb6e8827a778ee4f5fda460f9d21cb2c9875f09 100644 (file)
@@ -2311,7 +2311,7 @@ namespace DataOutBase
   write_hdf5_parallel(const std::vector<Patch<dim, spacedim>> &patches,
                       const DataOutFilter &                    data_filter,
                       const std::string &                      filename,
-                      MPI_Comm                                 comm);
+                      const MPI_Comm &                         comm);
 
   /**
    * Write the data in @p data_filter to HDF5 file(s). If @p write_mesh_file is
@@ -2327,7 +2327,7 @@ namespace DataOutBase
                       const bool                               write_mesh_file,
                       const std::string &                      mesh_filename,
                       const std::string &solution_filename,
-                      MPI_Comm           comm);
+                      const MPI_Comm &   comm);
 
   /**
    * DataOutFilter is an intermediate data format that reduces the amount of
@@ -2658,7 +2658,8 @@ public:
    * DataOutInterface::write_vtu().
    */
   void
-  write_vtu_in_parallel(const std::string &filename, MPI_Comm comm) const;
+  write_vtu_in_parallel(const std::string &filename,
+                        const MPI_Comm &   comm) const;
 
   /**
    * Some visualization programs, such as ParaView, can read several separate
@@ -2793,7 +2794,7 @@ public:
   create_xdmf_entry(const DataOutBase::DataOutFilter &data_filter,
                     const std::string &               h5_filename,
                     const double                      cur_time,
-                    MPI_Comm                          comm) const;
+                    const MPI_Comm &                  comm) const;
 
   /**
    * Create an XDMFEntry based on the data in the data_filter. This assumes
@@ -2805,7 +2806,7 @@ public:
                     const std::string &               h5_mesh_filename,
                     const std::string &               h5_solution_filename,
                     const double                      cur_time,
-                    MPI_Comm                          comm) const;
+                    const MPI_Comm &                  comm) const;
 
   /**
    * Write an XDMF file based on the provided vector of XDMFEntry objects.
@@ -2834,7 +2835,7 @@ public:
   void
   write_xdmf_file(const std::vector<XDMFEntry> &entries,
                   const std::string &           filename,
-                  MPI_Comm                      comm) const;
+                  const MPI_Comm &              comm) const;
 
   /**
    * Write the data in @p data_filter to a single HDF5 file containing both the
@@ -2853,7 +2854,7 @@ public:
   void
   write_hdf5_parallel(const DataOutBase::DataOutFilter &data_filter,
                       const std::string &               filename,
-                      MPI_Comm                          comm) const;
+                      const MPI_Comm &                  comm) const;
 
   /**
    * Write the data in data_filter to HDF5 file(s). If write_mesh_file is
@@ -2867,7 +2868,7 @@ public:
                       const bool                        write_mesh_file,
                       const std::string &               mesh_filename,
                       const std::string &               solution_filename,
-                      MPI_Comm                          comm) const;
+                      const MPI_Comm &                  comm) const;
 
   /**
    * DataOutFilter is an intermediate data format that reduces the amount of
index fe6a76bb656949d19b164fe4e0038eb044b1176a..c24dc47780e0c8ba847c095458c4280042a58506 100644 (file)
@@ -73,7 +73,7 @@ DEAL_II_NAMESPACE_OPEN
  * MPI support (several processes access the same HDF5 file).
  * File::File(const std::string &, const FileAccessMode)
  * opens/creates an HDF5 file for serial operations.
- * File::File(const std::string &, const FileAccessMode, const MPI_Comm)
+ * File::File(const std::string &, const FileAccessMode, const MPI_Comm &)
  * creates or opens an HDF5 file in parallel using MPI. The HDF5 calls that
  * modify the structure of the file are always collective, whereas writing
  * and reading raw data in a dataset can be done independently or collectively.
@@ -1090,12 +1090,12 @@ namespace HDF5
      */
     File(const std::string &  name,
          const FileAccessMode mode,
-         const MPI_Comm       mpi_communicator);
+         const MPI_Comm &     mpi_communicator);
 
   private:
     /**
      * Delegation internal constructor.
-     * File(const std::string &, const MPI_Comm, const Mode);
+     * File(const std::string &, const MPI_Comm &, const Mode);
      * and
      * File(const std::string &, const Mode)
      * should be used to open or create HDF5 files.
@@ -1103,7 +1103,7 @@ namespace HDF5
     File(const std::string &  name,
          const FileAccessMode mode,
          const bool           mpi,
-         const MPI_Comm       mpi_communicator);
+         const MPI_Comm &     mpi_communicator);
   };
 
   namespace internal
index 19ae43725f47e871b95c7d0d230451983d5af57c..e77e8258e00cde6c4daed715e4d29ce0d1c487df 100644 (file)
@@ -376,7 +376,7 @@ namespace Utilities
        * in the communicator.
        */
       void
-      lock(MPI_Comm comm);
+      lock(const MPI_Comm &comm);
 
       /**
        * Release the lock.
@@ -385,7 +385,7 @@ namespace Utilities
        * in the communicator.
        */
       void
-      unlock(MPI_Comm comm);
+      unlock(const MPI_Comm &comm);
 
     private:
       /**
index b28f635ff83757249f6a5e26dfa736a154c72efd..33717041e4420c3e3287d67cd86afe60fa340aa3 100644 (file)
@@ -220,7 +220,7 @@ namespace Utilities
        */
       Partitioner(const types::global_dof_index local_size,
                   const types::global_dof_index ghost_size,
-                  const MPI_Comm                communicator);
+                  const MPI_Comm &              communicator);
 
       /**
        * Constructor with index set arguments. This constructor creates a
@@ -231,7 +231,7 @@ namespace Utilities
        */
       Partitioner(const IndexSet &locally_owned_indices,
                   const IndexSet &ghost_indices_in,
-                  const MPI_Comm  communicator_in);
+                  const MPI_Comm &communicator_in);
 
       /**
        * Constructor with one index set argument. This constructor creates a
@@ -241,7 +241,7 @@ namespace Utilities
        * constructor with two index sets.
        */
       Partitioner(const IndexSet &locally_owned_indices,
-                  const MPI_Comm  communicator_in);
+                  const MPI_Comm &communicator_in);
 
       /**
        * Reinitialize the communication pattern. The first argument
index 06faf24d2cdab1f418f2a7505de8d674185b250a..b6d0cbf2c544a3302604421177fde5ce9561f4c7 100644 (file)
@@ -71,7 +71,7 @@ namespace Utilities
        * number of cores
        * in the @p mpi_communicator.
        */
-      ProcessGrid(MPI_Comm           mpi_communicator,
+      ProcessGrid(const MPI_Comm &   mpi_communicator,
                   const unsigned int n_rows,
                   const unsigned int n_columns);
 
@@ -92,7 +92,7 @@ namespace Utilities
        * and the @p mpi_communicator with 11 cores will result in the $3x3$
        * process grid.
        */
-      ProcessGrid(MPI_Comm           mpi_communicator,
+      ProcessGrid(const MPI_Comm &   mpi_communicator,
                   const unsigned int n_rows_matrix,
                   const unsigned int n_columns_matrix,
                   const unsigned int row_block_size,
@@ -151,7 +151,7 @@ namespace Utilities
        * A private constructor which takes grid dimensions as an
        * <code>std::pair</code>.
        */
-      ProcessGrid(MPI_Comm                                     mpi_communicator,
+      ProcessGrid(const MPI_Comm &                             mpi_communicator,
                   const std::pair<unsigned int, unsigned int> &grid_dimensions);
 
       /**
index 2bc08dd750ef96f95f05eed2fe1ecfdd1d7889fa..a59712b22059fb421a1ce61d75158594bcd40c7c 100644 (file)
@@ -137,7 +137,7 @@ public:
    * communicator occurs; the extra cost of the synchronization is not
    * measured.
    */
-  Timer(MPI_Comm mpi_communicator, const bool sync_lap_times = false);
+  Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times = false);
 
   /**
    * Return a reference to the data structure containing basic statistics on
@@ -706,7 +706,7 @@ public:
    * <code>MPI_Barrier</code> call before starting and stopping the timer for
    * each section.
    */
-  TimerOutput(MPI_Comm              mpi_comm,
+  TimerOutput(const MPI_Comm &      mpi_comm,
               std::ostream &        stream,
               const OutputFrequency output_frequency,
               const OutputType      output_type);
@@ -734,7 +734,7 @@ public:
    * <code>MPI_Barrier</code> call before starting and stopping the timer for
    * each section.)
    */
-  TimerOutput(MPI_Comm              mpi_comm,
+  TimerOutput(const MPI_Comm &      mpi_comm,
               ConditionalOStream &  stream,
               const OutputFrequency output_frequency,
               const OutputType      output_type);
@@ -806,8 +806,8 @@ public:
    * median is given).
    */
   void
-  print_wall_time_statistics(const MPI_Comm mpi_comm,
-                             const double   print_quantile = 0.) const;
+  print_wall_time_statistics(const MPI_Comm &mpi_comm,
+                             const double    print_quantile = 0.) const;
 
   /**
    * By calling this function, all output can be disabled. This function
index f84adb47548e51e4e9670e838876b12f9603d30d..9faeaaa313263d815a1959155e802b1e878215bf 100644 (file)
@@ -129,7 +129,7 @@ namespace parallel
        * @param mpi_communicator The MPI communicator to be used for the
        *                         triangulation.
        */
-      explicit Triangulation(MPI_Comm mpi_communicator);
+      explicit Triangulation(const MPI_Comm &mpi_communicator);
 
       /**
        * Destructor.
index 4ff1e2c3480d0402033cc9225a2047d7d43bbb59..96e73eaff9e7242fa323889adfbd60fb9e436a5e 100644 (file)
@@ -47,7 +47,7 @@ namespace internal
         std::pair<number, number>
         compute_global_min_and_max_at_root(
           const dealii::Vector<number> &criteria,
-          MPI_Comm                      mpi_communicator);
+          const MPI_Comm &              mpi_communicator);
 
         namespace RefineAndCoarsenFixedNumber
         {
@@ -60,7 +60,7 @@ namespace internal
           compute_threshold(const dealii::Vector<number> &   criteria,
                             const std::pair<double, double> &global_min_and_max,
                             const types::global_cell_index   n_target_cells,
-                            MPI_Comm                         mpi_communicator);
+                            const MPI_Comm &                 mpi_communicator);
         } // namespace RefineAndCoarsenFixedNumber
 
         namespace RefineAndCoarsenFixedFraction
@@ -76,7 +76,7 @@ namespace internal
           compute_threshold(const dealii::Vector<number> &   criteria,
                             const std::pair<double, double> &global_min_and_max,
                             const double                     target_error,
-                            MPI_Comm                         mpi_communicator);
+                            const MPI_Comm &                 mpi_communicator);
         } // namespace RefineAndCoarsenFixedFraction
       }   // namespace GridRefinement
     }     // namespace distributed
index 25439bb4a425c62c99fef3b56055450af6121bce..8cb2bedefd9034c1c7a655a94908dd88365d86b0 100644 (file)
@@ -235,7 +235,7 @@ namespace parallel
        * Otherwise all non-locally owned cells are considered ghost.
        */
       Triangulation(
-        MPI_Comm mpi_communicator,
+        const MPI_Comm &mpi_communicator,
         const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing =
           (dealii::Triangulation<dim, spacedim>::none),
         const bool     allow_artificial_cells = false,
index 18e42e92abb19ca8164efddc48c553bb004259fb..ae440e367709ad9f72e9c05be32f022b238f6def 100644 (file)
@@ -353,7 +353,7 @@ namespace parallel
        * triangulation is partitioned.
        */
       explicit Triangulation(
-        MPI_Comm mpi_communicator,
+        const MPI_Comm &mpi_communicator,
         const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                        smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
         const Settings settings    = default_setting);
@@ -967,7 +967,7 @@ namespace parallel
       class DataTransfer
       {
       public:
-        DataTransfer(MPI_Comm mpi_communicator);
+        DataTransfer(const MPI_Comm &mpi_communicator);
 
         /**
          * Prepare data transfer by calling the pack callback functions on each
@@ -1247,7 +1247,7 @@ namespace parallel
        * the triangulation.
        */
       Triangulation(
-        MPI_Comm mpi_communicator,
+        const MPI_Comm &mpi_communicator,
         const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
                        smooth_grid = (dealii::Triangulation<1, spacedim>::none),
         const Settings settings    = default_setting);
index a950bbee1d53cb53163896334933d12e434589ff..5c818533dd6af8c69e9e7eb1e5c9c9650d58be29 100644 (file)
@@ -82,7 +82,7 @@ namespace parallel
      * Constructor.
      */
     TriangulationBase(
-      MPI_Comm mpi_communicator,
+      const MPI_Comm &mpi_communicator,
       const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                  smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
       const bool check_for_distorted_cells = false);
@@ -357,7 +357,7 @@ namespace parallel
      * Constructor.
      */
     DistributedTriangulationBase(
-      MPI_Comm mpi_communicator,
+      const MPI_Comm &mpi_communicator,
       const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                  smooth_grid = (dealii::Triangulation<dim, spacedim>::none),
       const bool check_for_distorted_cells = false);
index d970baa15700249b570a6187d9a25a9aa942bebd..59f1839d8fb9134bbb561242ac7c51fa59edcc4a 100644 (file)
@@ -117,7 +117,7 @@ namespace internal
        */
       std::vector<types::global_dof_index>
       get_n_locally_owned_dofs_per_processor(
-        const MPI_Comm mpi_communicator) const;
+        const MPI_Comm &mpi_communicator) const;
 
       /**
        * Return a representation of @p locally_owned_dofs_per_processor both
@@ -128,7 +128,7 @@ namespace internal
        */
       std::vector<IndexSet>
       get_locally_owned_dofs_per_processor(
-        const MPI_Comm mpi_communicator) const;
+        const MPI_Comm &mpi_communicator) const;
 
       /**
        * Total number of dofs, accumulated over all processors that may
index 2252955399e140877e1487aaa91f8f5deb2daa33..893827015a3274280d18056b1b4af82caa352c97 100644 (file)
@@ -2974,7 +2974,7 @@ namespace GridTools
   std::vector<std::vector<BoundingBox<spacedim>>>
   exchange_local_bounding_boxes(
     const std::vector<BoundingBox<spacedim>> &local_bboxes,
-    MPI_Comm                                  mpi_communicator);
+    const MPI_Comm &                          mpi_communicator);
 
   /**
    * In this collective operation each process provides a vector
@@ -3012,7 +3012,7 @@ namespace GridTools
   RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
   build_global_description_tree(
     const std::vector<BoundingBox<spacedim>> &local_description,
-    MPI_Comm                                  mpi_communicator);
+    const MPI_Comm &                          mpi_communicator);
 
   /**
    * Collect for a given triangulation all locally relevant vertices that
index 63bc1b30f7c0e028e97587b8eeaade88fb330c14..0c8918952234b59a377794abf7954a81ea926a6d 100644 (file)
@@ -435,7 +435,7 @@ namespace TriangulationDescription
     Description<dim, spacedim>
     create_description_from_triangulation(
       const dealii::Triangulation<dim, spacedim> &tria,
-      const MPI_Comm                              comm,
+      const MPI_Comm &                            comm,
       const TriangulationDescription::Settings    settings =
         TriangulationDescription::Settings::default_setting,
       const unsigned int my_rank_in = numbers::invalid_unsigned_int);
@@ -475,9 +475,9 @@ namespace TriangulationDescription
       const std::function<void(dealii::Triangulation<dim, spacedim> &)>
         &                                            serial_grid_generator,
       const std::function<void(dealii::Triangulation<dim, spacedim> &,
-                               const MPI_Comm,
+                               const MPI_Comm &,
                                const unsigned int)> &serial_grid_partitioner,
-      const MPI_Comm                                 comm,
+      const MPI_Comm &                               comm,
       const int                                      group_size = 1,
       const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing =
         dealii::Triangulation<dim, spacedim>::none,
index 11c04524484d446cfcdd0370fe6bf15e98b252d9..28b3d427f822a54cad7c5d93ca1bf829d570aee8 100644 (file)
@@ -1650,7 +1650,7 @@ public:
   bool
   is_consistent_in_parallel(const std::vector<IndexSet> &locally_owned_dofs,
                             const IndexSet &             locally_active_dofs,
-                            const MPI_Comm               mpi_communicator,
+                            const MPI_Comm &             mpi_communicator,
                             const bool                   verbose = false) const;
 
   /**
index a9f0bfcc1ea389399076557e8f985392c22c68e6..8dc3938a11ae9a293dc4aadb7f28061fdae1a4e8 100644 (file)
@@ -119,7 +119,7 @@ bool
 AffineConstraints<number>::is_consistent_in_parallel(
   const std::vector<IndexSet> &locally_owned_dofs,
   const IndexSet &             locally_active_dofs,
-  const MPI_Comm               mpi_communicator,
+  const MPI_Comm &             mpi_communicator,
   const bool                   verbose) const
 {
   // Helper to return a ConstraintLine object that belongs to row @p row.
index 6c826897a1507a14d4fda73a0e62089d9d99c974..419614e3951cbac0e4b38d64439fb4d4d83241c6 100644 (file)
@@ -173,13 +173,13 @@ namespace LinearAlgebra
        */
       BlockVector(const std::vector<IndexSet> &local_ranges,
                   const std::vector<IndexSet> &ghost_indices,
-                  const MPI_Comm               communicator);
+                  const MPI_Comm &             communicator);
 
       /**
        * Same as above but the ghost indices are assumed to be empty.
        */
       BlockVector(const std::vector<IndexSet> &local_ranges,
-                  const MPI_Comm               communicator);
+                  const MPI_Comm &             communicator);
 
       /**
        * Destructor.
index f2ba4daaa42b3b06005c00c67542d06dfd3f25e1..f302158c17271c638667d51f0712af743e5685ed 100644 (file)
@@ -60,7 +60,7 @@ namespace LinearAlgebra
     template <typename Number>
     BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
                                      const std::vector<IndexSet> &ghost_indices,
-                                     const MPI_Comm               communicator)
+                                     const MPI_Comm &             communicator)
     {
       std::vector<size_type> sizes(local_ranges.size());
       for (unsigned int i = 0; i < local_ranges.size(); ++i)
@@ -76,7 +76,7 @@ namespace LinearAlgebra
 
     template <typename Number>
     BlockVector<Number>::BlockVector(const std::vector<IndexSet> &local_ranges,
-                                     const MPI_Comm               communicator)
+                                     const MPI_Comm &             communicator)
     {
       std::vector<size_type> sizes(local_ranges.size());
       for (unsigned int i = 0; i < local_ranges.size(); ++i)
index 0b97e7e36bc544841b509280c99dd7118df0329d..09d25ee2189f9e96928b85ddc1ec38ea33efd4ed 100644 (file)
@@ -301,12 +301,12 @@ namespace LinearAlgebra
        */
       Vector(const IndexSet &local_range,
              const IndexSet &ghost_indices,
-             const MPI_Comm  communicator);
+             const MPI_Comm &communicator);
 
       /**
        * Same constructor as above but without any ghost indices.
        */
-      Vector(const IndexSet &local_range, const MPI_Comm communicator);
+      Vector(const IndexSet &local_range, const MPI_Comm &communicator);
 
       /**
        * Create the vector based on the parallel partitioning described in @p
@@ -363,13 +363,13 @@ namespace LinearAlgebra
       void
       reinit(const IndexSet &local_range,
              const IndexSet &ghost_indices,
-             const MPI_Comm  communicator);
+             const MPI_Comm &communicator);
 
       /**
        * Same as above, but without ghost entries.
        */
       void
-      reinit(const IndexSet &local_range, const MPI_Comm communicator);
+      reinit(const IndexSet &local_range, const MPI_Comm &communicator);
 
       /**
        * Initialize the vector given to the parallel partitioning described in
index 24a1864fa7daae5abd3caff69088007b87e6ae94..b484c308f8da7343a731c3241656d11050c4417c 100644 (file)
@@ -627,7 +627,7 @@ namespace LinearAlgebra
     Vector<Number, MemorySpaceType>::reinit(
       const IndexSet &locally_owned_indices,
       const IndexSet &ghost_indices,
-      const MPI_Comm  communicator)
+      const MPI_Comm &communicator)
     {
       // set up parallel partitioner with index sets and communicator
       reinit(std::make_shared<Utilities::MPI::Partitioner>(
@@ -640,7 +640,7 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::reinit(
       const IndexSet &locally_owned_indices,
-      const MPI_Comm  communicator)
+      const MPI_Comm &communicator)
     {
       // set up parallel partitioner with index sets and communicator
       reinit(
@@ -719,7 +719,7 @@ namespace LinearAlgebra
     template <typename Number, typename MemorySpaceType>
     Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
                                             const IndexSet &ghost_indices,
-                                            const MPI_Comm  communicator)
+                                            const MPI_Comm &communicator)
       : allocated_size(0)
       , vector_is_ghosted(false)
       , comm_sm(MPI_COMM_SELF)
@@ -731,7 +731,7 @@ namespace LinearAlgebra
 
     template <typename Number, typename MemorySpaceType>
     Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
-                                            const MPI_Comm  communicator)
+                                            const MPI_Comm &communicator)
       : allocated_size(0)
       , vector_is_ghosted(false)
       , comm_sm(MPI_COMM_SELF)
index 73c44ef5b8b364803a2bdd19223a99278fa91301..7e46a63658eaf6e07b275ef022f802aa166f7ada 100644 (file)
@@ -164,7 +164,7 @@ namespace PETScWrappers
      * Intended to be used with SLEPc objects.
      */
     PreconditionJacobi(
-      const MPI_Comm        communicator,
+      const MPI_Comm &      communicator,
       const AdditionalData &additional_data = AdditionalData());
 
     /**
@@ -246,7 +246,7 @@ namespace PETScWrappers
      * Intended to be used with SLEPc objects.
      */
     PreconditionBlockJacobi(
-      const MPI_Comm        communicator,
+      const MPI_Comm &      communicator,
       const AdditionalData &additional_data = AdditionalData());
 
 
@@ -753,7 +753,7 @@ namespace PETScWrappers
      * Intended to be used with SLEPc objects.
      */
     PreconditionBoomerAMG(
-      const MPI_Comm        communicator,
+      const MPI_Comm &      communicator,
       const AdditionalData &additional_data = AdditionalData());
 
 
index a2e76105b455491beff12d69449ef844676755ed..86435ad9915ea24f3d0f953c7484cfa5f997f879 100644 (file)
@@ -510,7 +510,7 @@ namespace SUNDIALS
      * @param mpi_comm MPI communicator
      */
     ARKode(const AdditionalData &data     = AdditionalData(),
-           const MPI_Comm        mpi_comm = MPI_COMM_WORLD);
+           const MPI_Comm &      mpi_comm = MPI_COMM_WORLD);
 
     /**
      * Destructor.
index 0a920e06c4ee6469e3515a4554d02812edae12f2..d6cfca4df98104349453100cb33329957d63190d 100644 (file)
@@ -580,7 +580,7 @@ namespace SUNDIALS
      * @param mpi_comm MPI communicator
      */
     IDA(const AdditionalData &data     = AdditionalData(),
-        const MPI_Comm        mpi_comm = MPI_COMM_WORLD);
+        const MPI_Comm &      mpi_comm = MPI_COMM_WORLD);
 
     /**
      * Destructor.
index 1fa4fb885d626ab606c4b37e6890f1c4a7378dd4..88793da27a9b03fab6e1bc6441672481f445d51e 100644 (file)
@@ -454,7 +454,7 @@ namespace SUNDIALS
      * @param mpi_comm MPI communicator
      */
     KINSOL(const AdditionalData &data     = AdditionalData(),
-           const MPI_Comm        mpi_comm = MPI_COMM_WORLD);
+           const MPI_Comm &      mpi_comm = MPI_COMM_WORLD);
 
     /**
      * Destructor.
index a3979ee16ce0b59c2baa751c031b8018e1235a8d..0d91d13b26fbde9b81635621d407cd3f082ca9f2 100644 (file)
@@ -7120,7 +7120,7 @@ template <int dim, int spacedim>
 void
 DataOutInterface<dim, spacedim>::write_vtu_in_parallel(
   const std::string &filename,
-  MPI_Comm           comm) const
+  const MPI_Comm &   comm) const
 {
 #ifndef DEAL_II_WITH_MPI
   // without MPI fall back to the normal way to write a vtu file:
@@ -7333,7 +7333,7 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
   const DataOutBase::DataOutFilter &data_filter,
   const std::string &               h5_filename,
   const double                      cur_time,
-  MPI_Comm                          comm) const
+  const MPI_Comm &                  comm) const
 {
   return create_xdmf_entry(
     data_filter, h5_filename, h5_filename, cur_time, comm);
@@ -7348,7 +7348,7 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
   const std::string &               h5_mesh_filename,
   const std::string &               h5_solution_filename,
   const double                      cur_time,
-  MPI_Comm                          comm) const
+  const MPI_Comm &                  comm) const
 {
   unsigned int local_node_cell_count[2], global_node_cell_count[2];
 
@@ -7419,7 +7419,7 @@ void
 DataOutInterface<dim, spacedim>::write_xdmf_file(
   const std::vector<XDMFEntry> &entries,
   const std::string &           filename,
-  MPI_Comm                      comm) const
+  const MPI_Comm &              comm) const
 {
 #ifdef DEAL_II_WITH_MPI
   const int myrank = Utilities::MPI::this_mpi_process(comm);
@@ -7597,7 +7597,7 @@ void
 DataOutInterface<dim, spacedim>::write_hdf5_parallel(
   const DataOutBase::DataOutFilter &data_filter,
   const std::string &               filename,
-  MPI_Comm                          comm) const
+  const MPI_Comm &                  comm) const
 {
   DataOutBase::write_hdf5_parallel(get_patches(), data_filter, filename, comm);
 }
@@ -7611,7 +7611,7 @@ DataOutInterface<dim, spacedim>::write_hdf5_parallel(
   const bool                        write_mesh_file,
   const std::string &               mesh_filename,
   const std::string &               solution_filename,
-  MPI_Comm                          comm) const
+  const MPI_Comm &                  comm) const
 {
   DataOutBase::write_hdf5_parallel(get_patches(),
                                    data_filter,
@@ -7629,7 +7629,7 @@ DataOutBase::write_hdf5_parallel(
   const std::vector<Patch<dim, spacedim>> &patches,
   const DataOutBase::DataOutFilter &       data_filter,
   const std::string &                      filename,
-  MPI_Comm                                 comm)
+  const MPI_Comm &                         comm)
 {
   write_hdf5_parallel(patches, data_filter, true, filename, filename, comm);
 }
@@ -7644,7 +7644,7 @@ DataOutBase::write_hdf5_parallel(
   const bool                               write_mesh_file,
   const std::string &                      mesh_filename,
   const std::string &                      solution_filename,
-  MPI_Comm                                 comm)
+  const MPI_Comm &                         comm)
 {
   AssertThrow(
     spacedim >= 2,
index 99ee9f5c4eac1012bc8f9b79f1b1d26702831ef7..9218d409d1c3c908642f2051228b6ad11078e043 100644 (file)
@@ -209,7 +209,7 @@ for (deal_II_dimension : OUTPUT_DIMENSIONS;
           &                  patches,
         const DataOutFilter &data_filter,
         const std::string &  filename,
-        MPI_Comm             comm);
+        const MPI_Comm &     comm);
 
       template void
       write_filtered_data(
index ae14b4724122b900e8b00bbbb646b57eaa7aa859..2575786a8162c16efed58189750a7198c543fdf1 100644 (file)
@@ -354,7 +354,7 @@ namespace HDF5
 
   File::File(const std::string &  name,
              const FileAccessMode mode,
-             const MPI_Comm       mpi_communicator)
+             const MPI_Comm &     mpi_communicator)
     : File(name, mode, true, mpi_communicator)
   {}
 
@@ -363,7 +363,7 @@ namespace HDF5
   File::File(const std::string &  name,
              const FileAccessMode mode,
              const bool           mpi,
-             const MPI_Comm       mpi_communicator)
+             const MPI_Comm &     mpi_communicator)
     : Group(name, mpi)
   {
     hdf5_reference = std::shared_ptr<hid_t>(new hid_t, [](hid_t *pointer) {
index 269aea2e59ea802e989de8f3e867bade734209ee..68b334752683e85fcc988b273b1bb62d0df441b0 100644 (file)
@@ -1101,7 +1101,7 @@ namespace Utilities
 
 
     void
-    CollectiveMutex::lock(MPI_Comm comm)
+    CollectiveMutex::lock(const MPI_Comm &comm)
     {
       (void)comm;
 
@@ -1135,7 +1135,7 @@ namespace Utilities
 
 
     void
-    CollectiveMutex::unlock(MPI_Comm comm)
+    CollectiveMutex::unlock(const MPI_Comm &comm)
     {
       (void)comm;
 
index aec6633bf7584cc3a63035ce75837af3568d2ff6..b0eee440d6c1fe864331ac190a0f93b181cfc98a 100644 (file)
@@ -60,7 +60,7 @@ namespace Utilities
 
     Partitioner::Partitioner(const types::global_dof_index local_size,
                              const types::global_dof_index ghost_size,
-                             const MPI_Comm                communicator)
+                             const MPI_Comm &              communicator)
       : global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
                                                                  communicator))
       , locally_owned_range_data(global_size)
@@ -94,7 +94,7 @@ namespace Utilities
 
     Partitioner::Partitioner(const IndexSet &locally_owned_indices,
                              const IndexSet &ghost_indices_in,
-                             const MPI_Comm  communicator_in)
+                             const MPI_Comm &communicator_in)
       : global_size(
           static_cast<types::global_dof_index>(locally_owned_indices.size()))
       , n_ghost_indices_data(0)
@@ -112,7 +112,7 @@ namespace Utilities
 
 
     Partitioner::Partitioner(const IndexSet &locally_owned_indices,
-                             const MPI_Comm  communicator_in)
+                             const MPI_Comm &communicator_in)
       : global_size(
           static_cast<types::global_dof_index>(locally_owned_indices.size()))
       , n_ghost_indices_data(0)
index 3772704ad06d7e536c4ad375709a24c38aa5ef39..bf9502a2a58bc528a4b6da9ceb57ffe802db91a9 100644 (file)
@@ -34,7 +34,7 @@ namespace
    * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
    */
   inline std::pair<int, int>
-  compute_processor_grid_sizes(MPI_Comm           mpi_comm,
+  compute_processor_grid_sizes(const MPI_Comm &   mpi_comm,
                                const unsigned int m,
                                const unsigned int n,
                                const unsigned int block_size_m,
@@ -101,7 +101,7 @@ namespace Utilities
   namespace MPI
   {
     ProcessGrid::ProcessGrid(
-      MPI_Comm                                     mpi_comm,
+      const MPI_Comm &                             mpi_comm,
       const std::pair<unsigned int, unsigned int> &grid_dimensions)
       : mpi_communicator(mpi_comm)
       , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
@@ -206,7 +206,7 @@ namespace Utilities
 
 
 
-    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
+    ProcessGrid::ProcessGrid(const MPI_Comm &   mpi_comm,
                              const unsigned int n_rows_matrix,
                              const unsigned int n_columns_matrix,
                              const unsigned int row_block_size,
@@ -221,7 +221,7 @@ namespace Utilities
 
 
 
-    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
+    ProcessGrid::ProcessGrid(const MPI_Comm &   mpi_comm,
                              const unsigned int n_rows,
                              const unsigned int n_columns)
       : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
index 8f1a875945878be19ba986148bc8ae43c695f7da..2f7e84ce8677bebc444ce575e9473956ee3e0028 100644 (file)
@@ -162,7 +162,7 @@ Timer::Timer()
 
 
 
-Timer::Timer(MPI_Comm mpi_communicator, const bool sync_lap_times_)
+Timer::Timer(const MPI_Comm &mpi_communicator, const bool sync_lap_times_)
   : running(false)
   , mpi_communicator(mpi_communicator)
   , sync_lap_times(sync_lap_times_)
@@ -322,7 +322,7 @@ TimerOutput::TimerOutput(ConditionalOStream &  stream,
 
 
 
-TimerOutput::TimerOutput(MPI_Comm              mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm &      mpi_communicator,
                          std::ostream &        stream,
                          const OutputFrequency output_frequency,
                          const OutputType      output_type)
@@ -335,7 +335,7 @@ TimerOutput::TimerOutput(MPI_Comm              mpi_communicator,
 
 
 
-TimerOutput::TimerOutput(MPI_Comm              mpi_communicator,
+TimerOutput::TimerOutput(const MPI_Comm &      mpi_communicator,
                          ConditionalOStream &  stream,
                          const OutputFrequency output_frequency,
                          const OutputType      output_type)
@@ -841,8 +841,8 @@ TimerOutput::print_summary() const
 
 
 void
-TimerOutput::print_wall_time_statistics(const MPI_Comm mpi_comm,
-                                        const double   quantile) const
+TimerOutput::print_wall_time_statistics(const MPI_Comm &mpi_comm,
+                                        const double    quantile) const
 {
   // we are going to change the precision and width of output below. store the
   // old values so the get restored when exiting this function
index 80c59ecd30ab192d67d3a53a64078533b2f81c40..6bee001262bff8f55e5eed30e4d1f6581a82b8c9 100644 (file)
@@ -41,7 +41,8 @@ namespace parallel
   namespace fullydistributed
   {
     template <int dim, int spacedim>
-    Triangulation<dim, spacedim>::Triangulation(MPI_Comm mpi_communicator)
+    Triangulation<dim, spacedim>::Triangulation(
+      const MPI_Comm &mpi_communicator)
       : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
       , settings(TriangulationDescription::Settings::default_setting)
       , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
index b3042b6c07549972afde1c13f7d2427cca9f781e..c0355617adcf6494a50d0bb3b1a3c11152698f23 100644 (file)
@@ -71,7 +71,7 @@ namespace
   template <typename number>
   double
   compute_global_sum(const dealii::Vector<number> &criteria,
-                     MPI_Comm                      mpi_communicator)
+                     const MPI_Comm &              mpi_communicator)
   {
     double my_sum =
       std::accumulate(criteria.begin(),
@@ -263,7 +263,7 @@ namespace internal
         std::pair<number, number>
         compute_global_min_and_max_at_root(
           const dealii::Vector<number> &criteria,
-          MPI_Comm                      mpi_communicator)
+          const MPI_Comm &              mpi_communicator)
         {
           // we'd like to compute the global max and min from the local ones in
           // one MPI communication. we can do that by taking the elementwise
@@ -296,7 +296,7 @@ namespace internal
           compute_threshold(const dealii::Vector<number> &   criteria,
                             const std::pair<double, double> &global_min_and_max,
                             const types::global_cell_index   n_target_cells,
-                            MPI_Comm                         mpi_communicator)
+                            const MPI_Comm &                 mpi_communicator)
           {
             double interesting_range[2] = {global_min_and_max.first,
                                            global_min_and_max.second};
@@ -379,7 +379,7 @@ namespace internal
           compute_threshold(const dealii::Vector<number> &   criteria,
                             const std::pair<double, double> &global_min_and_max,
                             const double                     target_error,
-                            MPI_Comm                         mpi_communicator)
+                            const MPI_Comm &                 mpi_communicator)
           {
             double interesting_range[2] = {global_min_and_max.first,
                                            global_min_and_max.second};
index c653059f77518cbc0b597db7add9b1322e83a4b9..1f5372af68552aeefd6036852d9437f52a376049 100644 (file)
@@ -27,7 +27,7 @@ for (S : REAL_SCALARS)
           \{
             template std::pair<S, S>
             compute_global_min_and_max_at_root<S>(const dealii::Vector<S> &,
-                                                  MPI_Comm);
+                                                  const MPI_Comm &);
 
             namespace RefineAndCoarsenFixedNumber
             \{
@@ -35,7 +35,7 @@ for (S : REAL_SCALARS)
               compute_threshold<S>(const dealii::Vector<S> &,
                                    const std::pair<double, double> &,
                                    const types::global_cell_index,
-                                   MPI_Comm);
+                                   const MPI_Comm &);
             \}
             namespace RefineAndCoarsenFixedFraction
             \{
@@ -43,7 +43,7 @@ for (S : REAL_SCALARS)
               compute_threshold<S>(const dealii::Vector<S> &,
                                    const std::pair<double, double> &,
                                    const double,
-                                   MPI_Comm);
+                                   const MPI_Comm &);
             \}
           \}
         \}
index 636f09f175b31bfcf17d3f2355317132d6351035..69a90bfe3e59ca219ab610738e57003675afddad 100644 (file)
@@ -37,7 +37,7 @@ namespace parallel
   {
     template <int dim, int spacedim>
     Triangulation<dim, spacedim>::Triangulation(
-      MPI_Comm mpi_communicator,
+      const MPI_Comm &mpi_communicator,
       const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                      smooth_grid,
       const bool     allow_artificial_cells,
index 40c3148bce28ee399810648d00a2a23dba67eed3..383bab745a1be95798b8940faf760b484e584323 100644 (file)
@@ -1123,7 +1123,7 @@ namespace parallel
 
     template <int dim, int spacedim>
     Triangulation<dim, spacedim>::DataTransfer::DataTransfer(
-      MPI_Comm mpi_communicator)
+      const MPI_Comm &mpi_communicator)
       : mpi_communicator(mpi_communicator)
       , variable_size_data_stored(false)
     {}
@@ -2110,7 +2110,7 @@ namespace parallel
 
     template <int dim, int spacedim>
     Triangulation<dim, spacedim>::Triangulation(
-      MPI_Comm mpi_communicator,
+      const MPI_Comm &mpi_communicator,
       const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                      smooth_grid,
       const Settings settings)
@@ -4967,7 +4967,7 @@ namespace parallel
 
     template <int spacedim>
     Triangulation<1, spacedim>::Triangulation(
-      MPI_Comm mpi_communicator,
+      const MPI_Comm &mpi_communicator,
       const typename dealii::Triangulation<1, spacedim>::MeshSmoothing
         smooth_grid,
       const Settings /*settings*/)
index c6fde425b5c35e9e81f3a629663b28147aecd3a0..e22a435e63c68dafb00b335bd7610d09e6d4d77b 100644 (file)
@@ -41,7 +41,7 @@ namespace parallel
 {
   template <int dim, int spacedim>
   TriangulationBase<dim, spacedim>::TriangulationBase(
-    MPI_Comm mpi_communicator,
+    const MPI_Comm &mpi_communicator,
     const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                smooth_grid,
     const bool check_for_distorted_cells)
@@ -344,7 +344,7 @@ namespace parallel
 
   template <int dim, int spacedim>
   DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
-    MPI_Comm mpi_communicator,
+    const MPI_Comm &mpi_communicator,
     const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                smooth_grid,
     const bool check_for_distorted_cells)
index a677741df6f610fca3ba4268f4f84136ad65077b..a99613c6b722e65276740c1674b677b582463354 100644 (file)
@@ -85,7 +85,7 @@ namespace internal
 
     std::vector<types::global_dof_index>
     NumberCache::get_n_locally_owned_dofs_per_processor(
-      const MPI_Comm mpi_communicator) const
+      const MPI_Comm &mpi_communicator) const
     {
       if (n_global_dofs == 0)
         return std::vector<types::global_dof_index>();
@@ -108,7 +108,7 @@ namespace internal
 
     std::vector<IndexSet>
     NumberCache::get_locally_owned_dofs_per_processor(
-      const MPI_Comm mpi_communicator) const
+      const MPI_Comm &mpi_communicator) const
     {
       AssertDimension(locally_owned_dofs.size(), n_global_dofs);
       if (n_global_dofs == 0)
index a1162a3fd7efcf98602d050f6caa36bb5eaace06..f97d2eba4b80077d9c9aa1e2dd50698cf314a9a0 100644 (file)
@@ -5382,7 +5382,7 @@ namespace GridTools
   std::vector<std::vector<BoundingBox<spacedim>>>
   exchange_local_bounding_boxes(
     const std::vector<BoundingBox<spacedim>> &local_bboxes,
-    MPI_Comm                                  mpi_communicator)
+    const MPI_Comm &                          mpi_communicator)
   {
 #ifndef DEAL_II_WITH_MPI
     (void)local_bboxes;
@@ -5478,7 +5478,7 @@ namespace GridTools
   RTree<std::pair<BoundingBox<spacedim>, unsigned int>>
   build_global_description_tree(
     const std::vector<BoundingBox<spacedim>> &local_description,
-    MPI_Comm                                  mpi_communicator)
+    const MPI_Comm &                          mpi_communicator)
   {
 #ifndef DEAL_II_WITH_MPI
     (void)mpi_communicator;
index d2e0e811cf15c408685ba93cb3dd658d0eaf2d97..701a0444f6c5e1aa672a1b995963afed87e7aecc 100644 (file)
@@ -157,7 +157,8 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS)
 
     template std::vector<std::vector<BoundingBox<deal_II_space_dimension>>>
     GridTools::exchange_local_bounding_boxes(
-      const std::vector<BoundingBox<deal_II_space_dimension>> &, MPI_Comm);
+      const std::vector<BoundingBox<deal_II_space_dimension>> &,
+      const MPI_Comm &);
 
     template std::tuple<std::vector<std::vector<unsigned int>>,
                         std::map<unsigned int, unsigned int>,
@@ -177,7 +178,8 @@ for (deal_II_space_dimension : SPACE_DIMENSIONS)
     template RTree<
       std::pair<BoundingBox<deal_II_space_dimension>, unsigned int>>
     GridTools::build_global_description_tree(
-      const std::vector<BoundingBox<deal_II_space_dimension>> &, MPI_Comm);
+      const std::vector<BoundingBox<deal_II_space_dimension>> &,
+      const MPI_Comm &);
 
     template Vector<double> GridTools::compute_aspect_ratio_of_cells(
       const Mapping<deal_II_space_dimension> &,
index 7dd871a6c515331cb8c7b8e688cd93f4e7e4a613..e9aa760f8b5c31dc4bfae6d4b909f5649d455c26 100644 (file)
@@ -100,7 +100,7 @@ namespace TriangulationDescription
     Description<dim, spacedim>
     create_description_from_triangulation(
       const dealii::Triangulation<dim, spacedim> &tria,
-      const MPI_Comm                              comm,
+      const MPI_Comm &                            comm,
       const TriangulationDescription::Settings    settings,
       const unsigned int                          my_rank_in)
     {
@@ -405,7 +405,7 @@ namespace TriangulationDescription
       const std::function<void(dealii::Triangulation<dim, spacedim> &,
                                const MPI_Comm,
                                const unsigned int)> &serial_grid_partitioner,
-      const MPI_Comm                                 comm,
+      const MPI_Comm &                               comm,
       const int                                      group_size,
       const typename Triangulation<dim, spacedim>::MeshSmoothing smoothing,
       const TriangulationDescription::Settings                   settings)
index 1bc17dcf55ca93884d4fa6d64fe026e4acb7b0ff..29af0fa2643c30cae2a59207dc5b61ba90b220d7 100644 (file)
@@ -26,7 +26,7 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : DIMENSIONS)
         create_description_from_triangulation(
           const dealii::Triangulation<deal_II_dimension,
                                       deal_II_space_dimension> &tria,
-          const MPI_Comm                                        comm,
+          const MPI_Comm &                                      comm,
           const TriangulationDescription::Settings              settings,
           const unsigned int                                    my_rank_in);
 
@@ -39,7 +39,7 @@ for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : DIMENSIONS)
             dealii::Triangulation<deal_II_dimension, deal_II_space_dimension> &,
             const MPI_Comm,
             const unsigned int)> &serial_grid_partitioner,
-          const MPI_Comm          comm,
+          const MPI_Comm &        comm,
           const int               group_size,
           const typename Triangulation<deal_II_dimension,
                                        deal_II_space_dimension>::MeshSmoothing
index 68c207353e62f3f93a082b6d5813aa2e4c6c93ee..de93af0f36d87fc101305c61f27b50fa2ab2d72e 100644 (file)
@@ -123,7 +123,7 @@ namespace PETScWrappers
 
 
   /* ----------------- PreconditionJacobi -------------------- */
-  PreconditionJacobi::PreconditionJacobi(const MPI_Comm        comm,
+  PreconditionJacobi::PreconditionJacobi(const MPI_Comm &      comm,
                                          const AdditionalData &additional_data_)
   {
     additional_data = additional_data_;
@@ -173,7 +173,7 @@ namespace PETScWrappers
 
   /* ----------------- PreconditionBlockJacobi -------------------- */
   PreconditionBlockJacobi::PreconditionBlockJacobi(
-    const MPI_Comm        comm,
+    const MPI_Comm &      comm,
     const AdditionalData &additional_data_)
   {
     additional_data = additional_data_;
@@ -450,7 +450,7 @@ namespace PETScWrappers
 
 
   PreconditionBoomerAMG::PreconditionBoomerAMG(
-    const MPI_Comm        comm,
+    const MPI_Comm &      comm,
     const AdditionalData &additional_data_)
   {
     additional_data = additional_data_;
index 8dd388dc8ebe4a5344d9b7668552a0f91f206768..51ffc58d02efb0dba7b70f9613de7ee2693d97cc 100644 (file)
@@ -165,7 +165,7 @@ namespace
       const MGConstrainedDoFs,
       MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
                                                 mg_constrained_dofs,
-    const MPI_Comm                              mpi_communicator,
+    const MPI_Comm &                            mpi_communicator,
     const bool                                  transfer_solution_vectors,
     std::vector<Table<2, unsigned int>> &       copy_indices,
     std::vector<Table<2, unsigned int>> &       copy_indices_global_mine,
index 64be7b34b266823ea161fdd36c8ef422f3e343e9..b0dd8c672d5a24856ccfc897dc2641136648df4e 100644 (file)
@@ -238,7 +238,7 @@ namespace SUNDIALS
 
   template <typename VectorType>
   ARKode<VectorType>::ARKode(const AdditionalData &data,
-                             const MPI_Comm        mpi_comm)
+                             const MPI_Comm &      mpi_comm)
     : data(data)
     , arkode_mem(nullptr)
     , yy(nullptr)
index ef5397a9df7f110cf9a0c1b81f046fd9a0ff918e..2e8454aebd0c82b440d19cd17c7eafc2f350410d 100644 (file)
@@ -153,7 +153,7 @@ namespace SUNDIALS
   } // namespace
 
   template <typename VectorType>
-  IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm mpi_comm)
+  IDA<VectorType>::IDA(const AdditionalData &data, const MPI_Comm &mpi_comm)
     : data(data)
     , ida_mem(nullptr)
     , yy(nullptr)
index c463bdea41e169a6772d6b9262d527f0489139e1..ed2f26844b4ff064a880246dc4dd97cf8e7a6558 100644 (file)
@@ -151,7 +151,7 @@ namespace SUNDIALS
 
   template <typename VectorType>
   KINSOL<VectorType>::KINSOL(const AdditionalData &data,
-                             const MPI_Comm        mpi_comm)
+                             const MPI_Comm &      mpi_comm)
     : data(data)
     , kinsol_mem(nullptr)
     , solution(nullptr)
