https://gitweb.dealii.org/ - dealii.git/commitdiff
Deprecate get_communicator() in favor of get_mpi_communicator(). 17551/head
author David Wells <drwells@email.unc.edu>
Fri, 16 Aug 2024 16:09:24 +0000 (10:09 -0600)
committer David Wells <drwells@email.unc.edu>
Fri, 16 Aug 2024 17:10:02 +0000 (11:10 -0600)
We use the latter name more often than the former.
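
For downstream code this is a pure rename: the old spelling keeps compiling and forwards to the new one, but now emits a deprecation warning. A minimal migration sketch (the report_rank() helper is hypothetical, not part of this commit):

    #include <deal.II/base/mpi.h>
    #include <deal.II/dofs/dof_handler.h>

    #include <iostream>

    template <int dim>
    void report_rank(const dealii::DoFHandler<dim> &dof_handler)
    {
      // Deprecated spelling, still compiles but warns:
      //   const MPI_Comm comm = dof_handler.get_communicator();
      // Preferred spelling after this commit:
      const MPI_Comm comm = dof_handler.get_mpi_communicator();

      std::cout << "rank "
                << dealii::Utilities::MPI::this_mpi_process(comm)
                << std::endl;
    }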

121 files changed:
examples/step-37/doc/results.dox
examples/step-75/step-75.cc
examples/step-87/step-87.cc
examples/step-89/step-89.cc
include/deal.II/base/mpi_remote_point_evaluation.h
include/deal.II/distributed/tria_base.h
include/deal.II/dofs/dof_handler.h
include/deal.II/fe/fe_tools_extrapolate.templates.h
include/deal.II/grid/grid_tools.h
include/deal.II/grid/tria.h
include/deal.II/matrix_free/face_setup_internal.h
include/deal.II/matrix_free/matrix_free.templates.h
include/deal.II/matrix_free/portable_matrix_free.templates.h
include/deal.II/multigrid/mg_transfer.h
include/deal.II/multigrid/mg_transfer.templates.h
include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
include/deal.II/multigrid/multigrid.h
include/deal.II/numerics/data_out_dof_data.templates.h
include/deal.II/numerics/vector_tools_integrate_difference.templates.h
include/deal.II/numerics/vector_tools_interpolate.templates.h
include/deal.II/numerics/vector_tools_mean_value.templates.h
include/deal.II/sundials/n_vector.templates.h
source/base/mpi_remote_point_evaluation.cc
source/distributed/grid_refinement.cc
source/distributed/repartitioning_policy_tools.cc
source/distributed/shared_tria.cc
source/distributed/tria.cc
source/distributed/tria_base.cc
source/dofs/dof_handler.cc
source/dofs/dof_handler_policy.cc
source/dofs/dof_renumbering.cc
source/dofs/dof_tools.cc
source/dofs/dof_tools_constraints.cc
source/fe/mapping_q_cache.cc
source/grid/grid_out.cc
source/grid/grid_tools.cc
source/grid/grid_tools_cache.cc
source/grid/grid_tools_geometry.cc
source/grid/tria.cc
source/grid/tria_description.cc
source/hp/refinement.cc
source/multigrid/mg_level_global_transfer.cc
source/multigrid/mg_tools.cc
source/multigrid/mg_transfer_internal.cc
source/multigrid/mg_transfer_prebuilt.cc
source/particles/generators.cc
source/particles/particle_handler.cc
tests/belos/solver_belos_01.cc
tests/dofs/nodal_renumbering_01.cc
tests/fullydistributed_grids/repartitioning_03.cc
tests/fullydistributed_grids/repartitioning_05.cc
tests/grid/grid_generator_marching_cube_algorithm_01.cc
tests/grid/grid_generator_marching_cube_algorithm_02.cc
tests/lac/constraints_make_consistent_in_parallel_01.cc
tests/lac/sparse_matrix_tools_01.cc
tests/lac/step-40-linear_operator_01.cc
tests/lac/step-40-linear_operator_02.cc
tests/lac/step-40-linear_operator_03.cc
tests/lac/step-40-linear_operator_04.cc
tests/lac/step-40-linear_operator_05.cc
tests/lac/step-40-linear_operator_06.cc
tests/mappings/mapping_fe_field_06.cc
tests/mappings/mapping_q_cache_06.cc
tests/mpi/cell_weights_01.cc
tests/mpi/cell_weights_01_back_and_forth_01.cc
tests/mpi/cell_weights_01_back_and_forth_02.cc
tests/mpi/cell_weights_02.cc
tests/mpi/cell_weights_03.cc
tests/mpi/cell_weights_04.cc
tests/mpi/cell_weights_05.cc
tests/mpi/cell_weights_06.cc
tests/mpi/compute_mean_value_02.cc
tests/mpi/hp_constraints_consistent_01.cc
tests/mpi/hp_constraints_consistent_02.cc
tests/mpi/hp_constraints_consistent_03.cc
tests/mpi/hp_step-40.cc
tests/mpi/hp_step-40_variable_01.cc
tests/mpi/limit_p_level_difference_01.cc
tests/mpi/limit_p_level_difference_02.cc
tests/mpi/periodicity_04.cc
tests/mpi/petsc_step-27.cc
tests/mpi/solution_transfer_11.cc
tests/mpi/step-40.cc
tests/mpi/step-40_cuthill_mckee.cc
tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
tests/mpi/step-40_direct_solver.cc
tests/mpi/trilinos_step-27.cc
tests/multigrid-global-coarsening/fe_nothing_01.cc
tests/multigrid-global-coarsening/interpolate_01.cc
tests/multigrid-global-coarsening/mg_transfer_util.h
tests/numerics/matrix_creator_01.cc
tests/numerics/project_parallel_common.h
tests/particles/data_out_05.cc
tests/particles/exchange_ghosts_01.cc
tests/particles/exchange_ghosts_periodic_01.cc
tests/particles/particle_handler_04.cc
tests/particles/particle_handler_06.cc
tests/particles/particle_handler_19.cc
tests/particles/particle_handler_20.cc
tests/particles/particle_handler_fully_distributed_01.cc
tests/particles/particle_handler_shared_01.cc
tests/particles/particle_handler_sort_02.cc
tests/particles/particle_weights_01.cc
tests/remote_point_evaluation/data_out_resample_01.cc
tests/remote_point_evaluation/data_out_resample_02.cc
tests/remote_point_evaluation/mapping_02.cc
tests/remote_point_evaluation/remote_point_evaluation_01.cc
tests/remote_point_evaluation/remote_point_evaluation_02.cc
tests/remote_point_evaluation/remote_point_evaluation_03.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_01.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_02.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_03.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_04.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_05.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_06.cc
tests/remote_point_evaluation/vector_tools_evaluate_at_points_07.cc
tests/sharedtria/limit_p_level_difference_01.cc
tests/sharedtria/limit_p_level_difference_02.cc
tests/simplex/poisson_01.cc
tests/sundials/n_vector.cc
tests/trilinos/mg_transfer_prebuilt_01.cc

examples/step-37/doc/results.dox
index 3a5194382aa9f8ce15d2cdc969c927f99bfa46fd..be78580208017bab37f1ca546e820ce33c31a8ca 100644 (file)
@@ -431,7 +431,7 @@ const IndexSet locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(d
 LinearAlgebra::distributed::Vector<double> copy_vec(solution);
 solution.reinit(dof_handler.locally_owned_dofs(),
                 locally_relevant_dofs,
-                triangulation.get_communicator());
+                triangulation.get_mpi_communicator());
 solution.copy_locally_owned_data_from(copy_vec);
 constraints.distribute(solution);
 solution.update_ghost_values();
examples/step-75/step-75.cc
index 69086cf6d7cff78975f527fc9b440bad5d4833f5..387219f135dbb57be9ba4ddd1d45a84a00e95b26 100644 (file)
@@ -457,7 +457,7 @@ namespace Step75
 
         TrilinosWrappers::SparsityPattern dsp(
           dof_handler.locally_owned_dofs(),
-          dof_handler.get_triangulation().get_communicator());
+          dof_handler.get_triangulation().get_mpi_communicator());
 
         DoFTools::make_sparsity_pattern(dof_handler, dsp, this->constraints);
 
examples/step-87/step-87.cc
index 5c8718632c7f83c1be9cc62ae3425c27201a0905..8cf8a59401f382c83a9cc07cce41d7baef4c203c 100644 (file)
@@ -1106,7 +1106,7 @@ namespace Step87
   {
     support_points.reinit(dof_handler.locally_owned_dofs(),
                           DoFTools::extract_locally_active_dofs(dof_handler),
-                          dof_handler.get_communicator());
+                          dof_handler.get_mpi_communicator());
 
     const auto &fe = dof_handler.get_fe();
 
examples/step-89/step-89.cc
index 0301bcbb4ee2ca90f7b70099432b0aa64f9b8b79..499956a43afbd5f49af09b1bd746bd7c82f15462 100644 (file)
@@ -208,7 +208,7 @@ namespace Step89
 
       data_out.build_patches(mapping, degree, DataOut<dim>::curved_inner_cells);
       data_out.write_vtu_in_parallel(name_prefix + ".vtu",
-                                     dof_handler.get_communicator());
+                                     dof_handler.get_mpi_communicator());
     }
   } // namespace HelperFunctions
 
@@ -1180,7 +1180,7 @@ namespace Step89
                    (cell->vertex(1) - cell->vertex(0)).norm_square());
       h_local_min = std::sqrt(h_local_min);
       const double h_min =
-        Utilities::MPI::min(h_local_min, dof_handler.get_communicator());
+        Utilities::MPI::min(h_local_min, dof_handler.get_mpi_communicator());
 
       const double dt =
         cr * HelperFunctions::compute_dt_cfl(h_min, degree, speed_of_sound);
include/deal.II/base/mpi_remote_point_evaluation.h
index 0c05932d8e4d17810b4c9455bca9ddb056e7fd37..d1f844d3fc3db479961470c5bdf7e2e7f77d6c00 100644 (file)
@@ -721,10 +721,10 @@ namespace Utilities
       (void)sort_data;
 #else
       static CollectiveMutex      mutex;
-      CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
+      CollectiveMutex::ScopedLock lock(mutex, tria->get_mpi_communicator());
 
       const unsigned int my_rank =
-        Utilities::MPI::this_mpi_process(tria->get_communicator());
+        Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
 
       // allocate memory for output and buffer
       output.resize(point_ptrs.back() * n_components);
@@ -809,7 +809,7 @@ namespace Utilities
               (send_ptrs[i + 1] - send_ptrs[i]) * n_components),
             send_ranks[i],
             internal::Tags::remote_point_evaluation,
-            tria->get_communicator(),
+            tria->get_mpi_communicator(),
             send_buffers_packed,
             send_requests);
         }
@@ -848,7 +848,7 @@ namespace Utilities
           MPI_Status status;
           int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                internal::Tags::remote_point_evaluation,
-                               tria->get_communicator(),
+                               tria->get_mpi_communicator(),
                                &status);
           AssertThrowMPI(ierr);
 
@@ -867,7 +867,7 @@ namespace Utilities
             (recv_ptrs[j + 1] - recv_ptrs[j]) * n_components);
 
           internal::recv_and_unpack(recv_buffer,
-                                    tria->get_communicator(),
+                                    tria->get_mpi_communicator(),
                                     status,
                                     recv_buffer_packed);
 
@@ -933,10 +933,10 @@ namespace Utilities
       (void)sort_data;
 #else
       static CollectiveMutex      mutex;
-      CollectiveMutex::ScopedLock lock(mutex, tria->get_communicator());
+      CollectiveMutex::ScopedLock lock(mutex, tria->get_mpi_communicator());
 
       const unsigned int my_rank =
-        Utilities::MPI::this_mpi_process(tria->get_communicator());
+        Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
 
       // allocate memory for buffer
       const auto &point_ptrs = this->get_point_ptrs();
@@ -1031,7 +1031,7 @@ namespace Utilities
               (recv_ptrs[i + 1] - recv_ptrs[i]) * n_components),
             recv_ranks[i],
             internal::Tags::remote_point_evaluation,
-            tria->get_communicator(),
+            tria->get_mpi_communicator(),
             send_buffers_packed,
             send_requests);
         }
@@ -1069,7 +1069,7 @@ namespace Utilities
           MPI_Status status;
           int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                internal::Tags::remote_point_evaluation,
-                               tria->get_communicator(),
+                               tria->get_mpi_communicator(),
                                &status);
           AssertThrowMPI(ierr);
 
@@ -1089,7 +1089,7 @@ namespace Utilities
             (send_ptrs[j + 1] - send_ptrs[j]) * n_components);
 
           internal::recv_and_unpack(recv_buffer,
-                                    tria->get_communicator(),
+                                    tria->get_mpi_communicator(),
                                     status,
                                     recv_buffer_packed);
 
include/deal.II/distributed/tria_base.h
index 2ebec1f0777eaa4492075c91c4709f2aec094c39..95656183817094b25492187675b46cfa70c6e813 100644 (file)
@@ -97,7 +97,7 @@ namespace parallel
      * Return MPI communicator used by this triangulation.
      */
     virtual MPI_Comm
-    get_communicator() const override;
+    get_mpi_communicator() const override;
 
     /**
      * Return if multilevel hierarchy is supported and has been constructed.
include/deal.II/dofs/dof_handler.h
index 5dabc8ae05feb61f86813307ecf006a2616b6489..c8f7299239a18cf1166384196651922efc7521b1 100644 (file)
@@ -1218,6 +1218,16 @@ public:
    * Return MPI communicator used by the underlying triangulation.
    */
   MPI_Comm
+  get_mpi_communicator() const;
+
+  /**
+   * Return MPI communicator used by the underlying triangulation.
+   *
+   * @deprecated Use get_mpi_communicator() instead.
+   */
+  DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+    "Access the MPI communicator with get_mpi_communicator() instead.")
+  MPI_Comm
   get_communicator() const;
 
   /**
@@ -1909,12 +1919,21 @@ inline const Triangulation<dim, spacedim>
 
 template <int dim, int spacedim>
 DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
-inline MPI_Comm DoFHandler<dim, spacedim>::get_communicator() const
+inline MPI_Comm DoFHandler<dim, spacedim>::get_mpi_communicator() const
 {
   Assert(tria != nullptr,
          ExcMessage("This DoFHandler object has not been associated "
                     "with a triangulation."));
-  return tria->get_communicator();
+  return tria->get_mpi_communicator();
+}
+
+
+
+template <int dim, int spacedim>
+DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
+inline MPI_Comm DoFHandler<dim, spacedim>::get_communicator() const
+{
+  return get_mpi_communicator();
 }
 
 
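The hunk above shows the idiom applied throughout this commit: the new name receives the implementation, and the old name becomes an inline forwarder marked with DEAL_II_DEPRECATED_EARLY_WITH_COMMENT. A condensed sketch of the pattern (the Mesh class is hypothetical):

    #include <deal.II/base/config.h> // provides the deprecation macros

    #include <mpi.h>

    class Mesh
    {
    public:
      // New, preferred name: carries the actual implementation.
      MPI_Comm get_mpi_communicator() const
      {
        return comm;
      }

      // Old name: kept for backward compatibility, forwards to the new
      // name, and warns at compile time when early deprecations are on.
      DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
        "Access the MPI communicator with get_mpi_communicator() instead.")
      MPI_Comm get_communicator() const
      {
        return get_mpi_communicator();
      }

    private:
      MPI_Comm comm = MPI_COMM_SELF;
    };
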
include/deal.II/fe/fe_tools_extrapolate.templates.h
index 81fa89fb47f30d43b4f1e398dd8af28b84dbab73..6ebcb4dd46a9c0724d28e0e6d81832418441d560 100644 (file)
@@ -1335,7 +1335,7 @@ namespace FETools
         ExcMessage(
           "Extrapolate in parallel only works for parallel distributed triangulations!"));
 
-      communicator = tr->get_communicator();
+      communicator = tr->get_mpi_communicator();
 
       compute_all_non_local_data(dof2, u2_relevant);
 
@@ -1492,7 +1492,7 @@ namespace FETools
       Assert(parallel_tria != nullptr, ExcNotImplemented());
 
       const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
-      vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+      vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
     }
 #endif // DEAL_II_WITH_PETSC
 
@@ -1509,7 +1509,7 @@ namespace FETools
       Assert(parallel_tria != nullptr, ExcNotImplemented());
 
       const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
-      vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+      vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
     }
 
 
@@ -1528,7 +1528,7 @@ namespace FETools
       Assert(parallel_tria != nullptr, ExcNotImplemented());
 
       const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
-      vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+      vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
     }
 #    endif
 
@@ -1544,7 +1544,7 @@ namespace FETools
       Assert(parallel_tria != nullptr, ExcNotImplemented());
 
       const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
-      vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+      vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
     }
 #  endif
 #endif // DEAL_II_WITH_TRILINOS
@@ -1561,7 +1561,7 @@ namespace FETools
       Assert(parallel_tria != nullptr, ExcNotImplemented());
 
       const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
-      vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+      vector.reinit(locally_owned_dofs, parallel_tria->get_mpi_communicator());
     }
 
 
@@ -1589,7 +1589,7 @@ namespace FETools
         DoFTools::extract_locally_relevant_dofs(dh);
       vector.reinit(locally_owned_dofs,
                     locally_relevant_dofs,
-                    parallel_tria->get_communicator());
+                    parallel_tria->get_mpi_communicator());
     }
 #endif // DEAL_II_WITH_PETSC
 
@@ -1609,7 +1609,7 @@ namespace FETools
         DoFTools::extract_locally_relevant_dofs(dh);
       vector.reinit(locally_owned_dofs,
                     locally_relevant_dofs,
-                    parallel_tria->get_communicator());
+                    parallel_tria->get_mpi_communicator());
     }
 #endif // DEAL_II_WITH_TRILINOS
 
@@ -1628,7 +1628,7 @@ namespace FETools
         DoFTools::extract_locally_relevant_dofs(dh);
       vector.reinit(locally_owned_dofs,
                     locally_relevant_dofs,
-                    parallel_tria->get_communicator());
+                    parallel_tria->get_mpi_communicator());
     }
 
 
include/deal.II/grid/grid_tools.h
index 61b22d0b1ea641bb7633da165a4f81069fd76492..7c191b68a600af23704f00e22df5a5822048bcf4 100644 (file)
@@ -3366,7 +3366,7 @@ namespace GridTools
       // a mutex:
       static Utilities::MPI::CollectiveMutex      mutex;
       Utilities::MPI::CollectiveMutex::ScopedLock lock(
-        mutex, tria->get_communicator());
+        mutex, tria->get_mpi_communicator());
 
       const int mpi_tag =
         Utilities::MPI::internal::Tags::exchange_cell_data_request;
@@ -3385,7 +3385,7 @@ namespace GridTools
                                        MPI_BYTE,
                                        it.first,
                                        mpi_tag,
-                                       tria->get_communicator(),
+                                       tria->get_mpi_communicator(),
                                        &requests[idx]);
             AssertThrowMPI(ierr);
             ++idx;
@@ -3401,7 +3401,7 @@ namespace GridTools
           MPI_Status status;
           int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                mpi_tag,
-                               tria->get_communicator(),
+                               tria->get_mpi_communicator(),
                                &status);
           AssertThrowMPI(ierr);
 
@@ -3424,7 +3424,7 @@ namespace GridTools
                           MPI_BYTE,
                           status.MPI_SOURCE,
                           status.MPI_TAG,
-                          tria->get_communicator(),
+                          tria->get_mpi_communicator(),
                           &status);
           AssertThrowMPI(ierr);
 
@@ -3477,7 +3477,7 @@ namespace GridTools
                            MPI_BYTE,
                            status.MPI_SOURCE,
                            mpi_tag_reply,
-                           tria->get_communicator(),
+                           tria->get_mpi_communicator(),
                            &reply_requests[idx]);
           AssertThrowMPI(ierr);
         }
@@ -3489,7 +3489,7 @@ namespace GridTools
           MPI_Status status;
           int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                mpi_tag_reply,
-                               tria->get_communicator(),
+                               tria->get_mpi_communicator(),
                                &status);
           AssertThrowMPI(ierr);
 
@@ -3504,7 +3504,7 @@ namespace GridTools
                           MPI_BYTE,
                           status.MPI_SOURCE,
                           status.MPI_TAG,
-                          tria->get_communicator(),
+                          tria->get_mpi_communicator(),
                           &status);
           AssertThrowMPI(ierr);
 
include/deal.II/grid/tria.h
index bbd55ec65126511e068335b22b5ca48fe7ce0190..19d5f0663b9f4ebf183f681719a8d2f39b908ede 100644 (file)
@@ -1810,10 +1810,21 @@ public:
   clear();
 
   /**
-   * Return MPI communicator used by this triangulation. In the case of
-   * serial Triangulation object, MPI_COMM_SELF is returned.
+   * Return the MPI communicator used by this triangulation. In the case of a
+   * serial Triangulation object, MPI_COMM_SELF is returned.
    */
   virtual MPI_Comm
+  get_mpi_communicator() const;
+
+  /**
+   * Return the MPI communicator used by this triangulation. In the case of
+   * a serial Triangulation object, MPI_COMM_SELF is returned.
+   *
+   * @deprecated Use get_mpi_communicator() instead.
+   */
+  DEAL_II_DEPRECATED_EARLY_WITH_COMMENT(
+    "Access the MPI communicator with get_mpi_communicator() instead.")
+  MPI_Comm
   get_communicator() const;
 
   /**
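
As the documentation in this hunk states, a plain (serial) Triangulation reports MPI_COMM_SELF, so callers can use get_mpi_communicator() unconditionally. A minimal sketch of that guarantee (assuming a standard deal.II build):

    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>

    int main()
    {
      dealii::Triangulation<2> tria; // serial triangulation
      dealii::GridGenerator::hyper_cube(tria);

      // A serial Triangulation reports MPI_COMM_SELF, so reductions over
      // this communicator are no-ops on a single process.
      const MPI_Comm comm = tria.get_mpi_communicator();
      return (comm == MPI_COMM_SELF) ? 0 : 1;
    }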
include/deal.II/matrix_free/face_setup_internal.h
index 23b573aa696112d4f0401e57bff06475e9b875f8..6a1ef52ffb31a7dea2c92a186acc19e1242d75d8 100644 (file)
@@ -296,7 +296,7 @@ namespace internal
               if (const dealii::parallel::TriangulationBase<dim> *ptria =
                     dynamic_cast<const dealii::parallel::TriangulationBase<dim>
                                    *>(&triangulation))
-                comm = ptria->get_communicator();
+                comm = ptria->get_mpi_communicator();
 
               MPI_Status   status;
               unsigned int mysize    = inner_face.second.shared_faces.size();
include/deal.II/matrix_free/matrix_free.templates.h
index 3e518b24aa3f7d51ce6964f09e123a08a1749a4a..f7c6d6ff61b3548c95d941111cbac20ea916e911 100644 (file)
@@ -442,7 +442,7 @@ MatrixFree<dim, Number, VectorizedArrayType>::internal_reinit(
       task_info.allow_ghosted_vectors_in_loops =
         additional_data.allow_ghosted_vectors_in_loops;
 
-      task_info.communicator    = dof_handler[0]->get_communicator();
+      task_info.communicator    = dof_handler[0]->get_mpi_communicator();
       task_info.communicator_sm = additional_data.communicator_sm;
       task_info.my_pid =
         Utilities::MPI::this_mpi_process(task_info.communicator);
include/deal.II/matrix_free/portable_matrix_free.templates.h
index ebbaeef14d337b6141d53c7c9bc86c7a5ce731f2..3e33e35875c78fd328e99447c3a4751f4df6491e 100644 (file)
@@ -449,7 +449,7 @@ namespace Portable
                       quad,
                       iterator_filter,
                       std::make_shared<const MPI_Comm>(
-                        parallel_triangulation->get_communicator()),
+                        parallel_triangulation->get_mpi_communicator()),
                       additional_data);
     else
       internal_reinit(mapping,
include/deal.II/multigrid/mg_transfer.h
index 68c30ec46a5503d217c4f7f5ec465fddd83d5254..d020c8cc613a9c14ab9ea053f2670265d814cde0 100644 (file)
@@ -83,7 +83,7 @@ namespace internal
            const SparsityPatternType       &sp,
            const DoFHandler<dim, spacedim> &dh)
     {
-      const MPI_Comm communicator = dh.get_communicator();
+      const MPI_Comm communicator = dh.get_mpi_communicator();
 
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
                     dh.locally_owned_mg_dofs(level),
@@ -109,7 +109,7 @@ namespace internal
            const SparsityPatternType       &sp,
            const DoFHandler<dim, spacedim> &dh)
     {
-      const MPI_Comm communicator = dh.get_communicator();
+      const MPI_Comm communicator = dh.get_mpi_communicator();
 
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
                     dh.locally_owned_mg_dofs(level),
@@ -137,7 +137,7 @@ namespace internal
            const SparsityPatternType       &sp,
            const DoFHandler<dim, spacedim> &dh)
     {
-      const MPI_Comm communicator = dh.get_communicator();
+      const MPI_Comm communicator = dh.get_mpi_communicator();
 
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
                     dh.locally_owned_mg_dofs(level),
@@ -164,7 +164,7 @@ namespace internal
            const SparsityPatternType       &sp,
            const DoFHandler<dim, spacedim> &dh)
     {
-      const MPI_Comm communicator = dh.get_communicator();
+      const MPI_Comm communicator = dh.get_mpi_communicator();
 
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
                     dh.locally_owned_mg_dofs(level),
@@ -220,7 +220,7 @@ namespace internal
            const SparsityPatternType       &sp,
            const DoFHandler<dim, spacedim> &dh)
     {
-      const MPI_Comm communicator = dh.get_communicator();
+      const MPI_Comm communicator = dh.get_mpi_communicator();
 
       // Reinit PETSc matrix
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
include/deal.II/multigrid/mg_transfer.templates.h
index a8ca892d1beda4a9e7be748b0ffdc6d0c6d9f6cb..4b89ad6ee923583354c0192173eddd2dc3b518d4 100644 (file)
@@ -129,7 +129,7 @@ namespace internal
       for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
         {
           v[level].reinit(dof_handler.locally_owned_mg_dofs(level),
-                          tria->get_communicator());
+                          tria->get_mpi_communicator());
         }
     }
 #endif
@@ -157,7 +157,7 @@ namespace internal
       for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
         {
           v[level].reinit(dof_handler.locally_owned_mg_dofs(level),
-                          tria->get_communicator());
+                          tria->get_mpi_communicator());
         }
     }
 #endif
@@ -216,7 +216,7 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg(
   internal::MGTransfer::reinit_vector(dof_handler, component_to_block_map, dst);
 #ifdef DEBUG_OUTPUT
   std::cout << "copy_to_mg src " << src.l2_norm() << std::endl;
-  int ierr = MPI_Barrier(dof_handler.get_communicator());
+  int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
   AssertThrowMPI(ierr);
 #endif
 
@@ -236,7 +236,7 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg(
     {
       --level;
 #ifdef DEBUG_OUTPUT
-      ierr = MPI_Barrier(dof_handler.get_communicator());
+      ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
       AssertThrowMPI(ierr);
 #endif
 
@@ -257,7 +257,7 @@ MGLevelGlobalTransfer<VectorType>::copy_to_mg(
       dst_level.compress(VectorOperation::insert);
 
 #ifdef DEBUG_OUTPUT
-      ierr = MPI_Barrier(dof_handler.get_communicator());
+      ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
       AssertThrowMPI(ierr);
       std::cout << "copy_to_mg dst " << level << ' ' << dst_level.l2_norm()
                 << std::endl;
@@ -295,11 +295,11 @@ MGLevelGlobalTransfer<VectorType>::copy_from_mg(
   for (unsigned int level = src.min_level(); level <= src.max_level(); ++level)
     {
 #ifdef DEBUG_OUTPUT
-      int ierr = MPI_Barrier(dof_handler.get_communicator());
+      int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
       AssertThrowMPI(ierr);
       std::cout << "copy_from_mg src " << level << ' ' << src[level].l2_norm()
                 << std::endl;
-      ierr = MPI_Barrier(dof_handler.get_communicator());
+      ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
       AssertThrowMPI(ierr);
 #endif
 
@@ -320,7 +320,7 @@ MGLevelGlobalTransfer<VectorType>::copy_from_mg(
 #ifdef DEBUG_OUTPUT
       {
         dst.compress(VectorOperation::insert);
-        ierr = MPI_Barrier(dof_handler.get_communicator());
+        ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
         AssertThrowMPI(ierr);
         std::cout << "copy_from_mg level=" << level << ' ' << dst.l2_norm()
                   << std::endl;
@@ -329,7 +329,7 @@ MGLevelGlobalTransfer<VectorType>::copy_from_mg(
     }
   dst.compress(VectorOperation::insert);
 #ifdef DEBUG_OUTPUT
-  const int ierr = MPI_Barrier(dof_handler.get_communicator());
+  const int ierr = MPI_Barrier(dof_handler.get_mpi_communicator());
   AssertThrowMPI(ierr);
   std::cout << "copy_from_mg " << dst.l2_norm() << std::endl;
 #endif
@@ -445,7 +445,7 @@ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::copy_to_mg(
           dst[level].reinit(ghosted_level_vector[level], false);
         else
           dst[level].reinit(dof_handler.locally_owned_mg_dofs(level),
-                            dof_handler.get_communicator());
+                            dof_handler.get_mpi_communicator());
       }
     else if ((perform_plain_copy == false &&
               perform_renumbered_plain_copy == false) ||
include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
index fc502edd3fb6b64facbfa68abc1fe4205ee57314..7db8a1b7f1981ed62f6d788f06b7e88f5728753a 100644 (file)
@@ -806,7 +806,8 @@ namespace internal
       , dof_handler_coarse(dof_handler_coarse)
       , mg_level_fine(mg_level_fine)
       , communicator(
-          dof_handler_fine.get_communicator() /*TODO: fix for different comms*/)
+          dof_handler_fine
+            .get_mpi_communicator() /*TODO: fix for different comms*/)
       , cell_id_translator(
           dof_handler_fine.get_triangulation().n_global_coarse_cells(),
           dof_handler_fine.get_triangulation().n_global_levels())
@@ -1457,7 +1458,8 @@ namespace internal
           });
 
         return Utilities::MPI::min(static_cast<unsigned int>(flag),
-                                   dof_handler_fine.get_communicator()) == 1;
+                                   dof_handler_fine.get_mpi_communicator()) ==
+               1;
       }
     else
       {
@@ -1620,7 +1622,7 @@ namespace internal
           dof_handler_coarse.locally_owned_dofs() :
           dof_handler_coarse.locally_owned_mg_dofs(mg_level_coarse),
         locally_relevant_dofs,
-        dof_handler_coarse.get_communicator());
+        dof_handler_coarse.get_mpi_communicator());
     }
 
 
@@ -1691,9 +1693,9 @@ namespace internal
                                    cell->active_fe_index());
         });
 
-      const auto comm = dof_handler_fine.get_communicator();
+      const auto comm = dof_handler_fine.get_mpi_communicator();
 
-      Assert(comm == dof_handler_coarse.get_communicator(),
+      Assert(comm == dof_handler_coarse.get_mpi_communicator(),
              ExcNotImplemented());
 
       ArrayView<unsigned int> temp_min(min_active_fe_indices);
@@ -2022,11 +2024,11 @@ namespace internal
 
       {
         transfer.partitioner_coarse = transfer.constraint_info_coarse.finalize(
-          dof_handler_coarse.get_communicator());
+          dof_handler_coarse.get_mpi_communicator());
         transfer.vec_coarse.reinit(transfer.partitioner_coarse);
 
         transfer.partitioner_fine = transfer.constraint_info_fine.finalize(
-          dof_handler_fine.get_communicator());
+          dof_handler_fine.get_mpi_communicator());
         transfer.vec_fine.reinit(transfer.partitioner_fine);
       }
 
@@ -2583,11 +2585,11 @@ namespace internal
 
       {
         transfer.partitioner_coarse = transfer.constraint_info_coarse.finalize(
-          dof_handler_coarse.get_communicator());
+          dof_handler_coarse.get_mpi_communicator());
         transfer.vec_coarse.reinit(transfer.partitioner_coarse);
 
         transfer.partitioner_fine = transfer.constraint_info_fine.finalize(
-          dof_handler_fine.get_communicator());
+          dof_handler_fine.get_mpi_communicator());
         transfer.vec_fine.reinit(transfer.partitioner_fine);
       }
 
@@ -2795,7 +2797,7 @@ namespace MGTransferGlobalCoarseningTools
             &fine_triangulation_in))
         return std::make_shared<
           parallel::distributed::Triangulation<dim, spacedim>>(
-          fine_triangulation->get_communicator());
+          fine_triangulation->get_mpi_communicator());
       else
 #endif
 #ifdef DEAL_II_WITH_MPI
@@ -2803,7 +2805,7 @@ namespace MGTransferGlobalCoarseningTools
               const parallel::shared::Triangulation<dim, spacedim> *>(
               &fine_triangulation_in))
         return std::make_shared<parallel::shared::Triangulation<dim, spacedim>>(
-          fine_triangulation->get_communicator(),
+          fine_triangulation->get_mpi_communicator(),
           Triangulation<dim, spacedim>::none,
           fine_triangulation->with_artificial_cells());
       else
@@ -2865,7 +2867,7 @@ namespace MGTransferGlobalCoarseningTools
 
     Assert(fine_triangulation, ExcNotImplemented());
 
-    const auto comm = fine_triangulation->get_communicator();
+    const auto comm = fine_triangulation->get_mpi_communicator();
 
     if (keep_fine_triangulation == true &&
         repartition_fine_triangulation == false)
@@ -3957,7 +3959,7 @@ MGTwoLevelTransfer<dim, VectorType>::reinit(
                                 IteratorFilters::LocallyOwnedCell())
         is_locally_owned_coarse.add_index(cell_id_translator.translate(cell));
 
-      const MPI_Comm communicator = dof_handler_fine.get_communicator();
+      const MPI_Comm communicator = dof_handler_fine.get_mpi_communicator();
 
       std::vector<unsigned int> owning_ranks(
         is_locally_owned_coarse.n_elements());
@@ -4452,7 +4454,7 @@ MGTransferMF<dim, Number>::fill_and_communicate_copy_indices_global_coarsening(
 
   this->perform_plain_copy =
     Utilities::MPI::max(this->perform_plain_copy ? 1 : 0,
-                        dof_handler_out.get_communicator()) != 0;
+                        dof_handler_out.get_mpi_communicator()) != 0;
 
   if (this->perform_plain_copy)
     {
@@ -5009,12 +5011,12 @@ namespace internal
 
         const Utilities::MPI::Partitioner partitioner_support_points(
           dof_handler_support_points.locally_owned_dofs(),
-          dof_handler_support_points.get_communicator());
+          dof_handler_support_points.get_mpi_communicator());
 
         const Utilities::MPI::Partitioner partitioner_dof(
           dof_handler.locally_owned_dofs(),
           DoFTools::extract_locally_relevant_dofs(dof_handler),
-          dof_handler.get_communicator());
+          dof_handler.get_mpi_communicator());
 
         std::vector<bool> dof_processed(partitioner_dof.locally_owned_size() +
                                           partitioner_dof.n_ghost_indices(),
@@ -5324,7 +5326,7 @@ MGTwoLevelTransferNonNested<dim, VectorType>::reinit(
     this->partitioner_fine.reset(
       new Utilities::MPI::Partitioner(dof_handler_fine.locally_owned_dofs(),
                                       locally_relevant_dofs,
-                                      dof_handler_fine.get_communicator()));
+                                      dof_handler_fine.get_mpi_communicator()));
 
     this->vec_fine.reinit(this->partitioner_fine);
   }
include/deal.II/multigrid/multigrid.h
index 5891ecc04259e9e61f2a979e0b98b7061d47e33c..2d75879019deb857970d3ca86fd71335fc7a3ba6 100644 (file)
@@ -915,7 +915,7 @@ PreconditionMG<dim, VectorType, TransferType>::get_mpi_communicator() const
   const parallel::TriangulationBase<dim> *ptria =
     dynamic_cast<const parallel::TriangulationBase<dim> *>(&tria);
   Assert(ptria != nullptr, ExcInternalError());
-  return ptria->get_communicator();
+  return ptria->get_mpi_communicator();
 }
 
 
include/deal.II/numerics/data_out_dof_data.templates.h
index 67119696cc56b38a9bebd6713b72b3c35d191d28..8e22d75b0e77950e73f76880fadd8c3a76b27515 100644 (file)
@@ -920,7 +920,7 @@ namespace internal
 
             dst.block(b).reinit(locally_owned_dofs_b[b],
                                 locally_relevant_dofs_b[b],
-                                dof_handler.get_communicator());
+                                dof_handler.get_mpi_communicator());
             copy_locally_owned_data_from(src.block(b), dst.block(b));
           }
 
@@ -966,7 +966,7 @@ namespace internal
 
         dst.block(0).reinit(locally_owned_dofs,
                             locally_relevant_dofs,
-                            dof_handler.get_communicator());
+                            dof_handler.get_mpi_communicator());
         copy_locally_owned_data_from(src, dst.block(0));
 
         dst.collect_sizes();
include/deal.II/numerics/vector_tools_integrate_difference.templates.h
index ccee24d8de2ae08285ea340018b4386b54ad22a3..aa7ab85083302df3e5c9134a0428c41ceebff6d1 100644 (file)
@@ -669,7 +669,7 @@ namespace VectorTools
     }
 #endif
 
-    const MPI_Comm comm = tria.get_communicator();
+    const MPI_Comm comm = tria.get_mpi_communicator();
 
     switch (norm)
       {
include/deal.II/numerics/vector_tools_interpolate.templates.h
index f5fafb1488a592a442a442aef404f9fe134a4a65..a00e151c9d4542d91a46907d36c8d9d714b4d33d 100644 (file)
@@ -1075,7 +1075,7 @@ namespace VectorTools
 
       u.reinit(locally_owned_dofs,
                locally_relevant_dofs,
-               dof_handler.get_communicator());
+               dof_handler.get_mpi_communicator());
     }
 
 
include/deal.II/numerics/vector_tools_mean_value.templates.h
index 676587696fc85f2b468bea309eeccbbdc4392fd8..a87c1c3d5259fbaa1e74325f8fe4f1efefdeb9a4 100644 (file)
@@ -353,7 +353,7 @@ namespace VectorTools
                                        3,
                                        MPI_DOUBLE,
                                        MPI_SUM,
-                                       p_triangulation->get_communicator());
+                                       p_triangulation->get_mpi_communicator());
         AssertThrowMPI(ierr);
 
         internal::set_possibly_complex_number(global_values[0],
include/deal.II/sundials/n_vector.templates.h
index 7671ea7d120cbfab1d766821335bb13fbf29e3d2..3e54f4874fa911221a6ee28df6461c2805fd2369 100644 (file)
@@ -401,7 +401,7 @@ namespace SUNDIALS
 
       template <typename VectorType>
       const MPI_Comm &
-      get_communicator(N_Vector v);
+      get_mpi_communicator(N_Vector v);
 
 #  if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
       /**
@@ -409,7 +409,7 @@ namespace SUNDIALS
        */
       template <typename VectorType>
       inline SUNComm
-      get_communicator_by_value(N_Vector v);
+      get_mpi_communicator_by_value(N_Vector v);
 #  else
       /**
        * Sundials likes a void* but we want to use the above functions
@@ -417,7 +417,7 @@ namespace SUNDIALS
        */
       template <typename VectorType>
       inline void *
-      get_communicator_as_void_ptr(N_Vector v);
+      get_mpi_communicator_as_void_ptr(N_Vector v);
 #  endif
     } // namespace NVectorOperations
   }   // namespace internal
@@ -726,7 +726,7 @@ namespace SUNDIALS
 
       template <typename VectorType>
       const MPI_Comm &
-      get_communicator(N_Vector v)
+      get_mpi_communicator(N_Vector v)
       {
         Assert(v != nullptr, ExcInternalError());
         Assert(v->content != nullptr, ExcInternalError());
@@ -740,7 +740,7 @@ namespace SUNDIALS
 #  if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
       template <typename VectorType>
       SUNComm
-      get_communicator_by_value(N_Vector v)
+      get_mpi_communicator_by_value(N_Vector v)
       {
 #    ifndef DEAL_II_WITH_MPI
         (void)v;
@@ -753,7 +753,7 @@ namespace SUNDIALS
           //
           // Further, we need to cast away const here, as SUNDIALS demands the
           // communicator by value.
-          return const_cast<SUNComm>(get_communicator<VectorType>(v));
+          return const_cast<SUNComm>(get_mpi_communicator<VectorType>(v));
         else
           return SUN_COMM_NULL;
 #    endif
@@ -761,7 +761,7 @@ namespace SUNDIALS
 #  else
       template <typename VectorType>
       void *
-      get_communicator_as_void_ptr(N_Vector v)
+      get_mpi_communicator_as_void_ptr(N_Vector v)
       {
 #    ifndef DEAL_II_WITH_MPI
         (void)v;
@@ -770,7 +770,7 @@ namespace SUNDIALS
         if (is_serial_vector<VectorType>::value == false)
           // We need to cast away const here, as SUNDIALS demands a pure
           // `void*`.
-          return &(const_cast<MPI_Comm &>(get_communicator<VectorType>(v)));
+          return &(const_cast<MPI_Comm &>(get_mpi_communicator<VectorType>(v)));
         else
           return nullptr;
 #    endif
@@ -899,7 +899,7 @@ namespace SUNDIALS
       {
         ArrayView<realtype> products(d, nv);
         Utilities::MPI::sum(products,
-                            get_communicator<VectorType>(x),
+                            get_mpi_communicator<VectorType>(x),
                             products);
         return 0;
       }
@@ -1047,7 +1047,7 @@ namespace SUNDIALS
                                                  local_elements.end(),
                                                  indexed_less_than);
         return Utilities::MPI::min((*vector)[local_min],
-                                   get_communicator<VectorType>(x));
+                                   get_mpi_communicator<VectorType>(x));
       }
 
 
@@ -1087,7 +1087,7 @@ namespace SUNDIALS
           }
 
         return Utilities::MPI::min(proc_local_min,
-                                   get_communicator<VectorType>(x));
+                                   get_mpi_communicator<VectorType>(x));
       }
 
 
@@ -1272,10 +1272,10 @@ namespace SUNDIALS
       //  v->ops->nvspace           = undef;
 #  if DEAL_II_SUNDIALS_VERSION_GTE(7, 0, 0)
       v->ops->nvgetcommunicator =
-        &NVectorOperations::get_communicator_by_value<VectorType>;
+        &NVectorOperations::get_mpi_communicator_by_value<VectorType>;
 #  else
       v->ops->nvgetcommunicator =
-        &NVectorOperations::get_communicator_as_void_ptr<VectorType>;
+        &NVectorOperations::get_mpi_communicator_as_void_ptr<VectorType>;
 #  endif
       v->ops->nvgetlength = &NVectorOperations::get_global_length<VectorType>;
 
source/base/mpi_remote_point_evaluation.cc
index 28bde276ccf1cc7d97c5445ef6cdbf35c110e08a..e2cfc9018a8c6d7498ba396457717ea098d5604d 100644 (file)
@@ -186,7 +186,7 @@ namespace Utilities
       const auto n_owning_processes_global =
         Utilities::MPI::all_reduce<std::tuple<unsigned int, unsigned int>>(
           n_owning_processes_local,
-          tria.get_communicator(),
+          tria.get_mpi_communicator(),
           [&](const auto &a,
               const auto &b) -> std::tuple<unsigned int, unsigned int> {
             if (a == n_owning_processes_default)
source/distributed/grid_refinement.cc
index 882783d4d74bc71b4e20a0bf7969e507faf91539..5800fae1e41a9ec03679f85de3d8f700d49d569c 100644 (file)
@@ -220,7 +220,7 @@ namespace
     Vector<Number> locally_owned_indicators(n_locally_owned_active_cells(tria));
     get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
 
-    MPI_Comm mpi_communicator = tria.get_communicator();
+    MPI_Comm mpi_communicator = tria.get_mpi_communicator();
 
     // figure out the global max and min of the indicators. we don't need it
     // here, but it's a collective communication call
@@ -532,7 +532,7 @@ namespace parallel
           n_locally_owned_active_cells(tria));
         get_locally_owned_indicators(tria, criteria, locally_owned_indicators);
 
-        MPI_Comm mpi_communicator = tria.get_communicator();
+        MPI_Comm mpi_communicator = tria.get_mpi_communicator();
 
         // figure out the global max and min of the indicators. we don't need it
         // here, but it's a collective communication call
source/distributed/repartitioning_policy_tools.cc
index 6f9abad64ab74d538db95c1f4a4d5d0a3b3445d7..1377c7d1a45f82703c069862b1ec0feb4eb197d0 100644 (file)
@@ -71,7 +71,7 @@ namespace RepartitioningPolicyTools
     return {};
 #else
 
-    const auto comm = tria->get_communicator();
+    const auto comm = tria->get_mpi_communicator();
 
     const unsigned int process_has_active_locally_owned_cells =
       tria->n_locally_owned_active_cells() > 0;
@@ -134,7 +134,7 @@ namespace RepartitioningPolicyTools
   FirstChildPolicy<dim, spacedim>::partition(
     const Triangulation<dim, spacedim> &tria_coarse_in) const
   {
-    const auto communicator = tria_coarse_in.get_communicator();
+    const auto communicator = tria_coarse_in.get_mpi_communicator();
 
     const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                              n_global_levels);
@@ -212,7 +212,7 @@ namespace RepartitioningPolicyTools
                       tria_in.end()),
                     [](const auto &cell) { return cell.is_locally_owned(); });
 
-    const auto comm = tria_in.get_communicator();
+    const auto comm = tria_in.get_mpi_communicator();
 
     if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
       return {}; // all processes have enough cells
@@ -289,7 +289,7 @@ namespace RepartitioningPolicyTools
 
     std::vector<unsigned int> weights(partitioner->locally_owned_size());
 
-    const auto mpi_communicator = tria_in.get_communicator();
+    const auto mpi_communicator = tria_in.get_mpi_communicator();
     const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);
 
     // determine weight of each cell
@@ -307,7 +307,7 @@ namespace RepartitioningPolicyTools
     // weight
     const auto [process_local_weight_offset, total_weight] =
       Utilities::MPI::partial_and_total_sum(process_local_weight,
-                                            tria->get_communicator());
+                                            tria->get_mpi_communicator());
 
     // set up partition
     LinearAlgebra::distributed::Vector<double> partition(partitioner);
source/distributed/shared_tria.cc
index b9ce044f70a72ead9d465151864fd6afdacf0f25..f3bc4f3246f99ea4c21c5a1cb14f22befd2980bd 100644 (file)
@@ -90,7 +90,8 @@ namespace parallel
       // Check that all meshes are the same (or at least have the same
       // total number of active cells):
       const unsigned int max_active_cells =
-        Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
+        Utilities::MPI::max(this->n_active_cells(),
+                            this->get_mpi_communicator());
       Assert(
         max_active_cells == this->n_active_cells(),
         ExcMessage(
@@ -293,7 +294,7 @@ namespace parallel
           [](const auto &i) { return (i.is_locally_owned()); });
 
         const unsigned int total_cells =
-          Utilities::MPI::sum(n_my_cells, this->get_communicator());
+          Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
         Assert(total_cells == this->n_active_cells(),
                ExcMessage("Not all cells are assigned to a processor."));
       }
@@ -309,7 +310,7 @@ namespace parallel
 
 
           const unsigned int total_cells =
-            Utilities::MPI::sum(n_my_cells, this->get_communicator());
+            Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
           Assert(total_cells == this->n_cells(),
                  ExcMessage("Not all cells are assigned to a processor."));
         }
@@ -387,7 +388,7 @@ namespace parallel
             }
 
         Utilities::MPI::max(refinement_configurations,
-                            this->get_communicator(),
+                            this->get_mpi_communicator(),
                             refinement_configurations);
 
         for (const auto &cell : this->active_cell_iterators())
source/distributed/tria.cc
index 27f27e1a53625e7f05d3f0a07c23b9f99d91cfe5..c08c96fda40ab9841bcd0092c86a7bc91eba2cc7 100644 (file)
@@ -3447,7 +3447,7 @@ namespace parallel
             this->local_cell_relations,
             this->cell_attached_data.pack_callbacks_fixed,
             this->cell_attached_data.pack_callbacks_variable,
-            this->get_communicator());
+            this->get_mpi_communicator());
         }
 
       // finally copy back from local part of tree to deal.II
@@ -3621,7 +3621,7 @@ namespace parallel
             this->local_cell_relations,
             this->cell_attached_data.pack_callbacks_fixed,
             this->cell_attached_data.pack_callbacks_variable,
-            this->get_communicator());
+            this->get_mpi_communicator());
         }
 
       try
source/distributed/tria_base.cc
index e044ba36d1c6f3c21661a41c31278d891a2cfdf0..821ebbd25f88822f4a7f257d21ad89924f7e93be 100644 (file)
@@ -157,7 +157,7 @@ namespace parallel
 
   template <int dim, int spacedim>
   DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
-  MPI_Comm TriangulationBase<dim, spacedim>::get_communicator() const
+  MPI_Comm TriangulationBase<dim, spacedim>::get_mpi_communicator() const
   {
     return mpi_communicator;
   }
source/dofs/dof_handler.cc
index 8c2baebda20323e45bc162f02b6e40e2254c7466..e01d299e4a315344575fabea4f7ad3b42251f9ab 100644 (file)
@@ -1292,7 +1292,7 @@ namespace internal
                     cell->active_fe_index();
 
               Utilities::MPI::sum(active_fe_indices,
-                                  tr->get_communicator(),
+                                  tr->get_mpi_communicator(),
                                   active_fe_indices);
 
               // now go back and fill the active FE index on all other
@@ -1393,7 +1393,7 @@ namespace internal
                     .hp_cell_future_fe_indices[cell->level()][cell->index()];
 
               Utilities::MPI::sum(future_fe_indices,
-                                  tr->get_communicator(),
+                                  tr->get_mpi_communicator(),
                                   future_fe_indices);
 
               for (const auto &cell : dof_handler.active_cell_iterators())
source/dofs/dof_handler_policy.cc
index e225119128d029a03bf66f55065db7c53a725e24..d1e4204f638484d873bef0c9bce0d5865dc2d8f9 100644 (file)
@@ -2889,7 +2889,7 @@ namespace internal
         Assert(tr != nullptr, ExcInternalError());
 
         const unsigned int n_procs =
-          Utilities::MPI::n_mpi_processes(tr->get_communicator());
+          Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
 
         // If an underlying shared::Tria allows artificial cells, we need to
         // restore the true cell owners temporarily.
@@ -3040,7 +3040,7 @@ namespace internal
                       "is set in the constructor."));
 
         const unsigned int n_procs =
-          Utilities::MPI::n_mpi_processes(tr->get_communicator());
+          Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
         const unsigned int n_levels = tr->n_global_levels();
 
         std::vector<NumberCache> number_caches;
@@ -3252,7 +3252,7 @@ namespace internal
         Utilities::MPI::internal::all_reduce<bool>(
           MPI_LAND,
           ArrayView<const bool>(&uses_sequential_numbering, 1),
-          tr->get_communicator(),
+          tr->get_mpi_communicator(),
           ArrayView<bool>(&all_use_sequential_numbering, 1));
         if (all_use_sequential_numbering)
           {
@@ -3264,10 +3264,11 @@ namespace internal
                      this->dof_handler->locally_owned_dofs().n_elements(),
                    ExcInternalError());
             const unsigned int n_cpu =
-              Utilities::MPI::n_mpi_processes(tr->get_communicator());
+              Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
             std::vector<types::global_dof_index> gathered_new_numbers(
               this->dof_handler->n_dofs(), 0);
-            Assert(Utilities::MPI::this_mpi_process(tr->get_communicator()) ==
+            Assert(Utilities::MPI::this_mpi_process(
+                     tr->get_mpi_communicator()) ==
                      this->dof_handler->get_triangulation()
                        .locally_owned_subdomain(),
                    ExcInternalError());
@@ -3290,7 +3291,7 @@ namespace internal
                                        rcounts.data(),
                                        1,
                                        MPI_INT,
-                                       tr->get_communicator());
+                                       tr->get_mpi_communicator());
               AssertThrowMPI(ierr);
 
               // compute the displacements (relative to recvbuf)
@@ -3304,7 +3305,7 @@ namespace internal
               Assert(new_numbers_copy.size() ==
                        static_cast<unsigned int>(
                          rcounts[Utilities::MPI::this_mpi_process(
-                           tr->get_communicator())]),
+                           tr->get_mpi_communicator())]),
                      ExcInternalError());
               ierr = MPI_Allgatherv(new_numbers_copy.data(),
                                     new_numbers_copy.size(),
@@ -3313,7 +3314,7 @@ namespace internal
                                     rcounts.data(),
                                     displacements.data(),
                                     DEAL_II_DOF_INDEX_MPI_TYPE,
-                                    tr->get_communicator());
+                                    tr->get_mpi_communicator());
               AssertThrowMPI(ierr);
             }
 
@@ -3327,7 +3328,7 @@ namespace internal
             std::vector<unsigned int> flag_2(this->dof_handler->n_dofs(), 0);
             std::vector<IndexSet>     locally_owned_dofs_per_processor =
               Utilities::MPI::all_gather(
-                tr->get_communicator(),
+                tr->get_mpi_communicator(),
                 this->dof_handler->locally_owned_dofs());
             for (unsigned int i = 0; i < n_cpu; ++i)
               {
@@ -3695,7 +3696,7 @@ namespace internal
         //                    range of indices
         const auto [my_shift, n_global_dofs] =
           Utilities::MPI::partial_and_total_sum(
-            n_locally_owned_dofs, triangulation->get_communicator());
+            n_locally_owned_dofs, triangulation->get_mpi_communicator());
 
 
         // make dof indices globally consecutive
@@ -3892,7 +3893,7 @@ namespace internal
             const auto [my_shift, n_global_dofs] =
               Utilities::MPI::partial_and_total_sum(
                 level_number_cache.n_locally_owned_dofs,
-                triangulation->get_communicator());
+                triangulation->get_mpi_communicator());
             level_number_cache.n_global_dofs = n_global_dofs;
 
             // assign appropriate indices
source/dofs/dof_renumbering.cc
index 9733dbecdb4390fdce082544ba1ac4ef38aaeb28..77be552aa86aa26ed135c7ac3372ca8de719ca2c 100644 (file)
@@ -680,7 +680,7 @@ namespace DoFRenumbering
     // If we don't have a renumbering (i.e., when there is 1 component) then
     // return
     if (Utilities::MPI::max(renumbering.size(),
-                            dof_handler.get_communicator()) == 0)
+                            dof_handler.get_mpi_communicator()) == 0)
       return;
 
     // verify that the last numbered
@@ -726,7 +726,7 @@ namespace DoFRenumbering
     // If we don't have a renumbering (i.e., when there is 1 component) then
     // return
     if (Utilities::MPI::max(renumbering.size(),
-                            dof_handler.get_communicator()) == 0)
+                            dof_handler.get_mpi_communicator()) == 0)
       return;
 
     // verify that the last numbered
@@ -936,12 +936,12 @@ namespace DoFRenumbering
                                     n_buckets,
                                     DEAL_II_DOF_INDEX_MPI_TYPE,
                                     MPI_SUM,
-                                    tria->get_communicator());
+                                    tria->get_mpi_communicator());
         AssertThrowMPI(ierr);
 
         std::vector<types::global_dof_index> global_dof_count(n_buckets);
         Utilities::MPI::sum(local_dof_count,
-                            tria->get_communicator(),
+                            tria->get_mpi_communicator(),
                             global_dof_count);
 
         // calculate shifts
@@ -1055,7 +1055,7 @@ namespace DoFRenumbering
            ExcInternalError());
 
     if (Utilities::MPI::max(renumbering.size(),
-                            dof_handler.get_communicator()) > 0)
+                            dof_handler.get_mpi_communicator()) > 0)
       dof_handler.renumber_dofs(level, renumbering);
   }
 
@@ -1197,12 +1197,12 @@ namespace DoFRenumbering
                                     n_buckets,
                                     DEAL_II_DOF_INDEX_MPI_TYPE,
                                     MPI_SUM,
-                                    tria->get_communicator());
+                                    tria->get_mpi_communicator());
         AssertThrowMPI(ierr);
 
         std::vector<types::global_dof_index> global_dof_count(n_buckets);
         Utilities::MPI::sum(local_dof_count,
-                            tria->get_communicator(),
+                            tria->get_mpi_communicator(),
                             global_dof_count);
 
         // calculate shifts
@@ -1389,7 +1389,7 @@ namespace DoFRenumbering
                                     1,
                                     DEAL_II_DOF_INDEX_MPI_TYPE,
                                     MPI_SUM,
-                                    tria->get_communicator());
+                                    tria->get_mpi_communicator());
         AssertThrowMPI(ierr);
 #endif
       }
@@ -2262,7 +2262,7 @@ namespace DoFRenumbering
     // If there is only one component then there is nothing to do, so check
     // first:
     if (Utilities::MPI::max(renumbering.size(),
-                            dof_handler.get_communicator()) > 0)
+                            dof_handler.get_mpi_communicator()) > 0)
       dof_handler.renumber_dofs(renumbering);
   }
 
source/dofs/dof_tools.cc
index 43f51b7c9f7f7e93b5b3bd51b7776b3aa3e73099..9abba66a669ccd4b1a13f31f2d6eb44e43614b3e 100644 (file)
@@ -1557,7 +1557,7 @@ namespace DoFTools
          Utilities::MPI::n_mpi_processes(
            dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
              &dof_handler.get_triangulation())
-             ->get_communicator()));
+             ->get_mpi_communicator()));
     Assert(n_subdomains > *std::max_element(subdomain_association.begin(),
                                             subdomain_association.end()),
            ExcInternalError());
@@ -2089,7 +2089,7 @@ namespace DoFTools
                                        n_target_components,
                                        DEAL_II_DOF_INDEX_MPI_TYPE,
                                        MPI_SUM,
-                                       tria->get_communicator());
+                                       tria->get_mpi_communicator());
         AssertThrowMPI(ierr);
       }
 #endif
@@ -2177,7 +2177,7 @@ namespace DoFTools
                                            n_target_blocks,
                                            DEAL_II_DOF_INDEX_MPI_TYPE,
                                            MPI_SUM,
-                                           tria->get_communicator());
+                                           tria->get_mpi_communicator());
             AssertThrowMPI(ierr);
           }
 #endif
source/dofs/dof_tools_constraints.cc
index e32fcd5e3fc91c4dfc0c994fc2342ee81ba525ec..2a2fb00456cb919f84414fee2125750cd6a63234 100644 (file)
@@ -4135,7 +4135,7 @@ namespace DoFTools
                                          TriangulationBase<dim, spacedim> &>(
                     coarse_to_fine_grid_map.get_destination_grid()
                       .get_triangulation());
-                communicator          = tria.get_communicator();
+                communicator          = tria.get_mpi_communicator();
                 is_called_in_parallel = true;
               }
             catch (std::bad_cast &)
index 47f12ae8423b5e47447b4711b325623111e6c108..750a9602d12c2cfff61fa912ac8dd2895668274b 100644 (file)
@@ -317,7 +317,7 @@ MappingQCache<dim, spacedim>::initialize(
     DoFTools::extract_locally_relevant_dofs(dof_handler);
   vector_ghosted.reinit(dof_handler.locally_owned_dofs(),
                         locally_relevant_dofs,
-                        dof_handler.get_communicator());
+                        dof_handler.get_mpi_communicator());
   copy_locally_owned_data_from(vector, vector_ghosted);
   vector_ghosted.update_ghost_values();
 
@@ -521,7 +521,7 @@ MappingQCache<dim, spacedim>::initialize(
         DoFTools::extract_locally_relevant_level_dofs(dof_handler, l);
       vectors_ghosted[l].reinit(dof_handler.locally_owned_mg_dofs(l),
                                 locally_relevant_dofs,
-                                dof_handler.get_communicator());
+                                dof_handler.get_mpi_communicator());
       copy_locally_owned_data_from(vectors[l], vectors_ghosted[l]);
       vectors_ghosted[l].update_ghost_values();
     }
index d30eec1f0cda02d7015736d0bb4aef7775727a0f..a50cab110c16bd54837825b9afdc8ba7ca39732b 100644 (file)
@@ -3683,7 +3683,7 @@ GridOut::write_mesh_per_processor_as_vtu(
           else
             pos += 1;
           const unsigned int n_procs =
-            Utilities::MPI::n_mpi_processes(tr->get_communicator());
+            Utilities::MPI::n_mpi_processes(tr->get_mpi_communicator());
           for (unsigned int i = 0; i < n_procs; ++i)
             filenames.push_back(filename_without_extension.substr(pos) +
                                 ".proc" + Utilities::int_to_string(i, 4) +
index 92fdf88df594dfe6086d6d9257cbe095e0e1f1a1..596a10f63912cb92c843e3ae95c5a0597055a3a6 100644 (file)
@@ -1525,8 +1525,8 @@ namespace GridTools
       }
 
     // Get the size of the largest CellID string
-    max_cellid_size =
-      Utilities::MPI::max(max_cellid_size, triangulation.get_communicator());
+    max_cellid_size = Utilities::MPI::max(max_cellid_size,
+                                          triangulation.get_mpi_communicator());
 
     // Make indices global by getting the number of vertices owned by each
     // processor and shifting the indices accordingly
@@ -1536,7 +1536,7 @@ namespace GridTools
                           1,
                           DEAL_II_VERTEX_INDEX_MPI_TYPE,
                           MPI_SUM,
-                          triangulation.get_communicator());
+                          triangulation.get_mpi_communicator());
     AssertThrowMPI(ierr);
 
     for (auto &global_index_it : local_to_global_vertex_index)
@@ -1587,7 +1587,7 @@ namespace GridTools
                          DEAL_II_VERTEX_INDEX_MPI_TYPE,
                          destination,
                          mpi_tag,
-                         triangulation.get_communicator(),
+                         triangulation.get_mpi_communicator(),
                          &first_requests[i]);
         AssertThrowMPI(ierr);
       }
@@ -1612,7 +1612,7 @@ namespace GridTools
                         DEAL_II_VERTEX_INDEX_MPI_TYPE,
                         source,
                         mpi_tag,
-                        triangulation.get_communicator(),
+                        triangulation.get_mpi_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
       }
@@ -1658,7 +1658,7 @@ namespace GridTools
                          MPI_CHAR,
                          destination,
                          mpi_tag2,
-                         triangulation.get_communicator(),
+                         triangulation.get_mpi_communicator(),
                          &second_requests[i]);
         AssertThrowMPI(ierr);
       }
@@ -1681,7 +1681,7 @@ namespace GridTools
                         MPI_CHAR,
                         source,
                         mpi_tag2,
-                        triangulation.get_communicator(),
+                        triangulation.get_mpi_communicator(),
                         MPI_STATUS_IGNORE);
         AssertThrowMPI(ierr);
       }
@@ -1780,7 +1780,7 @@ namespace GridTools
               dynamic_cast<parallel::shared::Triangulation<dim, spacedim> *>(
                 &triangulation))
           Utilities::MPI::sum(cell_weights,
-                              shared_tria->get_communicator(),
+                              shared_tria->get_mpi_communicator(),
                               cell_weights);
 
         // verify that the global sum of weights is larger than 0
@@ -1880,7 +1880,7 @@ namespace GridTools
               dynamic_cast<parallel::shared::Triangulation<dim, spacedim> *>(
                 &triangulation))
           Utilities::MPI::sum(cell_weights,
-                              shared_tria->get_communicator(),
+                              shared_tria->get_mpi_communicator(),
                               cell_weights);
 
         // verify that the global sum of weights is larger than 0
@@ -2161,7 +2161,7 @@ namespace GridTools
                 cell_id.get_coarse_cell_id(),
                 &p4est_cell,
                 Utilities::MPI::this_mpi_process(
-                  triangulation.get_communicator()));
+                  triangulation.get_mpi_communicator()));
 
             Assert(owner >= 0, ExcMessage("p4est should know the owner."));
 
@@ -3745,7 +3745,7 @@ namespace GridTools
         &cache.get_locally_owned_cell_bounding_boxes_rtree());
 
       const unsigned int my_rank = Utilities::MPI::this_mpi_process(
-        cache.get_triangulation().get_communicator());
+        cache.get_triangulation().get_mpi_communicator());
 
       cell_hint = first_cell.first;
       if (cell_hint.state() == IteratorState::valid)
@@ -3934,7 +3934,7 @@ namespace GridTools
       auto &send_components = result.send_components;
       auto &recv_components = result.recv_components;
 
-      const auto comm = cache.get_triangulation().get_communicator();
+      const auto comm = cache.get_triangulation().get_mpi_communicator();
 
       const auto potential_owners = internal::guess_owners_of_entities(
         comm, global_bboxes, points, tolerance);
@@ -4155,7 +4155,7 @@ namespace GridTools
 
           // indices assigned at recv side needed to fill send_components
           indices_of_rank = communicate_indices(result.recv_components,
-                                                tria.get_communicator());
+                                                tria.get_mpi_communicator());
         }
 
       for (const auto &send_component : send_components)
@@ -4324,7 +4324,7 @@ namespace GridTools
           structdim,
           spacedim>::IntersectionType;
 
-      const auto comm = cache.get_triangulation().get_communicator();
+      const auto comm = cache.get_triangulation().get_mpi_communicator();
 
       DistributedComputeIntersectionLocationsInternal<structdim, spacedim>
         result;
index a09c18f634361be02b89a2f1d338074721db3a26..70095d85d93ccffa79572a5d32ccfe7a9a1a27a0 100644 (file)
@@ -286,7 +286,7 @@ namespace GridTools
                 &(*tria)))
           {
             covering_rtree[level] = GridTools::build_global_description_tree(
-              boxes, tria_mpi->get_communicator());
+              boxes, tria_mpi->get_mpi_communicator());
           }
         else
           {
index aa003d01b7acef098c9930c8a6821771b448238f..183ff6c3dc88053df331a0f8f3f872906b71a38d 100644 (file)
@@ -150,7 +150,7 @@ namespace GridTools
         }
 
     const double global_volume =
-      Utilities::MPI::sum(local_volume, triangulation.get_communicator());
+      Utilities::MPI::sum(local_volume, triangulation.get_mpi_communicator());
 
     return global_volume;
   }
@@ -413,7 +413,7 @@ namespace GridTools
         min_diameter = std::min(min_diameter, cell->diameter(mapping));
 
     const double global_min_diameter =
-      Utilities::MPI::min(min_diameter, triangulation.get_communicator());
+      Utilities::MPI::min(min_diameter, triangulation.get_mpi_communicator());
     return global_min_diameter;
   }
 
@@ -430,7 +430,7 @@ namespace GridTools
         max_diameter = std::max(max_diameter, cell->diameter(mapping));
 
     const double global_max_diameter =
-      Utilities::MPI::max(max_diameter, triangulation.get_communicator());
+      Utilities::MPI::max(max_diameter, triangulation.get_mpi_communicator());
     return global_max_diameter;
   }
 } /* namespace GridTools */
index 09c2c41140af50564744f49b35be18297f95e195..ceb0704c1add2a96a15dbba175bd873e31ab659f 100644 (file)
@@ -12135,13 +12135,22 @@ void Triangulation<dim, spacedim>::clear()
 
 template <int dim, int spacedim>
 DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
-MPI_Comm Triangulation<dim, spacedim>::get_communicator() const
+MPI_Comm Triangulation<dim, spacedim>::get_mpi_communicator() const
 {
   return MPI_COMM_SELF;
 }
 
 
 
+template <int dim, int spacedim>
+DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
+MPI_Comm Triangulation<dim, spacedim>::get_communicator() const
+{
+  return get_mpi_communicator();
+}
+
+
+
 template <int dim, int spacedim>
 DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
 std::weak_ptr<const Utilities::MPI::Partitioner> Triangulation<dim, spacedim>::
@@ -15760,7 +15769,7 @@ void Triangulation<dim, spacedim>::pack_data_serial()
         this->local_cell_relations,
         this->cell_attached_data.pack_callbacks_fixed,
         this->cell_attached_data.pack_callbacks_variable,
-        this->get_communicator());
+        this->get_mpi_communicator());
 
       // dummy copy of data
       this->data_serializer.dest_data_fixed =
@@ -16252,13 +16261,13 @@ void Triangulation<dim, spacedim>::save_attached_data(
         tria->local_cell_relations,
         tria->cell_attached_data.pack_callbacks_fixed,
         tria->cell_attached_data.pack_callbacks_variable,
-        this->get_communicator());
+        this->get_mpi_communicator());
 
       // then store buffers in file
       tria->data_serializer.save(global_first_cell,
                                  global_num_cells,
                                  file_basename,
-                                 this->get_communicator());
+                                 this->get_mpi_communicator());
 
       // and release the memory afterwards
       tria->data_serializer.clear();
@@ -16293,7 +16302,7 @@ void Triangulation<dim, spacedim>::load_attached_data(
                                  file_basename,
                                  n_attached_deserialize_fixed,
                                  n_attached_deserialize_variable,
-                                 this->get_communicator());
+                                 this->get_mpi_communicator());
 
       this->data_serializer.unpack_cell_status(this->local_cell_relations);
 
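The tria.cc hunk above is the one place where new code is introduced rather
than merely renamed: get_mpi_communicator() becomes the primary definition
(a plain, serial Triangulation reports MPI_COMM_SELF), while
get_communicator() is kept as a thin forwarder so that existing callers
continue to compile. Condensed, the resulting pair reads as follows; whether
the declaration in the header also carries the DEAL_II_DEPRECATED attribute
is an assumption here, since the header change is not shown in this excerpt:

    template <int dim, int spacedim>
    MPI_Comm Triangulation<dim, spacedim>::get_mpi_communicator() const
    {
      // a serial Triangulation holds no distributed data, so the
      // single-process communicator is the natural answer
      return MPI_COMM_SELF;
    }

    template <int dim, int spacedim>
    MPI_Comm Triangulation<dim, spacedim>::get_communicator() const
    {
      // deprecated spelling: forward to the new name
      return get_mpi_communicator();
    }
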
index d1f607d2d558c36abb51d94f22d02b6e85c0cd1c..0954b5ea3a5db5c5db12c4e7544aa4cfd43367e2 100644 (file)
@@ -724,7 +724,7 @@ namespace TriangulationDescription
             dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
               &tria))
         {
-          Assert(comm == ptria->get_communicator(),
+          Assert(comm == ptria->get_mpi_communicator(),
                  ExcMessage("MPI communicators do not match."));
           Assert(my_rank_in == numbers::invalid_unsigned_int ||
                    my_rank_in == dealii::Utilities::MPI::this_mpi_process(comm),
@@ -1013,16 +1013,15 @@ namespace TriangulationDescription
       const TriangulationDescription::Settings settings_in)
     {
 #ifdef DEAL_II_WITH_MPI
-      if (tria.get_communicator() == MPI_COMM_NULL)
+      if (tria.get_mpi_communicator() == MPI_COMM_NULL)
         AssertDimension(partition.locally_owned_size(), 0);
 #endif
 
       if (partition.size() == 0)
         {
           AssertDimension(partitions_mg.size(), 0);
-          return create_description_from_triangulation(tria,
-                                                       tria.get_communicator(),
-                                                       settings_in);
+          return create_description_from_triangulation(
+            tria, tria.get_mpi_communicator(), settings_in);
         }
 
       // Update partitioner ghost elements because we will later want
@@ -1141,7 +1140,7 @@ namespace TriangulationDescription
             mg_cell_to_future_owner,
             coinciding_vertex_groups,
             vertex_to_coinciding_vertex_group,
-            tria.get_communicator(),
+            tria.get_mpi_communicator(),
             rank,
             settings));
 
index b11a2ca88484186353f5a97e2182e2d40bc3fced..8e57fd28dafce29a9691660d73f544c7a1d6c2ee 100644 (file)
@@ -208,16 +208,16 @@ namespace hp
         {
           max_criterion_refine =
             Utilities::MPI::max(max_criterion_refine,
-                                parallel_tria->get_communicator());
+                                parallel_tria->get_mpi_communicator());
           min_criterion_refine =
             Utilities::MPI::min(min_criterion_refine,
-                                parallel_tria->get_communicator());
+                                parallel_tria->get_mpi_communicator());
           max_criterion_coarsen =
             Utilities::MPI::max(max_criterion_coarsen,
-                                parallel_tria->get_communicator());
+                                parallel_tria->get_mpi_communicator());
           min_criterion_coarsen =
             Utilities::MPI::min(min_criterion_coarsen,
-                                parallel_tria->get_communicator());
+                                parallel_tria->get_mpi_communicator());
         }
 
       // Absent any better strategies, we will set the threshold by linear
@@ -332,7 +332,7 @@ namespace hp
           // parallel implementation with distributed memory
           //
 
-          MPI_Comm mpi_communicator = parallel_tria->get_communicator();
+          MPI_Comm mpi_communicator = parallel_tria->get_mpi_communicator();
 
           // 2.) Communicate the number of cells scheduled for p-adaptation
           //     globally.
@@ -1038,7 +1038,7 @@ namespace hp
 
           levels_changed_in_cycle =
             Utilities::MPI::logical_or(levels_changed_in_cycle,
-                                       dof_handler.get_communicator());
+                                       dof_handler.get_mpi_communicator());
           levels_changed |= levels_changed_in_cycle;
         }
       while (levels_changed_in_cycle);
index 15539f892a2c74cdeb11b5267063d0edd91c6edf..429561c9dcc6cebee8c999d37425162a6b93cd4e 100644 (file)
@@ -85,8 +85,9 @@ MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices(
   if (const parallel::TriangulationBase<dim, spacedim> *ptria =
         dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
           &mg_dof.get_triangulation()))
-    perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
-                                              ptria->get_communicator()) == 1);
+    perform_plain_copy =
+      (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
+                           ptria->get_mpi_communicator()) == 1);
   else
     perform_plain_copy = my_perform_plain_copy;
 }
@@ -280,7 +281,7 @@ void
 MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
   fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
 {
-  const MPI_Comm mpi_communicator = mg_dof.get_communicator();
+  const MPI_Comm mpi_communicator = mg_dof.get_mpi_communicator();
 
   fill_internal(mg_dof,
                 mg_constrained_dofs,
index a6ea67bf92da0f49c22b8db84d667a0b85cf3510..62ecfa943e699a486190d15802a88c13c746c3e3 100644 (file)
@@ -1548,7 +1548,7 @@ namespace MGTools
     if (const parallel::TriangulationBase<dim, spacedim> *tr =
           dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
             &tria))
-      global_min = Utilities::MPI::min(min_level, tr->get_communicator());
+      global_min = Utilities::MPI::min(min_level, tr->get_mpi_communicator());
 
     AssertIndexRange(global_min, tria.n_global_levels());
 
@@ -1674,7 +1674,7 @@ namespace MGTools
   workload_imbalance(const Triangulation<dim, spacedim> &tria)
   {
     return internal::workload_imbalance(local_workload(tria),
-                                        tria.get_communicator());
+                                        tria.get_mpi_communicator());
   }
 
 
@@ -1686,7 +1686,7 @@ namespace MGTools
       &trias)
   {
     return internal::workload_imbalance(local_workload(trias),
-                                        trias.back()->get_communicator());
+                                        trias.back()->get_mpi_communicator());
   }
 
 
@@ -1700,7 +1700,7 @@ namespace MGTools
     std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
       cells(n_global_levels);
 
-    const MPI_Comm communicator = tria.get_communicator();
+    const MPI_Comm communicator = tria.get_mpi_communicator();
 
     const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
 
@@ -1736,7 +1736,7 @@ namespace MGTools
     std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
       cells(n_global_levels);
 
-    const MPI_Comm communicator = trias.back()->get_communicator();
+    const MPI_Comm communicator = trias.back()->get_mpi_communicator();
 
     const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
 
@@ -1805,7 +1805,7 @@ namespace MGTools
   vertical_communication_efficiency(const Triangulation<dim, spacedim> &tria)
   {
     return internal::vertical_communication_efficiency(
-      local_vertical_communication_cost(tria), tria.get_communicator());
+      local_vertical_communication_cost(tria), tria.get_mpi_communicator());
   }
 
 
@@ -1818,7 +1818,7 @@ namespace MGTools
   {
     return internal::vertical_communication_efficiency(
       local_vertical_communication_cost(trias),
-      trias.back()->get_communicator());
+      trias.back()->get_mpi_communicator());
   }
 
 } // namespace MGTools
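
In the MGTools hunks, hierarchies passed as a vector of triangulations take
their communicator from the finest level. The idiom, condensed from the
hunks above (trias as in the surrounding code):

    // statistics for the whole hierarchy are reduced over the communicator
    // of the finest (i.e., last) triangulation in the vector
    const MPI_Comm     communicator = trias.back()->get_mpi_communicator();
    const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
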
index e64d722db54f0f039e30419b5f524b52bc37dc5a..e9bd7d7840aadd371888bc438e20be803394d171 100644 (file)
@@ -207,7 +207,7 @@ namespace internal
 
 #ifdef DEAL_II_WITH_MPI
       if (tria && Utilities::MPI::sum(send_data_temp.size(),
-                                      tria->get_communicator()) > 0)
+                                      tria->get_mpi_communicator()) > 0)
         {
           const std::set<types::subdomain_id> &neighbors =
             tria->level_ghost_owners();
@@ -262,10 +262,8 @@ namespace internal
               AssertThrow(level_dof_indices.size() == is_ghost.n_elements(),
                           ExcMessage("Size does not match!"));
 
-              const auto index_owner =
-                Utilities::MPI::compute_index_owner(owned_level_dofs,
-                                                    is_ghost,
-                                                    tria->get_communicator());
+              const auto index_owner = Utilities::MPI::compute_index_owner(
+                owned_level_dofs, is_ghost, tria->get_mpi_communicator());
 
               AssertThrow(level_dof_indices.size() == index_owner.size(),
                           ExcMessage("Size does not match!"));
@@ -280,7 +278,7 @@ namespace internal
           // Protect the send/recv logic with a mutex:
           static Utilities::MPI::CollectiveMutex      mutex;
           Utilities::MPI::CollectiveMutex::ScopedLock lock(
-            mutex, tria->get_communicator());
+            mutex, tria->get_mpi_communicator());
 
           const int mpi_tag =
             Utilities::MPI::internal::Tags::mg_transfer_fill_copy_indices;
@@ -299,7 +297,7 @@ namespace internal
                             MPI_BYTE,
                             dest,
                             mpi_tag,
-                            tria->get_communicator(),
+                            tria->get_mpi_communicator(),
                             &*requests.rbegin());
                 AssertThrowMPI(ierr);
               }
@@ -315,7 +313,7 @@ namespace internal
                 MPI_Status status;
                 int        ierr = MPI_Probe(MPI_ANY_SOURCE,
                                      mpi_tag,
-                                     tria->get_communicator(),
+                                     tria->get_mpi_communicator(),
                                      &status);
                 AssertThrowMPI(ierr);
                 int len;
@@ -329,7 +327,7 @@ namespace internal
                                     MPI_BYTE,
                                     status.MPI_SOURCE,
                                     status.MPI_TAG,
-                                    tria->get_communicator(),
+                                    tria->get_mpi_communicator(),
                                     &status);
                     AssertThrowMPI(ierr);
                     continue;
@@ -346,7 +344,7 @@ namespace internal
                                 MPI_BYTE,
                                 status.MPI_SOURCE,
                                 status.MPI_TAG,
-                                tria->get_communicator(),
+                                tria->get_mpi_communicator(),
                                 &status);
                 AssertThrowMPI(ierr);
 
@@ -371,7 +369,7 @@ namespace internal
           // Make sure, in debug mode, that everybody sent/received all packages
           // on this level. If a deadlock occurs here, the list of expected
           // senders is not computed correctly.
-          const int ierr = MPI_Barrier(tria->get_communicator());
+          const int ierr = MPI_Barrier(tria->get_mpi_communicator());
           AssertThrowMPI(ierr);
 #  endif
         }
@@ -923,7 +921,7 @@ namespace internal
                                    external_partitioners.empty() ?
                                      nullptr :
                                      external_partitioners[level],
-                                   tria.get_communicator(),
+                                   tria.get_mpi_communicator(),
                                    target_partitioners[level],
                                    copy_indices_global_mine[level]);
 
@@ -942,7 +940,7 @@ namespace internal
                                        external_partitioners.empty() ?
                                          nullptr :
                                          external_partitioners[0],
-                                       tria.get_communicator(),
+                                       tria.get_mpi_communicator(),
                                        target_partitioners[0],
                                        copy_indices_global_mine[0]);
 
index d4e54b37d891091fc0cecd2a2bee355ab183fa44..6c0e7252e0d94fdae393dce37d453ca7854e51a6 100644 (file)
@@ -268,7 +268,7 @@ MGTransferPrebuilt<VectorType>::build(
           ::dealii::SparsityTools::distribute_sparsity_pattern(
             dsp,
             dof_handler.locally_owned_mg_dofs(level + 1),
-            dof_handler.get_communicator(),
+            dof_handler.get_mpi_communicator(),
             dsp.row_index_set());
         }
 #endif
index 35d23c3fbe93fb598eece786e711ea6c854d131c..77e7380bcf32e57442419fc01433e1d3ed912f3e 100644 (file)
@@ -204,7 +204,7 @@ namespace Particles
                                       1,
                                       DEAL_II_PARTICLE_INDEX_MPI_TYPE,
                                       MPI_SUM,
-                                      tria->get_communicator());
+                                      tria->get_mpi_communicator());
           AssertThrowMPI(ierr);
         }
 #endif
@@ -287,7 +287,7 @@ namespace Particles
               &triangulation))
         {
           const unsigned int my_rank =
-            Utilities::MPI::this_mpi_process(tria->get_communicator());
+            Utilities::MPI::this_mpi_process(tria->get_mpi_communicator());
           combined_seed += my_rank;
         }
       std::mt19937 random_number_generator(combined_seed);
@@ -320,8 +320,8 @@ namespace Particles
                 &triangulation))
           {
             std::tie(local_start_weight, global_weight_integral) =
-              Utilities::MPI::partial_and_total_sum(local_weight_integral,
-                                                    tria->get_communicator());
+              Utilities::MPI::partial_and_total_sum(
+                local_weight_integral, tria->get_mpi_communicator());
           }
         else
           {
index d250df29f6fc67fe2e007deaf6991a677d7a0b98..149dfc699bff3411b1dedc5733bd961907eb2529 100644 (file)
@@ -386,7 +386,7 @@ namespace Particles
 
     global_number_of_particles =
       dealii::Utilities::MPI::sum(number_of_locally_owned_particles,
-                                  triangulation->get_communicator());
+                                  triangulation->get_mpi_communicator());
 
     if (global_number_of_particles == 0)
       {
@@ -395,7 +395,9 @@ namespace Particles
       }
     else
       {
-        Utilities::MPI::max(result, triangulation->get_communicator(), result);
+        Utilities::MPI::max(result,
+                            triangulation->get_mpi_communicator(),
+                            result);
 
         next_free_particle_index      = result[1] + 1;
         global_max_particles_per_cell = result[0];
@@ -718,12 +720,13 @@ namespace Particles
             &*triangulation))
       {
         types::particle_index particles_to_add_locally = positions.size();
-        const int             ierr = MPI_Scan(&particles_to_add_locally,
-                                  &local_start_index,
-                                  1,
-                                  DEAL_II_PARTICLE_INDEX_MPI_TYPE,
-                                  MPI_SUM,
-                                  parallel_triangulation->get_communicator());
+        const int             ierr =
+          MPI_Scan(&particles_to_add_locally,
+                   &local_start_index,
+                   1,
+                   DEAL_II_PARTICLE_INDEX_MPI_TYPE,
+                   MPI_SUM,
+                   parallel_triangulation->get_mpi_communicator());
         AssertThrowMPI(ierr);
         local_start_index -= particles_to_add_locally;
       }
@@ -779,7 +782,7 @@ namespace Particles
     if (!ids.empty())
       AssertDimension(ids.size(), positions.size());
 
-    const auto comm = triangulation->get_communicator();
+    const auto comm = triangulation->get_mpi_communicator();
 
     const auto n_mpi_processes = Utilities::MPI::n_mpi_processes(comm);
 
@@ -1502,7 +1505,7 @@ namespace Particles
             &*triangulation))
       {
         if (dealii::Utilities::MPI::n_mpi_processes(
-              parallel_triangulation->get_communicator()) > 1)
+              parallel_triangulation->get_mpi_communicator()) > 1)
           send_recv_particles(moved_particles, moved_cells);
       }
 #endif
@@ -1540,7 +1543,7 @@ namespace Particles
     if (parallel_triangulation != nullptr)
       {
         if (dealii::Utilities::MPI::n_mpi_processes(
-              parallel_triangulation->get_communicator()) == 1)
+              parallel_triangulation->get_mpi_communicator()) == 1)
           return;
       }
     else
@@ -1647,7 +1650,7 @@ namespace Particles
         &*triangulation);
     if (parallel_triangulation == nullptr ||
         dealii::Utilities::MPI::n_mpi_processes(
-          parallel_triangulation->get_communicator()) == 1)
+          parallel_triangulation->get_mpi_communicator()) == 1)
       {
         return;
       }
@@ -1808,24 +1811,26 @@ namespace Particles
       std::vector<MPI_Request> n_requests(2 * n_neighbors);
       for (unsigned int i = 0; i < n_neighbors; ++i)
         {
-          const int ierr = MPI_Irecv(&(n_recv_data[i]),
-                                     1,
-                                     MPI_UNSIGNED,
-                                     neighbors[i],
-                                     mpi_tag,
-                                     parallel_triangulation->get_communicator(),
-                                     &(n_requests[2 * i]));
+          const int ierr =
+            MPI_Irecv(&(n_recv_data[i]),
+                      1,
+                      MPI_UNSIGNED,
+                      neighbors[i],
+                      mpi_tag,
+                      parallel_triangulation->get_mpi_communicator(),
+                      &(n_requests[2 * i]));
           AssertThrowMPI(ierr);
         }
       for (unsigned int i = 0; i < n_neighbors; ++i)
         {
-          const int ierr = MPI_Isend(&(n_send_data[i]),
-                                     1,
-                                     MPI_UNSIGNED,
-                                     neighbors[i],
-                                     mpi_tag,
-                                     parallel_triangulation->get_communicator(),
-                                     &(n_requests[2 * i + 1]));
+          const int ierr =
+            MPI_Isend(&(n_send_data[i]),
+                      1,
+                      MPI_UNSIGNED,
+                      neighbors[i],
+                      mpi_tag,
+                      parallel_triangulation->get_mpi_communicator(),
+                      &(n_requests[2 * i + 1]));
           AssertThrowMPI(ierr);
         }
       const int ierr =
@@ -1863,7 +1868,7 @@ namespace Particles
                         MPI_CHAR,
                         neighbors[i],
                         mpi_tag,
-                        parallel_triangulation->get_communicator(),
+                        parallel_triangulation->get_mpi_communicator(),
                         &(requests[send_ops]));
             AssertThrowMPI(ierr);
             ++send_ops;
@@ -1878,7 +1883,7 @@ namespace Particles
                         MPI_CHAR,
                         neighbors[i],
                         mpi_tag,
-                        parallel_triangulation->get_communicator(),
+                        parallel_triangulation->get_mpi_communicator(),
                         &(requests[send_ops + recv_ops]));
             AssertThrowMPI(ierr);
             ++recv_ops;
@@ -2021,7 +2026,7 @@ namespace Particles
                         MPI_CHAR,
                         neighbors[i],
                         mpi_tag,
-                        parallel_triangulation->get_communicator(),
+                        parallel_triangulation->get_mpi_communicator(),
                         &(requests[send_ops]));
             AssertThrowMPI(ierr);
             ++send_ops;
@@ -2036,7 +2041,7 @@ namespace Particles
                         MPI_CHAR,
                         neighbors[i],
                         mpi_tag,
-                        parallel_triangulation->get_communicator(),
+                        parallel_triangulation->get_mpi_communicator(),
                         &(requests[send_ops + recv_ops]));
             AssertThrowMPI(ierr);
             ++recv_ops;
index d5a21efb716686d0bd3d03301400f9a84b5e379e..9c1dbdef568a7cd215d0f4147412b2aef1e6e61d 100644 (file)
@@ -66,7 +66,7 @@ main(int argc, char *argv[])
   affine_constraints.close();
 
   TrilinosWrappers::SparsityPattern dsp(dof_handler.locally_owned_dofs(),
-                                        dof_handler.get_communicator());
+                                        dof_handler.get_mpi_communicator());
   DoFTools::make_sparsity_pattern(dof_handler, dsp, affine_constraints);
   dsp.compress();
 
@@ -111,15 +111,15 @@ main(int argc, char *argv[])
                      false);
       Teuchos::RCP<Epetra_MultiVector> B, X;
 
-      LinearAlgebra::EpetraWrappers::Vector x_(dof_handler.locally_owned_dofs(),
-                                               dof_handler.get_communicator());
+      LinearAlgebra::EpetraWrappers::Vector x_(
+        dof_handler.locally_owned_dofs(), dof_handler.get_mpi_communicator());
       LinearAlgebra::ReadWriteVector<Number> x_temp(
         dof_handler.locally_owned_dofs());
       x_temp.import_elements(x, VectorOperation::insert);
       x_.import_elements(x_temp, VectorOperation::insert);
 
-      LinearAlgebra::EpetraWrappers::Vector r_(dof_handler.locally_owned_dofs(),
-                                               dof_handler.get_communicator());
+      LinearAlgebra::EpetraWrappers::Vector r_(
+        dof_handler.locally_owned_dofs(), dof_handler.get_mpi_communicator());
       LinearAlgebra::ReadWriteVector<Number> r_temp(
         dof_handler.locally_owned_dofs());
       r_temp.import_elements(r, VectorOperation::insert);
index 8f4bb0f8d4fb3fc2a8a4caf2ecf3ab161ec861b3..26b1d58f2032773aaaee03f656143fff30052cd4 100644 (file)
@@ -73,7 +73,7 @@ void
 test(DoFHandler<2> &dof_handler, const hp::MappingCollection<2> &mappings)
 {
   DoFRenumbering::support_point_wise(dof_handler);
-  const MPI_Comm comm = dof_handler.get_communicator();
+  const MPI_Comm comm = dof_handler.get_mpi_communicator();
 
   const IndexSet &local_dofs = dof_handler.locally_owned_dofs();
   deallog << "new case with locally owned dofs = ";
index d9e96f074a798c41c93fd7bcf4e9fdc6f951d6a7..90f826af7835692b3c183948f6e2920fa924af6d 100644 (file)
@@ -71,7 +71,7 @@ output_grid(
   const std::string                                                      &label)
 {
   deallog.push(label);
-  const auto comm    = trias.front()->get_communicator();
+  const auto comm    = trias.front()->get_mpi_communicator();
   const auto my_rank = Utilities::MPI::this_mpi_process(comm);
 
   for (unsigned int i = 0; i < trias.size(); ++i)
index 08216bcd7d9d3449611dbf4f1a8cca9d077a3cb0..24f4b93c23eaf1e50fef246adf594ea24463d2dd 100644 (file)
@@ -64,7 +64,7 @@ LinearAlgebra::distributed::Vector<double>
 partition_distributed_triangulation(const Triangulation<dim, spacedim> &tria_in,
                                     const MPI_Comm                      comm)
 {
-  const auto comm_tria = tria_in.get_communicator();
+  const auto comm_tria = tria_in.get_mpi_communicator();
 
   const auto n_global_active_cells = Utilities::MPI::max(
     comm_tria == MPI_COMM_SELF ? 0 : tria_in.n_global_active_cells(), comm);
index 4a5da2f055877924c37ccd47d42dcdc11021dcf4..6b005105ab3e3431ddd091340fd95e1426e7e6d8 100644 (file)
@@ -117,7 +117,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
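This helper keeps its dynamic_cast fallback: only parallel triangulations
report their own communicator, and everything else maps to MPI_COMM_SELF,
consistent with the base-class definition of get_mpi_communicator() shown
earlier. Condensed, the helper reads as follows after this change (names as
in the hunk):

    template <typename MeshType>
    MPI_Comm
    get_mpi_comm(const MeshType &mesh)
    {
      const auto *tria_parallel = dynamic_cast<
        const parallel::TriangulationBase<MeshType::dimension,
                                          MeshType::space_dimension> *>(
        &(mesh.get_triangulation()));

      // parallel meshes report their own communicator; serial meshes
      // fall back to the single-process communicator
      return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                        MPI_COMM_SELF;
    }
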
index 9630978908164403ae14aefe5ca00d9066ad3382..5278bb498c3c9d240037873078406758b14856bd 100644 (file)
@@ -120,7 +120,7 @@ create_mca_tria(const unsigned int   n_subdivisions,
                                           "data_background_" +
                                             std::to_string(n_subdivisions),
                                           0,
-                                          triangulation.get_communicator());
+                                          triangulation.get_mpi_communicator());
     }
 }
 
index c11bed6ac5fe954363db566d7f36772a23ad9ebd..d3b45cb6d571117265532aca34f53f755b71b249 100644 (file)
@@ -83,7 +83,7 @@ test(const DoFHandler<dim, spacedim> &dof_handler,
 
   constraints.make_consistent_in_parallel(dof_handler.locally_owned_dofs(),
                                           locally_relevant_dofs,
-                                          dof_handler.get_communicator());
+                                          dof_handler.get_mpi_communicator());
 
   const auto b = collect_lines(constraints, dof_handler.n_dofs());
   b.print(deallog.get_file_stream());
index e2b3c5973e6d64804f522cc037f607ae3384c14c..a62800f4f22041a81f386d86dcc4b8eb77cfcd11 100644 (file)
@@ -63,7 +63,7 @@ reinit_sparsity_pattern(const DoFHandler<dim, spacedim>   &dof_handler,
                         TrilinosWrappers::SparsityPattern &sparsity_pattern)
 {
   sparsity_pattern.reinit(dof_handler.locally_owned_dofs(),
-                          dof_handler.get_communicator());
+                          dof_handler.get_mpi_communicator());
 }
 
 template <int dim,
index 68ba2203f2b1479de706233a32588b5208fed276..7ced2f48459b88e16978e300aa75255c7ceb0b67 100644 (file)
@@ -278,7 +278,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 2da6523b0b8deb93b1e46ffab9ce0f2fa35e76f9..7d822280bc34905aed54d7eef3fafe5e447f79c2 100644 (file)
@@ -281,7 +281,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index c16cdc961e0ae723c5daa654a4aa86b6c921c643..52da8df4882d2760bd427f0b02c85a5379d5afdf 100644 (file)
@@ -282,7 +282,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 777d7efc9c786f33a72476526e27fc2a25be4541..11edae8ed1cf0fc0cd680dad2d2aca4527279a7a 100644 (file)
@@ -282,7 +282,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 808a3159a82f97764174cb97c73000fd2f8d69f2..c582dcb41c47e6a7521f18d98a0d30e886a49f13 100644 (file)
@@ -280,7 +280,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index c99f64ec7566820c8809d1ff8f4fd8967441c129..59aaec64020543d4604ac4f8efdb17848b517837 100644 (file)
@@ -280,7 +280,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index d9d72ab4685c2113bd5e04b6e2f90be91e3fca48..a0b2f1f4ffca5cc03ec06048fd86d562beae68e2 100644 (file)
@@ -78,7 +78,7 @@ test()
         DoFTools::extract_locally_relevant_level_dofs(dh, level);
       level_vectors[level].reinit(dh.locally_owned_mg_dofs(level),
                                   relevant_dofs,
-                                  tria.get_communicator());
+                                  tria.get_mpi_communicator());
       std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
       for (const auto &cell : dh.mg_cell_iterators_on_level(level))
         if (cell->level_subdomain_id() != numbers::artificial_subdomain_id)
index de3f94c48275ad325d54d1e8c7b68ce5df5cc002..fc77b77448bd6b6b5b71f0110533ad465712b305 100644 (file)
@@ -80,7 +80,7 @@ do_test(const unsigned int degree,
   LinearAlgebra::distributed::Vector<double> vector(
     dof_handler.locally_owned_dofs(),
     locally_relevant_dofs,
-    dof_handler.get_communicator());
+    dof_handler.get_mpi_communicator());
 
   VectorTools::interpolate(dof_handler, fu, vector);
 
index 71a7b18a6457d44a1a8c4892a372e43ddc266174..74744fd0ecec8eb2da360841df49ed5c018643f2 100644 (file)
@@ -53,7 +53,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index 086cbdaaf681a7d3c9cff77a24d666bba60cc196..680c17162d071f9a8f024383dfb847b2289624d1 100644 (file)
@@ -83,7 +83,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index c5b5fb5c8dcd6c226cc0195053c630ef940b7d90..748e9e457831a2c5f080490697d69e456ba6d96b 100644 (file)
@@ -70,7 +70,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index 4eb6064a1f990d808bece49c894abd30f71aae3c..4379bf9f0d7d585afe9516797a6b0e9b4122898c 100644 (file)
@@ -58,7 +58,7 @@ test()
   tr.refine_global(1);
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index 89d6ef239575a9985c9ff616721f3863c5eecd83..d06bce6397ae8e8302fdcf7f03e62031f516abab 100644 (file)
@@ -67,7 +67,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index a9389ea645dc8dd312cf9cbfa51cee9a4da62765..192ff3c8a77796ceedc020cd96140f02bcaa6899 100644 (file)
@@ -61,7 +61,7 @@ test()
   tr.refine_global(1);
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index 4d6833c988492b6262e5a4d390384af0746f18a1..bac3f7d97c337bfe7499e64021973f089215c906 100644 (file)
@@ -81,7 +81,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index 0f321326b162d3800f92c2da0eb5442e41ec9018..5fa87c433f32e1b37420d3d39666bd61c5dcb1da 100644 (file)
@@ -80,7 +80,7 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    Utilities::MPI::all_gather(tr.get_communicator(),
+    Utilities::MPI::all_gather(tr.get_mpi_communicator(),
                                tr.n_locally_owned_active_cells());
   if (myid == 0)
     for (unsigned int p = 0; p < numproc; ++p)
index bb43fdaea58e4ee1ce181520aa20942f43de4cb1..67615a77da0f40fcac5d795a5ab0af562b3d1bec 100644 (file)
@@ -94,10 +94,11 @@ test()
   }
 
   const IndexSet relevant_set = DoFTools::extract_locally_relevant_dofs(dofh);
-  TrilinosWrappers::MPI::Vector x_rel(relevant_set, dofh.get_communicator());
+  TrilinosWrappers::MPI::Vector x_rel(relevant_set,
+                                      dofh.get_mpi_communicator());
   {
     TrilinosWrappers::MPI::Vector interpolated(dofh.locally_owned_dofs(),
-                                               dofh.get_communicator());
+                                               dofh.get_mpi_communicator());
     VectorTools::interpolate(dofh, LinearFunction<dim>(), interpolated);
     x_rel = interpolated;
   }
index 91b9b6aaed7d2b48869c22ce020bb60ef3ffccb3..00e1054dfca80897927a0bd04f9f2aa77c1d054f 100644 (file)
@@ -114,7 +114,8 @@ test(const unsigned int degree_center,
 
   // ------ verify -----
   std::vector<IndexSet> locally_owned_dofs_per_processor =
-    Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+    Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+                               dh.locally_owned_dofs());
 
   const IndexSet locally_active_dofs =
     DoFTools::extract_locally_active_dofs(dh);
index 99776eb38d72a4ebfd8060d684bd09e458c72697..ebaaf0af78cf0945cfb44ec01d7e7b8b7696b3cc 100644 (file)
@@ -120,7 +120,8 @@ test(const unsigned int degree_center,
 
   // ------ verify -----
   std::vector<IndexSet> locally_owned_dofs_per_processor =
-    Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+    Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+                               dh.locally_owned_dofs());
 
   const IndexSet locally_active_dofs =
     DoFTools::extract_locally_active_dofs(dh);
index 857068bc000f2d0f7048f6c275c25e93f7ea89f3..bd3bb60865062b73c4e956c080a1ffdc140db917 100644 (file)
@@ -135,7 +135,8 @@ test(const unsigned int degree_center,
 
   // ------ verify -----
   std::vector<IndexSet> locally_owned_dofs_per_processor =
-    Utilities::MPI::all_gather(dh.get_communicator(), dh.locally_owned_dofs());
+    Utilities::MPI::all_gather(dh.get_mpi_communicator(),
+                               dh.locally_owned_dofs());
 
   const IndexSet locally_active_dofs =
     DoFTools::extract_locally_active_dofs(dh);
index 0ba071d66bf7a21aa2a89e54ee35ffb81532c34e..609fd303bf10e779241db06b29139fd1208fce88 100644 (file)
@@ -329,7 +329,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 330f7783ecc961630e3a1ba6fabe186f437743cb..448ab7f6c54f3d63a7cf4ff814f651101823ff3c 100644 (file)
@@ -332,7 +332,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 59c25d8f32cc543ccbd8a47b2d449de206f763da..51b5395a2c5840f64ffe291c0f3ddea5f57ed869 100644 (file)
@@ -96,7 +96,7 @@ test(const unsigned int fes_size, const unsigned int max_difference)
   for (const auto &cell :
        dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
     count[cell->active_fe_index()]++;
-  Utilities::MPI::sum(count, tria.get_communicator(), count);
+  Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
   deallog << "fe count:" << count << std::endl;
 
 #ifdef DEBUG
index 7eec841a2c11a302bf2fc496701e0059d07cd70c..6d732193636e84cfae927cfb39998d1b403cbd7d 100644 (file)
@@ -102,7 +102,7 @@ test(const unsigned int fes_size, const unsigned int max_difference)
       for (const auto &cell :
            dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
         count[cell->active_fe_index()]++;
-      Utilities::MPI::sum(count, tria.get_communicator(), count);
+      Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
       deallog << "cycle:" << i << ", fe count:" << count << std::endl;
     }
 
index 33b84467e75b3a71ea4465723f0750c4b89fe75d..f5e6163f7b841099f8f070327806535d741d4baa 100644 (file)
@@ -272,7 +272,7 @@ check(const unsigned int orientation, bool reverse)
                 1,
                 MPI_INT,
                 MPI_SUM,
-                triangulation.get_communicator());
+                triangulation.get_mpi_communicator());
   Assert(sum_of_pairs_global > 0, ExcInternalError());
   for (it = face_map.begin(); it != face_map.end(); ++it)
     {
index e950973cf2418c5bcbbee189d450ad3fb543042c..16fa25ed662892eabc188e38e6852a53d0a5c856 100644 (file)
@@ -225,7 +225,7 @@ namespace Step27
     // We have not dealt with chains of constraints on ghost cells yet.
     // Thus, we are content with verifying their consistency for now.
     std::vector<IndexSet> locally_owned_dofs_per_processor =
-      Utilities::MPI::all_gather(dof_handler.get_communicator(),
+      Utilities::MPI::all_gather(dof_handler.get_mpi_communicator(),
                                  dof_handler.locally_owned_dofs());
 
     const IndexSet locally_active_dofs =
index bd7727ffe3168781239ca75cfde143c16e660dc2..3e7c69d2af2604bea8ae618e5746a0a04924f655 100644 (file)
@@ -60,7 +60,7 @@ test()
   LinearAlgebra::distributed::Vector<double> solution;
   solution.reinit(locally_owned_dofs,
                   locally_relevant_dofs,
-                  dofh.get_communicator());
+                  dofh.get_mpi_communicator());
 
   for (unsigned int i = 0; i < solution.size(); ++i)
     if (locally_owned_dofs.is_element(i))
@@ -68,7 +68,7 @@ test()
   solution.update_ghost_values();
 
   double l1_norm = solution.l1_norm();
-  if (Utilities::MPI::this_mpi_process(dofh.get_communicator()) == 0)
+  if (Utilities::MPI::this_mpi_process(dofh.get_mpi_communicator()) == 0)
     deallog << "pre  refinement l1=" << l1_norm << std::endl;
 
   // set refine/coarsen flags manually
@@ -121,11 +121,11 @@ test()
 
   solution.reinit(locally_owned_dofs,
                   locally_relevant_dofs,
-                  dofh.get_communicator());
+                  dofh.get_mpi_communicator());
   soltrans.interpolate(solution);
 
   l1_norm = solution.l1_norm();
-  if (Utilities::MPI::this_mpi_process(dofh.get_communicator()) == 0)
+  if (Utilities::MPI::this_mpi_process(dofh.get_mpi_communicator()) == 0)
     deallog << "post refinement l1=" << l1_norm << std::endl;
 
   // make sure no processor is hanging
index 061ba3b24dca31d2046da384ada4d1de657ffbe8..580ed316b99134966b3931387e82de0301f7c3f5 100644 (file)
@@ -314,7 +314,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index c5a0fe810c89ec64aac6ba643882fac7acfd149c..ba6088b4cbccddc975c9da8627414e396520087d 100644 (file)
@@ -376,7 +376,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 7adf7e93bdbe1d4c6a1ec802a40faf5b616c6543..1da956e15309d94fc7a681a91a8880a32916873c 100644 (file)
@@ -377,7 +377,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index b718525a28bd1e61d7585c01135b0600e04dafbf..13b6bc1badfdeda8a36d99b53a864561716e78db 100644 (file)
@@ -292,7 +292,7 @@ namespace Step40
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
           Utilities::MPI::all_gather(
-            triangulation.get_communicator(),
+            triangulation.get_mpi_communicator(),
             triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
index 39a63052360723fdfc81af42b284c3e0ff6350b7..5f2e5c0cdddea6b5e15e2057af342192e4d68211 100644 (file)
@@ -229,7 +229,7 @@ namespace Step27
     // We have not thought about hp-constraints on ghost cells yet.
     // Thus, we are content with verifying their consistency for now.
     std::vector<IndexSet> locally_owned_dofs_per_processor =
-      Utilities::MPI::all_gather(dof_handler.get_communicator(),
+      Utilities::MPI::all_gather(dof_handler.get_mpi_communicator(),
                                  dof_handler.locally_owned_dofs());
 
     const IndexSet locally_active_dofs =
index cb25cf435cc234ab2363b2c6feeb398a9fec1647..60237476b5bde8eab19eae762ab0fb8f45c0c290 100644 (file)
@@ -141,7 +141,7 @@ do_test(const hp::FECollection<dim> &fe_fine,
 
 #if 0
   data_out.write_vtu_with_pvtu_record(
-    "./", "result", counter++, tria.get_communicator(), 3, 1);
+    "./", "result", counter++, tria.get_mpi_communicator(), 3, 1);
 #else
   deallog << std::endl;
   data_out.write_vtk(deallog.get_file_stream());
index 8ab61e1aef8d73c9a005fe825058419d6bdce6b4..fde1456211d17ffb88652f9ff8d9640e9e394607 100644 (file)
@@ -85,7 +85,7 @@ create_partitioner(const DoFHandler<dim> &dof_handler)
   return std::make_shared<Utilities::MPI::Partitioner>(
     dof_handler.locally_owned_dofs(),
     DoFTools::extract_locally_active_dofs(dof_handler),
-    dof_handler.get_communicator());
+    dof_handler.get_mpi_communicator());
 }
 
 
index 5706f322a0c4ee11c992d80f59485e0f52ad29ec..3e6cb212dc56fe79e4dbee01652c8a82fc760c66 100644 (file)
@@ -47,7 +47,7 @@ initialize_dof_vector(LinearAlgebra::distributed::Vector<Number> &vec,
       &(dof_handler.get_triangulation()));
 
   MPI_Comm comm =
-    dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+    dist_tria != nullptr ? dist_tria->get_mpi_communicator() : MPI_COMM_SELF;
 
   vec.reinit(level == numbers::invalid_unsigned_int ?
                dof_handler.locally_owned_dofs() :
index ba93707cd62575bdf941cca29135b666236c31c9..d7d05dc9fd91fca3d4fbe879e940a8d7ec7d9fc5 100644 (file)
@@ -50,7 +50,7 @@ test()
   DoFHandler<dim> dof_handler(tria);
   dof_handler.distribute_dofs(fe);
 
-  const auto      mpi_comm   = dof_handler.get_communicator();
+  const auto      mpi_comm   = dof_handler.get_mpi_communicator();
   const IndexSet &owned_dofs = dof_handler.locally_owned_dofs();
 
   const IndexSet relevant_dofs =
index 4d37005d728a2dfa6dadbccaa6bb244f38735d8c..89dd2e991f356089d1b5fffcaa136c91e2985325 100644 (file)
@@ -111,7 +111,7 @@ do_project(const parallel::distributed::Triangulation<dim> &triangulation,
 
   deallog << "n_dofs=" << dof_handler.n_dofs() << std::endl;
 
-  const MPI_Comm mpi_communicator   = triangulation.get_communicator();
+  const MPI_Comm mpi_communicator   = triangulation.get_mpi_communicator();
   const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
   const IndexSet locally_relevant_dofs =
     DoFTools::extract_locally_relevant_dofs(dof_handler);
index 82eab507dbfac177fe1c857defa6004cd37d8e87..5b6cb3d6a1c7c34a7acff3fa42f769e7fb0fda62 100644 (file)
@@ -69,7 +69,7 @@ test()
                                                                       2,
                                                                       0);
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       {
         auto particle_it = particle_handler.insert_particle(particle1, cell1);
         particle_it->set_properties(properties);
index 28c03ec784d69955769b354fb60339ed3c5dcb8d..f8740849d18e1cf47d4225e4d40c4130dadb8a7b 100644 (file)
@@ -46,7 +46,7 @@ test()
     Point<spacedim> position;
     Point<dim>      reference_position;
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       for (unsigned int i = 0; i < dim; ++i)
         position[i] = 0.475;
     else
@@ -56,7 +56,7 @@ test()
     Particles::Particle<dim, spacedim> particle(
       position,
       reference_position,
-      Utilities::MPI::this_mpi_process(tr.get_communicator()));
+      Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
 
     // We give a local random cell hint to check that sorting and
     // transferring ghost particles works.
@@ -72,13 +72,13 @@ test()
     deallog << "Before ghost exchange: "
             << particle_handler.n_locally_owned_particles()
             << " locally owned particles on process "
-            << Utilities::MPI::this_mpi_process(tr.get_communicator())
+            << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
             << std::endl;
 
     deallog << "Before ghost exchange: "
             << particle_handler.get_property_pool().n_registered_slots()
             << " stored particles on process "
-            << Utilities::MPI::this_mpi_process(tr.get_communicator())
+            << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
             << std::endl;
 
     particle_handler.exchange_ghost_particles();
@@ -92,13 +92,13 @@ test()
     deallog << "After ghost exchange: "
             << particle_handler.n_locally_owned_particles()
             << " locally owned particles on process "
-            << Utilities::MPI::this_mpi_process(tr.get_communicator())
+            << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
             << std::endl;
 
     deallog << "After ghost exchange: "
             << particle_handler.get_property_pool().n_registered_slots()
             << " stored particles on process "
-            << Utilities::MPI::this_mpi_process(tr.get_communicator())
+            << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
             << std::endl;
   }
 
index e8745b3dd88a58fb73c668ec2e9fa0b019b754db..20b229090152638359ced9d89f6accf0dade3e9e 100644 (file)
@@ -68,7 +68,7 @@ test()
     Point<spacedim> position;
     Point<dim>      reference_position;
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       position[0] = 0.001;
     else
       position[0] = 0.999;
@@ -76,7 +76,7 @@ test()
     Particles::Particle<dim, spacedim> particle(
       position,
       reference_position,
-      Utilities::MPI::this_mpi_process(tr.get_communicator()));
+      Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
 
     // We give a local random cell hint to check that sorting and
     // transferring ghost particles works.
index fe2231b944e1cb8da18c95db2b63d5a68aad6114..0696fa5be23f245ee4918fe30b5627d57231bdf2 100644 (file)
@@ -43,7 +43,7 @@ test()
     // particles
     Particles::ParticleHandler<dim, spacedim> particle_handler(tr, mapping);
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       {
         std::vector<Point<spacedim>> position(2);
         std::vector<Point<dim>>      reference_position(2);
@@ -75,7 +75,7 @@ test()
           deallog << "Before sort particle id " << particle.get_id()
                   << " is in cell " << particle.get_surrounding_cell()
                   << " on process "
-                  << Utilities::MPI::this_mpi_process(tr.get_communicator())
+                  << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
                   << std::flush << std::endl;
       }
 
@@ -87,7 +87,7 @@ test()
       deallog << "After sort particle id " << particle.get_id()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::flush << std::endl;
 
     // Move all points up by 0.5. This will change cell for particle 1, and will
@@ -103,7 +103,7 @@ test()
       deallog << "After shift particle id " << particle.get_id()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::flush << std::endl;
   }
 
index 2968be2c394d463e59bd600c3edc4717edc157b2..102c84b7bc2898a4caf4e5646492dc8ae4ed58d0 100644 (file)
@@ -45,7 +45,7 @@ test()
     Point<spacedim> position;
     Point<dim>      reference_position;
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       for (unsigned int i = 0; i < dim; ++i)
         position[i] = 0.475;
     else
@@ -55,7 +55,7 @@ test()
     Particles::Particle<dim, spacedim> particle(
       position,
       reference_position,
-      Utilities::MPI::this_mpi_process(tr.get_communicator()));
+      Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
 
     // We give a local random cell hint to check that sorting and
     // transferring ghost particles works.
@@ -74,7 +74,7 @@ test()
          ++particle)
       deallog << "Particle id " << particle->get_id()
               << " is local particle on process "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     for (auto particle = particle_handler.begin_ghost();
@@ -82,7 +82,7 @@ test()
          ++particle)
       deallog << "Particle id " << particle->get_id()
               << " is ghost particle on process "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
   }
 
index 344a5067d092274273239befff173f31630caf38..2eb23139fad33ad48813a5947befcf21c4841b30 100644 (file)
@@ -47,7 +47,7 @@ test()
     Point<spacedim> position;
     Point<dim>      reference_position;
 
-    if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
       for (unsigned int i = 0; i < dim; ++i)
         position[i] = 0.475;
     else
@@ -57,7 +57,7 @@ test()
     Particles::Particle<dim, spacedim> particle(
       position,
       reference_position,
-      Utilities::MPI::this_mpi_process(tr.get_communicator()));
+      Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()));
     typename Triangulation<dim, spacedim>::active_cell_iterator cell =
       tr.begin_active();
     while (!cell->is_locally_owned())
@@ -73,9 +73,9 @@ test()
          ++particle)
       {
         particle->get_properties()[0] =
-          10 + Utilities::MPI::this_mpi_process(tr.get_communicator());
+          10 + Utilities::MPI::this_mpi_process(tr.get_mpi_communicator());
         particle->get_properties()[1] =
-          100 + Utilities::MPI::this_mpi_process(tr.get_communicator());
+          100 + Utilities::MPI::this_mpi_process(tr.get_mpi_communicator());
       }
 
 
@@ -88,7 +88,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is local on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     for (auto particle = particle_handler.begin_ghost();
@@ -98,7 +98,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is ghost on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     deallog << "Modifying particles positions and properties" << std::endl;
@@ -127,7 +127,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is local on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     for (auto particle = particle_handler.begin_ghost();
@@ -137,7 +137,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is ghost on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
   }
 
index a9c12075476c1224dd6175f44e8be5851abc4cab..3cbcb8a10ad074c7786a5ea3c6b4f6df547ee98a 100644 (file)
@@ -50,7 +50,7 @@ test()
 
     for (unsigned int p = 0; p < n_particles; ++p)
       {
-        if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+        if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
           {
             for (unsigned int i = 0; i < dim; ++i)
               position[i] = 0.410 + 0.01 * p;
@@ -58,7 +58,7 @@ test()
             Particles::Particle<dim, spacedim> particle(
               position,
               reference_position,
-              Utilities::MPI::this_mpi_process(tr.get_communicator()) *
+              Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) *
                   n_particles +
                 p);
             typename Triangulation<dim, spacedim>::active_cell_iterator cell =
@@ -77,10 +77,12 @@ test()
          ++particle)
       {
         particle->get_properties()[0] =
-          1000 + 100 * Utilities::MPI::this_mpi_process(tr.get_communicator()) +
+          1000 +
+          100 * Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) +
           10 * particle->get_id();
         particle->get_properties()[1] =
-          2000 + 100 * Utilities::MPI::this_mpi_process(tr.get_communicator()) +
+          2000 +
+          100 * Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) +
           10 * particle->get_id();
         counter++;
       }
@@ -95,7 +97,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is local on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     for (auto particle = particle_handler.begin_ghost();
@@ -105,7 +107,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is ghost on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     deallog << "Modifying particles positions and properties" << std::endl;
@@ -134,7 +136,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is local on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
 
     for (auto particle = particle_handler.begin_ghost();
@@ -144,7 +146,7 @@ test()
               << " location : " << particle->get_location()
               << " property : " << particle->get_properties()[0] << " and "
               << particle->get_properties()[1] << " is ghost on process : "
-              << Utilities::MPI::this_mpi_process(tr.get_communicator())
+              << Utilities::MPI::this_mpi_process(tr.get_mpi_communicator())
               << std::endl;
   }
 
index 916521bad50fa8824d66dc642b8d96df8f998ef1..2e23a19b01ecb0db1c2fd4fa8a603ebed9ea1c42 100644 (file)
@@ -59,7 +59,7 @@ test()
     Particles::ParticleHandler<dim, spacedim> particle_handler(tria_pft,
                                                                mapping);
 
-    if (Utilities::MPI::this_mpi_process(tria_pft.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tria_pft.get_mpi_communicator()) == 0)
       {
         std::vector<Point<spacedim>> position(2);
         std::vector<Point<dim>>      reference_position(2);
@@ -90,7 +90,7 @@ test()
                   << " is in cell " << particle.get_surrounding_cell()
                   << " on process "
                   << Utilities::MPI::this_mpi_process(
-                       tria_pft.get_communicator())
+                       tria_pft.get_mpi_communicator())
                   << std::flush << std::endl;
       }
 
@@ -102,7 +102,8 @@ test()
       deallog << "After sort particle id " << particle.get_id()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
-              << Utilities::MPI::this_mpi_process(tria_pft.get_communicator())
+              << Utilities::MPI::this_mpi_process(
+                   tria_pft.get_mpi_communicator())
               << std::flush << std::endl;
 
     // Move all points up by 0.5. This will change cell for particle 1 and will
@@ -118,7 +119,8 @@ test()
       deallog << "After shift particle id " << particle.get_id()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
-              << Utilities::MPI::this_mpi_process(tria_pft.get_communicator())
+              << Utilities::MPI::this_mpi_process(
+                   tria_pft.get_mpi_communicator())
               << std::flush << std::endl;
   }
 
index 00f4494ee085b734caa825a03db2a094a5052a39..6f063c1e98abbda540b914671f9e3f98acdf87f8 100644 (file)
@@ -55,7 +55,8 @@ test()
     Particles::ParticleHandler<dim, spacedim> particle_handler(tria_shared,
                                                                mapping);
 
-    if (Utilities::MPI::this_mpi_process(tria_shared.get_communicator()) == 0)
+    if (Utilities::MPI::this_mpi_process(tria_shared.get_mpi_communicator()) ==
+        0)
       {
         std::vector<Point<spacedim>> position(2);
         std::vector<Point<dim>>      reference_position(2);
@@ -86,7 +87,7 @@ test()
                   << " is in cell " << particle.get_surrounding_cell()
                   << " on process "
                   << Utilities::MPI::this_mpi_process(
-                       tria_shared.get_communicator())
+                       tria_shared.get_mpi_communicator())
                   << std::flush << std::endl;
       }
 
@@ -99,7 +100,7 @@ test()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
               << Utilities::MPI::this_mpi_process(
-                   tria_shared.get_communicator())
+                   tria_shared.get_mpi_communicator())
               << std::flush << std::endl;
 
     // Move all points up by 0.5. This will change cell for particle 1 and will
@@ -116,7 +117,7 @@ test()
               << " is in cell " << particle.get_surrounding_cell()
               << " on process "
               << Utilities::MPI::this_mpi_process(
-                   tria_shared.get_communicator())
+                   tria_shared.get_mpi_communicator())
               << std::flush << std::endl;
   }
 
index 22c6051a2d5c96242b919be30af66d114aac0b62..29f541d255c8884b9fce3915f0a16c2374d4d9db 100644 (file)
@@ -50,7 +50,7 @@ test()
 
   std::vector<Point<spacedim>> position(1);
 
-  if (Utilities::MPI::this_mpi_process(tr.get_communicator()) == 0)
+  if (Utilities::MPI::this_mpi_process(tr.get_mpi_communicator()) == 0)
     for (unsigned int i = 0; i < dim; ++i)
       position[0][i] = 2;
 
index 2c82b1d094d5cb2d777f3f5bc4c18ec162fa8c5f..8ec7ab301356b1a6c4bd2bfa44727f385d5c84eb 100644 (file)
@@ -53,7 +53,7 @@ test()
       GridTools::compute_mesh_predicate_bounding_box(
         triangulation, IteratorFilters::LocallyOwnedCell());
     const auto global_bounding_boxes =
-      Utilities::MPI::all_gather(triangulation.get_communicator(),
+      Utilities::MPI::all_gather(triangulation.get_mpi_communicator(),
                                  local_bounding_box);
 
     Particles::Generators::quadrature_points(triangulation,
index 00028fc67c3ebd8787d72a01d2080d22a1a84b49..ad29c9cdb6fa58da8b527caaeb77c87ac1fba490 100644 (file)
@@ -61,7 +61,7 @@ create_partitioner(const DoFHandler<dim, spacedim> &dof_handler)
   return std::make_shared<const Utilities::MPI::Partitioner>(
     dof_handler.locally_owned_dofs(),
     locally_relevant_dofs,
-    dof_handler.get_communicator());
+    dof_handler.get_mpi_communicator());
 }
 
 
index 0e107437636bde414b50740d273b35b9bd255197..01d8588cda55173b15ee7e365c69c03410fa1333 100644 (file)
@@ -61,7 +61,7 @@ create_partitioner(const DoFHandler<dim, spacedim> &dof_handler)
   return std::make_shared<const Utilities::MPI::Partitioner>(
     dof_handler.locally_owned_dofs(),
     locally_relevant_dofs,
-    dof_handler.get_communicator());
+    dof_handler.get_mpi_communicator());
 }
 
 
index 202137ac4029cf2d7d6cef93fd761c0b7f65812c..0414720b256c9f4decf4486ba07d85072848b1f3 100644 (file)
@@ -65,7 +65,7 @@ test()
 
   // gather bounding boxes of other processes
   const auto global_bboxes =
-    Utilities::MPI::all_gather(tria.get_communicator(), local_reduced_box);
+    Utilities::MPI::all_gather(tria.get_mpi_communicator(), local_reduced_box);
 
   const GridTools::Cache<dim> cache(tria, mapping);
 
index d2bc96003743edf3fb53f2acfa69a563986c5db8..4177da89074650cc4994d8d815054325516207ab 100644 (file)
@@ -58,7 +58,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index f624f5cb16a82d3e2ce76f2c5684e539530e6615..6aed62cc2e32d8ee116212f244dece8eb406d257 100644 (file)
@@ -93,7 +93,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 6738ea05be53853b5c18a7f62403b5cb0b178d3c..0cdaeaa66fcb8e4e5582f11273e1dbc023a70772 100644 (file)
@@ -76,7 +76,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 71fc6784698c5e04c6138dab023ee2c7ce4cb11b..25c3e20900d6a449061b948e6f2b7347489dfcbc 100644 (file)
@@ -83,7 +83,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 56132303c993ba9f0d2b3fdbc5c0adc747d62b8b..9de196f4a5b0295f7aa39084da8e3c6178af50d1 100644 (file)
@@ -56,7 +56,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index de116cae7cfbc7da8c444012daa6d3de5d198567..039c2cd1178d42b969b06ac44613998a800a7a4b 100644 (file)
@@ -52,7 +52,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 3a4f63a995d877dd2fccacf2056da30ef0f80f48..f4149e52f4a6a75ab788c755ecaeadef08602c37 100644 (file)
@@ -51,7 +51,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index b0f944f2b7028ca0d90b0361ec1c9e0397a91f9f..34e152626c0bc51a68f74eb62c650c19924eade8 100644 (file)
@@ -51,7 +51,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 1001fe7d215b6f1f408aa54ca19adb05245ff9e5..ad0d2f6186298456131e21d6013e1c46ee2ea047 100644 (file)
@@ -52,7 +52,7 @@ create_partitioner(const DoFHandler<dim, spacedim> &dof_handler)
   return std::make_shared<const Utilities::MPI::Partitioner>(
     dof_handler.locally_owned_dofs(),
     locally_relevant_dofs,
-    dof_handler.get_communicator());
+    dof_handler.get_mpi_communicator());
 }
 
 void
index 8f4641baa19f090a161ad8bfeb79a597daaca3a3..8a025768d95229b91b05c7e16d6e1ab4913b0ec9 100644 (file)
@@ -52,7 +52,7 @@ get_mpi_comm(const MeshType &mesh)
                                       MeshType::space_dimension> *>(
     &(mesh.get_triangulation()));
 
-  return tria_parallel != nullptr ? tria_parallel->get_communicator() :
+  return tria_parallel != nullptr ? tria_parallel->get_mpi_communicator() :
                                     MPI_COMM_SELF;
 }
 
index 521be682b3fb01561bb99b91e131e45d18a36a96..aa45fa3442d0828cfa7f48cdad793770195e2e62 100644 (file)
@@ -93,7 +93,7 @@ test(const unsigned int fes_size,
   for (const auto &cell :
        dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
     count[cell->active_fe_index()]++;
-  Utilities::MPI::sum(count, tria.get_communicator(), count);
+  Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
   deallog << "fe count:" << count << std::endl;
 
 #ifdef DEBUG
index d3ba5b96851da321493dec54d3fb10b62656f427..fc7093df88cc7d2231e4db79439375d2be03323c 100644 (file)
@@ -98,7 +98,7 @@ test(const unsigned int fes_size,
       for (const auto &cell :
            dofh.active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
         count[cell->active_fe_index()]++;
-      Utilities::MPI::sum(count, tria.get_communicator(), count);
+      Utilities::MPI::sum(count, tria.get_mpi_communicator(), count);
       deallog << "cycle:" << i << ", fe count:" << count << std::endl;
     }
 
index c48e5f591f1ec93acbc354dec1dff043aeb39e06..43e0276ea3de607c3ce50626ad68d13a18640b0e 100644 (file)
@@ -87,7 +87,7 @@ get_communicator(const Triangulation<dim, spacedim> &tria)
 {
   if (auto tria_ =
         dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(&tria))
-    return tria_->get_communicator();
+    return tria_->get_mpi_communicator();
 
   return MPI_COMM_SELF;
 }
index d93813771be2869fcd1ee5f7144cda0f285c414b..35017bea1c68c3ef13fe14632e9858b694bed69d 100644 (file)
@@ -346,7 +346,7 @@ test_destroy()
 template <typename VectorType,
           std::enable_if_t<is_serial_vector<VectorType>::value, int> = 0>
 void
-test_get_communicator()
+test_get_mpi_communicator()
 {
   auto vector   = create_test_vector<VectorType>();
   auto n_vector = make_nvector_view(vector
@@ -368,7 +368,7 @@ test_get_communicator()
 template <typename VectorType,
           std::enable_if_t<!is_serial_vector<VectorType>::value, int> = 0>
 void
-test_get_communicator()
+test_get_mpi_communicator()
 {
   auto vector   = create_test_vector<VectorType>();
   auto n_vector = make_nvector_view(vector
@@ -1081,7 +1081,7 @@ run_all_tests(const std::string &prefix)
   // test vector operations
   test_clone<VectorType>();
   test_destroy<VectorType>();
-  test_get_communicator<VectorType>();
+  test_get_mpi_communicator<VectorType>();
   test_length<VectorType>();
   test_linear_sum<VectorType>();
   test_linear_combination<VectorType>();
index 6a1ca4d69d4fb4a133b7453fa40628efbc631ff5..efef714ca2897d3e22a8dc728f7ebbad3dd8f283 100644 (file)
@@ -56,7 +56,7 @@ reinit_vector(const dealii::DoFHandler<dim, spacedim>      &mg_dof,
   for (unsigned int level = v.min_level(); level <= v.max_level(); ++level)
     {
       v[level].reinit(mg_dof.locally_owned_mg_dofs(level),
-                      tria->get_communicator());
+                      tria->get_mpi_communicator());
     }
 }
 

In the beginning the Universe was created. This has made a lot of people very angry and has been widely regarded as a bad move.

Douglas Adams


Typeset in Trocchi and Trocchi Bold Sans Serif.