Remove parallel::TriangulationBase::compute_n_locally_owned_active_cells_per_processor 9945/head
author    Peter Munch <peterrmuench@gmail.com>
          Thu, 23 Apr 2020 08:01:38 +0000 (10:01 +0200)
committer Peter Munch <peterrmuench@gmail.com>
          Tue, 28 Apr 2020 05:43:36 +0000 (07:43 +0200)
60 files changed:
examples/step-55/step-55.cc
include/deal.II/base/mpi.h
include/deal.II/distributed/tria_base.h
include/deal.II/dofs/dof_handler.h
include/deal.II/hp/dof_handler.h
source/distributed/tria_base.cc
source/dofs/number_cache.cc
tests/distributed_grids/dof_handler_number_cache.cc
tests/distributed_grids/hp_dof_handler_number_cache.cc
tests/dofs/dof_handler_number_cache.cc
tests/dofs/dof_handler_number_cache_02.cc
tests/dofs/dof_handler_number_cache_02.output
tests/gla/mat_04.cc
tests/hp/dof_handler_number_cache.cc
tests/matrix_free/dg_pbc_01.cc
tests/mpi/cell_weights_01.cc
tests/mpi/cell_weights_01_back_and_forth_01.cc
tests/mpi/cell_weights_01_back_and_forth_02.cc
tests/mpi/cell_weights_02.cc
tests/mpi/cell_weights_03.cc
tests/mpi/cell_weights_04.cc
tests/mpi/cell_weights_05.cc
tests/mpi/cell_weights_06.cc
tests/mpi/constraints_consistent_01.cc
tests/mpi/dof_handler_number_cache.cc
tests/mpi/hp_step-40.cc
tests/mpi/hp_step-40_variable_01.cc
tests/mpi/mg_02.cc
tests/mpi/p4est_2d_dofhandler_01.cc
tests/mpi/p4est_2d_dofhandler_02.cc
tests/mpi/p4est_2d_dofhandler_03.cc
tests/mpi/p4est_2d_dofhandler_04.cc
tests/mpi/p4est_2d_renumber_02.cc
tests/mpi/p4est_data_out_01.cc
tests/mpi/p4est_get_subdomain_association.cc
tests/mpi/periodicity_01.cc
tests/mpi/periodicity_02.cc
tests/mpi/periodicity_03.cc
tests/mpi/periodicity_04.cc
tests/mpi/periodicity_06.cc
tests/mpi/periodicity_07.cc
tests/mpi/renumber_cuthill_mckee.cc
tests/mpi/renumber_cuthill_mckee_02.cc
tests/mpi/step-40.cc
tests/mpi/step-40_cuthill_mckee.cc
tests/mpi/step-40_cuthill_mckee_MPI-subset.cc
tests/mpi/step-40_direct_solver.cc
tests/sharedtria/dof_01.cc
tests/sharedtria/dof_02.cc
tests/sharedtria/dof_03.cc
tests/sharedtria/dof_04.cc
tests/sharedtria/dof_05.cc
tests/sharedtria/dof_06.cc
tests/sharedtria/hp_dof_01.cc
tests/sharedtria/hp_dof_02.cc
tests/sharedtria/hp_dof_03.cc
tests/sharedtria/hp_dof_04.cc
tests/sharedtria/hp_no_cells_01.cc
tests/sharedtria/mg_dof_02.cc
tests/sharedtria/no_cells_01.cc
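
The migration pattern applied throughout this commit replaces the removed
compute_*_per_processor() convenience calls by an explicit
Utilities::MPI::all_gather() of the locally known quantity. A minimal,
self-contained sketch of that pattern (a hypothetical standalone program; the
mesh setup, element choice, and variable names are illustrative and not taken
from the commit):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>

#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  const FE_Q<2> fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  // Replaces tria.compute_n_locally_owned_active_cells_per_processor():
  const std::vector<unsigned int> cells_per_proc =
    Utilities::MPI::all_gather(tria.get_communicator(),
                               tria.n_locally_owned_active_cells());

  // Replaces dof_handler.compute_n_locally_owned_dofs_per_processor():
  const std::vector<types::global_dof_index> n_owned_dofs_per_proc =
    Utilities::MPI::all_gather(MPI_COMM_WORLD,
                               dof_handler.n_locally_owned_dofs());

  // Replaces dof_handler.compute_locally_owned_dofs_per_processor():
  const std::vector<IndexSet> owned_dofs_per_proc =
    Utilities::MPI::all_gather(MPI_COMM_WORLD,
                               dof_handler.locally_owned_dofs());

  (void)cells_per_proc;
  (void)n_owned_dofs_per_proc;
  (void)owned_dofs_per_proc;
}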

diff --git a/examples/step-55/step-55.cc b/examples/step-55/step-55.cc
index 419ad7cfd61dae3bf155604401fea917a1baa0b7..ebc29436fa7e8a89d07d4501839514db000be6b2 100644 (file)
@@ -456,7 +456,8 @@ namespace Step55
         dof_handler, coupling, dsp, constraints, false);
       SparsityTools::distribute_sparsity_pattern(
         dsp,
-        dof_handler.compute_locally_owned_dofs_per_processor(),
+        Utilities::MPI::all_gather(mpi_communicator,
+                                   dof_handler.locally_owned_dofs()),
         mpi_communicator,
         locally_relevant_dofs);
       preconditioner_matrix.reinit(owned_partitioning,
diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index 589260d15cc1a8f61c34642c5216bedaa3bd7de0..3c23e0f1fd81a15672b1b5aa28d73de6281da5e6 100644 (file)
@@ -1200,6 +1200,9 @@ namespace Utilities
     std::vector<T>
     all_gather(const MPI_Comm &comm, const T &object)
     {
+      if (job_supports_mpi() == false)
+        return {object};
+
 #  ifndef DEAL_II_WITH_MPI
       (void)comm;
       std::vector<T> v(1, object);
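
The early return added here makes all_gather() usable even when MPI has never
been initialized (job_supports_mpi() == false), which the deprecated DoFHandler
accessors later in this commit rely on when they fall back to MPI_COMM_SELF for
sequential triangulations. A minimal sketch of the resulting behavior, assuming
a deal.II build with MPI enabled but a program that never initializes it
(hypothetical example, not part of the commit):

#include <deal.II/base/mpi.h>

#include <cassert>
#include <vector>

int main()
{
  // No Utilities::MPI::MPI_InitFinalize here, so job_supports_mpi() is false
  // and the guard returns a one-element vector without touching any MPI call.
  const std::vector<unsigned int> gathered =
    dealii::Utilities::MPI::all_gather(MPI_COMM_SELF, 42u);

  assert(gathered.size() == 1 && gathered[0] == 42u);
  (void)gathered;
  return 0;
}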
diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h
index 6b6efd4b3889fc1ac4cfa0f2eb1d2b0782106735..4b9bcb034107faa70ea97b21502416c0f703cbba 100644 (file)
@@ -114,17 +114,6 @@ namespace parallel
     copy_triangulation(
       const dealii::Triangulation<dim, spacedim> &old_tria) override;
 
-    /**
-     * Return the number of active cells owned by each of the MPI processes
-     * that contribute to this triangulation. The element of this vector
-     * indexed by locally_owned_subdomain() equals the result of
-     * n_locally_owned_active_cells().
-     *
-     * @note This function involves global communication!
-     */
-    std::vector<unsigned int>
-    compute_n_locally_owned_active_cells_per_processor() const;
-
     /**
      * Return the number of active cells in the triangulation that are locally
      * owned, i.e. that have a subdomain_id equal to
diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h
index db99c0dd48a6c674ff1e90bc891e46689f1daaeb..2243ee16da0f668238d4cbb706cc323f2cf3190e 100644 (file)
@@ -1014,78 +1014,17 @@ public:
   const IndexSet &
   locally_owned_mg_dofs(const unsigned int level) const;
 
-  /**
-   * Compute a vector with the locally owned DoFs of each processor.
-   *
-   * This function involves global communication via the @p MPI_Allgather
-   * function, so it must be called on all processors participating in the MPI
-   * communicator underlying the triangulation.
-   *
-   * If you are only interested in the number of elements each processor owns
-   * then compute_n_locally_owned_dofs_per_processor() is a better choice.
-   *
-   * If this is a sequential DoFHandler, then the vector has a single element
-   * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
-   * "sequential" means that either the whole program does not use MPI, or that
-   * it uses MPI but only uses a single MPI process, or that there are multiple
-   * MPI processes but the Triangulation on which this DoFHandler builds works
-   * only on one MPI process.)
-   */
-  std::vector<IndexSet>
-  compute_locally_owned_dofs_per_processor() const;
-
-  /**
-   * Compute a vector with the number of degrees of freedom each
-   * processor that participates in this triangulation owns locally. The sum
-   * of all these numbers equals the number of degrees of freedom that exist
-   * globally, i.e. what n_dofs() returns.
-   *
-   * This function involves global communication via the @p MPI_Allgather
-   * function, so it must be called on all processors participating in the MPI
-   * communicator underlying the triangulation.
-   *
-   * Each element of the vector returned by this function equals the number of
-   * elements of the corresponding sets returned by
-   * compute_locally_owned_dofs_per_processor().
-   *
-   * If this is a sequential DoFHandler, then the vector has a single element
-   * equal to n_dofs(). (Here, "sequential" means that either the whole program
-   * does not use MPI, or that it uses MPI but only uses a single MPI process,
-   * or that there are multiple MPI processes but the Triangulation on which
-   * this DoFHandler builds works only on one MPI process.)
-   */
-  std::vector<types::global_dof_index>
-  compute_n_locally_owned_dofs_per_processor() const;
-
-  /**
-   * Compute a vector with the locally owned DoFs of each processor on
-   * the given level @p level for geometric multigrid.
-   *
-   * This function involves global communication via the @p MPI_Allgather
-   * function, so it must be called on all processors participating in the MPI
-   * communicator underlying the triangulation.
-   *
-   * If this is a sequential DoFHandler, then the vector has a single element
-   * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
-   * "sequential" means that either the whole program does not use MPI, or that
-   * it uses MPI but only uses a single MPI process, or that there are multiple
-   * MPI processes but the Triangulation on which this DoFHandler builds works
-   * only on one MPI process.)
-   */
-  std::vector<IndexSet>
-  compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
-
   /**
    * Return a vector that stores the locally owned DoFs of each processor.
    *
    * @deprecated As of deal.II version 9.2, we do not populate a vector with
    * the index sets of all processors by default any more due to a possibly
    * large memory footprint on many processors. As a consequence, this
-   * function needs to call compute_locally_owned_dofs_per_processor() upon
-   * the first invocation, including global communication. Use
-   * compute_locally_owned_dofs_per_processor() instead if using up to a few
-   * thousands of MPI ranks or some variant involving local communication with
-   * more processors.
+   * function needs to call
+   * `Utilities::MPI::all_gather(comm, locally_owned_dofs())` upon the first
+   * invocation, including global communication. Use
+   * `Utilities::MPI::all_gather(comm, dof_handler.locally_owned_dofs())` instead
+   * if using up to a few thousands of MPI ranks or some variant involving local
+   * communication with more processors.
    */
   DEAL_II_DEPRECATED const std::vector<IndexSet> &
                            locally_owned_dofs_per_processor() const;
@@ -1099,11 +1038,12 @@ public:
    * @deprecated As of deal.II version 9.2, we do not populate a vector with
    * the numbers of dofs of all processors by default any more due to a
    * possibly large memory footprint on many processors. As a consequence,
-   * this function needs to call compute_n_locally_owned_dofs_per_processor()
-   * upon the first invocation, including global communication. Use
-   * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
-   * thousands of MPI ranks or some variant involving local communication with
-   * more processors.
+   * this function needs to call `Utilities::MPI::all_gather(comm,
+   * n_locally_owned_dofs())` upon the first invocation, including global
+   * communication. Use `Utilities::MPI::all_gather(comm,
+   * dof_handler.n_locally_owned_dofs())` instead if using up to a few thousands
+   * of MPI ranks or some variant involving local communication with more
+   * processors.
    */
   DEAL_II_DEPRECATED const std::vector<types::global_dof_index> &
                            n_locally_owned_dofs_per_processor() const;
@@ -1115,9 +1055,10 @@ public:
    * @deprecated As of deal.II version 9.2, we do not populate a vector with
    * the index sets of all processors by default any more due to a possibly
    * large memory footprint on many processors. As a consequence, this
-   * function needs to call compute_locally_owned_dofs_mg_per_processor() upon
-   * the first invocation, including global communication. Use
-   * compute_locally_owned_mg_dofs_per_processor() instead if using up to a few
+   * function needs to call `Utilities::MPI::all_gather(comm,
+   * locally_owned_mg_dofs(level))` upon the first invocation, including global
+   * communication. Use `Utilities::MPI::all_gather(comm,
+   * dof_handler.locally_owned_mg_dofs(level))` instead if using up to a few
    * thousands of MPI ranks or some variant involving local communication with
    * more processors.
    */
@@ -1508,10 +1449,20 @@ DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
   if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
       number_cache.n_global_dofs > 0)
     {
+      MPI_Comm comm;
+
+      const parallel::TriangulationBase<dim, spacedim> *tr =
+        (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+          &this->get_triangulation()));
+      if (tr != nullptr)
+        comm = tr->get_communicator();
+      else
+        comm = MPI_COMM_SELF;
+
       const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
         number_cache)
         .n_locally_owned_dofs_per_processor =
-        compute_n_locally_owned_dofs_per_processor();
+        number_cache.get_n_locally_owned_dofs_per_processor(comm);
     }
   return number_cache.n_locally_owned_dofs_per_processor;
 }
@@ -1525,10 +1476,20 @@ DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
   if (number_cache.locally_owned_dofs_per_processor.empty() &&
       number_cache.n_global_dofs > 0)
     {
+      MPI_Comm comm;
+
+      const parallel::TriangulationBase<dim, spacedim> *tr =
+        (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+          &this->get_triangulation()));
+      if (tr != nullptr)
+        comm = tr->get_communicator();
+      else
+        comm = MPI_COMM_SELF;
+
       const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
         number_cache)
         .locally_owned_dofs_per_processor =
-        compute_locally_owned_dofs_per_processor();
+        number_cache.get_locally_owned_dofs_per_processor(comm);
     }
   return number_cache.locally_owned_dofs_per_processor;
 }
@@ -1550,73 +1511,26 @@ DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
   if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
       mg_number_cache[level].n_global_dofs > 0)
     {
+      MPI_Comm comm;
+
+      const parallel::TriangulationBase<dim, spacedim> *tr =
+        (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+          &this->get_triangulation()));
+      if (tr != nullptr)
+        comm = tr->get_communicator();
+      else
+        comm = MPI_COMM_SELF;
+
       const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
         mg_number_cache[level])
         .locally_owned_dofs_per_processor =
-        compute_locally_owned_mg_dofs_per_processor(level);
+        mg_number_cache[level].get_locally_owned_dofs_per_processor(comm);
     }
   return mg_number_cache[level].locally_owned_dofs_per_processor;
 }
 
 
 
-template <int dim, int spacedim>
-std::vector<types::global_dof_index>
-DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
-{
-  const parallel::TriangulationBase<dim, spacedim> *tr =
-    (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-      &this->get_triangulation()));
-  if (tr != nullptr)
-    return number_cache.get_n_locally_owned_dofs_per_processor(
-      tr->get_communicator());
-  else
-    return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-}
-
-
-
-template <int dim, int spacedim>
-std::vector<IndexSet>
-DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
-{
-  const parallel::TriangulationBase<dim, spacedim> *tr =
-    (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-      &this->get_triangulation()));
-  if (tr != nullptr)
-    return number_cache.get_locally_owned_dofs_per_processor(
-      tr->get_communicator());
-  else
-    return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-}
-
-
-
-template <int dim, int spacedim>
-std::vector<IndexSet>
-DoFHandler<dim, spacedim>::compute_locally_owned_mg_dofs_per_processor(
-  const unsigned int level) const
-{
-  Assert(level < this->get_triangulation().n_global_levels(),
-         ExcMessage("The given level index exceeds the number of levels "
-                    "present in the triangulation"));
-  Assert(
-    mg_number_cache.size() == this->get_triangulation().n_global_levels(),
-    ExcMessage(
-      "The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
-  const parallel::TriangulationBase<dim, spacedim> *tr =
-    (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-      &this->get_triangulation()));
-  if (tr != nullptr)
-    return mg_number_cache[level].get_locally_owned_dofs_per_processor(
-      tr->get_communicator());
-  else
-    return mg_number_cache[level].get_locally_owned_dofs_per_processor(
-      MPI_COMM_SELF);
-}
-
-
-
 template <int dim, int spacedim>
 inline const FiniteElement<dim, spacedim> &
 DoFHandler<dim, spacedim>::get_fe(const unsigned int index) const
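
The three deprecated accessors above all repeat the same communicator lookup:
use the triangulation's communicator if it is a parallel triangulation,
otherwise fall back to MPI_COMM_SELF. A small free-standing helper that
captures this pattern (a hypothetical sketch, not part of the commit):

#include <deal.II/distributed/tria_base.h>
#include <deal.II/grid/tria.h>

template <int dim, int spacedim = dim>
MPI_Comm
communicator_or_self(const dealii::Triangulation<dim, spacedim> &tria)
{
  // Parallel triangulations know their communicator; plain sequential
  // triangulations do not, so use MPI_COMM_SELF for those.
  const auto *parallel_tria =
    dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim> *>(
      &tria);
  return (parallel_tria != nullptr) ? parallel_tria->get_communicator() :
                                      MPI_COMM_SELF;
}

With such a helper, each of the const_cast blocks above boils down to passing
communicator_or_self(this->get_triangulation()) to the corresponding
NumberCache getter.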
diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h
index a6514f9447513e86b4edf503ffb8ae604f2941d5..4f7dad905803c4f8001692b36d609c148c8f3e18 100644 (file)
@@ -821,60 +821,18 @@ namespace hp
     const IndexSet &
     locally_owned_dofs() const;
 
-    /**
-     * Compute a vector with the locally owned DoFs of each processor.
-     *
-     * This function involves global communication via the @p MPI_Allgather
-     * function, so it must be called on all processors participating in the MPI
-     * communicator underlying the triangulation.
-     *
-     * If you are only interested in the number of elements each processor owns
-     * then compute_n_locally_owned_dofs_per_processor() is a better choice.
-     *
-     * If this is a sequential DoFHandler, then the vector has a single element
-     * that equals the IndexSet representing the entire range [0,n_dofs()].
-     * (Here, "sequential" means that either the whole program does not use MPI,
-     * or that it uses MPI but only uses a single MPI process, or that there are
-     * multiple MPI processes but the Triangulation on which this DoFHandler
-     * builds works only on one MPI process.)
-     */
-    std::vector<IndexSet>
-    compute_locally_owned_dofs_per_processor() const;
-
-    /**
-     * Compute a vector with the number of degrees of freedom each
-     * processor that participates in this triangulation owns locally. The sum
-     * of all these numbers equals the number of degrees of freedom that exist
-     * globally, i.e. what n_dofs() returns.
-     *
-     * This function involves global communication via the @p MPI_Allgather
-     * function, so it must be called on all processors participating in the MPI
-     * communicator underlying the triangulation.
-     *
-     * Each element of the vector returned by this function equals the number of
-     * elements of the corresponding sets returned by
-     * compute_locally_owned_dofs_per_processor().
-     *
-     * If this is a sequential DoFHandler, then the vector has a single element
-     * equal to n_dofs(). (Here, "sequential" means that either the whole
-     * program does not use MPI, or that it uses MPI but only uses a single MPI
-     * process, or that there are multiple MPI processes but the Triangulation
-     * on which this DoFHandler builds works only on one MPI process.)
-     */
-    std::vector<types::global_dof_index>
-    compute_n_locally_owned_dofs_per_processor() const;
-
     /**
      * Return a vector that stores the locally owned DoFs of each processor.
      *
      * @deprecated As of deal.II version 9.2, we do not populate a vector with
      * the index sets of all processors by default any more due to a possibly
      * large memory footprint on many processors. As a consequence, this
-     * function needs to call compute_locally_owned_dofs_per_processor() upon
-     * the first invocation, including global communication. Use
-     * compute_locally_owned_dofs_per_processor() instead if using up to a few
-     * thousands of MPI ranks or some variant involving local communication with
-     * more processors.
+     * function needs to call `Utilities::MPI::all_gather(comm,
+     * locally_owned_dofs())` upon the first invocation, including global
+     * communication. Use `Utilities::MPI::all_gather(comm,
+     * dof_handler.locally_owned_dofs())` instead if using up to a few thousands
+     * of MPI ranks or some variant involving local communication with more
+     * processors.
      */
     DEAL_II_DEPRECATED const std::vector<IndexSet> &
                              locally_owned_dofs_per_processor() const;
@@ -888,9 +846,10 @@ namespace hp
      * @deprecated As of deal.II version 9.2, we do not populate a vector with
      * the numbers of dofs of all processors by default any more due to a
      * possibly large memory footprint on many processors. As a consequence,
-     * this function needs to call compute_n_locally_owned_dofs_per_processor()
-     * upon the first invocation, including global communication. Use
-     * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
+     * this function needs to call `Utilities::MPI::all_gather(comm,
+     * n_locally_owned_dofs())` upon the first invocation, including global
+     * communication. Use `Utilities::MPI::all_gather(comm,
+     * dof_handler.n_locally_owned_dofs())` instead if using up to a few
      * thousands of MPI ranks or some variant involving local communication with
      * more processors.
      */
@@ -906,24 +865,6 @@ namespace hp
     const IndexSet &
     locally_owned_mg_dofs(const unsigned int level) const;
 
-    /**
-     * Compute a vector with the locally owned DoFs of each processor on
-     * the given level @p level for geometric multigrid.
-     *
-     * This function involves global communication via the @p MPI_Allgather
-     * function, so it must be called on all processors participating in the MPI
-     * communicator underlying the triangulation.
-     *
-     * If this is a sequential DoFHandler, then the vector has a single element
-     * that equals the IndexSet representing the entire range [0,n_dofs()].
-     * (Here, "sequential" means that either the whole program does not use MPI,
-     * or that it uses MPI but only uses a single MPI process, or that there are
-     * multiple MPI processes but the Triangulation on which this DoFHandler
-     * builds works only on one MPI process.)
-     */
-    std::vector<IndexSet>
-    compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
-
     /**
      * Return a vector that stores the locally owned DoFs of each processor on
      * the given level @p level.
@@ -931,11 +872,12 @@ namespace hp
      * @deprecated As of deal.II version 9.2, we do not populate a vector with
      * the index sets of all processors by default any more due to a possibly
      * large memory footprint on many processors. As a consequence, this
-     * function needs to call compute_locally_owned_dofs_mg_per_processor() upon
-     * the first invocation, including global communication. Use
-     * compute_locally_owned_mg_dofs_per_processor() instead if using up to a
-     * few thousands of MPI ranks or some variant involving local communication
-     * with more processors.
+     * function needs to call `Utilities::MPI::all_gather(comm,
+     * locally_owned_mg_dofs(level))` upon the first invocation, including
+     * global communication. Use `Utilities::MPI::all_gather(comm,
+     * dof_handler.locally_owned_mg_dofs(level))` instead if using up to a few
+     * thousands of MPI ranks or some variant involving local communication with
+     * more processors.
      */
     DEAL_II_DEPRECATED const std::vector<IndexSet> &
                              locally_owned_mg_dofs_per_processor(const unsigned int level) const;
@@ -1573,10 +1515,20 @@ namespace hp
     if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
         number_cache.n_global_dofs > 0)
       {
+        MPI_Comm comm;
+
+        const parallel::TriangulationBase<dim, spacedim> *tr =
+          (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+            &this->get_triangulation()));
+        if (tr != nullptr)
+          comm = tr->get_communicator();
+        else
+          comm = MPI_COMM_SELF;
+
         const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
           number_cache)
           .n_locally_owned_dofs_per_processor =
-          compute_n_locally_owned_dofs_per_processor();
+          number_cache.get_n_locally_owned_dofs_per_processor(comm);
       }
     return number_cache.n_locally_owned_dofs_per_processor;
   }
@@ -1590,48 +1542,26 @@ namespace hp
     if (number_cache.locally_owned_dofs_per_processor.empty() &&
         number_cache.n_global_dofs > 0)
       {
+        MPI_Comm comm;
+
+        const parallel::TriangulationBase<dim, spacedim> *tr =
+          (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+            &this->get_triangulation()));
+        if (tr != nullptr)
+          comm = tr->get_communicator();
+        else
+          comm = MPI_COMM_SELF;
+
         const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
           number_cache)
           .locally_owned_dofs_per_processor =
-          compute_locally_owned_dofs_per_processor();
+          number_cache.get_locally_owned_dofs_per_processor(comm);
       }
     return number_cache.locally_owned_dofs_per_processor;
   }
 
 
 
-  template <int dim, int spacedim>
-  std::vector<types::global_dof_index>
-  DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
-  {
-    const parallel::TriangulationBase<dim, spacedim> *tr =
-      (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-        &this->get_triangulation()));
-    if (tr != nullptr)
-      return number_cache.get_n_locally_owned_dofs_per_processor(
-        tr->get_communicator());
-    else
-      return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-  }
-
-
-
-  template <int dim, int spacedim>
-  std::vector<IndexSet>
-  DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
-  {
-    const parallel::TriangulationBase<dim, spacedim> *tr =
-      (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-        &this->get_triangulation()));
-    if (tr != nullptr)
-      return number_cache.get_locally_owned_dofs_per_processor(
-        tr->get_communicator());
-    else
-      return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-  }
-
-
-
   template <int dim, int spacedim>
   const IndexSet &
   DoFHandler<dim, spacedim>::locally_owned_mg_dofs(
@@ -1662,39 +1592,26 @@ namespace hp
     if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
         mg_number_cache[level].n_global_dofs > 0)
       {
+        MPI_Comm comm;
+
+        const parallel::TriangulationBase<dim, spacedim> *tr =
+          (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+            &this->get_triangulation()));
+        if (tr != nullptr)
+          comm = tr->get_communicator();
+        else
+          comm = MPI_COMM_SELF;
+
         const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
           mg_number_cache[level])
           .locally_owned_dofs_per_processor =
-          compute_locally_owned_mg_dofs_per_processor(level);
+          mg_number_cache[level].get_locally_owned_dofs_per_processor(comm);
       }
     return mg_number_cache[level].locally_owned_dofs_per_processor;
   }
 
 
 
-  template <int dim, int spacedim>
-  std::vector<IndexSet>
-  DoFHandler<dim, spacedim>::compute_locally_owned_mg_dofs_per_processor(
-    const unsigned int level) const
-  {
-    Assert(false, ExcNotImplemented());
-    (void)level;
-    Assert(level < this->get_triangulation().n_global_levels(),
-           ExcMessage("The given level index exceeds the number of levels "
-                      "present in the triangulation"));
-    const parallel::TriangulationBase<dim, spacedim> *tr =
-      (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
-        &this->get_triangulation()));
-    if (tr != nullptr)
-      return mg_number_cache[level].get_locally_owned_dofs_per_processor(
-        tr->get_communicator());
-    else
-      return mg_number_cache[level].get_locally_owned_dofs_per_processor(
-        MPI_COMM_SELF);
-  }
-
-
-
   template <int dim, int spacedim>
   inline const FiniteElement<dim, spacedim> &
   DoFHandler<dim, spacedim>::get_fe(const unsigned int number) const
diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index beecfd94862b1acad6d0fb038b02c60721337721..1a27650ed58403b5a1ffaa0ad2131323b0125b64 100644 (file)
@@ -133,35 +133,6 @@ namespace parallel
     return number_cache.n_global_active_cells;
   }
 
-  template <int dim, int spacedim>
-  std::vector<unsigned int>
-  TriangulationBase<dim, spacedim>::
-    compute_n_locally_owned_active_cells_per_processor() const
-  {
-    ;
-#ifdef DEAL_II_WITH_MPI
-    std::vector<unsigned int> n_locally_owned_active_cells_per_processor(
-      Utilities::MPI::n_mpi_processes(this->mpi_communicator), 0);
-
-    if (this->n_levels() > 0)
-      {
-        const int ierr =
-          MPI_Allgather(&number_cache.n_locally_owned_active_cells,
-                        1,
-                        MPI_UNSIGNED,
-                        n_locally_owned_active_cells_per_processor.data(),
-                        1,
-                        MPI_UNSIGNED,
-                        this->mpi_communicator);
-        AssertThrowMPI(ierr);
-      }
-
-    return n_locally_owned_active_cells_per_processor;
-#else
-    return {number_cache.n_locally_owned_active_cells};
-#endif
-  }
-
   template <int dim, int spacedim>
   const MPI_Comm &
   TriangulationBase<dim, spacedim>::get_communicator() const
diff --git a/source/dofs/number_cache.cc b/source/dofs/number_cache.cc
index 42e97076df74cd83d4eca2b5cfbd22c9cf1e7a5c..48528622362017af39ac879881f33e983ca70ea0 100644 (file)
@@ -87,32 +87,20 @@ namespace internal
     NumberCache::get_n_locally_owned_dofs_per_processor(
       const MPI_Comm mpi_communicator) const
     {
-      const unsigned int n_procs =
-        Utilities::MPI::job_supports_mpi() ?
-          Utilities::MPI::n_mpi_processes(mpi_communicator) :
-          1;
       if (n_global_dofs == 0)
         return std::vector<types::global_dof_index>();
       else if (n_locally_owned_dofs_per_processor.empty() == false)
         {
-          AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs);
+          AssertDimension(n_locally_owned_dofs_per_processor.size(),
+                          (Utilities::MPI::job_supports_mpi() ?
+                             Utilities::MPI::n_mpi_processes(mpi_communicator) :
+                             1));
           return n_locally_owned_dofs_per_processor;
         }
       else
         {
-          std::vector<types::global_dof_index> result(n_procs,
-                                                      n_locally_owned_dofs);
-#ifdef DEAL_II_WITH_MPI
-          if (n_procs > 1)
-            MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
-                          1,
-                          DEAL_II_DOF_INDEX_MPI_TYPE,
-                          result.data(),
-                          1,
-                          DEAL_II_DOF_INDEX_MPI_TYPE,
-                          mpi_communicator);
-#endif
-          return result;
+          return Utilities::MPI::all_gather(mpi_communicator,
+                                            n_locally_owned_dofs);
         }
     }
 
@@ -123,108 +111,20 @@ namespace internal
       const MPI_Comm mpi_communicator) const
     {
       AssertDimension(locally_owned_dofs.size(), n_global_dofs);
-      const unsigned int n_procs =
-        Utilities::MPI::job_supports_mpi() ?
-          Utilities::MPI::n_mpi_processes(mpi_communicator) :
-          1;
       if (n_global_dofs == 0)
         return std::vector<IndexSet>();
       else if (locally_owned_dofs_per_processor.empty() == false)
         {
-          AssertDimension(locally_owned_dofs_per_processor.size(), n_procs);
+          AssertDimension(locally_owned_dofs_per_processor.size(),
+                          (Utilities::MPI::job_supports_mpi() ?
+                             Utilities::MPI::n_mpi_processes(mpi_communicator) :
+                             1));
           return locally_owned_dofs_per_processor;
         }
       else
         {
-          std::vector<IndexSet> locally_owned_dofs_per_processor(
-            n_procs, locally_owned_dofs);
-
-#ifdef DEAL_II_WITH_MPI
-          if (n_procs > 1)
-            {
-              // this step is substantially more complicated because indices
-              // might be distributed arbitrarily among the processors. Here we
-              // have to serialize the IndexSet objects and shop them across the
-              // network.
-              std::vector<char> my_data;
-              {
-#  ifdef DEAL_II_WITH_ZLIB
-
-                boost::iostreams::filtering_ostream out;
-                out.push(boost::iostreams::gzip_compressor(
-                  boost::iostreams::gzip_params(
-                    boost::iostreams::gzip::best_speed)));
-                out.push(boost::iostreams::back_inserter(my_data));
-
-                boost::archive::binary_oarchive archive(out);
-
-                archive << locally_owned_dofs;
-                out.flush();
-#  else
-                std::ostringstream              out;
-                boost::archive::binary_oarchive archive(out);
-                archive << locally_owned_dofs;
-                const std::string &s = out.str();
-                my_data.reserve(s.size());
-                my_data.assign(s.begin(), s.end());
-#  endif
-              }
-
-              // determine maximum size of IndexSet
-              const unsigned int max_size =
-                Utilities::MPI::max(my_data.size(), mpi_communicator);
-
-              // as the MPI_Allgather call will be reading max_size elements,
-              // and as this may be past the end of my_data, we need to increase
-              // the size of the local buffer. This is filled with zeros.
-              my_data.resize(max_size);
-
-              std::vector<char> buffer(max_size * n_procs);
-              const int         ierr = MPI_Allgather(my_data.data(),
-                                             max_size,
-                                             MPI_BYTE,
-                                             buffer.data(),
-                                             max_size,
-                                             MPI_BYTE,
-                                             mpi_communicator);
-              AssertThrowMPI(ierr);
-
-              for (unsigned int i = 0; i < n_procs; ++i)
-                if (i == Utilities::MPI::this_mpi_process(mpi_communicator))
-                  locally_owned_dofs_per_processor[i] = locally_owned_dofs;
-                else
-                  {
-                    // copy the data previously received into a stringstream
-                    // object and then read the IndexSet from it
-                    std::string decompressed_buffer;
-
-                    // first decompress the buffer
-                    {
-#  ifdef DEAL_II_WITH_ZLIB
-
-                      boost::iostreams::filtering_ostream decompressing_stream;
-                      decompressing_stream.push(
-                        boost::iostreams::gzip_decompressor());
-                      decompressing_stream.push(
-                        boost::iostreams::back_inserter(decompressed_buffer));
-
-                      decompressing_stream.write(&buffer[i * max_size],
-                                                 max_size);
-#  else
-                      decompressed_buffer.assign(&buffer[i * max_size],
-                                                 max_size);
-#  endif
-                    }
-
-                    // then restore the object from the buffer
-                    std::istringstream              in(decompressed_buffer);
-                    boost::archive::binary_iarchive archive(in);
-
-                    archive >> locally_owned_dofs_per_processor[i];
-                  }
-            }
-#endif
-          return locally_owned_dofs_per_processor;
+          return Utilities::MPI::all_gather(mpi_communicator,
+                                            locally_owned_dofs);
         }
     }
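
The hand-written pack/compress/MPI_Allgather/unpack block removed above is the
kind of boilerplate that Utilities::MPI::all_gather() now takes care of for
serializable objects such as IndexSet, which is why both NumberCache getters
reduce to a single call. A brief usage sketch (hypothetical standalone program;
the index-range layout is chosen only for illustration):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // Each rank owns a disjoint block of ten indices.
  IndexSet locally_owned(10 * n_ranks);
  locally_owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  // One collective call gathers every rank's IndexSet on every rank.
  const std::vector<IndexSet> owned_per_rank =
    Utilities::MPI::all_gather(MPI_COMM_WORLD, locally_owned);

  (void)owned_per_rank;
}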
 
index 85626e9bbf4565400be14c78ca452e0050d02566..00c8c5d301cacabd12b8e0d5ee90bbbe687d9aa8 100644 (file)
@@ -99,10 +99,12 @@ test()
 
       AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
       AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
-      AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
                     std::vector<types::global_dof_index>(1, N),
                   ExcInternalError());
-      AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
                     std::vector<IndexSet>(1, all),
                   ExcInternalError());
     }
index c24a60f0c51629394230e7e07695faada84e321d..c8e2089dbb2688e2a8de67b730046bc2fa2bb99f 100644 (file)
@@ -106,10 +106,12 @@ test()
 
       AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
       AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
-      AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
                     std::vector<types::global_dof_index>(1, N),
                   ExcInternalError());
-      AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
                     std::vector<IndexSet>(1, all),
                   ExcInternalError());
     }
index 39c14497bf0770734102171a3511e35e8b2dae0e..da5f2f5a3b6d8372cdef52cb529dc301b849da56 100644 (file)
@@ -96,10 +96,12 @@ test()
 
       AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
       AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
-      AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
                     std::vector<types::global_dof_index>(1, N),
                   ExcInternalError());
-      AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
                     std::vector<IndexSet>(1, all),
                   ExcInternalError());
     }
index a6c9124f8774cda1ae80a3863a48b90e7195aa13..180f651e85b23389c4eb86c631873799add73091 100644 (file)
@@ -95,17 +95,21 @@ test()
 
       Assert(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
       Assert(dof_handler.locally_owned_dofs() == all, ExcInternalError());
-      Assert(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+      Assert(Utilities::MPI::all_gather(MPI_COMM_SELF,
+                                        dof_handler.n_locally_owned_dofs()) ==
                std::vector<types::global_dof_index>(1, N),
              ExcInternalError());
-      Assert(dof_handler.compute_locally_owned_dofs_per_processor() ==
+      Assert(Utilities::MPI::all_gather(MPI_COMM_SELF,
+                                        dof_handler.locally_owned_dofs()) ==
                std::vector<IndexSet>(1, all),
              ExcInternalError());
 
       dof_handler.clear();
       deallog << "those should be zero: " << dof_handler.n_locally_owned_dofs()
               << " "
-              << dof_handler.compute_n_locally_owned_dofs_per_processor().size()
+              << Utilities::MPI::all_gather(MPI_COMM_SELF,
+                                            dof_handler.n_locally_owned_dofs())
+                   .size()
               << " " << dof_handler.n_dofs() << std::endl;
     }
 }
index 5da9c71e964eee24cf6fc7165a7c8b8f9890438f..1da0caa9813d7e4315134a8e63e5f720188e27b6 100644 (file)
@@ -1,19 +1,19 @@
 
 DEAL:1d::50
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
 DEAL:1d::82
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
 DEAL:1d::90
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
 DEAL:1d::90
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
 DEAL:2d::816
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
 DEAL:2d::1264
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
 DEAL:2d::2192
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
 DEAL:3d::13524
-DEAL:3d::those should be zero: 0 0 0
+DEAL:3d::those should be zero: 0 1 0
 DEAL:3d::42768
-DEAL:3d::those should be zero: 0 0 0
+DEAL:3d::those should be zero: 0 1 0
index f69091af485feff4932fd4af9cd58ba16cde5484..3b7d3d9cdf9c3f361902abaa3d95c096f21ad395 100644 (file)
@@ -85,7 +85,8 @@ test()
                                     MPI_COMM_WORLD));
   SparsityTools::distribute_sparsity_pattern(
     sp,
-    dof_handler.compute_n_locally_owned_dofs_per_processor(),
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs()),
     MPI_COMM_WORLD,
     relevant);
   sp.compress();
index 07a5813c33b853b632a0cc030c32bf9a498349cd..0e4d55235fa6fc18fc0b8f8e225acebe4e8ecb0b 100644 (file)
@@ -107,10 +107,12 @@ test()
 
       AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
       AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
-      AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
                     std::vector<types::global_dof_index>(1, N),
                   ExcInternalError());
-      AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+      AssertThrow(Utilities::MPI::all_gather(
+                    MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
                     std::vector<IndexSet>(1, all),
                   ExcInternalError());
     }
index f6ea0ed1008618a026fa3c0e9b7ce2c54607783a..d3842a17cbc2a12e8b4c47fb572bc9e4159d3958 100644 (file)
@@ -96,7 +96,7 @@ test()
   solver.solve(mf, sol, rhs, PreconditionIdentity());
 
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, dof.locally_owned_dofs());
   // gather all data at root
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     {
index 932e9a28588af6cd49e91c3bc45bf9c22f148843..e8b52acd398e841cc9111dcfe3ce122bc8524821 100644 (file)
@@ -54,7 +54,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index 2686080140c18754b3387a3742a1c4b7244aaa9e..305c0deddf2be4624d58d39ef3e1138feb4fccf0 100644 (file)
@@ -88,7 +88,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index edbffd902f23f4ea38f1c275298356c5dd0a7c39..e5ab0c5127d7206c3ba2f357a210fb2866f715c1 100644 (file)
@@ -73,7 +73,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index 862b0644fc8ed8353985b035a7996c43d816da26..9cf48d36f53c5213a97f49ad25e65cbe994a55b4 100644 (file)
@@ -60,7 +60,8 @@ test()
   tr.refine_global(1);
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index 5a44ca85eba0ae0c3dae8c4673947b61d455f145..c8cf1ae011fbea4d9a2d2c49165fecbd00d3cb82 100644 (file)
@@ -68,7 +68,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index fab4b09040b4e5482b53140bce7715066ffa295c..ee8419f3450494142458dbaf19a0f0d56b106402 100644 (file)
@@ -62,7 +62,8 @@ test()
   tr.refine_global(1);
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index b0c2bee4d9c5d05bcac462c7396beaa20d8d9331..95fb7f724f49792229b3a788960290a73ac56154 100644 (file)
@@ -81,7 +81,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index b57d4f3f102fd74a0d3e33643dd77d1824267f6d..6ecc9ee634534090a72e8d4d7c178be728cf86d5 100644 (file)
@@ -80,7 +80,8 @@ test()
   tr.repartition();
 
   const auto n_locally_owned_active_cells_per_processor =
-    tr.compute_n_locally_owned_active_cells_per_processor();
+    Utilities::MPI::all_gather(tr.get_communicator(),
+                               tr.n_locally_owned_active_cells());
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     for (unsigned int p = 0; p < numproc; ++p)
       deallog << "processor " << p << ": "
index 2dae4d400839f40831739a8fe917600e5b393a19..668bb336c73c65b366dd96400432a80d47d53b1e 100644 (file)
@@ -84,7 +84,8 @@ check(parallel::distributed::Triangulation<dim> &tria)
   constraints.print(deallog.get_file_stream());
   deallog << "consistent? "
           << constraints.is_consistent_in_parallel(
-               dof_handler.compute_locally_owned_dofs_per_processor(),
+               Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                          dof_handler.locally_owned_dofs()),
                locally_active_dofs,
                MPI_COMM_WORLD,
                true)
index 507db4136b1dbd1750e4497bd392754dc9efb51a..df6989aca9648dfda95fdf11862a367cde15f681 100644 (file)
@@ -106,22 +106,22 @@ test()
         deallog << N << std::endl;
 
       Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
-      for (unsigned int i = 0;
-           i < dof_handler.compute_n_locally_owned_dofs_per_processor().size();
-           ++i)
-        AssertThrow(
-          dof_handler.compute_n_locally_owned_dofs_per_processor()[i] <= N,
-          ExcInternalError());
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
+      for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
+           ++i)
+        AssertThrow(n_locally_owned_dofs_per_processor[i] <= N,
+                    ExcInternalError());
       AssertThrow(std::accumulate(n_locally_owned_dofs_per_processor.begin(),
                                   n_locally_owned_dofs_per_processor.end(),
                                   0U) == N,
                   ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N), really_all(N);
       // poor man's union operation
       for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
index 47df172800ff07c60fbd3bded4f3e2e6ca8890ca..0b4a386c40f95d76d1df391e6ddc66e953c6074b 100644 (file)
@@ -165,8 +165,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -326,7 +328,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -339,7 +343,8 @@ namespace Step40
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
-          pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+          pcout << Utilities::MPI::all_gather(
+                     MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
                 << '+';
         pcout << std::endl;
 
index ccdef1e3ee0252e218d1b3f5c6cc077ef442d0aa..fe592fbf3c81379fa470f5d9c20f5f4f1f00edec 100644 (file)
@@ -171,8 +171,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -329,7 +331,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -342,7 +346,8 @@ namespace Step40
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
-          pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+          pcout << Utilities::MPI::all_gather(
+                     MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
                 << '+';
         pcout << std::endl;
 
index 5b11bb691e0c53f21ad76f30bb3d3d048e55d027..89a4f744c6ad7aeb10e4b7bc0b7675920c82eb1c 100644 (file)
@@ -96,7 +96,7 @@ test()
 
     const std::vector<types::global_dof_index>
       n_locally_owned_dofs_per_processor =
-        dofh.compute_n_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
     deallog << "n_locally_owned_dofs_per_processor:" << std::endl;
     for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); ++i)
       deallog << n_locally_owned_dofs_per_processor[i] << std::endl;
@@ -107,7 +107,8 @@ test()
         deallog << "level " << lvl << ":" << std::endl;
 
         const std::vector<IndexSet> vec =
-          dofh.compute_locally_owned_mg_dofs_per_processor(lvl);
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dofh.locally_owned_mg_dofs(lvl));
 
         for (unsigned int i = 0; i < vec.size(); ++i)
           deallog << vec[i].n_elements() << std::endl;
index 28ebb0fcb551194fab9b159541b4b783abad9e9e..f5cd4dec1a8ef14a91d3410482c849d0ca0632b0 100644 (file)
@@ -57,7 +57,7 @@ test()
 
   const std::vector<types::global_dof_index>
     n_locally_owned_dofs_per_processor =
-      dofh.compute_n_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
   if (myid == 0)
     {
       deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
index 71df8c5d5b143ece2b6bf2f8a2388b82b31c3f71..489b8f3542429d7e645c956ed1e995be354fff3d 100644 (file)
@@ -90,7 +90,7 @@ test()
       dofh.distribute_dofs(fe);
 
       std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-        dofh.compute_n_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
       if (myid == 0)
         {
           deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
index a0f9e534175b78e2a44e77d2636f5d4a67bc9235..9b2d7c5e39b3651ddcb8fa3fef30b5a83ab29163 100644 (file)
@@ -91,7 +91,7 @@ test()
       dofh.distribute_dofs(fe);
 
       std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-        dofh.compute_n_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
       if (myid == 0)
         {
           deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
index 6d202c2c02c2788fb9df966aa4de4633857cd0d4..63b7a64e50ac782a885ab8fedc7e20eccac904d8 100644 (file)
@@ -91,7 +91,7 @@ test()
       dofh.distribute_dofs(fe);
 
       std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-        dofh.compute_n_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
       if (myid == 0)
         {
           deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
index 87dacefe41496273abe88c1c254b4c1bcb2a9753..2af6c4506dafea3f8e4a5313aa5e16b920f44da8 100644 (file)
@@ -81,7 +81,7 @@ test()
     DoFTools::extract_locally_active_dofs(dofh, dof_set);
 
     const std::vector<IndexSet> owned_dofs =
-      dofh.compute_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
     if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
       {
         dof_set.print(deallog);
index aaa5aa3a028bbab36489829b62e3cda016d12f83..62e95eafd6f6c449a615acbfe9faf020cb7fa7f0 100644 (file)
@@ -75,7 +75,7 @@ test()
   data_out.build_patches();
 
   std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
-    dofh.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
   if (myid == 0)
     {
       for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
index 73d4485a44c140bf17df481a91436b8e602f26f1..9e3aa7f5c252848af164c7b16291389f6b700f63 100644 (file)
@@ -61,7 +61,7 @@ test()
 
   const std::vector<types::global_dof_index>
     n_locally_owned_dofs_per_processor =
-      dofh.compute_n_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
   if (myid == 1)
     {
       deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
index be0454f1679d910ebb916e2d5e5aa90bc3d66e59..6fab1533c361621f8453247abe41f9013c6a1471 100644 (file)
@@ -166,7 +166,8 @@ namespace Step40
     constraints.close();
 
     const std::vector<IndexSet> &locally_owned_dofs =
-      dof_handler.compute_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.locally_owned_dofs());
     IndexSet locally_active_dofs;
     DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
     AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
@@ -186,8 +187,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
index ef7198dfa9c4e207e0813a43ea3a06f51ce200ec..e98eee8f7addddf3cbdb09b584a92cc435f82334 100644 (file)
@@ -394,7 +394,8 @@ namespace Step22
     constraints.close();
 
     const std::vector<IndexSet> &locally_owned_dofs =
-      dof_handler.compute_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.locally_owned_dofs());
     IndexSet locally_active_dofs;
     DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
     AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
index 3898e35009349f2a5be59209f2eb542239719277..a7c0cb4240ec69162a42c7ecaba229afc53fa594 100644 (file)
@@ -324,7 +324,8 @@ namespace Step22
     constraints.close();
 
     const std::vector<IndexSet> &locally_owned_dofs =
-      dof_handler.compute_locally_owned_dofs_per_processor();
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.locally_owned_dofs());
     IndexSet locally_active_dofs;
     DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
     AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
index c5b32fff20da79d8055598d0f559a7061bff0487..56cd78cfba78346dda67a07abf9b1dcbf2482d33 100644 (file)
@@ -205,7 +205,8 @@ check(const unsigned int orientation, bool reverse)
   constraints.print(deallog.get_file_stream());
 
   const std::vector<IndexSet> locally_owned_dofs_vector =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet locally_active_dofs;
   DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
   AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs_vector,
index 5782c1c907bc475ad2bef7b50b748d32b08c6f5e..5698c5172181ea5536e8420c45cca61294ce4d9c 100644 (file)
@@ -188,7 +188,8 @@ test(const unsigned numRefinementLevels = 2)
   constraints.close();
 
   const std::vector<IndexSet> &locally_owned_dofs =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet locally_active_dofs;
   DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
   AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
index 2a1bf187eaf0fead2efd668a871d89d8334a6e0e..638526a31ec80cfdfe97421a6ec08330d31ce68f 100644 (file)
@@ -139,7 +139,8 @@ test(const unsigned numRefinementLevels = 2)
   DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
 
   const std::vector<IndexSet> locally_owned_dofs =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
 
   std::map<types::global_dof_index, Point<dim>> supportPoints;
   DoFTools::map_dofs_to_support_points(MappingQ1<dim>(),
index b4ef68f8fcfeb27ca0e6c4ce613065d18aa503f6..aae1916677aff6bcc8ec88d006d558e61a1a26bd 100644 (file)
@@ -76,7 +76,7 @@ test()
             complete_renumbering.begin());
   unsigned int                offset = renumbering.size();
   const std::vector<IndexSet> dofs_per_proc =
-    dofh.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
   for (unsigned int i = 1; i < nprocs; ++i)
     {
       if (myid == i)
index a8c4c356f7d69d4187c983570565ffb0aa49c190..ca3068cae10160d6bd834ae2b609a8a4e26713fc 100644 (file)
@@ -79,7 +79,7 @@ test()
             complete_renumbering.begin());
   unsigned int                offset = renumbering.size();
   const std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dofh.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
   for (unsigned int i = 1; i < nprocs; ++i)
     {
       if (myid == i)
index 200f282abaa171cc2d3d8f20d499a3e588eaeeee..85e669cf458953aba393de6dd77e4f50653f5ecf 100644 (file)
@@ -159,8 +159,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -311,7 +313,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -324,7 +328,8 @@ namespace Step40
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
-          pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+          pcout << Utilities::MPI::all_gather(
+                     MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
                 << '+';
         pcout << std::endl;
 
index c3cb0e2812eee297a2aaf7222ccaa7378e6cb876..245b86e4960eb9ae1ed0bab9061c3e425f3473bc 100644 (file)
@@ -221,8 +221,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -373,7 +375,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -386,7 +390,8 @@ namespace Step40
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
-          pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+          pcout << Utilities::MPI::all_gather(
+                     mpi_communicator, dof_handler.n_locally_owned_dofs())[i]
                 << '+';
         pcout << std::endl;
 
index f4bb8844e56a3201be4f9203858c88f6ed452e96..8ee0de28f3512bc3a2379d516b7b62f88c28f2bd 100644 (file)
@@ -222,8 +222,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -374,7 +376,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -386,7 +390,8 @@ namespace Step40
               << "      ";
         const std::vector<types::global_dof_index>
           n_locally_owned_dofs_per_processor =
-            dof_handler.compute_n_locally_owned_dofs_per_processor();
+            Utilities::MPI::all_gather(mpi_communicator,
+                                       dof_handler.n_locally_owned_dofs());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
index d53f1b62d982b8d598c5ba1105f7a8eaad6e2ce4..428ff3be1803481ffd07effab7ade84e38d4e9fc 100644 (file)
@@ -159,8 +159,10 @@ namespace Step40
     system_matrix.reinit(
       mpi_communicator,
       csp,
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
-      dof_handler.compute_n_locally_owned_dofs_per_processor(),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
+      Utilities::MPI::all_gather(mpi_communicator,
+                                 dof_handler.n_locally_owned_dofs()),
       Utilities::MPI::this_mpi_process(mpi_communicator));
   }
 
@@ -289,7 +291,9 @@ namespace Step40
               << triangulation.n_global_active_cells() << std::endl
               << "      ";
         const auto n_locally_owned_active_cells_per_processor =
-          triangulation.compute_n_locally_owned_active_cells_per_processor();
+          Utilities::MPI::all_gather(
+            triangulation.get_communicator(),
+            triangulation.n_locally_owned_active_cells());
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
@@ -302,7 +306,8 @@ namespace Step40
         for (unsigned int i = 0;
              i < Utilities::MPI::n_mpi_processes(mpi_communicator);
              ++i)
-          pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+          pcout << Utilities::MPI::all_gather(
+                     mpi_communicator, dof_handler.n_locally_owned_dofs())[i]
                 << '+';
         pcout << std::endl;
 
index a1a7fd2d37dcb4352e688827d18cebda061ed8ed..4896d3838c5c8a66f64e5ad4135e6ccff745d7fd 100644 (file)
@@ -117,7 +117,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -135,7 +136,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index de3b9d271a5c1ed0312fdb23f0ac0cf718a8d313..857b35be365884bfdeea33549b77bd2c9eca35a0 100644 (file)
@@ -117,7 +117,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -135,7 +136,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index 58618ed41d00159296a39effee1d43138ca7eb46..6a6d60193d6a3fdd23b65f15983604812cd998b5 100644 (file)
@@ -115,7 +115,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -133,7 +134,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index d74b1c795b40d1564307645926b985e480d1175e..5600d047bf9f00b510e9e56543dd389ed3a4e3ca 100644 (file)
@@ -117,7 +117,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -135,7 +136,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index 8c92c472ecfd83fc1343da94d648998b00fd7ffc..157ef34bdfb8940a07afd78b5ed51001dcd99dc7 100644 (file)
@@ -57,9 +57,11 @@ compare_meshes(DoFHandler<dim> &shared_dof_handler,
   shared_dofs.print(deallog.get_file_stream());
 
   std::vector<IndexSet> shared_dofs_per_proc =
-    shared_dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               shared_dof_handler.locally_owned_dofs());
   std::vector<IndexSet> distributed_dofs_per_proc =
-    distributed_dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               distributed_dof_handler.locally_owned_dofs());
   for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
        ++i)
     Assert(shared_dofs_per_proc[i] == distributed_dofs_per_proc[i],
index 715b4b6d07a45a596fb74cb5330016cb2a0b802c..d2d83ed72376c6e4b30017776e230cece642a231 100644 (file)
@@ -73,7 +73,8 @@ test()
           << dof_handler.n_locally_owned_dofs() << std::endl;
 
   std::vector<IndexSet> shared_dofs_per_proc =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
        ++i)
     shared_dofs_per_proc[i].print(deallog.get_file_stream());
index 45539023d14b6f14dd1ca3f4ba7f27f94e46cf77..805556f016c6011a439351ac3d1fe7ba53698833 100644 (file)
@@ -122,7 +122,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -140,7 +141,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index 6aaee1de86265640cfda5684529f7194a86afe7b..23a52ab5605d7a61add6a39bdf247d1637f937e4 100644 (file)
@@ -123,7 +123,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -141,7 +142,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index a410b009e589711eac0034501f59fc2ad1ecc701..cf23a218b8a813c88363ffff1934ffed915f4718 100644 (file)
@@ -120,7 +120,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -138,7 +139,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index 7c56973b12aaae98c9d9f6ec202b415dfcd3e0ab..9f5cdd21923d725712d35c07826f5d20dadfda0f 100644 (file)
@@ -123,7 +123,8 @@ test()
 
       const std::vector<types::global_dof_index>
         n_locally_owned_dofs_per_processor =
-          dof_handler.compute_n_locally_owned_dofs_per_processor();
+          Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                     dof_handler.n_locally_owned_dofs());
       Assert(dof_handler.n_locally_owned_dofs() ==
                n_locally_owned_dofs_per_processor[triangulation
                                                     .locally_owned_subdomain()],
@@ -141,7 +142,8 @@ test()
              ExcInternalError());
 
       const std::vector<IndexSet> locally_owned_dofs_per_processor =
-        dof_handler.compute_locally_owned_dofs_per_processor();
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_dofs());
       IndexSet all(N);
       for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
         {
index a2ec552783f2e3cafa9d234859ed73eeeff61f11..02b196308c07cc3eab316d0d3640eaf93d0778c9 100644 (file)
@@ -80,7 +80,8 @@ test()
 
   deallog << "n_locally_owned_dofs_per_processor: ";
   std::vector<types::global_dof_index> v =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   unsigned int sum = 0;
   for (unsigned int i = 0; i < v.size(); ++i)
     {
@@ -105,7 +106,8 @@ test()
   Assert(std::accumulate(v.begin(), v.end(), 0U) == N, ExcInternalError());
 
   std::vector<IndexSet> locally_owned_dofs_per_processor =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
     {
index 83b6712eef746576b1228bee971fb09585d9cb53..c00041c11ecd84d794e96b6e313f25bb2bfc5c27 100644 (file)
@@ -52,7 +52,8 @@ write_dof_data(DoFHandler<dim> &dof_handler)
   for (unsigned int lvl = 0; lvl < n_levels; ++lvl)
     {
       std::vector<IndexSet> dof_index_per_proc =
-        dof_handler.compute_locally_owned_mg_dofs_per_processor(lvl);
+        Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                                   dof_handler.locally_owned_mg_dofs(lvl));
       for (unsigned int i = 0; i < dof_index_per_proc.size(); ++i)
         dof_index_per_proc[i].print(deallog);
 
index 661c37e96ed123ca612620a377676cf6cbd00565..6c950547dce898776b57ed2354c35be67f1d76b2 100644 (file)
@@ -74,7 +74,8 @@ test()
 
   deallog << "n_locally_owned_dofs_per_processor: ";
   const std::vector<types::global_dof_index> v =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   unsigned int sum = 0;
   for (unsigned int i = 0; i < v.size(); ++i)
     {
@@ -86,10 +87,13 @@ test()
   dof_handler.locally_owned_dofs().write(deallog.get_file_stream());
   deallog << std::endl;
 
-  Assert(dof_handler.n_locally_owned_dofs() ==
-           dof_handler.compute_n_locally_owned_dofs_per_processor()
-             [triangulation.locally_owned_subdomain()],
-         ExcInternalError());
+  Assert(
+    dof_handler.n_locally_owned_dofs() ==
+      Utilities::MPI::all_gather(
+        MPI_COMM_WORLD,
+        dof_handler
+          .n_locally_owned_dofs())[triangulation.locally_owned_subdomain()],
+    ExcInternalError());
   Assert(dof_handler.n_locally_owned_dofs() ==
            dof_handler.locally_owned_dofs().n_elements(),
          ExcInternalError());
@@ -98,12 +102,14 @@ test()
 
   Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
   const std::vector<types::global_dof_index> n_owned_dofs =
-    dof_handler.compute_n_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.n_locally_owned_dofs());
   Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N,
          ExcInternalError());
 
   const std::vector<IndexSet> owned_dofs =
-    dof_handler.compute_locally_owned_dofs_per_processor();
+    Utilities::MPI::all_gather(MPI_COMM_WORLD,
+                               dof_handler.locally_owned_dofs());
   IndexSet all(N);
   for (unsigned int i = 0; i < owned_dofs.size(); ++i)
     {
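
Every hunk in this commit applies one of the same three substitutions: a cached
"per processor" query on the DoFHandler or Triangulation is replaced by an
on-demand Utilities::MPI::all_gather() of the locally known quantity. The
following is a minimal illustrative sketch that collects the three patterns in
one place; the helper name gather_ownership_info and its signature are
hypothetical and not part of the commit, while the deal.II calls are the ones
appearing in the hunks above.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/tria_base.h>
#include <deal.II/dofs/dof_handler.h>

#include <vector>

using namespace dealii;

template <int dim>
void
gather_ownership_info(const DoFHandler<dim> &                 dof_handler,
                      const parallel::TriangulationBase<dim> &triangulation,
                      const MPI_Comm &                        mpi_communicator)
{
  // Replaces dof_handler.compute_n_locally_owned_dofs_per_processor():
  const std::vector<types::global_dof_index> n_owned_dofs_per_proc =
    Utilities::MPI::all_gather(mpi_communicator,
                               dof_handler.n_locally_owned_dofs());

  // Replaces dof_handler.compute_locally_owned_dofs_per_processor():
  const std::vector<IndexSet> owned_dofs_per_proc =
    Utilities::MPI::all_gather(mpi_communicator,
                               dof_handler.locally_owned_dofs());

  // Replaces
  // triangulation.compute_n_locally_owned_active_cells_per_processor():
  const std::vector<unsigned int> n_owned_cells_per_proc =
    Utilities::MPI::all_gather(triangulation.get_communicator(),
                               triangulation.n_locally_owned_active_cells());

  // The gathered vectors are used exactly as the removed functions' return
  // values were used before, e.g. n_owned_dofs_per_proc[rank].
  (void)n_owned_dofs_per_proc;
  (void)owned_dofs_per_proc;
  (void)n_owned_cells_per_proc;
}

Note that all_gather() is a collective operation, so every rank of the
communicator must reach these calls; the per-processor vectors are now built
only where a test or example actually needs them instead of being stored by
the library.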
