https://gitweb.dealii.org/ - dealii.git/commitdiff
Replace MPI_Allgather by MPI_Exscan
author Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Fri, 31 May 2019 16:30:47 +0000 (18:30 +0200)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Fri, 7 Jun 2019 09:42:33 +0000 (11:42 +0200)
Do not store the index sets of all processors on each processor.
Fill them on demand.
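
The core pattern of this commit, shown as a minimal standalone sketch (illustrative names, not code from the deal.II sources): instead of gathering every rank's count with MPI_Allgather and summing a local prefix, each rank obtains its starting offset directly from MPI_Exscan, which avoids storing one entry per rank.

#include <mpi.h>

#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Illustrative per-rank quantity, e.g. the number of locally owned
  // DoFs or the number of bytes this rank wants to write.
  unsigned long long local_count = 10 + rank;

  // Exclusive prefix sum: rank r receives the sum of the counts of
  // ranks 0..r-1. MPI_Exscan leaves the receive buffer untouched on
  // rank 0, so it must be initialized to zero beforehand (the diffs
  // below do the same).
  unsigned long long my_offset = 0;
  MPI_Exscan(&local_count, &my_offset, 1,
             MPI_UNSIGNED_LONG_LONG, MPI_SUM, MPI_COMM_WORLD);

  std::printf("rank %d starts at global offset %llu\n", rank, my_offset);

  MPI_Finalize();
  return 0;
}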

include/deal.II/dofs/dof_handler.h
include/deal.II/dofs/number_cache.h
include/deal.II/hp/dof_handler.h
source/distributed/tria.cc
source/dofs/dof_handler_policy.cc
source/dofs/dof_renumbering.cc
source/dofs/number_cache.cc
source/grid/grid_tools.cc
source/multigrid/mg_transfer_internal.cc
source/multigrid/mg_transfer_prebuilt.cc
tests/sharedtria/no_cells_01.cc

include/deal.II/dofs/dof_handler.h
index bac0319cfa9dd63d94286661fd1fd95db4e10fe1..980cf95f3aa2abe488b4caefd6e75a9c14a45d19 100644 (file)
@@ -1046,7 +1046,7 @@ public:
    * MPI processes but the Triangulation on which this DoFHandler builds works
    * only on one MPI process.)
    */
-  const std::vector<IndexSet> &
+  std::vector<IndexSet>
   locally_owned_dofs_per_processor() const;
 
   /**
@@ -1065,7 +1065,7 @@ public:
    * or that there are multiple MPI processes but the Triangulation on which
    * this DoFHandler builds works only on one MPI process.)
    */
-  const std::vector<types::global_dof_index> &
+  std::vector<types::global_dof_index>
   n_locally_owned_dofs_per_processor() const;
 
   /**
@@ -1079,7 +1079,7 @@ public:
    * MPI processes but the Triangulation on which this DoFHandler builds works
    * only on one MPI process.)
    */
-  const std::vector<IndexSet> &
+  std::vector<IndexSet>
   locally_owned_mg_dofs_per_processor(const unsigned int level) const;
 
   /**
@@ -1461,25 +1461,39 @@ DoFHandler<dim, spacedim>::locally_owned_mg_dofs(const unsigned int level) const
 
 
 template <int dim, int spacedim>
-const std::vector<types::global_dof_index> &
+std::vector<types::global_dof_index>
 DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
 {
-  return number_cache.n_locally_owned_dofs_per_processor;
+  const parallel::Triangulation<dim, spacedim> *tr =
+    (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+      &this->get_triangulation()));
+  if (tr != nullptr)
+    return number_cache.get_n_locally_owned_dofs_per_processor(
+      tr->get_communicator());
+  else
+    return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
 }
 
 
 
 template <int dim, int spacedim>
-const std::vector<IndexSet> &
+std::vector<IndexSet>
 DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
 {
-  return number_cache.locally_owned_dofs_per_processor;
+  const parallel::Triangulation<dim, spacedim> *tr =
+    (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+      &this->get_triangulation()));
+  if (tr != nullptr)
+    return number_cache.get_locally_owned_dofs_per_processor(
+      tr->get_communicator());
+  else
+    return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
 }
 
 
 
 template <int dim, int spacedim>
-const std::vector<IndexSet> &
+std::vector<IndexSet>
 DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
   const unsigned int level) const
 {
@@ -1489,7 +1503,15 @@ DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
     mg_number_cache.size() == this->get_triangulation().n_global_levels(),
     ExcMessage(
       "The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
-  return mg_number_cache[level].locally_owned_dofs_per_processor;
+  const parallel::Triangulation<dim, spacedim> *tr =
+    (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+      &this->get_triangulation()));
+  if (tr != nullptr)
+    return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+      tr->get_communicator());
+  else
+    return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+      MPI_COMM_SELF);
 }
 
 
include/deal.II/dofs/number_cache.h
index 2d273d94406065ed85a186941d7a927286693964..56e8b792f0a3be0e2527f032193771963589cfa0 100644 (file)
@@ -19,6 +19,7 @@
 #include <deal.II/base/config.h>
 
 #include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
 
 #include <vector>
 
@@ -107,6 +108,28 @@ namespace internal
       void
       clear();
 
+      /**
+       * Return a representation of @p n_locally_owned_dofs_per_processor both
+       * in case it was set up (directly returning the array) or in case we
+       * need to accumulate some information over all processors. The latter
+       * case involves global communication and is typically expensive to set
+       * up because it invokes MPI_Allgather.
+       */
+      std::vector<types::global_dof_index>
+      get_n_locally_owned_dofs_per_processor(
+        const MPI_Comm mpi_communicator) const;
+
+      /**
+       * Return a representation of @p locally_owned_dofs_per_processor both
+       * in case it was set up (directly returning the array of IndexSet
+       * fields) or in case we need to accumulate some information over all
+       * processors. The latter case involves global communication and is
+       * typically expensive to set up because it invokes MPI_Allgather.
+       */
+      std::vector<IndexSet>
+      get_locally_owned_dofs_per_processor(
+        const MPI_Comm mpi_communicator) const;
+
       /**
        * Total number of dofs, accumulated over all processors that may
        * participate on this mesh.
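
For callers, the practical consequence of the accessors declared above is that the per-processor vectors are now built on demand and returned by value. A hedged usage sketch (the helper name and its purpose are placeholders, not deal.II API):

#include <deal.II/base/index_set.h>
#include <deal.II/base/types.h>
#include <deal.II/dofs/dof_handler.h>

#include <vector>

// Hypothetical helper: find which MPI rank owns a given global DoF.
// Each call to locally_owned_dofs_per_processor() may now trigger an
// MPI_Allgather, so fetch the vector once and reuse the copy instead
// of calling the accessor inside loops.
template <int dim>
unsigned int
owner_of_dof(const dealii::DoFHandler<dim> &       dof_handler,
             const dealii::types::global_dof_index dof_index)
{
  const std::vector<dealii::IndexSet> owned_dofs =
    dof_handler.locally_owned_dofs_per_processor();

  for (unsigned int p = 0; p < owned_dofs.size(); ++p)
    if (owned_dofs[p].is_element(dof_index))
      return p;

  return dealii::numbers::invalid_unsigned_int;
}
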
include/deal.II/hp/dof_handler.h
index 20a83a24dc8801ad042620c56d154b23e1fbac16..622cacb9e9bc0520f1363c584354ca2aab9ed92c 100644 (file)
@@ -838,7 +838,7 @@ namespace hp
      * processes but the Triangulation on which this DoFHandler builds
      * works only on one MPI process.)
      */
-    const std::vector<IndexSet> &
+    std::vector<IndexSet>
     locally_owned_dofs_per_processor() const;
 
     /**
@@ -857,7 +857,7 @@ namespace hp
      * process, or that there are multiple MPI processes but the Triangulation
      * on which this DoFHandler builds works only on one MPI process.)
      */
-    const std::vector<types::global_dof_index> &
+    std::vector<types::global_dof_index>
     n_locally_owned_dofs_per_processor() const;
 
     /**
@@ -875,7 +875,7 @@ namespace hp
      * support multilevel methods yet, this function throws an exception
      * ExcNotImplemented() independent of its argument.
      */
-    const std::vector<IndexSet> &
+    std::vector<IndexSet>
     locally_owned_mg_dofs_per_processor(const unsigned int level) const;
 
     /**
@@ -1487,19 +1487,33 @@ namespace hp
 
 
   template <int dim, int spacedim>
-  const std::vector<types::global_dof_index> &
+  std::vector<types::global_dof_index>
   DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
   {
-    return number_cache.n_locally_owned_dofs_per_processor;
+    const parallel::Triangulation<dim, spacedim> *tr =
+      (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+        &this->get_triangulation()));
+    if (tr != nullptr)
+      return number_cache.get_n_locally_owned_dofs_per_processor(
+        tr->get_communicator());
+    else
+      return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
   }
 
 
 
   template <int dim, int spacedim>
-  const std::vector<IndexSet> &
+  std::vector<IndexSet>
   DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
   {
-    return number_cache.locally_owned_dofs_per_processor;
+    const parallel::Triangulation<dim, spacedim> *tr =
+      (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+        &this->get_triangulation()));
+    if (tr != nullptr)
+      return number_cache.get_locally_owned_dofs_per_processor(
+        tr->get_communicator());
+    else
+      return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
   }
 
 
@@ -1518,7 +1532,7 @@ namespace hp
 
 
   template <int dim, int spacedim>
-  const std::vector<IndexSet> &
+  std::vector<IndexSet>
   DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
     const unsigned int level) const
   {
@@ -1526,7 +1540,15 @@ namespace hp
     (void)level;
     Assert(level < this->get_triangulation().n_global_levels(),
            ExcMessage("invalid level in locally_owned_mg_dofs_per_processor"));
-    return mg_number_cache[0].locally_owned_dofs_per_processor;
+    const parallel::Triangulation<dim, spacedim> *tr =
+      (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+        &this->get_triangulation()));
+    if (tr != nullptr)
+      return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+        tr->get_communicator());
+    else
+      return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+        MPI_COMM_SELF);
   }
 
 
source/distributed/tria.cc
index 1281b79598e80730c3ca5ab9e5826b86d0a69870..f9f267bb8dce2e670b670df5c9299fd18f8d9b4c 100644 (file)
@@ -1862,8 +1862,6 @@ namespace parallel
           const std::string fname_variable =
             std::string(filename) + "_variable.data";
 
-          const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
-
           MPI_Info info;
           int      ierr = MPI_Info_create(&info);
           AssertThrowMPI(ierr);
@@ -1906,36 +1904,26 @@ namespace parallel
           // Gather size of data in bytes we want to store from this processor.
           const unsigned int size_on_proc = src_data_variable.size();
 
-          // Share information among all processors.
-          std::vector<unsigned int> sizes_on_all_procs(n_procs);
-          ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
-                               1,
-                               MPI_UNSIGNED,
-                               sizes_on_all_procs.data(),
-                               1,
-                               MPI_UNSIGNED,
-                               mpi_communicator);
+          // Compute prefix sum
+          unsigned int prefix_sum = 0;
+          ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc),
+                            &prefix_sum,
+                            1,
+                            MPI_UNSIGNED,
+                            MPI_SUM,
+                            mpi_communicator);
           AssertThrowMPI(ierr);
 
-          // Generate accumulated sum to get an offset for writing variable
-          // size data.
-          std::partial_sum(sizes_on_all_procs.begin(),
-                           sizes_on_all_procs.end(),
-                           sizes_on_all_procs.begin());
-
           const char *data = src_data_variable.data();
 
           // Write data consecutively into file.
-          ierr = MPI_File_write_at(
-            fh,
-            offset_variable +
-              ((myrank == 0) ?
-                 0 :
-                 sizes_on_all_procs[myrank - 1]), // global position in file
-            DEAL_II_MPI_CONST_CAST(data),
-            src_data_variable.size(), // local buffer
-            MPI_CHAR,
-            MPI_STATUS_IGNORE);
+          ierr = MPI_File_write_at(fh,
+                                   offset_variable +
+                                     prefix_sum, // global position in file
+                                   DEAL_II_MPI_CONST_CAST(data),
+                                   src_data_variable.size(), // local buffer
+                                   MPI_CHAR,
+                                   MPI_STATUS_IGNORE);
           AssertThrowMPI(ierr);
 
           ierr = MPI_File_close(&fh);
@@ -2033,8 +2021,6 @@ namespace parallel
           const std::string fname_variable =
             std::string(filename) + "_variable.data";
 
-          const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
-
           MPI_Info info;
           int      ierr = MPI_Info_create(&info);
           AssertThrowMPI(ierr);
@@ -2070,27 +2056,19 @@ namespace parallel
                             dest_sizes_variable.end(),
                             0);
 
-          // share information among all processors
-          std::vector<unsigned int> sizes_on_all_procs(n_procs);
-          ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
-                               1,
-                               MPI_UNSIGNED,
-                               sizes_on_all_procs.data(),
-                               1,
-                               MPI_UNSIGNED,
-                               mpi_communicator);
+          // share information among all processors by prefix sum
+          unsigned int prefix_sum = 0;
+          ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc),
+                            &prefix_sum,
+                            1,
+                            MPI_UNSIGNED,
+                            MPI_SUM,
+                            mpi_communicator);
           AssertThrowMPI(ierr);
 
-          // generate accumulated sum
-          std::partial_sum(sizes_on_all_procs.begin(),
-                           sizes_on_all_procs.end(),
-                           sizes_on_all_procs.begin());
-
           dest_data_variable.resize(size_on_proc);
           ierr = MPI_File_read_at(fh,
-                                  offset + ((myrank == 0) ?
-                                              0 :
-                                              sizes_on_all_procs[myrank - 1]),
+                                  offset + prefix_sum,
                                   dest_data_variable.data(),
                                   dest_data_variable.size(),
                                   MPI_CHAR,
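
In the hunks above, the Exscan result doubles as the write offset for the MPI-IO calls. A self-contained sketch under assumptions (fh is an MPI_File opened collectively beforehand, base_offset marks where the variable-size section starts; both names are hypothetical):

#include <mpi.h>

#include <vector>

// Sketch: write each rank's variable-size chunk consecutively into a
// shared file. The exclusive prefix sum of the chunk sizes gives the
// position at which this rank's data starts.
void write_chunks(MPI_File                 fh,
                  const MPI_Offset         base_offset,
                  const std::vector<char> &local_data,
                  const MPI_Comm           comm)
{
  unsigned int local_size = static_cast<unsigned int>(local_data.size());

  // Bytes written by all lower-ranked processes; zero on rank 0.
  unsigned int prefix = 0;
  MPI_Exscan(&local_size, &prefix, 1, MPI_UNSIGNED, MPI_SUM, comm);

  // Pre-MPI-3 implementations may require a const cast on the buffer
  // (deal.II wraps this in DEAL_II_MPI_CONST_CAST).
  MPI_File_write_at(fh,
                    base_offset + prefix,
                    local_data.data(),
                    static_cast<int>(local_data.size()),
                    MPI_CHAR,
                    MPI_STATUS_IGNORE);
}
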
source/dofs/dof_handler_policy.cc
index c8cf270131a83ca1213bd530c073c27baa44d55d..50e6b1f64c9e269435933fa15ffec90d0d0bf8dd 100644 (file)
@@ -5020,9 +5020,6 @@ namespace internal
               &dof_handler->get_triangulation())));
         Assert(triangulation != nullptr, ExcInternalError());
 
-        const unsigned int n_cpus =
-          Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
         const types::subdomain_id subdomain_id =
           triangulation->locally_owned_subdomain();
 
@@ -5091,25 +5088,16 @@ namespace internal
 
         // --------- Phase 4: shift indices so that each processor has a unique
         //                    range of indices
-        std::vector<dealii::types::global_dof_index>
-          n_locally_owned_dofs_per_processor(n_cpus);
-
-        const int ierr =
-          MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
-                        1,
-                        DEAL_II_DOF_INDEX_MPI_TYPE,
-                        n_locally_owned_dofs_per_processor.data(),
-                        1,
-                        DEAL_II_DOF_INDEX_MPI_TYPE,
-                        triangulation->get_communicator());
+        dealii::types::global_dof_index my_shift = 0;
+        const int                       ierr =
+          MPI_Exscan(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
+                     &my_shift,
+                     1,
+                     DEAL_II_DOF_INDEX_MPI_TYPE,
+                     MPI_SUM,
+                     triangulation->get_communicator());
         AssertThrowMPI(ierr);
 
-        const dealii::types::global_dof_index my_shift =
-          std::accumulate(n_locally_owned_dofs_per_processor.begin(),
-                          n_locally_owned_dofs_per_processor.begin() +
-                            subdomain_id,
-                          static_cast<dealii::types::global_dof_index>(0));
-
         // make dof indices globally consecutive
         for (auto &new_index : renumbering)
           if (new_index != numbers::invalid_dof_index)
@@ -5125,39 +5113,17 @@ namespace internal
 
         // now a little bit of housekeeping
         const dealii::types::global_dof_index n_global_dofs =
-          std::accumulate(n_locally_owned_dofs_per_processor.begin(),
-                          n_locally_owned_dofs_per_processor.end(),
-                          dealii::types::global_dof_index(0));
-
-        std::vector<IndexSet> locally_owned_dofs_per_processor(
-          n_cpus, IndexSet(n_global_dofs));
-        {
-          dealii::types::global_dof_index current_shift = 0;
-          for (unsigned int i = 0; i < n_cpus; ++i)
-            {
-              locally_owned_dofs_per_processor[i].add_range(
-                current_shift,
-                current_shift + n_locally_owned_dofs_per_processor[i]);
-              current_shift += n_locally_owned_dofs_per_processor[i];
-            }
-        }
-        NumberCache number_cache(locally_owned_dofs_per_processor,
-                                 triangulation->locally_owned_subdomain());
-        Assert(number_cache
-                   .locally_owned_dofs_per_processor
-                     [triangulation->locally_owned_subdomain()]
-                   .n_elements() == number_cache.n_locally_owned_dofs,
-               ExcInternalError());
-        Assert(
-          !number_cache
-              .locally_owned_dofs_per_processor[triangulation
-                                                  ->locally_owned_subdomain()]
-              .n_elements() ||
-            number_cache
-                .locally_owned_dofs_per_processor[triangulation
-                                                    ->locally_owned_subdomain()]
-                .nth_index_in_set(0) == my_shift,
-          ExcInternalError());
+          Utilities::MPI::sum(n_locally_owned_dofs,
+                              triangulation->get_communicator());
+
+        NumberCache number_cache;
+        number_cache.n_global_dofs        = n_global_dofs;
+        number_cache.n_locally_owned_dofs = n_locally_owned_dofs;
+        number_cache.locally_owned_dofs   = IndexSet(n_global_dofs);
+        number_cache.locally_owned_dofs.add_range(my_shift,
+                                                  my_shift +
+                                                    n_locally_owned_dofs);
+        number_cache.locally_owned_dofs.compress();
 
         // this ends the phase where we enumerate degrees of freedom on
         // each processor. what is missing is communicating DoF indices
@@ -5305,10 +5271,6 @@ namespace internal
                       "Triangulation if the flag construct_multigrid_hierarchy "
                       "is set in the constructor."));
 
-
-        const unsigned int n_cpus =
-          Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
         // loop over all levels that exist globally (across all
         // processors), even if the current processor does not in fact
         // have any cells on that level or if the local part of the
@@ -5365,8 +5327,6 @@ namespace internal
                     }
               }
 
-            // TODO: make this code simpler with the new constructors of
-            // NumberCache make indices consecutive
             level_number_cache.n_locally_owned_dofs = 0;
             for (types::global_dof_index &index : renumbering)
               if (index != numbers::invalid_dof_index)
@@ -5374,27 +5334,20 @@ namespace internal
 
             //* 3. communicate local dofcount and shift ids to make
             // them unique
-            level_number_cache.n_locally_owned_dofs_per_processor.resize(
-              n_cpus);
-
-            int ierr = MPI_Allgather(
-              &level_number_cache.n_locally_owned_dofs,
-              1,
-              DEAL_II_DOF_INDEX_MPI_TYPE,
-              level_number_cache.n_locally_owned_dofs_per_processor.data(),
-              1,
-              DEAL_II_DOF_INDEX_MPI_TYPE,
-              triangulation->get_communicator());
+            dealii::types::global_dof_index my_shift = 0;
+            const int                       ierr =
+              MPI_Exscan(DEAL_II_MPI_CONST_CAST(
+                           &level_number_cache.n_locally_owned_dofs),
+                         &my_shift,
+                         1,
+                         DEAL_II_DOF_INDEX_MPI_TYPE,
+                         MPI_SUM,
+                         triangulation->get_communicator());
             AssertThrowMPI(ierr);
 
-            const dealii::types::global_dof_index shift = std::accumulate(
-              level_number_cache.n_locally_owned_dofs_per_processor.begin(),
-              level_number_cache.n_locally_owned_dofs_per_processor.begin() +
-                triangulation->locally_owned_subdomain(),
-              static_cast<dealii::types::global_dof_index>(0));
             for (types::global_dof_index &index : renumbering)
               if (index != numbers::invalid_dof_index)
-                index += shift;
+                index += my_shift;
 
             // now re-enumerate all dofs to this shifted and condensed
             // numbering form.  we renumber some dofs as invalid, so
@@ -5409,49 +5362,16 @@ namespace internal
                 renumbering, IndexSet(0), *dof_handler, level, false);
 
             // now a little bit of housekeeping
-            level_number_cache.n_global_dofs = std::accumulate(
-              level_number_cache.n_locally_owned_dofs_per_processor.begin(),
-              level_number_cache.n_locally_owned_dofs_per_processor.end(),
-              static_cast<dealii::types::global_dof_index>(0));
+            level_number_cache.n_global_dofs =
+              Utilities::MPI::sum(level_number_cache.n_locally_owned_dofs,
+                                  triangulation->get_communicator());
 
             level_number_cache.locally_owned_dofs =
               IndexSet(level_number_cache.n_global_dofs);
             level_number_cache.locally_owned_dofs.add_range(
-              shift, shift + level_number_cache.n_locally_owned_dofs);
+              my_shift, my_shift + level_number_cache.n_locally_owned_dofs);
             level_number_cache.locally_owned_dofs.compress();
 
-            // fill global_dof_indexsets
-            level_number_cache.locally_owned_dofs_per_processor.resize(n_cpus);
-            {
-              dealii::types::global_dof_index current_shift = 0;
-              for (unsigned int i = 0; i < n_cpus; ++i)
-                {
-                  level_number_cache.locally_owned_dofs_per_processor[i] =
-                    IndexSet(level_number_cache.n_global_dofs);
-                  level_number_cache.locally_owned_dofs_per_processor[i]
-                    .add_range(current_shift,
-                               current_shift +
-                                 level_number_cache
-                                   .n_locally_owned_dofs_per_processor[i]);
-                  current_shift +=
-                    level_number_cache.n_locally_owned_dofs_per_processor[i];
-                }
-            }
-            Assert(level_number_cache
-                       .locally_owned_dofs_per_processor
-                         [triangulation->locally_owned_subdomain()]
-                       .n_elements() == level_number_cache.n_locally_owned_dofs,
-                   ExcInternalError());
-            Assert(!level_number_cache
-                       .locally_owned_dofs_per_processor
-                         [triangulation->locally_owned_subdomain()]
-                       .n_elements() ||
-                     level_number_cache
-                         .locally_owned_dofs_per_processor
-                           [triangulation->locally_owned_subdomain()]
-                         .nth_index_in_set(0) == shift,
-                   ExcInternalError());
-
             number_caches.emplace_back(level_number_cache);
           }
 
@@ -5679,11 +5599,12 @@ namespace internal
                                             *dof_handler,
                                             /*check_validity=*/false);
 
-            // Since we have not updated the number cache yet, we can use the
-            // index sets contained in the DoFHandler at this stage.
-            return NumberCache(dof_handler->locally_owned_dofs_per_processor(),
-                               Utilities::MPI::this_mpi_process(
-                                 triangulation->get_communicator()));
+            NumberCache number_cache;
+            number_cache.locally_owned_dofs = dof_handler->locally_owned_dofs();
+            number_cache.n_global_dofs      = dof_handler->n_dofs();
+            number_cache.n_locally_owned_dofs =
+              number_cache.locally_owned_dofs.n_elements();
+            return number_cache;
           }
         else
           {
@@ -5809,103 +5730,12 @@ namespace internal
               triangulation->load_user_flags(user_flags);
             }
 
-            // the last step is to update the NumberCache, including knowing
-            // which processor owns which DoF index. this requires
-            // communication.
-            //
-            // this step is substantially more complicated than it is in
-            // distribute_dofs() in case the IndexSets of locally owned DoFs
-            // after renumbering are not contiguous any more (which we have done
-            // at the top of this function). for distribute_dofs() it was enough
-            // to exchange the starting indices for each processor and the
-            // global number of DoFs, but here we actually have to serialize the
-            // IndexSet objects and ship them across the network.
-            const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(
-              triangulation->get_communicator());
-            std::vector<IndexSet> locally_owned_dofs_per_processor(
-              n_cpus, IndexSet(dof_handler->n_dofs()));
-            // serialize our own IndexSet
-            std::vector<char> my_data;
-            {
-#  ifdef DEAL_II_WITH_ZLIB
-
-              boost::iostreams::filtering_ostream out;
-              out.push(
-                boost::iostreams::gzip_compressor(boost::iostreams::gzip_params(
-                  boost::iostreams::gzip::best_compression)));
-              out.push(boost::iostreams::back_inserter(my_data));
-
-              boost::archive::binary_oarchive archive(out);
-
-              archive << my_locally_owned_new_dof_indices;
-              out.flush();
-#  else
-              std::ostringstream              out;
-              boost::archive::binary_oarchive archive(out);
-              archive << my_locally_owned_new_dof_indices;
-              const std::string &s = out.str();
-              my_data.reserve(s.size());
-              my_data.assign(s.begin(), s.end());
-#  endif
-            }
-
-            // determine maximum size of IndexSet
-            const unsigned int max_size =
-              Utilities::MPI::max(my_data.size(),
-                                  triangulation->get_communicator());
-
-            // as the MPI_Allgather call will be reading max_size elements, and
-            // as this may be past the end of my_data, we need to increase the
-            // size of the local buffer. This is filled with zeros.
-            my_data.resize(max_size);
-
-            std::vector<char> buffer(max_size * n_cpus);
-            const int         ierr = MPI_Allgather(my_data.data(),
-                                           max_size,
-                                           MPI_BYTE,
-                                           buffer.data(),
-                                           max_size,
-                                           MPI_BYTE,
-                                           triangulation->get_communicator());
-            AssertThrowMPI(ierr);
-
-            for (unsigned int i = 0; i < n_cpus; ++i)
-              if (i == Utilities::MPI::this_mpi_process(
-                         triangulation->get_communicator()))
-                locally_owned_dofs_per_processor[i] =
-                  my_locally_owned_new_dof_indices;
-              else
-                {
-                  // copy the data previously received into a stringstream
-                  // object and then read the IndexSet from it
-                  std::string decompressed_buffer;
-
-                  // first decompress the buffer
-                  {
-#  ifdef DEAL_II_WITH_ZLIB
-
-                    boost::iostreams::filtering_ostream decompressing_stream;
-                    decompressing_stream.push(
-                      boost::iostreams::gzip_decompressor());
-                    decompressing_stream.push(
-                      boost::iostreams::back_inserter(decompressed_buffer));
-
-                    decompressing_stream.write(&buffer[i * max_size], max_size);
-#  else
-                    decompressed_buffer.assign(&buffer[i * max_size], max_size);
-#  endif
-                  }
-
-                  // then restore the object from the buffer
-                  std::istringstream              in(decompressed_buffer);
-                  boost::archive::binary_iarchive archive(in);
-
-                  archive >> locally_owned_dofs_per_processor[i];
-                }
-
-            return NumberCache(locally_owned_dofs_per_processor,
-                               Utilities::MPI::this_mpi_process(
-                                 triangulation->get_communicator()));
+            NumberCache number_cache;
+            number_cache.locally_owned_dofs = my_locally_owned_new_dof_indices;
+            number_cache.n_global_dofs      = dof_handler->n_dofs();
+            number_cache.n_locally_owned_dofs =
+              number_cache.locally_owned_dofs.n_elements();
+            return number_cache;
           }
 #endif
       }
@@ -5921,8 +5751,7 @@ namespace internal
         // we only implement the case where the multigrid numbers are
         // renumbered within the processor's partition, rather than the most
         // general case
-        const std::vector<IndexSet> &index_sets =
-          dof_handler->locally_owned_mg_dofs_per_processor(level);
+        const IndexSet index_set = dof_handler->locally_owned_mg_dofs(level);
 
         constexpr int dim      = DoFHandlerType::dimension;
         constexpr int spacedim = DoFHandlerType::space_dimension;
@@ -5938,7 +5767,7 @@ namespace internal
 #  ifdef DEBUG
         for (types::global_dof_index i : new_numbers)
           {
-            Assert(index_sets[my_rank].is_element(i),
+            Assert(index_set.is_element(i),
                    ExcNotImplemented(
                      "Renumberings that change the locally owned mg dofs "
                      "partitioning are currently not implemented for "
@@ -5956,7 +5785,7 @@ namespace internal
         std::vector<types::global_dof_index> ghosted_new_numbers(
           relevant_dofs.n_elements());
         {
-          Utilities::MPI::Partitioner          partitioner(index_sets[my_rank],
+          Utilities::MPI::Partitioner          partitioner(index_set,
                                                   relevant_dofs,
                                                   tr->get_communicator());
           std::vector<types::global_dof_index> temp_array(
@@ -6018,8 +5847,12 @@ namespace internal
         Assert(false, ExcNotImplemented());
 #endif
 
-        return NumberCache(
-          index_sets, Utilities::MPI::this_mpi_process(tr->get_communicator()));
+        NumberCache number_cache;
+        number_cache.locally_owned_dofs = index_set;
+        number_cache.n_global_dofs      = dof_handler->n_dofs();
+        number_cache.n_locally_owned_dofs =
+          number_cache.locally_owned_dofs.n_elements();
+        return number_cache;
       }
     } // namespace Policy
   }   // namespace DoFHandlerImplementation
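
The NumberCache filling in the hunks above reduces to a small recipe: sum the local counts for the global size, then describe this rank's contiguous block as an IndexSet starting at the Exscan shift. A hedged sketch with placeholder names, using the same deal.II utilities:

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>

// Sketch: build the IndexSet of this rank's contiguous DoF range.
// n_locally_owned and my_shift are assumed to come from the local
// enumeration and the MPI_Exscan call shown in the diff above.
dealii::IndexSet
make_locally_owned_set(const dealii::types::global_dof_index n_locally_owned,
                       const dealii::types::global_dof_index my_shift,
                       const MPI_Comm                         communicator)
{
  const dealii::types::global_dof_index n_global_dofs =
    dealii::Utilities::MPI::sum(n_locally_owned, communicator);

  dealii::IndexSet locally_owned(n_global_dofs);
  locally_owned.add_range(my_shift, my_shift + n_locally_owned);
  locally_owned.compress();
  return locally_owned;
}
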
source/dofs/dof_renumbering.cc
index 6aff386c2769ffe8774c8c073e91317f66d3c30c..777250c0b1c08e5d179add7e38dee00317c093dd 100644 (file)
@@ -877,39 +877,26 @@ namespace DoFRenumbering
         for (unsigned int c = 0; c < n_buckets; ++c)
           local_dof_count[c] = component_to_dof_map[c].size();
 
-
-        // gather information from all CPUs
-        std::vector<types::global_dof_index> all_dof_counts(
-          fe_collection.n_components() *
-          Utilities::MPI::n_mpi_processes(tria->get_communicator()));
-
-        const int ierr = MPI_Allgather(local_dof_count.data(),
-                                       n_buckets,
-                                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                                       all_dof_counts.data(),
-                                       n_buckets,
-                                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                                       tria->get_communicator());
+        std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
+        const int ierr = MPI_Exscan(local_dof_count.data(),
+                                    prefix_dof_count.data(),
+                                    n_buckets,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    MPI_SUM,
+                                    tria->get_communicator());
         AssertThrowMPI(ierr);
 
-        for (unsigned int i = 0; i < n_buckets; ++i)
-          Assert(
-            all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] ==
-              local_dof_count[i],
-            ExcInternalError());
+        std::vector<types::global_dof_index> global_dof_count(n_buckets);
+        Utilities::MPI::sum(local_dof_count,
+                            tria->get_communicator(),
+                            global_dof_count);
 
         // calculate shifts
-        unsigned int cumulated = 0;
+        types::global_dof_index cumulated = 0;
         for (unsigned int c = 0; c < n_buckets; ++c)
           {
-            shifts[c] = cumulated;
-            for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain();
-                 ++i)
-              shifts[c] += all_dof_counts[c + n_buckets * i];
-            for (unsigned int i = 0;
-                 i < Utilities::MPI::n_mpi_processes(tria->get_communicator());
-                 ++i)
-              cumulated += all_dof_counts[c + n_buckets * i];
+            shifts[c] = prefix_dof_count[c] + cumulated;
+            cumulated += global_dof_count[c];
           }
 #else
         (void)tria;
@@ -1193,39 +1180,26 @@ namespace DoFRenumbering
         for (unsigned int c = 0; c < n_buckets; ++c)
           local_dof_count[c] = block_to_dof_map[c].size();
 
-
-        // gather information from all CPUs
-        std::vector<types::global_dof_index> all_dof_counts(
-          fe_collection.n_components() *
-          Utilities::MPI::n_mpi_processes(tria->get_communicator()));
-
-        const int ierr = MPI_Allgather(local_dof_count.data(),
-                                       n_buckets,
-                                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                                       all_dof_counts.data(),
-                                       n_buckets,
-                                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                                       tria->get_communicator());
+        std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
+        const int ierr = MPI_Exscan(local_dof_count.data(),
+                                    prefix_dof_count.data(),
+                                    n_buckets,
+                                    DEAL_II_DOF_INDEX_MPI_TYPE,
+                                    MPI_SUM,
+                                    tria->get_communicator());
         AssertThrowMPI(ierr);
 
-        for (unsigned int i = 0; i < n_buckets; ++i)
-          Assert(
-            all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] ==
-              local_dof_count[i],
-            ExcInternalError());
+        std::vector<types::global_dof_index> global_dof_count(n_buckets);
+        Utilities::MPI::sum(local_dof_count,
+                            tria->get_communicator(),
+                            global_dof_count);
 
         // calculate shifts
         types::global_dof_index cumulated = 0;
         for (unsigned int c = 0; c < n_buckets; ++c)
           {
-            shifts[c] = cumulated;
-            for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain();
-                 ++i)
-              shifts[c] += all_dof_counts[c + n_buckets * i];
-            for (unsigned int i = 0;
-                 i < Utilities::MPI::n_mpi_processes(tria->get_communicator());
-                 ++i)
-              cumulated += all_dof_counts[c + n_buckets * i];
+            shifts[c] = prefix_dof_count[c] + cumulated;
+            cumulated += global_dof_count[c];
           }
 #else
         (void)tria;
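
The component- and block-wise renumbering above uses the vector form of the same idea: MPI_Exscan operates element-wise on arrays, so a single call delivers all per-bucket prefix counts, and a global reduction supplies the bucket totals needed to stack the buckets. A sketch with illustrative names:

#include <mpi.h>

#include <vector>

// Sketch: for each of n_buckets categories, compute the global index
// at which this rank's entries of that category start.
std::vector<unsigned long long>
bucket_shifts(const std::vector<unsigned long long> &local_count,
              const MPI_Comm                         comm)
{
  const int n_buckets = static_cast<int>(local_count.size());

  // Element-wise exclusive prefix sum over the ranks; zero on rank 0.
  std::vector<unsigned long long> prefix(n_buckets, 0);
  MPI_Exscan(local_count.data(), prefix.data(), n_buckets,
             MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm);

  // Global number of entries in each bucket.
  std::vector<unsigned long long> global_count(n_buckets, 0);
  MPI_Allreduce(local_count.data(), global_count.data(), n_buckets,
                MPI_UNSIGNED_LONG_LONG, MPI_SUM, comm);

  // Bucket c starts after all complete lower buckets plus this rank's
  // prefix within bucket c.
  std::vector<unsigned long long> shifts(n_buckets, 0);
  unsigned long long cumulated = 0;
  for (int c = 0; c < n_buckets; ++c)
    {
      shifts[c] = cumulated + prefix[c];
      cumulated += global_count[c];
    }
  return shifts;
}
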
source/dofs/number_cache.cc
index 6deff5e03438d6b757140e007163a7605b3ed6cf..5d5aa0125e623ef89e1c1f9d37178bb64b970f08 100644 (file)
@@ -14,6 +14,7 @@
 // ---------------------------------------------------------------------
 
 #include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/mpi.h>
 
 #include <deal.II/dofs/number_cache.h>
 
@@ -80,6 +81,154 @@ namespace internal
       locally_owned_dofs_per_processor.clear();
     }
 
+
+
+    std::vector<types::global_dof_index>
+    NumberCache::get_n_locally_owned_dofs_per_processor(
+      const MPI_Comm mpi_communicator) const
+    {
+      const unsigned int n_procs =
+        Utilities::MPI::job_supports_mpi() ?
+          Utilities::MPI::n_mpi_processes(mpi_communicator) :
+          1;
+      if (n_global_dofs == 0)
+        return std::vector<types::global_dof_index>();
+      else if (n_locally_owned_dofs_per_processor.empty() == false)
+        {
+          AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs);
+          return n_locally_owned_dofs_per_processor;
+        }
+      else
+        {
+          std::vector<types::global_dof_index> result(n_procs,
+                                                      n_locally_owned_dofs);
+#ifdef DEAL_II_WITH_MPI
+          if (n_procs > 1)
+            MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
+                          1,
+                          DEAL_II_DOF_INDEX_MPI_TYPE,
+                          result.data(),
+                          1,
+                          DEAL_II_DOF_INDEX_MPI_TYPE,
+                          mpi_communicator);
+#endif
+          return result;
+        }
+    }
+
+
+
+    std::vector<IndexSet>
+    NumberCache::get_locally_owned_dofs_per_processor(
+      const MPI_Comm mpi_communicator) const
+    {
+      AssertDimension(locally_owned_dofs.size(), n_global_dofs);
+      const unsigned int n_procs =
+        Utilities::MPI::job_supports_mpi() ?
+          Utilities::MPI::n_mpi_processes(mpi_communicator) :
+          1;
+      if (n_global_dofs == 0)
+        return std::vector<IndexSet>();
+      else if (locally_owned_dofs_per_processor.empty() == false)
+        {
+          AssertDimension(locally_owned_dofs_per_processor.size(), n_procs);
+          return locally_owned_dofs_per_processor;
+        }
+      else
+        {
+          std::vector<IndexSet> locally_owned_dofs_per_processor(
+            n_procs, locally_owned_dofs);
+
+#ifdef DEAL_II_WITH_MPI
+          if (n_procs > 1)
+            {
+              // this step is substantially more complicated because indices
+              // might be distributed arbitrarily among the processors. Here we
+              // have to serialize the IndexSet objects and ship them across the
+              // network.
+              std::vector<char> my_data;
+              {
+#  ifdef DEAL_II_WITH_ZLIB
+
+                boost::iostreams::filtering_ostream out;
+                out.push(boost::iostreams::gzip_compressor(
+                  boost::iostreams::gzip_params(
+                    boost::iostreams::gzip::best_compression)));
+                out.push(boost::iostreams::back_inserter(my_data));
+
+                boost::archive::binary_oarchive archive(out);
+
+                archive << locally_owned_dofs;
+                out.flush();
+#  else
+                std::ostringstream              out;
+                boost::archive::binary_oarchive archive(out);
+                archive << locally_owned_dofs;
+                const std::string &s = out.str();
+                my_data.reserve(s.size());
+                my_data.assign(s.begin(), s.end());
+#  endif
+              }
+
+              // determine maximum size of IndexSet
+              const unsigned int max_size =
+                Utilities::MPI::max(my_data.size(), mpi_communicator);
+
+              // as the MPI_Allgather call will be reading max_size elements,
+              // and as this may be past the end of my_data, we need to increase
+              // the size of the local buffer. This is filled with zeros.
+              my_data.resize(max_size);
+
+              std::vector<char> buffer(max_size * n_procs);
+              const int         ierr = MPI_Allgather(my_data.data(),
+                                             max_size,
+                                             MPI_BYTE,
+                                             buffer.data(),
+                                             max_size,
+                                             MPI_BYTE,
+                                             mpi_communicator);
+              AssertThrowMPI(ierr);
+
+              for (unsigned int i = 0; i < n_procs; ++i)
+                if (i == Utilities::MPI::this_mpi_process(mpi_communicator))
+                  locally_owned_dofs_per_processor[i] = locally_owned_dofs;
+                else
+                  {
+                    // copy the data previously received into a stringstream
+                    // object and then read the IndexSet from it
+                    std::string decompressed_buffer;
+
+                    // first decompress the buffer
+                    {
+#  ifdef DEAL_II_WITH_ZLIB
+
+                      boost::iostreams::filtering_ostream decompressing_stream;
+                      decompressing_stream.push(
+                        boost::iostreams::gzip_decompressor());
+                      decompressing_stream.push(
+                        boost::iostreams::back_inserter(decompressed_buffer));
+
+                      decompressing_stream.write(&buffer[i * max_size],
+                                                 max_size);
+#  else
+                      decompressed_buffer.assign(&buffer[i * max_size],
+                                                 max_size);
+#  endif
+                    }
+
+                    // then restore the object from the buffer
+                    std::istringstream              in(decompressed_buffer);
+                    boost::archive::binary_iarchive archive(in);
+
+                    archive >> locally_owned_dofs_per_processor[i];
+                  }
+            }
+#endif
+          return locally_owned_dofs_per_processor;
+        }
+    }
+
+
     std::size_t
     NumberCache::memory_consumption() const
     {
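
The fallback branch above gathers IndexSets of different serialized lengths by padding every buffer to the global maximum before a single MPI_Allgather. A generic, hedged sketch of that exchange pattern, without the zlib/boost serialization layer and with placeholder names:

#include <mpi.h>

#include <string>
#include <vector>

// Sketch: every rank contributes a byte string of arbitrary length;
// afterwards each rank holds all contributions. Padding to the maximum
// keeps the exchange a single fixed-size MPI_Allgather.
std::vector<std::string>
allgather_strings(const std::string &my_data, const MPI_Comm comm)
{
  int n_procs = 1;
  MPI_Comm_size(comm, &n_procs);

  // Largest contribution over all ranks.
  unsigned long local_size = my_data.size();
  unsigned long max_size   = 0;
  MPI_Allreduce(&local_size, &max_size, 1,
                MPI_UNSIGNED_LONG, MPI_MAX, comm);

  // Gather the true sizes so the padding can be stripped again.
  std::vector<unsigned long> sizes(n_procs, 0);
  MPI_Allgather(&local_size, 1, MPI_UNSIGNED_LONG,
                sizes.data(), 1, MPI_UNSIGNED_LONG, comm);

  // Pad the local buffer so that every rank sends max_size bytes.
  std::vector<char> send_buffer(my_data.begin(), my_data.end());
  send_buffer.resize(max_size, '\0');

  std::vector<char> recv_buffer(max_size * n_procs);
  MPI_Allgather(send_buffer.data(), static_cast<int>(max_size), MPI_CHAR,
                recv_buffer.data(), static_cast<int>(max_size), MPI_CHAR,
                comm);

  std::vector<std::string> result(n_procs);
  for (int p = 0; p < n_procs; ++p)
    result[p].assign(recv_buffer.data() + p * max_size, sizes[p]);
  return result;
}
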
source/grid/grid_tools.cc
index 7e1d8b70c8a9012dda27aa0cf4221bb3dba0d182..3f28240a684d210ab4164f63dbd45a7e9cd7d75e 100644 (file)
@@ -2347,24 +2347,14 @@ namespace GridTools
 
     // Make indices global by getting the number of vertices owned by each
     // processors and shifting the indices accordingly
-    const unsigned int n_cpu =
-      Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
-    std::vector<types::global_vertex_index> indices(n_cpu);
-    int ierr = MPI_Allgather(&next_index,
-                             1,
-                             DEAL_II_VERTEX_INDEX_MPI_TYPE,
-                             indices.data(),
-                             1,
-                             DEAL_II_VERTEX_INDEX_MPI_TYPE,
-                             triangulation.get_communicator());
+    types::global_dof_index shift = 0;
+    int ierr = MPI_Exscan(&next_index,
+                          &shift,
+                          1,
+                          DEAL_II_VERTEX_INDEX_MPI_TYPE,
+                          MPI_SUM,
+                          triangulation.get_communicator());
     AssertThrowMPI(ierr);
-    Assert(indices.begin() + triangulation.locally_owned_subdomain() <
-             indices.end(),
-           ExcInternalError());
-    const types::global_vertex_index shift =
-      std::accumulate(indices.begin(),
-                      indices.begin() + triangulation.locally_owned_subdomain(),
-                      types::global_vertex_index(0));
 
     std::map<unsigned int, types::global_vertex_index>::iterator
       global_index_it = local_to_global_vertex_index.begin(),
source/multigrid/mg_transfer_internal.cc
index 9a0f3b17537e2538fd134d27712125ab069d5262..34fa8a6c08cdb9d6a230713020840b6e39d1b78c 100644 (file)
@@ -187,6 +187,12 @@ namespace internal
           // The list of neighbors is symmetric (our neighbors have us as a
           // neighbor), so we can use it to send and to know how many messages
           // we will get.
+          std::vector<std::vector<IndexSet>>
+            locally_owned_mg_dofs_per_processor;
+          for (unsigned int l = 0; l < tria->n_global_levels(); ++l)
+            locally_owned_mg_dofs_per_processor.push_back(
+              mg_dof.locally_owned_mg_dofs_per_processor(l));
+
           const std::set<types::subdomain_id> &neighbors =
             tria->level_ghost_owners();
           std::map<int, std::vector<DoFPair>> send_data;
@@ -198,8 +204,7 @@ namespace internal
               std::set<types::subdomain_id>::iterator it;
               for (it = neighbors.begin(); it != neighbors.end(); ++it)
                 {
-                  if (mg_dof
-                        .locally_owned_mg_dofs_per_processor(dofpair.level)[*it]
+                  if (locally_owned_mg_dofs_per_processor[dofpair.level][*it]
                         .is_element(dofpair.level_dof_index))
                     {
                       send_data[*it].push_back(dofpair);
source/multigrid/mg_transfer_prebuilt.cc
index 6f8388bfe7c7372ee329b3682348dbe87cc6be3c..0217ff46d425c7815efac2a8c7703accfb6916dc 100644 (file)
@@ -283,7 +283,7 @@ MGTransferPrebuilt<VectorType>::build_matrices(
 
           // Compute # of locally owned MG dofs / processor for distribution
           const std::vector<::dealii::IndexSet>
-            &locally_owned_mg_dofs_per_processor =
+            locally_owned_mg_dofs_per_processor =
               mg_dof.locally_owned_mg_dofs_per_processor(level + 1);
           std::vector<::dealii::types::global_dof_index>
             n_locally_owned_mg_dofs_per_processor(
tests/sharedtria/no_cells_01.cc
index b476f4efedeb41b9b78e0e7c27b9c6888a3bdaa5..6087a92294392c3475daeab933edb499b5030a7a 100644 (file)
@@ -73,7 +73,7 @@ test()
           << std::endl;
 
   deallog << "n_locally_owned_dofs_per_processor: ";
-  std::vector<types::global_dof_index> v =
+  const std::vector<types::global_dof_index> v =
     dof_handler.n_locally_owned_dofs_per_processor();
   unsigned int sum = 0;
   for (unsigned int i = 0; i < v.size(); ++i)
@@ -97,21 +97,19 @@ test()
   const unsigned int N = dof_handler.n_dofs();
 
   Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
-  Assert(
-    std::accumulate(dof_handler.n_locally_owned_dofs_per_processor().begin(),
-                    dof_handler.n_locally_owned_dofs_per_processor().end(),
-                    0U) == N,
-    ExcInternalError());
+  const std::vector<types::global_dof_index> n_owned_dofs =
+    dof_handler.n_locally_owned_dofs_per_processor();
+  Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N,
+         ExcInternalError());
 
+  const std::vector<IndexSet> owned_dofs =
+    dof_handler.locally_owned_dofs_per_processor();
   IndexSet all(N);
-  for (unsigned int i = 0;
-       i < dof_handler.locally_owned_dofs_per_processor().size();
-       ++i)
+  for (unsigned int i = 0; i < owned_dofs.size(); ++i)
     {
-      IndexSet intersect =
-        all & dof_handler.locally_owned_dofs_per_processor()[i];
+      IndexSet intersect = all & owned_dofs[i];
       Assert(intersect.n_elements() == 0, ExcInternalError());
-      all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+      all.add_indices(owned_dofs[i]);
     }
 
   Assert(all == complete_index_set(N), ExcInternalError());
