https://gitweb.dealii.org/ - dealii.git/commitdiff
Fix functions that should be inline in *.templates.h files (12277/head)
author     Daniel Arndt <arndtd@ornl.gov>
           Fri, 21 May 2021 20:59:14 +0000 (16:59 -0400)
committer  Daniel Arndt <arndtd@ornl.gov>
           Fri, 21 May 2021 20:59:14 +0000 (16:59 -0400)
include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
include/deal.II/fe/fe_tools.templates.h
include/deal.II/grid/connectivity.h
include/deal.II/lac/relaxation_block.templates.h
include/deal.II/matrix_free/dof_info.templates.h
include/deal.II/multigrid/mg_transfer_global_coarsening.templates.h
source/base/mpi_noncontiguous_partitioner.cc
source/fe/fe_tools.cc
source/matrix_free/dof_info.cc
source/multigrid/mg_transfer_global_coarsening.cc
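
The files above are *.templates.h headers, which deal.II includes from several
translation units. A non-template function defined in such a header without
`inline` is emitted with external linkage by every translation unit that
includes it, which violates the one-definition rule and produces
duplicate-symbol link errors. The commit applies two remedies: mark functions
that must stay in the header `inline`, or move them into a single .cc file.
A minimal sketch of the failure mode and both fixes, using hypothetical names
rather than deal.II code:

    // some.templates.h -- included from a.cc and b.cc

    // BAD: each translation unit that includes this header emits its own
    // external definition of f(); the linker reports a duplicate symbol.
    int f() { return 42; }

    // Remedy 1: 'inline' permits identical definitions in every
    // translation unit; the linker merges them into one.
    inline int g() { return 42; }

    // Remedy 2 (used for most functions in this commit): declare only ...
    int h();

    // ... and keep the single definition in some.cc:
    // int h() { return 42; }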

diff --git a/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h b/include/deal.II/base/mpi_noncontiguous_partitioner.templates.h
index e510b3ce563f00af327f38ac33e7409afb51fd42..e3a1447d703492d59cc447f4352e7d18c8eff87e 100644
@@ -32,222 +32,6 @@ namespace Utilities
 {
   namespace MPI
   {
-    NoncontiguousPartitioner::NoncontiguousPartitioner(
-      const IndexSet &indexset_has,
-      const IndexSet &indexset_want,
-      const MPI_Comm &communicator)
-    {
-      this->reinit(indexset_has, indexset_want, communicator);
-    }
-
-
-
-    NoncontiguousPartitioner::NoncontiguousPartitioner(
-      const std::vector<types::global_dof_index> &indices_has,
-      const std::vector<types::global_dof_index> &indices_want,
-      const MPI_Comm &                            communicator)
-    {
-      this->reinit(indices_has, indices_want, communicator);
-    }
-
-
-
-    std::pair<unsigned int, unsigned int>
-    NoncontiguousPartitioner::n_targets() const
-    {
-      return {send_ranks.size(), recv_ranks.size()};
-    }
-
-
-
-    unsigned int
-    NoncontiguousPartitioner::temporary_storage_size() const
-    {
-      return send_ptr.back();
-    }
-
-
-
-    types::global_dof_index
-    NoncontiguousPartitioner::memory_consumption()
-    {
-      return MemoryConsumption::memory_consumption(send_ranks) +
-             MemoryConsumption::memory_consumption(send_ptr) +
-             MemoryConsumption::memory_consumption(send_indices) +
-             MemoryConsumption::memory_consumption(recv_ranks) +
-             MemoryConsumption::memory_consumption(recv_ptr) +
-             MemoryConsumption::memory_consumption(recv_indices) +
-             MemoryConsumption::memory_consumption(buffers) +
-             MemoryConsumption::memory_consumption(requests);
-    }
-
-
-
-    const MPI_Comm &
-    NoncontiguousPartitioner::get_mpi_communicator() const
-    {
-      return communicator;
-    }
-
-
-
-    void
-    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
-                                     const IndexSet &indexset_want,
-                                     const MPI_Comm &communicator)
-    {
-      this->communicator = communicator;
-
-      // clean up
-      send_ranks.clear();
-      send_ptr.clear();
-      send_indices.clear();
-      recv_ranks.clear();
-      recv_ptr.clear();
-      recv_indices.clear();
-      buffers.clear();
-      requests.clear();
-
-      // setup communication pattern
-      std::vector<unsigned int> owning_ranks_of_ghosts(
-        indexset_want.n_elements());
-
-      // set up dictionary
-      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
-        process(indexset_has,
-                indexset_want,
-                communicator,
-                owning_ranks_of_ghosts,
-                true);
-
-      Utilities::MPI::ConsensusAlgorithms::Selector<
-        std::pair<types::global_dof_index, types::global_dof_index>,
-        unsigned int>
-        consensus_algorithm(process, communicator);
-      consensus_algorithm.run();
-
-      // setup map of processes from where this rank will receive values
-      {
-        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;
-
-        for (const auto &owner : owning_ranks_of_ghosts)
-          recv_map[owner] = std::vector<types::global_dof_index>();
-
-        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
-             i++)
-          recv_map[owning_ranks_of_ghosts[i]].push_back(i);
-
-        recv_ptr.push_back(recv_indices.size() /*=0*/);
-        for (const auto &target_with_indexset : recv_map)
-          {
-            recv_ranks.push_back(target_with_indexset.first);
-
-            for (const auto cell_index : target_with_indexset.second)
-              recv_indices.push_back(cell_index);
-
-            recv_ptr.push_back(recv_indices.size());
-          }
-      }
-
-      {
-        const auto targets_with_indexset = process.get_requesters();
-
-        send_ptr.push_back(recv_ptr.back());
-        for (const auto &target_with_indexset : targets_with_indexset)
-          {
-            send_ranks.push_back(target_with_indexset.first);
-
-            for (const auto cell_index : target_with_indexset.second)
-              send_indices.push_back(indexset_has.index_within_set(cell_index));
-
-            send_ptr.push_back(send_indices.size() + recv_ptr.back());
-          }
-      }
-    }
-
-
-
-    void
-    NoncontiguousPartitioner::reinit(
-      const std::vector<types::global_dof_index> &indices_has,
-      const std::vector<types::global_dof_index> &indices_want,
-      const MPI_Comm &                            communicator)
-    {
-      // step 0) clean vectors from numbers::invalid_dof_index (indicating
-      //         padding)
-      std::vector<types::global_dof_index> indices_has_clean;
-      indices_has_clean.reserve(indices_has.size());
-
-      for (const auto i : indices_has)
-        if (i != numbers::invalid_dof_index)
-          indices_has_clean.push_back(i);
-
-      std::vector<types::global_dof_index> indices_want_clean;
-      indices_want_clean.reserve(indices_want.size());
-
-      for (const auto i : indices_want)
-        if (i != numbers::invalid_dof_index)
-          indices_want_clean.push_back(i);
-
-      // step 0) determine "number of degrees of freedom" needed for IndexSet
-      const types::global_dof_index local_n_dofs_has =
-        indices_has_clean.empty() ?
-          0 :
-          (*std::max_element(indices_has_clean.begin(),
-                             indices_has_clean.end()) +
-           1);
-
-      const types::global_dof_index local_n_dofs_want =
-        indices_want_clean.empty() ?
-          0 :
-          (*std::max_element(indices_want_clean.begin(),
-                             indices_want_clean.end()) +
-           1);
-
-      const types::global_dof_index n_dofs =
-        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
-                            communicator);
-
-      // step 1) convert vectors to indexsets (sorted!)
-      IndexSet index_set_has(n_dofs);
-      index_set_has.add_indices(indices_has_clean.begin(),
-                                indices_has_clean.end());
-
-      IndexSet index_set_want(n_dofs);
-      index_set_want.add_indices(indices_want_clean.begin(),
-                                 indices_want_clean.end());
-
-      // step 2) setup internal data structures with indexset
-      this->reinit(index_set_has, index_set_want, communicator);
-
-      // step 3) fix inner data structures so that it is sorted as
-      //         in the original vector
-      {
-        std::vector<types::global_dof_index> temp_map_send(
-          index_set_has.n_elements());
-
-        for (types::global_dof_index i = 0; i < indices_has.size(); i++)
-          if (indices_has[i] != numbers::invalid_dof_index)
-            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;
-
-        for (auto &i : send_indices)
-          i = temp_map_send[i];
-      }
-
-      {
-        std::vector<types::global_dof_index> temp_map_recv(
-          index_set_want.n_elements());
-
-        for (types::global_dof_index i = 0; i < indices_want.size(); i++)
-          if (indices_want[i] != numbers::invalid_dof_index)
-            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;
-
-        for (auto &i : recv_indices)
-          i = temp_map_recv[i];
-      }
-    }
-
-
     template <typename Number>
     void
     NoncontiguousPartitioner::export_to_ghosted_array(
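
The hunk ends where the template function export_to_ghosted_array begins:
function templates may stay in the header, because a template may be defined
in every translation unit that uses it without violating the one-definition
rule. Only the non-template members removed above had to move into
source/base/mpi_noncontiguous_partitioner.cc. The distinction, sketched with
hypothetical names:

    // header included by many .cc files
    template <typename Number>
    void scale(Number &x)   // fine: templates are exempt from the
    {                       // duplicate-definition problem
      x *= Number(2);
    }

    void reset(int &x)      // not fine: an ordinary function needs 'inline'
    {                       // here, or a home in exactly one .cc file
      x = 0;
    }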
diff --git a/include/deal.II/fe/fe_tools.templates.h b/include/deal.II/fe/fe_tools.templates.h
index b9e79caebc0c6b37d269798515653144b3a372ff..7ffc2beeb959acd45ec9ee3fbd6a0bb38495fe9f 100644
@@ -1030,115 +1030,6 @@ namespace FETools
     return nullptr;
   }
 
-  // Specializations for FE_Q.
-  template <>
-  std::unique_ptr<FiniteElement<1, 1>>
-  FEFactory<FE_Q<1, 1>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q<1>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<2, 2>>
-  FEFactory<FE_Q<2, 2>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q<2>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<3, 3>>
-  FEFactory<FE_Q<3, 3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q<3>>(quad);
-  }
-
-  // Specializations for FE_Q_DG0.
-  template <>
-  std::unique_ptr<FiniteElement<1, 1>>
-  FEFactory<FE_Q_DG0<1, 1>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_DG0<1>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<2, 2>>
-  FEFactory<FE_Q_DG0<2, 2>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_DG0<2>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<3, 3>>
-  FEFactory<FE_Q_DG0<3, 3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_DG0<3>>(quad);
-  }
-
-  // Specializations for FE_Q_Bubbles.
-  template <>
-  std::unique_ptr<FiniteElement<1, 1>>
-  FEFactory<FE_Q_Bubbles<1, 1>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_Bubbles<1>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<2, 2>>
-  FEFactory<FE_Q_Bubbles<2, 2>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_Bubbles<2>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<3, 3>>
-  FEFactory<FE_Q_Bubbles<3, 3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_Q_Bubbles<3>>(quad);
-  }
-
-  // Specializations for FE_DGQArbitraryNodes.
-  template <>
-  std::unique_ptr<FiniteElement<1, 1>>
-  FEFactory<FE_DGQ<1>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<1>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<1, 2>>
-  FEFactory<FE_DGQ<1, 2>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<1, 2>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<1, 3>>
-  FEFactory<FE_DGQ<1, 3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<1, 3>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<2, 2>>
-  FEFactory<FE_DGQ<2>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<2>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<2, 3>>
-  FEFactory<FE_DGQ<2, 3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<2, 3>>(quad);
-  }
-
-  template <>
-  std::unique_ptr<FiniteElement<3, 3>>
-  FEFactory<FE_DGQ<3>>::get(const Quadrature<1> &quad) const
-  {
-    return std::make_unique<FE_DGQArbitraryNodes<3>>(quad);
-  }
-
 
 
   namespace internal
@@ -1249,8 +1140,7 @@ namespace FETools
       std::array<
         std::array<std::map<std::string, std::unique_ptr<const Subscriptor>>,
                    4>,
-        4>
-      fill_default_map()
+        4> inline fill_default_map()
       {
         std::array<
           std::array<std::map<std::string, std::unique_ptr<const Subscriptor>>,
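
Note the placement of `inline` after the closing `4>` of the long return type
in the hunk above. `inline` is a decl-specifier, and decl-specifiers may
appear in any order, so the declaration is equivalent to one with `inline`
written first; the unusual position here is presumably an artifact of
automated formatting. A small sketch of the equivalence:

    // These declarations mean the same thing; 'inline' may precede
    // or follow the return type within the decl-specifier-seq.
    inline unsigned int first();
    unsigned int inline second();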
diff --git a/include/deal.II/grid/connectivity.h b/include/deal.II/grid/connectivity.h
index 0f845c8d54c02fa38abec18e50603b3d26c9a401..93e5e9a6a944d1b8767767606522c5bfcdfda758 100644
@@ -1240,7 +1240,7 @@ namespace internal
      *
      * Furthermore, the type of the quad is determined.
      */
-    void
+    inline void
     build_intersection(
       const std::vector<std::shared_ptr<CellTypeBase>> &cell_types,
       const std::vector<dealii::ReferenceCell> &        cell_types_index,
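
build_intersection is an ordinary (non-template) function defined in a header
that several source files include, so it receives the `inline` remedy rather
than a move: every translation unit then carries an identical definition, and
the linker merges them. A self-contained sketch of the pattern, with
hypothetical names:

    // merge.h -- header-defined helper, safe to include everywhere
    #include <vector>

    inline void
    append_all(std::vector<int> &out, const std::vector<int> &in)
    {
      // identical definition in every including translation unit;
      // merged at link time thanks to 'inline'
      out.insert(out.end(), in.begin(), in.end());
    }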
diff --git a/include/deal.II/lac/relaxation_block.templates.h b/include/deal.II/lac/relaxation_block.templates.h
index 56e586e10c83b06d62b75e0e0dc568353b63b77c..3118a61858075d7fbb6ebb623e673f39ed1a74af 100644
@@ -197,7 +197,7 @@ namespace internal
    * Specialization for Trilinos. Use the ghosted vector.
    */
   template <>
-  const TrilinosWrappers::MPI::Vector &
+  inline const TrilinosWrappers::MPI::Vector &
   prepare_ghost_vector(const TrilinosWrappers::MPI::Vector &prev,
                        TrilinosWrappers::MPI::Vector *      other)
   {
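
The prepare_ghost_vector hunk fixes a subtler case: an explicit (full)
specialization of a function template is an ordinary function, not a
template, so it is not implicitly inline even though the primary template in
the same header is. Defined in a header, it therefore needs an explicit
`inline`. A sketch of the rule, with hypothetical names:

    // header included by many .cc files
    template <typename T>
    const T &prepare(const T &prev, T *)  // primary template: fine as-is
    {
      return prev;
    }

    template <>                           // full specialization: an ordinary
    inline const int &                    // function, so it must be marked
    prepare(const int &prev, int *)       // 'inline' to live in a header
    {
      return prev;
    }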
diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h
index f53aae721da93b62f529aa797b740b90fdd65a8e..296e7d339b8600f677cbf6f4200bc8d5a1e76e6e 100644
@@ -101,114 +101,6 @@ namespace internal
 
     // ----------------- actual DoFInfo functions -----------------------------
 
-    DoFInfo::DoFInfo()
-    {
-      clear();
-    }
-
-
-
-    void
-    DoFInfo::clear()
-    {
-      row_starts.clear();
-      dof_indices.clear();
-      constraint_indicator.clear();
-      vector_partitioner.reset();
-      ghost_dofs.clear();
-      dofs_per_cell.clear();
-      dofs_per_face.clear();
-      vectorization_length       = 1;
-      dimension                  = 2;
-      global_base_element_offset = 0;
-      n_base_elements            = 0;
-      n_components.clear();
-      start_components.clear();
-      row_starts_plain_indices.clear();
-      plain_dof_indices.clear();
-      dof_indices_interleaved.clear();
-      for (unsigned int i = 0; i < 3; ++i)
-        {
-          index_storage_variants[i].clear();
-          dof_indices_contiguous[i].clear();
-          dof_indices_interleave_strides[i].clear();
-          n_vectorization_lanes_filled[i].clear();
-        }
-      store_plain_indices = false;
-      cell_active_fe_index.clear();
-      max_fe_index = 0;
-      fe_index_conversion.clear();
-    }
-
-
-
-    void
-    DoFInfo::get_dof_indices_on_cell_batch(std::vector<unsigned int> &my_rows,
-                                           const unsigned int         cell,
-                                           const bool apply_constraints) const
-    {
-      const unsigned int n_fe_components = start_components.back();
-      const unsigned int fe_index =
-        dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell];
-      const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
-
-      const unsigned int n_vectorization  = vectorization_length;
-      constexpr auto     dof_access_index = dof_access_cell;
-      AssertIndexRange(cell,
-                       n_vectorization_lanes_filled[dof_access_index].size());
-      const unsigned int n_vectorization_actual =
-        n_vectorization_lanes_filled[dof_access_index][cell];
-
-      // we might have constraints, so the final number
-      // of indices is not known a priori.
-      // conservatively reserve the maximum without constraints
-      my_rows.reserve(n_vectorization * dofs_this_cell);
-      my_rows.resize(0);
-      unsigned int total_size = 0;
-      for (unsigned int v = 0; v < n_vectorization_actual; ++v)
-        {
-          const unsigned int ib =
-            (cell * n_vectorization + v) * n_fe_components;
-          const unsigned int ie =
-            (cell * n_vectorization + v + 1) * n_fe_components;
-
-          // figure out constraints by comparing constraint_indicator row
-          // shift for this cell within the block as compared to the next
-          // one
-          const bool has_constraints =
-            row_starts[ib].second != row_starts[ib + n_fe_components].second;
-
-          auto do_copy = [&](const unsigned int *begin,
-                             const unsigned int *end) {
-            const unsigned int shift = total_size;
-            total_size += (end - begin);
-            my_rows.resize(total_size);
-            std::copy(begin, end, my_rows.begin() + shift);
-          };
-
-          if (!has_constraints || apply_constraints)
-            {
-              const unsigned int *begin =
-                dof_indices.data() + row_starts[ib].first;
-              const unsigned int *end =
-                dof_indices.data() + row_starts[ie].first;
-              do_copy(begin, end);
-            }
-          else
-            {
-              Assert(row_starts_plain_indices[cell * n_vectorization + v] !=
-                       numbers::invalid_unsigned_int,
-                     ExcNotInitialized());
-              const unsigned int *begin =
-                plain_dof_indices.data() +
-                row_starts_plain_indices[cell * n_vectorization + v];
-              const unsigned int *end = begin + dofs_this_cell;
-              do_copy(begin, end);
-            }
-        }
-    }
-
-
     template <typename number>
     void
     DoFInfo::read_dof_indices(
@@ -381,668 +273,6 @@ namespace internal
 
 
 
-    void
-    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
-                           const MPI_Comm &                 communicator_sm,
-                           const bool use_vector_data_exchanger_full)
-    {
-      Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());
-
-      // sort ghost dofs and compress out duplicates
-      const unsigned int n_owned  = (vector_partitioner->local_range().second -
-                                    vector_partitioner->local_range().first);
-      const std::size_t  n_ghosts = ghost_dofs.size();
-#ifdef DEBUG
-      for (const auto dof_index : dof_indices)
-        AssertIndexRange(dof_index, n_owned + n_ghosts);
-#endif
-
-      const unsigned int        n_components = start_components.back();
-      std::vector<unsigned int> ghost_numbering(n_ghosts);
-      IndexSet                  ghost_indices(vector_partitioner->size());
-      if (n_ghosts > 0)
-        {
-          unsigned int n_unique_ghosts = 0;
-          // since we need to go back to the local_to_global indices and
-          // replace the temporary numbering of ghosts by the real number in
-          // the index set, we need to store these values
-          std::vector<std::pair<types::global_dof_index, unsigned int>>
-            ghost_origin(n_ghosts);
-          for (std::size_t i = 0; i < n_ghosts; ++i)
-            {
-              ghost_origin[i].first  = ghost_dofs[i];
-              ghost_origin[i].second = i;
-            }
-          std::sort(ghost_origin.begin(), ghost_origin.end());
-
-          types::global_dof_index last_contiguous_start = ghost_origin[0].first;
-          ghost_numbering[ghost_origin[0].second]       = 0;
-          for (std::size_t i = 1; i < n_ghosts; i++)
-            {
-              if (ghost_origin[i].first > ghost_origin[i - 1].first + 1)
-                {
-                  ghost_indices.add_range(last_contiguous_start,
-                                          ghost_origin[i - 1].first + 1);
-                  last_contiguous_start = ghost_origin[i].first;
-                }
-              if (ghost_origin[i].first > ghost_origin[i - 1].first)
-                ++n_unique_ghosts;
-              ghost_numbering[ghost_origin[i].second] = n_unique_ghosts;
-            }
-          ++n_unique_ghosts;
-          ghost_indices.add_range(last_contiguous_start,
-                                  ghost_origin.back().first + 1);
-          ghost_indices.compress();
-
-          // make sure that we got the correct local numbering of the ghost
-          // dofs. the ghost index set should store the same number
-          {
-            AssertDimension(n_unique_ghosts, ghost_indices.n_elements());
-            for (std::size_t i = 0; i < n_ghosts; ++i)
-              Assert(ghost_numbering[i] ==
-                       ghost_indices.index_within_set(ghost_dofs[i]),
-                     ExcInternalError());
-          }
-
-          // apply correct numbering for ghost indices: We previously just
-          // enumerated them according to their appearance in the
-          // local_to_global structure. Above, we derived a relation between
-          // this enumeration and the actual number
-          const unsigned int n_boundary_cells = boundary_cells.size();
-          for (unsigned int i = 0; i < n_boundary_cells; ++i)
-            {
-              unsigned int *data_ptr =
-                dof_indices.data() +
-                row_starts[boundary_cells[i] * n_components].first;
-              const unsigned int *row_end =
-                dof_indices.data() +
-                row_starts[(boundary_cells[i] + 1) * n_components].first;
-              for (; data_ptr != row_end; ++data_ptr)
-                *data_ptr = ((*data_ptr < n_owned) ?
-                               *data_ptr :
-                               n_owned + ghost_numbering[*data_ptr - n_owned]);
-
-              // now the same procedure for plain indices
-              if (store_plain_indices == true)
-                {
-                  if (row_starts[boundary_cells[i] * n_components].second !=
-                      row_starts[(boundary_cells[i] + 1) * n_components].second)
-                    {
-                      unsigned int *data_ptr =
-                        plain_dof_indices.data() +
-                        row_starts_plain_indices[boundary_cells[i]];
-                      const unsigned int fe_index =
-                        (cell_active_fe_index.size() == 0 ||
-                         dofs_per_cell.size() == 1) ?
-                          0 :
-                          cell_active_fe_index[boundary_cells[i]];
-                      AssertIndexRange(fe_index, dofs_per_cell.size());
-                      const unsigned int *row_end =
-                        data_ptr + dofs_per_cell[fe_index];
-                      for (; data_ptr != row_end; ++data_ptr)
-                        *data_ptr =
-                          ((*data_ptr < n_owned) ?
-                             *data_ptr :
-                             n_owned + ghost_numbering[*data_ptr - n_owned]);
-                    }
-                }
-            }
-        }
-
-      std::vector<types::global_dof_index> empty;
-      ghost_dofs.swap(empty);
-
-      // set the ghost indices now. need to cast away constness here, but that
-      // is uncritical since we reset the Partitioner in the same initialize
-      // call as this call here.
-      Utilities::MPI::Partitioner *vec_part =
-        const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
-      vec_part->set_ghost_indices(ghost_indices);
-
-      if (use_vector_data_exchanger_full == false)
-        vector_exchanger =
-          std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                             PartitionerWrapper>(vector_partitioner);
-      else
-        vector_exchanger = std::make_shared<
-          internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-          vector_partitioner, communicator_sm);
-    }
-
-
-
-    void
-    DoFInfo::reorder_cells(
-      const TaskInfo &                  task_info,
-      const std::vector<unsigned int> & renumbering,
-      const std::vector<unsigned int> & constraint_pool_row_index,
-      const std::vector<unsigned char> &irregular_cells)
-    {
-      (void)constraint_pool_row_index;
-
-      // first reorder the active FE index.
-      const bool have_hp = dofs_per_cell.size() > 1;
-      if (cell_active_fe_index.size() > 0)
-        {
-          std::vector<unsigned int> new_active_fe_index;
-          new_active_fe_index.reserve(task_info.cell_partition_data.back());
-          unsigned int position_cell = 0;
-          for (unsigned int cell = 0;
-               cell < task_info.cell_partition_data.back();
-               ++cell)
-            {
-              const unsigned int n_comp =
-                (irregular_cells[cell] > 0 ? irregular_cells[cell] :
-                                             vectorization_length);
-
-              // take maximum FE index among the ones present (we might have
-              // lumped some lower indices into higher ones)
-              unsigned int fe_index =
-                cell_active_fe_index[renumbering[position_cell]];
-              for (unsigned int j = 1; j < n_comp; ++j)
-                fe_index = std::max(
-                  fe_index,
-                  cell_active_fe_index[renumbering[position_cell + j]]);
-
-              new_active_fe_index.push_back(fe_index);
-              position_cell += n_comp;
-            }
-          std::swap(new_active_fe_index, cell_active_fe_index);
-        }
-      if (have_hp)
-        AssertDimension(cell_active_fe_index.size(),
-                        task_info.cell_partition_data.back());
-
-      const unsigned int n_components = start_components.back();
-
-      std::vector<std::pair<unsigned int, unsigned int>> new_row_starts(
-        vectorization_length * n_components *
-          task_info.cell_partition_data.back() +
-        1);
-      std::vector<unsigned int> new_dof_indices;
-      std::vector<std::pair<unsigned short, unsigned short>>
-                                new_constraint_indicator;
-      std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
-      unsigned int              position_cell = 0;
-      new_dof_indices.reserve(dof_indices.size());
-      new_constraint_indicator.reserve(constraint_indicator.size());
-      if (store_plain_indices == true)
-        {
-          new_rowstart_plain.resize(vectorization_length *
-                                        task_info.cell_partition_data.back() +
-                                      1,
-                                    numbers::invalid_unsigned_int);
-          new_plain_indices.reserve(plain_dof_indices.size());
-        }
-
-      // copy the indices and the constraint indicators to the new data field,
-      // where we will go through the cells in the renumbered way. in case the
-      // vectorization length does not exactly match up, we fill invalid
-      // numbers to the rowstart data. for contiguous cell indices, we skip
-      // the rowstarts field completely and directly go into the
-      // new_dof_indices field (this layout is used in FEEvaluation).
-      for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i)
-        {
-          const unsigned int n_vect =
-            (irregular_cells[i] > 0 ? irregular_cells[i] :
-                                      vectorization_length);
-          const unsigned int dofs_per_cell =
-            have_hp ? this->dofs_per_cell[cell_active_fe_index[i]] :
-                      this->dofs_per_cell[0];
-
-          for (unsigned int j = 0; j < n_vect; ++j)
-            {
-              const unsigned int cell_no =
-                renumbering[position_cell + j] * n_components;
-              for (unsigned int comp = 0; comp < n_components; ++comp)
-                {
-                  new_row_starts[(i * vectorization_length + j) * n_components +
-                                 comp]
-                    .first = new_dof_indices.size();
-                  new_row_starts[(i * vectorization_length + j) * n_components +
-                                 comp]
-                    .second = new_constraint_indicator.size();
-
-                  new_dof_indices.insert(
-                    new_dof_indices.end(),
-                    dof_indices.data() + row_starts[cell_no + comp].first,
-                    dof_indices.data() + row_starts[cell_no + comp + 1].first);
-                  for (unsigned int index = row_starts[cell_no + comp].second;
-                       index != row_starts[cell_no + comp + 1].second;
-                       ++index)
-                    new_constraint_indicator.push_back(
-                      constraint_indicator[index]);
-                }
-              if (store_plain_indices &&
-                  row_starts[cell_no].second !=
-                    row_starts[cell_no + n_components].second)
-                {
-                  new_rowstart_plain[i * vectorization_length + j] =
-                    new_plain_indices.size();
-                  new_plain_indices.insert(
-                    new_plain_indices.end(),
-                    plain_dof_indices.data() +
-                      row_starts_plain_indices[cell_no / n_components],
-                    plain_dof_indices.data() +
-                      row_starts_plain_indices[cell_no / n_components] +
-                      dofs_per_cell);
-                }
-            }
-          for (unsigned int j = n_vect; j < vectorization_length; ++j)
-            for (unsigned int comp = 0; comp < n_components; ++comp)
-              {
-                new_row_starts[(i * vectorization_length + j) * n_components +
-                               comp]
-                  .first = new_dof_indices.size();
-                new_row_starts[(i * vectorization_length + j) * n_components +
-                               comp]
-                  .second = new_constraint_indicator.size();
-              }
-          position_cell += n_vect;
-        }
-      AssertDimension(position_cell * n_components + 1, row_starts.size());
-
-      AssertDimension(dof_indices.size(), new_dof_indices.size());
-      new_row_starts[task_info.cell_partition_data.back() *
-                     vectorization_length * n_components]
-        .first = new_dof_indices.size();
-      new_row_starts[task_info.cell_partition_data.back() *
-                     vectorization_length * n_components]
-        .second = new_constraint_indicator.size();
-
-      AssertDimension(constraint_indicator.size(),
-                      new_constraint_indicator.size());
-
-      new_row_starts.swap(row_starts);
-      new_dof_indices.swap(dof_indices);
-      new_constraint_indicator.swap(constraint_indicator);
-      new_plain_indices.swap(plain_dof_indices);
-      new_rowstart_plain.swap(row_starts_plain_indices);
-
-#ifdef DEBUG
-      // sanity check 1: all indices should be smaller than the number of dofs
-      // locally owned plus the number of ghosts
-      const unsigned int index_range =
-        (vector_partitioner->local_range().second -
-         vector_partitioner->local_range().first) +
-        vector_partitioner->ghost_indices().n_elements();
-      for (const auto dof_index : dof_indices)
-        AssertIndexRange(dof_index, index_range);
-
-      // sanity check 2: for the constraint indicators, the first index should
-      // be smaller than the number of indices in the row, and the second
-      // index should be smaller than the number of constraints in the
-      // constraint pool.
-      for (unsigned int row = 0; row < task_info.cell_partition_data.back();
-           ++row)
-        {
-          const unsigned int row_length_ind =
-            row_starts[(row * vectorization_length + 1) * n_components].first -
-            row_starts[row * vectorization_length * n_components].first;
-          AssertIndexRange(
-            row_starts[(row * vectorization_length + 1) * n_components].second,
-            constraint_indicator.size() + 1);
-          const std::pair<unsigned short, unsigned short> *
-            con_it =
-             constraint_indicator.data() +
-             row_starts[row * vectorization_length * n_components].second,
-           *end_con =
-             constraint_indicator.data() +
-             row_starts[(row * vectorization_length + 1) * n_components].second;
-          for (; con_it != end_con; ++con_it)
-            {
-              AssertIndexRange(con_it->first, row_length_ind + 1);
-              AssertIndexRange(con_it->second,
-                               constraint_pool_row_index.size() - 1);
-            }
-        }
-
-      // sanity check 3: check the number of cells once again
-      unsigned int n_active_cells = 0;
-      for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
-           ++c)
-        if (irregular_cells[c] > 0)
-          n_active_cells += irregular_cells[c];
-        else
-          n_active_cells += vectorization_length;
-      AssertDimension(n_active_cells, task_info.n_active_cells);
-#endif
-
-      compute_cell_index_compression(irregular_cells);
-    }
-
-
-
-    void
-    DoFInfo::compute_cell_index_compression(
-      const std::vector<unsigned char> &irregular_cells)
-    {
-      const bool         have_hp      = dofs_per_cell.size() > 1;
-      const unsigned int n_components = start_components.back();
-
-      Assert(vectorization_length == 1 ||
-               row_starts.size() % vectorization_length == 1,
-             ExcInternalError());
-      if (vectorization_length > 1)
-        AssertDimension(row_starts.size() / vectorization_length / n_components,
-                        irregular_cells.size());
-      index_storage_variants[dof_access_cell].resize(
-        irregular_cells.size(), IndexStorageVariants::full);
-      n_vectorization_lanes_filled[dof_access_cell].resize(
-        irregular_cells.size());
-      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-        if (irregular_cells[i] > 0)
-          n_vectorization_lanes_filled[dof_access_cell][i] = irregular_cells[i];
-        else
-          n_vectorization_lanes_filled[dof_access_cell][i] =
-            vectorization_length;
-
-      dof_indices_contiguous[dof_access_cell].resize(
-        irregular_cells.size() * vectorization_length,
-        numbers::invalid_unsigned_int);
-      dof_indices_interleaved.resize(dof_indices.size(),
-                                     numbers::invalid_unsigned_int);
-      dof_indices_interleave_strides[dof_access_cell].resize(
-        irregular_cells.size() * vectorization_length,
-        numbers::invalid_unsigned_int);
-
-      std::vector<unsigned int> index_kinds(
-        static_cast<unsigned int>(
-          IndexStorageVariants::interleaved_contiguous_mixed_strides) +
-        1);
-      std::vector<unsigned int> offsets(vectorization_length);
-      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-        {
-          const unsigned int ndofs =
-            dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
-          const unsigned int n_comp =
-            n_vectorization_lanes_filled[dof_access_cell][i];
-
-          // check 1: Check if there are constraints -> no compression possible
-          bool has_constraints = false;
-          for (unsigned int j = 0; j < n_comp; ++j)
-            {
-              const unsigned int cell_no = i * vectorization_length + j;
-              if (row_starts[cell_no * n_components].second !=
-                  row_starts[(cell_no + 1) * n_components].second)
-                {
-                  has_constraints = true;
-                  break;
-                }
-            }
-          if (has_constraints)
-            index_storage_variants[dof_access_cell][i] =
-              IndexStorageVariants::full;
-          else
-            {
-              bool indices_are_contiguous = true;
-              for (unsigned int j = 0; j < n_comp; ++j)
-                {
-                  const unsigned int  cell_no = i * vectorization_length + j;
-                  const unsigned int *dof_indices =
-                    this->dof_indices.data() +
-                    row_starts[cell_no * n_components].first;
-                  AssertDimension(
-                    ndofs,
-                    row_starts[(cell_no + 1) * n_components].first -
-                      row_starts[cell_no * n_components].first);
-                  for (unsigned int i = 1; i < ndofs; ++i)
-                    if (dof_indices[i] != dof_indices[0] + i)
-                      {
-                        indices_are_contiguous = false;
-                        break;
-                      }
-                }
-
-              bool indices_are_interleaved_and_contiguous =
-                (ndofs > 1 && n_comp == vectorization_length);
-
-              {
-                const unsigned int *dof_indices =
-                  this->dof_indices.data() +
-                  row_starts[i * vectorization_length * n_components].first;
-                for (unsigned int k = 0; k < ndofs; ++k)
-                  for (unsigned int j = 0; j < n_comp; ++j)
-                    if (dof_indices[j * ndofs + k] !=
-                        dof_indices[0] + k * n_comp + j)
-                      {
-                        indices_are_interleaved_and_contiguous = false;
-                        break;
-                      }
-              }
-
-              if (indices_are_contiguous ||
-                  indices_are_interleaved_and_contiguous)
-                {
-                  for (unsigned int j = 0; j < n_comp; ++j)
-                    dof_indices_contiguous
-                      [dof_access_cell][i * vectorization_length + j] =
-                        this->dof_indices[row_starts[(i * vectorization_length +
-                                                      j) *
-                                                     n_components]
-                                            .first];
-                }
-
-              if (indices_are_interleaved_and_contiguous)
-                {
-                  Assert(n_comp == vectorization_length, ExcInternalError());
-                  index_storage_variants[dof_access_cell][i] =
-                    IndexStorageVariants::interleaved_contiguous;
-                  for (unsigned int j = 0; j < n_comp; ++j)
-                    dof_indices_interleave_strides[2][i * vectorization_length +
-                                                      j] = n_comp;
-                }
-              else if (indices_are_contiguous)
-                {
-                  index_storage_variants[dof_access_cell][i] =
-                    IndexStorageVariants::contiguous;
-                  for (unsigned int j = 0; j < n_comp; ++j)
-                    dof_indices_interleave_strides[2][i * vectorization_length +
-                                                      j] = 1;
-                }
-              else
-                {
-                  int                 indices_are_interleaved_and_mixed = 2;
-                  const unsigned int *dof_indices =
-                    &this->dof_indices[row_starts[i * vectorization_length *
-                                                  n_components]
-                                         .first];
-                  for (unsigned int j = 0; j < n_comp; ++j)
-                    offsets[j] =
-                      dof_indices[j * ndofs + 1] - dof_indices[j * ndofs];
-                  for (unsigned int k = 0; k < ndofs; ++k)
-                    for (unsigned int j = 0; j < n_comp; ++j)
-                      // the first if case is to avoid negative offsets
-                      // (invalid)
-                      if (dof_indices[j * ndofs + 1] < dof_indices[j * ndofs] ||
-                          dof_indices[j * ndofs + k] !=
-                            dof_indices[j * ndofs] + k * offsets[j])
-                        {
-                          indices_are_interleaved_and_mixed = 0;
-                          break;
-                        }
-                  if (indices_are_interleaved_and_mixed == 2)
-                    {
-                      for (unsigned int j = 0; j < n_comp; ++j)
-                        dof_indices_interleave_strides
-                          [dof_access_cell][i * vectorization_length + j] =
-                            offsets[j];
-                      for (unsigned int j = 0; j < n_comp; ++j)
-                        dof_indices_contiguous[dof_access_cell]
-                                              [i * vectorization_length + j] =
-                                                dof_indices[j * ndofs];
-                      for (unsigned int j = 0; j < n_comp; ++j)
-                        if (offsets[j] != vectorization_length)
-                          {
-                            indices_are_interleaved_and_mixed = 1;
-                            break;
-                          }
-                      if (indices_are_interleaved_and_mixed == 1 ||
-                          n_comp != vectorization_length)
-                        index_storage_variants[dof_access_cell][i] =
-                          IndexStorageVariants::
-                            interleaved_contiguous_mixed_strides;
-                      else
-                        index_storage_variants[dof_access_cell][i] =
-                          IndexStorageVariants::interleaved_contiguous_strided;
-                    }
-                  else
-                    {
-                      const unsigned int *dof_indices =
-                        this->dof_indices.data() +
-                        row_starts[i * vectorization_length * n_components]
-                          .first;
-                      if (n_comp == vectorization_length)
-                        index_storage_variants[dof_access_cell][i] =
-                          IndexStorageVariants::interleaved;
-                      else
-                        index_storage_variants[dof_access_cell][i] =
-                          IndexStorageVariants::full;
-
-                      // do not use interleaved storage if two vectorized
-                      // components point to the same field (scatter not
-                      // possible)
-                      for (unsigned int k = 0; k < ndofs; ++k)
-                        for (unsigned int l = 0; l < n_comp; ++l)
-                          for (unsigned int j = l + 1; j < n_comp; ++j)
-                            if (dof_indices[j * ndofs + k] ==
-                                dof_indices[l * ndofs + k])
-                              {
-                                index_storage_variants[dof_access_cell][i] =
-                                  IndexStorageVariants::full;
-                                break;
-                              }
-                    }
-                }
-            }
-          index_kinds[static_cast<unsigned int>(
-            index_storage_variants[dof_access_cell][i])]++;
-        }
-
-      // Cleanup phase: we want to avoid single cells with different properties
-      // than the bulk of the domain in order to avoid extra checks in the face
-      // identification.
-
-      // Step 1: check whether the interleaved indices were only assigned to
-      // the single cell within a vectorized array.
-      auto fix_single_interleaved_indices =
-        [&](const IndexStorageVariants variant) {
-          if (index_kinds[static_cast<unsigned int>(
-                IndexStorageVariants::interleaved_contiguous_mixed_strides)] >
-                0 &&
-              index_kinds[static_cast<unsigned int>(variant)] > 0)
-            for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-              {
-                if (index_storage_variants[dof_access_cell][i] ==
-                      IndexStorageVariants::
-                        interleaved_contiguous_mixed_strides &&
-                    n_vectorization_lanes_filled[dof_access_cell][i] == 1 &&
-                    (variant != IndexStorageVariants::contiguous ||
-                     dof_indices_interleave_strides[dof_access_cell]
-                                                   [i * vectorization_length] ==
-                       1))
-                  {
-                    index_storage_variants[dof_access_cell][i] = variant;
-                    index_kinds[static_cast<unsigned int>(
-                      IndexStorageVariants::
-                        interleaved_contiguous_mixed_strides)]--;
-                    index_kinds[static_cast<unsigned int>(variant)]++;
-                  }
-              }
-        };
-
-      fix_single_interleaved_indices(IndexStorageVariants::full);
-      fix_single_interleaved_indices(IndexStorageVariants::contiguous);
-      fix_single_interleaved_indices(IndexStorageVariants::interleaved);
-
-      unsigned int n_interleaved =
-        index_kinds[static_cast<unsigned int>(
-          IndexStorageVariants::interleaved_contiguous)] +
-        index_kinds[static_cast<unsigned int>(
-          IndexStorageVariants::interleaved_contiguous_strided)] +
-        index_kinds[static_cast<unsigned int>(
-          IndexStorageVariants::interleaved_contiguous_mixed_strides)];
-
-      // Step 2: fix single contiguous cell among others with interleaved
-      // storage
-      if (n_interleaved > 0 && index_kinds[static_cast<unsigned int>(
-                                 IndexStorageVariants::contiguous)] > 0)
-        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-          if (index_storage_variants[dof_access_cell][i] ==
-              IndexStorageVariants::contiguous)
-            {
-              index_storage_variants[dof_access_cell][i] =
-                IndexStorageVariants::interleaved_contiguous_mixed_strides;
-              index_kinds[static_cast<unsigned int>(
-                IndexStorageVariants::contiguous)]--;
-              index_kinds[static_cast<unsigned int>(
-                IndexStorageVariants::interleaved_contiguous_mixed_strides)]++;
-            }
-
-      // Step 3: Interleaved cells are left but also some non-contiguous ones
-      // -> revert all to full storage
-      if (n_interleaved > 0 &&
-          index_kinds[static_cast<unsigned int>(IndexStorageVariants::full)] +
-              index_kinds[static_cast<unsigned int>(
-                IndexStorageVariants::interleaved)] >
-            0)
-        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-          if (index_storage_variants[dof_access_cell][i] >
-              IndexStorageVariants::contiguous)
-            {
-              index_kinds[static_cast<unsigned int>(
-                index_storage_variants[2][i])]--;
-              if (n_vectorization_lanes_filled[dof_access_cell][i] ==
-                  vectorization_length)
-                index_storage_variants[dof_access_cell][i] =
-                  IndexStorageVariants::interleaved;
-              else
-                index_storage_variants[dof_access_cell][i] =
-                  IndexStorageVariants::full;
-              index_kinds[static_cast<unsigned int>(
-                index_storage_variants[dof_access_cell][i])]++;
-            }
-
-      // Step 4: Copy the interleaved indices into their own data structure
-      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
-        if (index_storage_variants[dof_access_cell][i] ==
-            IndexStorageVariants::interleaved)
-          {
-            if (n_vectorization_lanes_filled[dof_access_cell][i] <
-                vectorization_length)
-              {
-                index_storage_variants[dof_access_cell][i] =
-                  IndexStorageVariants::full;
-                continue;
-              }
-            const unsigned int ndofs =
-              dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
-            const unsigned int *dof_indices =
-              &this->dof_indices
-                 [row_starts[i * vectorization_length * n_components].first];
-            unsigned int *interleaved_dof_indices =
-              &this->dof_indices_interleaved
-                 [row_starts[i * vectorization_length * n_components].first];
-            AssertDimension(this->dof_indices.size(),
-                            this->dof_indices_interleaved.size());
-            AssertDimension(n_vectorization_lanes_filled[dof_access_cell][i],
-                            vectorization_length);
-            AssertIndexRange(
-              row_starts[i * vectorization_length * n_components].first,
-              this->dof_indices_interleaved.size());
-            AssertIndexRange(
-              row_starts[i * vectorization_length * n_components].first +
-                ndofs * vectorization_length,
-              this->dof_indices_interleaved.size() + 1);
-            for (unsigned int k = 0; k < ndofs; ++k)
-              for (unsigned int j = 0; j < vectorization_length; ++j)
-                interleaved_dof_indices[k * vectorization_length + j] =
-                  dof_indices[j * ndofs + k];
-          }
-    }
-
-
-
     template <int length>
     void
     DoFInfo::compute_face_index_compression(
@@ -1173,379 +403,6 @@ namespace internal
 
 
 
-    void
-    DoFInfo::compute_tight_partitioners(
-      const Table<2, ShapeInfo<double>> &       shape_info,
-      const unsigned int                        n_owned_cells,
-      const unsigned int                        n_lanes,
-      const std::vector<FaceToCellTopology<1>> &inner_faces,
-      const std::vector<FaceToCellTopology<1>> &ghosted_faces,
-      const bool                                fill_cell_centric,
-      const MPI_Comm &                          communicator_sm,
-      const bool                                use_vector_data_exchanger_full)
-    {
-      const Utilities::MPI::Partitioner &part = *vector_partitioner;
-
-      // partitioner 0: no face integrals, simply use the indices present
-      // on the cells
-      std::vector<types::global_dof_index> ghost_indices;
-      {
-        const unsigned int n_components = start_components.back();
-        for (unsigned int cell = 0; cell < n_owned_cells; ++cell)
-          {
-            for (unsigned int i = row_starts[cell * n_components].first;
-                 i < row_starts[(cell + 1) * n_components].first;
-                 ++i)
-              if (dof_indices[i] >= part.local_size())
-                ghost_indices.push_back(part.local_to_global(dof_indices[i]));
-
-            const unsigned int fe_index =
-              dofs_per_cell.size() == 1 ? 0 :
-                                          cell_active_fe_index[cell / n_lanes];
-            const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
-
-            for (unsigned int i = row_starts_plain_indices[cell];
-                 i < row_starts_plain_indices[cell] + dofs_this_cell;
-                 ++i)
-              if (plain_dof_indices[i] >= part.local_size())
-                ghost_indices.push_back(
-                  part.local_to_global(plain_dof_indices[i]));
-          }
-        std::sort(ghost_indices.begin(), ghost_indices.end());
-        ghost_indices.erase(std::unique(ghost_indices.begin(),
-                                        ghost_indices.end()),
-                            ghost_indices.end());
-        IndexSet compressed_set(part.size());
-        compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
-        compressed_set.subtract_set(part.locally_owned_range());
-        const bool all_ghosts_equal =
-          Utilities::MPI::min<int>(compressed_set.n_elements() ==
-                                     part.ghost_indices().n_elements(),
-                                   part.get_mpi_communicator()) != 0;
-
-        std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;
-
-        if (all_ghosts_equal)
-          temp_0 = vector_partitioner;
-        else
-          {
-            temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
-              part.locally_owned_range(), part.get_mpi_communicator());
-            const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
-              ->set_ghost_indices(compressed_set, part.ghost_indices());
-          }
-
-        if (use_vector_data_exchanger_full == false)
-          vector_exchanger_face_variants[0] =
-            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                               PartitionerWrapper>(temp_0);
-        else
-          vector_exchanger_face_variants[0] = std::make_shared<
-            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-            temp_0, communicator_sm);
-      }
-
-      // construct a numbering of faces
-      std::vector<FaceToCellTopology<1>> all_faces(inner_faces);
-      all_faces.insert(all_faces.end(),
-                       ghosted_faces.begin(),
-                       ghosted_faces.end());
-      Table<2, unsigned int> cell_and_face_to_faces(
-        (row_starts.size() - 1) / start_components.back(),
-        2 * shape_info(0, 0).n_dimensions);
-      cell_and_face_to_faces.fill(numbers::invalid_unsigned_int);
-      for (unsigned int f = 0; f < all_faces.size(); ++f)
-        {
-          cell_and_face_to_faces(all_faces[f].cells_interior[0],
-                                 all_faces[f].interior_face_no) = f;
-          Assert(all_faces[f].cells_exterior[0] !=
-                   numbers::invalid_unsigned_int,
-                 ExcInternalError());
-          cell_and_face_to_faces(all_faces[f].cells_exterior[0],
-                                 all_faces[f].exterior_face_no) = f;
-        }
-
-      // lambda function to detect objects on face pairs
-      const auto loop_over_faces =
-        [&](const std::function<
-            void(const unsigned int, const unsigned int, const bool)> &fu) {
-          for (const auto &face : inner_faces)
-            {
-              AssertIndexRange(face.cells_interior[0], n_owned_cells);
-              fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/);
-            }
-        };
-
-      const auto loop_over_all_faces =
-        [&](const std::function<
-            void(const unsigned int, const unsigned int, const bool)> &fu) {
-          for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c)
-            for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d)
-              {
-                const unsigned int f = cell_and_face_to_faces(c, d);
-                if (f == numbers::invalid_unsigned_int)
-                  continue;
-
-                const unsigned int cell_m = all_faces[f].cells_interior[0];
-                const unsigned int cell_p = all_faces[f].cells_exterior[0];
-
-                const bool ext = c == cell_m;
-
-                if (ext && cell_p == numbers::invalid_unsigned_int)
-                  continue;
-
-                const unsigned int p       = ext ? cell_p : cell_m;
-                const unsigned int face_no = ext ?
-                                               all_faces[f].exterior_face_no :
-                                               all_faces[f].interior_face_no;
-
-                fu(p, face_no, true);
-              }
-        };
-
-      const auto process_values =
-        [&](
-          std::shared_ptr<const Utilities::MPI::Partitioner>
-            &vector_partitioner_values,
-          const std::function<void(
-            const std::function<void(
-              const unsigned int, const unsigned int, const bool)> &)> &loop) {
-          bool all_nodal_and_tensorial = true;
-          for (unsigned int c = 0; c < n_base_elements; ++c)
-            {
-              const auto &si =
-                shape_info(global_base_element_offset + c, 0).data.front();
-              if (!si.nodal_at_cell_boundaries ||
-                  (si.element_type ==
-                   internal::MatrixFreeFunctions::ElementType::tensor_none))
-                all_nodal_and_tensorial = false;
-            }
-          if (all_nodal_and_tensorial == false)
-            vector_partitioner_values = vector_partitioner;
-          else
-            {
-              bool has_noncontiguous_cell = false;
-
-              loop([&](const unsigned int cell_no,
-                       const unsigned int face_no,
-                       const bool         flag) {
-                const unsigned int index =
-                  dof_indices_contiguous[dof_access_cell][cell_no];
-                if (flag || (index != numbers::invalid_unsigned_int &&
-                             index >= part.local_size()))
-                  {
-                    const unsigned int stride =
-                      dof_indices_interleave_strides[dof_access_cell][cell_no];
-                    unsigned int i = 0;
-                    for (unsigned int e = 0; e < n_base_elements; ++e)
-                      for (unsigned int c = 0; c < n_components[e]; ++c)
-                        {
-                          const ShapeInfo<double> &shape =
-                            shape_info(global_base_element_offset + e, 0);
-                          for (unsigned int j = 0;
-                               j < shape.dofs_per_component_on_face;
-                               ++j)
-                            ghost_indices.push_back(part.local_to_global(
-                              index + i +
-                              shape.face_to_cell_index_nodal(face_no, j) *
-                                stride));
-                          i += shape.dofs_per_component_on_cell * stride;
-                        }
-                    AssertDimension(i, dofs_per_cell[0] * stride);
-                  }
-                else if (index == numbers::invalid_unsigned_int)
-                  has_noncontiguous_cell = true;
-              });
-              has_noncontiguous_cell =
-                Utilities::MPI::min<int>(has_noncontiguous_cell,
-                                         part.get_mpi_communicator()) != 0;
-
-              std::sort(ghost_indices.begin(), ghost_indices.end());
-              ghost_indices.erase(std::unique(ghost_indices.begin(),
-                                              ghost_indices.end()),
-                                  ghost_indices.end());
-              IndexSet compressed_set(part.size());
-              compressed_set.add_indices(ghost_indices.begin(),
-                                         ghost_indices.end());
-              compressed_set.subtract_set(part.locally_owned_range());
-              const bool all_ghosts_equal =
-                Utilities::MPI::min<int>(compressed_set.n_elements() ==
-                                           part.ghost_indices().n_elements(),
-                                         part.get_mpi_communicator()) != 0;
-              if (all_ghosts_equal || has_noncontiguous_cell)
-                vector_partitioner_values = vector_partitioner;
-              else
-                {
-                  vector_partitioner_values =
-                    std::make_shared<Utilities::MPI::Partitioner>(
-                      part.locally_owned_range(), part.get_mpi_communicator());
-                  const_cast<Utilities::MPI::Partitioner *>(
-                    vector_partitioner_values.get())
-                    ->set_ghost_indices(compressed_set, part.ghost_indices());
-                }
-            }
-        };
-
-
-      const auto process_gradients =
-        [&](
-          const std::shared_ptr<const Utilities::MPI::Partitioner>
-            &vector_partitioner_values,
-          std::shared_ptr<const Utilities::MPI::Partitioner>
-            &vector_partitioner_gradients,
-          const std::function<void(
-            const std::function<void(
-              const unsigned int, const unsigned int, const bool)> &)> &loop) {
-          bool all_hermite = true;
-          for (unsigned int c = 0; c < n_base_elements; ++c)
-            if (shape_info(global_base_element_offset + c, 0).element_type !=
-                internal::MatrixFreeFunctions::tensor_symmetric_hermite)
-              all_hermite = false;
-          if (all_hermite == false ||
-              vector_partitioner_values.get() == vector_partitioner.get())
-            vector_partitioner_gradients = vector_partitioner;
-          else
-            {
-              loop([&](const unsigned int cell_no,
-                       const unsigned int face_no,
-                       const bool         flag) {
-                const unsigned int index =
-                  dof_indices_contiguous[dof_access_cell][cell_no];
-                if (flag || (index != numbers::invalid_unsigned_int &&
-                             index >= part.local_size()))
-                  {
-                    const unsigned int stride =
-                      dof_indices_interleave_strides[dof_access_cell][cell_no];
-                    unsigned int i = 0;
-                    for (unsigned int e = 0; e < n_base_elements; ++e)
-                      for (unsigned int c = 0; c < n_components[e]; ++c)
-                        {
-                          const ShapeInfo<double> &shape =
-                            shape_info(global_base_element_offset + e, 0);
-                          for (unsigned int j = 0;
-                               j < 2 * shape.dofs_per_component_on_face;
-                               ++j)
-                            ghost_indices.push_back(part.local_to_global(
-                              index + i +
-                              shape.face_to_cell_index_hermite(face_no, j) *
-                                stride));
-                          i += shape.dofs_per_component_on_cell * stride;
-                        }
-                    AssertDimension(i, dofs_per_cell[0] * stride);
-                  }
-              });
-              std::sort(ghost_indices.begin(), ghost_indices.end());
-              ghost_indices.erase(std::unique(ghost_indices.begin(),
-                                              ghost_indices.end()),
-                                  ghost_indices.end());
-              IndexSet compressed_set(part.size());
-              compressed_set.add_indices(ghost_indices.begin(),
-                                         ghost_indices.end());
-              compressed_set.subtract_set(part.locally_owned_range());
-              const bool all_ghosts_equal =
-                Utilities::MPI::min<int>(compressed_set.n_elements() ==
-                                           part.ghost_indices().n_elements(),
-                                         part.get_mpi_communicator()) != 0;
-              if (all_ghosts_equal)
-                vector_partitioner_gradients = vector_partitioner;
-              else
-                {
-                  vector_partitioner_gradients =
-                    std::make_shared<Utilities::MPI::Partitioner>(
-                      part.locally_owned_range(), part.get_mpi_communicator());
-                  const_cast<Utilities::MPI::Partitioner *>(
-                    vector_partitioner_gradients.get())
-                    ->set_ghost_indices(compressed_set, part.ghost_indices());
-                }
-            }
-        };
-
-      std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
-        temp_4;
-
-      // partitioner 1: values on faces
-      process_values(temp_1, loop_over_faces);
-
-      // partitioner 2: values and gradients on faces
-      process_gradients(temp_1, temp_2, loop_over_faces);
-
-      if (fill_cell_centric)
-        {
-          ghost_indices.clear();
-          // partitioner 3: values on all faces
-          process_values(temp_3, loop_over_all_faces);
-          // partitioner 4: values and gradients on all faces
-          process_gradients(temp_3, temp_4, loop_over_all_faces);
-        }
-      else
-        {
-          temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
-            part.locally_owned_range(), part.get_mpi_communicator());
-          temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
-            part.locally_owned_range(), part.get_mpi_communicator());
-        }
-
-      if (use_vector_data_exchanger_full == false)
-        {
-          vector_exchanger_face_variants[1] =
-            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                               PartitionerWrapper>(temp_1);
-          vector_exchanger_face_variants[2] =
-            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                               PartitionerWrapper>(temp_2);
-          vector_exchanger_face_variants[3] =
-            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                               PartitionerWrapper>(temp_3);
-          vector_exchanger_face_variants[4] =
-            std::make_shared<internal::MatrixFreeFunctions::VectorDataExchange::
-                               PartitionerWrapper>(temp_4);
-        }
-      else
-        {
-          vector_exchanger_face_variants[1] = std::make_shared<
-            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-            temp_1, communicator_sm);
-          vector_exchanger_face_variants[2] = std::make_shared<
-            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-            temp_2, communicator_sm);
-          vector_exchanger_face_variants[3] = std::make_shared<
-            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-            temp_3, communicator_sm);
-          vector_exchanger_face_variants[4] = std::make_shared<
-            internal::MatrixFreeFunctions::VectorDataExchange::Full>(
-            temp_4, communicator_sm);
-        }
-    }
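
Both process_values and process_gradients above rely on the same compressed-ghost-set idiom: collect the indices actually touched on faces, deduplicate them, and only install the tighter ghost set if all ranks can agree on it. A minimal sketch of that idiom, assuming an already populated vector ghost_indices and a partitioner part as in the code above:

    // Sketch of the idiom (not a drop-in replacement): compress the collected
    // ghost indices and decide collectively whether a tighter set pays off.
    std::sort(ghost_indices.begin(), ghost_indices.end());
    ghost_indices.erase(std::unique(ghost_indices.begin(), ghost_indices.end()),
                        ghost_indices.end());

    IndexSet compressed_set(part.size());
    compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
    compressed_set.subtract_set(part.locally_owned_range());

    // All ranks must take the same branch, hence the MPI min-reduction.
    const bool all_ghosts_equal =
      Utilities::MPI::min<int>(compressed_set.n_elements() ==
                                 part.ghost_indices().n_elements(),
                               part.get_mpi_communicator()) != 0;
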
-
-
-
-    void
-    DoFInfo::compute_shared_memory_contiguous_indices(
-      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
-        &cell_indices_contiguous_sm)
-    {
-      AssertDimension(dofs_per_cell.size(), 1);
-
-      for (unsigned int i = 0; i < 3; ++i)
-        {
-          dof_indices_contiguous_sm[i].resize(
-            cell_indices_contiguous_sm[i].size());
-
-          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
-               ++j)
-            if (cell_indices_contiguous_sm[i][j].first !=
-                numbers::invalid_unsigned_int)
-              dof_indices_contiguous_sm[i][j] = {
-                cell_indices_contiguous_sm[i][j].first,
-                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
-            else
-              dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
-                                                 numbers::invalid_unsigned_int};
-        }
-    }
-
-
-
     template <int length>
     void
     DoFInfo::compute_vector_zero_access_pattern(
@@ -1775,277 +632,10 @@ namespace internal
             ++dat;
         }
       };
-
-
-
-      // We construct the connectivity graph in parallel. We use one lock per
-      // 256 degrees of freedom to keep the number of locks at a reasonable
-      // level and to reduce the cost of locking.
-      static constexpr unsigned int bucket_size_threading = 256;
-
-      void
-      compute_row_lengths(const unsigned int         begin,
-                          const unsigned int         end,
-                          const DoFInfo &            dof_info,
-                          std::vector<std::mutex> &  mutexes,
-                          std::vector<unsigned int> &row_lengths)
-      {
-        std::vector<unsigned int> scratch;
-        const unsigned int n_components = dof_info.start_components.back();
-        for (unsigned int block = begin; block < end; ++block)
-          {
-            scratch.clear();
-            scratch.insert(
-              scratch.end(),
-              dof_info.dof_indices.data() +
-                dof_info.row_starts[block * n_components].first,
-              dof_info.dof_indices.data() +
-                dof_info.row_starts[(block + 1) * n_components].first);
-            std::sort(scratch.begin(), scratch.end());
-            std::vector<unsigned int>::const_iterator end_unique =
-              std::unique(scratch.begin(), scratch.end());
-            std::vector<unsigned int>::const_iterator it = scratch.begin();
-            while (it != end_unique)
-              {
-                // Insert all elements that fall within the range of a
-                // single lock in one locked pass
-                const unsigned int next_bucket =
-                  (*it / bucket_size_threading + 1) * bucket_size_threading;
-                std::lock_guard<std::mutex> lock(
-                  mutexes[*it / bucket_size_threading]);
-                for (; it != end_unique && *it < next_bucket; ++it)
-                  {
-                    AssertIndexRange(*it, row_lengths.size());
-                    row_lengths[*it]++;
-                  }
-              }
-          }
-      }
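
The locking scheme in compute_row_lengths (and in fill_connectivity_dofs below) exploits the fact that the scratch indices are sorted: all updates falling into one bucket of bucket_size_threading entries happen under a single lock acquisition. A self-contained sketch of the pattern, with hypothetical names (increment_counts, bucket_size):

    #include <mutex>
    #include <vector>

    static constexpr unsigned int bucket_size = 256;

    // Increment counts[i] for every i in sorted_indices; one mutex guards a
    // bucket of 256 consecutive entries, and the sort guarantees each bucket
    // is locked at most once per call.
    void increment_counts(const std::vector<unsigned int> &sorted_indices,
                          std::vector<std::mutex> &        mutexes,
                          std::vector<unsigned int> &      counts)
    {
      auto it = sorted_indices.begin();
      while (it != sorted_indices.end())
        {
          const unsigned int next_bucket =
            (*it / bucket_size + 1) * bucket_size;
          std::lock_guard<std::mutex> lock(mutexes[*it / bucket_size]);
          for (; it != sorted_indices.end() && *it < next_bucket; ++it)
            ++counts[*it];
        }
    }
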
-
-      void
-      fill_connectivity_dofs(const unsigned int               begin,
-                             const unsigned int               end,
-                             const DoFInfo &                  dof_info,
-                             const std::vector<unsigned int> &row_lengths,
-                             std::vector<std::mutex> &        mutexes,
-                             dealii::SparsityPattern &        connectivity_dof)
-      {
-        std::vector<unsigned int> scratch;
-        const unsigned int n_components = dof_info.start_components.back();
-        for (unsigned int block = begin; block < end; ++block)
-          {
-            scratch.clear();
-            scratch.insert(
-              scratch.end(),
-              dof_info.dof_indices.data() +
-                dof_info.row_starts[block * n_components].first,
-              dof_info.dof_indices.data() +
-                dof_info.row_starts[(block + 1) * n_components].first);
-            std::sort(scratch.begin(), scratch.end());
-            std::vector<unsigned int>::const_iterator end_unique =
-              std::unique(scratch.begin(), scratch.end());
-            std::vector<unsigned int>::const_iterator it = scratch.begin();
-            while (it != end_unique)
-              {
-                const unsigned int next_bucket =
-                  (*it / bucket_size_threading + 1) * bucket_size_threading;
-                std::lock_guard<std::mutex> lock(
-                  mutexes[*it / bucket_size_threading]);
-                for (; it != end_unique && *it < next_bucket; ++it)
-                  if (row_lengths[*it] > 0)
-                    connectivity_dof.add(*it, block);
-              }
-          }
-      }
-
-      void
-      fill_connectivity(const unsigned int               begin,
-                        const unsigned int               end,
-                        const DoFInfo &                  dof_info,
-                        const std::vector<unsigned int> &renumbering,
-                        const dealii::SparsityPattern &  connectivity_dof,
-                        DynamicSparsityPattern &         connectivity)
-      {
-        ordered_vector     row_entries;
-        const unsigned int n_components = dof_info.start_components.back();
-        for (unsigned int block = begin; block < end; ++block)
-          {
-            row_entries.clear();
-
-            const unsigned int
-              *it = dof_info.dof_indices.data() +
-                    dof_info.row_starts[block * n_components].first,
-              *end_cell = dof_info.dof_indices.data() +
-                          dof_info.row_starts[(block + 1) * n_components].first;
-            for (; it != end_cell; ++it)
-              {
-                SparsityPattern::iterator sp = connectivity_dof.begin(*it);
-                std::vector<types::global_dof_index>::iterator insert_pos =
-                  row_entries.begin();
-                for (; sp != connectivity_dof.end(*it); ++sp)
-                  if (sp->column() != block)
-                    row_entries.insert(renumbering[sp->column()], insert_pos);
-              }
-            connectivity.add_entries(renumbering[block],
-                                     row_entries.begin(),
-                                     row_entries.end());
-          }
-      }
     } // namespace internal
 
 
 
-    void
-    DoFInfo::make_connectivity_graph(
-      const TaskInfo &                 task_info,
-      const std::vector<unsigned int> &renumbering,
-      DynamicSparsityPattern &         connectivity) const
-    {
-      unsigned int n_rows = (vector_partitioner->local_range().second -
-                             vector_partitioner->local_range().first) +
-                            vector_partitioner->ghost_indices().n_elements();
-
-      // Avoid square sparsity patterns that allocate the diagonal entry
-      if (n_rows == task_info.n_active_cells)
-        ++n_rows;
-
-      // first determine row lengths
-      std::vector<unsigned int> row_lengths(n_rows);
-      std::vector<std::mutex> mutexes(n_rows / internal::bucket_size_threading +
-                                      1);
-      dealii::parallel::apply_to_subranges(
-        0,
-        task_info.n_active_cells,
-        [this, &mutexes, &row_lengths](const unsigned int begin,
-                                       const unsigned int end) {
-          internal::compute_row_lengths(
-            begin, end, *this, mutexes, row_lengths);
-        },
-        20);
-
-      // disregard dofs that only sit on a single cell because they cannot
-      // couple
-      for (unsigned int row = 0; row < n_rows; ++row)
-        if (row_lengths[row] <= 1)
-          row_lengths[row] = 0;
-
-      // Create a temporary sparsity pattern that stores, for each degree of
-      // freedom, the cells on which it appears, i.e., the connectivity
-      // between cells and dofs
-      SparsityPattern connectivity_dof(n_rows,
-                                       task_info.n_active_cells,
-                                       row_lengths);
-      dealii::parallel::apply_to_subranges(
-        0,
-        task_info.n_active_cells,
-        [this, &row_lengths, &mutexes, &connectivity_dof](
-          const unsigned int begin, const unsigned int end) {
-          internal::fill_connectivity_dofs(
-            begin, end, *this, row_lengths, mutexes, connectivity_dof);
-        },
-        20);
-      connectivity_dof.compress();
-
-
-      // Invert renumbering for use in fill_connectivity.
-      std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
-      reverse_numbering = Utilities::invert_permutation(renumbering);
-
-      // From the above connectivity between dofs and cells, we can finally
-      // create a connectivity list between cells. The connectivity graph
-      // should apply the renumbering, i.e., the entry for cell j is the entry
-      // for cell renumbering[j] in the original ordering.
-      dealii::parallel::apply_to_subranges(
-        0,
-        task_info.n_active_cells,
-        [this, &reverse_numbering, &connectivity_dof, &connectivity](
-          const unsigned int begin, const unsigned int end) {
-          internal::fill_connectivity(begin,
-                                      end,
-                                      *this,
-                                      reverse_numbering,
-                                      connectivity_dof,
-                                      connectivity);
-        },
-        20);
-    }
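
The renumbering convention used here is easy to get backwards; a tiny worked example of the inversion performed above, with hypothetical values:

    // renumbering maps a position in the new (renumbered) order to the
    // original cell index: renumbering = {2, 0, 1} means new cell 0 is old
    // cell 2. Utilities::invert_permutation then yields the old-to-new map:
    const std::vector<unsigned int> renumbering = {2, 0, 1};
    const std::vector<unsigned int> reverse_numbering =
      Utilities::invert_permutation(renumbering); // == {1, 2, 0}
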
-
-
-
-    void
-    DoFInfo::compute_dof_renumbering(
-      std::vector<types::global_dof_index> &renumbering)
-    {
-      const unsigned int locally_owned_size =
-        vector_partitioner->locally_owned_size();
-      renumbering.resize(0);
-      renumbering.resize(locally_owned_size, numbers::invalid_dof_index);
-
-      types::global_dof_index counter      = 0;
-      const unsigned int      n_components = start_components.back();
-      const unsigned int      n_cell_batches =
-        n_vectorization_lanes_filled[dof_access_cell].size();
-      Assert(n_cell_batches <=
-               (row_starts.size() - 1) / vectorization_length / n_components,
-             ExcInternalError());
-      for (unsigned int cell_no = 0; cell_no < n_cell_batches; ++cell_no)
-        {
-          // do not renumber in case we have constraints
-          if (row_starts[cell_no * n_components * vectorization_length]
-                .second ==
-              row_starts[(cell_no + 1) * n_components * vectorization_length]
-                .second)
-            {
-              const unsigned int ndofs =
-                dofs_per_cell.size() == 1 ?
-                  dofs_per_cell[0] :
-                  (dofs_per_cell[cell_active_fe_index.size() > 0 ?
-                                   cell_active_fe_index[cell_no] :
-                                   0]);
-              const unsigned int *dof_ind =
-                dof_indices.data() +
-                row_starts[cell_no * n_components * vectorization_length].first;
-              for (unsigned int i = 0; i < ndofs; ++i)
-                for (unsigned int j = 0;
-                     j < n_vectorization_lanes_filled[dof_access_cell][cell_no];
-                     ++j)
-                  if (dof_ind[j * ndofs + i] < locally_owned_size)
-                    if (renumbering[dof_ind[j * ndofs + i]] ==
-                        numbers::invalid_dof_index)
-                      renumbering[dof_ind[j * ndofs + i]] = counter++;
-            }
-        }
-
-      AssertIndexRange(counter, locally_owned_size + 1);
-      for (types::global_dof_index &dof_index : renumbering)
-        if (dof_index == numbers::invalid_dof_index)
-          dof_index = counter++;
-
-      // transform indices to global index space
-      for (types::global_dof_index &dof_index : renumbering)
-        dof_index = vector_partitioner->local_to_global(dof_index);
-
-      AssertDimension(counter, renumbering.size());
-    }
-
-
-
-    std::size_t
-    DoFInfo::memory_consumption() const
-    {
-      std::size_t memory = sizeof(*this);
-      memory +=
-        (row_starts.capacity() * sizeof(std::pair<unsigned int, unsigned int>));
-      memory += MemoryConsumption::memory_consumption(dof_indices);
-      memory += MemoryConsumption::memory_consumption(row_starts_plain_indices);
-      memory += MemoryConsumption::memory_consumption(plain_dof_indices);
-      memory += MemoryConsumption::memory_consumption(constraint_indicator);
-      memory += MemoryConsumption::memory_consumption(*vector_partitioner);
-      return memory;
-    }
-
-
-
     template <typename StreamType>
     void
     DoFInfo::print_memory_consumption(StreamType &    out,
index ad726f4f16f161f061302be8f996db3e04e148a2..d38b2eb6e35a602914ef148b18807a5da3cbb894 100644 (file)
@@ -2123,45 +2123,6 @@ namespace internal
 
 namespace MGTransferGlobalCoarseningTools
 {
-  unsigned int
-  create_next_polynomial_coarsening_degree(
-    const unsigned int                      previous_fe_degree,
-    const PolynomialCoarseningSequenceType &p_sequence)
-  {
-    switch (p_sequence)
-      {
-        case PolynomialCoarseningSequenceType::bisect:
-          return std::max(previous_fe_degree / 2, 1u);
-        case PolynomialCoarseningSequenceType::decrease_by_one:
-          return std::max(previous_fe_degree - 1, 1u);
-        case PolynomialCoarseningSequenceType::go_to_one:
-          return 1u;
-        default:
-          Assert(false, StandardExceptions::ExcNotImplemented());
-          return 1u;
-      }
-  }
-
-
-
-  std::vector<unsigned int>
-  create_polynomial_coarsening_sequence(
-    const unsigned int                      max_degree,
-    const PolynomialCoarseningSequenceType &p_sequence)
-  {
-    std::vector<unsigned int> degrees{max_degree};
-
-    while (degrees.back() > 1)
-      degrees.push_back(
-        create_next_polynomial_coarsening_degree(degrees.back(), p_sequence));
-
-    std::reverse(degrees.begin(), degrees.end());
-
-    return degrees;
-  }
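
For reference, the sequences these two (relocated) functions produce, starting for example from max_degree = 4:

    // bisect:          4 -> 2 -> 1,      returned as {1, 2, 4}
    // decrease_by_one: 4 -> 3 -> 2 -> 1, returned as {1, 2, 3, 4}
    // go_to_one:       4 -> 1,           returned as {1, 4}
    const std::vector<unsigned int> degrees =
      create_polynomial_coarsening_sequence(
        4, PolynomialCoarseningSequenceType::bisect); // degrees == {1, 2, 4}
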
-
-
-
   template <int dim, int spacedim>
   std::vector<std::shared_ptr<const Triangulation<dim, spacedim>>>
   create_geometric_coarsening_sequence(
index a4e043b2ef82ca6c07417dd2a368433c04a3371f..0c74c114b47648acb6d04d07b2e2d0dcb0f84230 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
+namespace Utilities
+{
+  namespace MPI
+  {
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
+      const IndexSet &indexset_has,
+      const IndexSet &indexset_want,
+      const MPI_Comm &communicator)
+    {
+      this->reinit(indexset_has, indexset_want, communicator);
+    }
+
+
+
+    NoncontiguousPartitioner::NoncontiguousPartitioner(
+      const std::vector<types::global_dof_index> &indices_has,
+      const std::vector<types::global_dof_index> &indices_want,
+      const MPI_Comm &                            communicator)
+    {
+      this->reinit(indices_has, indices_want, communicator);
+    }
+
+
+
+    std::pair<unsigned int, unsigned int>
+    NoncontiguousPartitioner::n_targets() const
+    {
+      return {send_ranks.size(), recv_ranks.size()};
+    }
+
+
+
+    unsigned int
+    NoncontiguousPartitioner::temporary_storage_size() const
+    {
+      return send_ptr.back();
+    }
+
+
+
+    types::global_dof_index
+    NoncontiguousPartitioner::memory_consumption()
+    {
+      return MemoryConsumption::memory_consumption(send_ranks) +
+             MemoryConsumption::memory_consumption(send_ptr) +
+             MemoryConsumption::memory_consumption(send_indices) +
+             MemoryConsumption::memory_consumption(recv_ranks) +
+             MemoryConsumption::memory_consumption(recv_ptr) +
+             MemoryConsumption::memory_consumption(recv_indices) +
+             MemoryConsumption::memory_consumption(buffers) +
+             MemoryConsumption::memory_consumption(requests);
+    }
+
+
+
+    const MPI_Comm &
+    NoncontiguousPartitioner::get_mpi_communicator() const
+    {
+      return communicator;
+    }
+
+
+
+    void
+    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
+                                     const IndexSet &indexset_want,
+                                     const MPI_Comm &communicator)
+    {
+      this->communicator = communicator;
+
+      // clean up
+      send_ranks.clear();
+      send_ptr.clear();
+      send_indices.clear();
+      recv_ranks.clear();
+      recv_ptr.clear();
+      recv_indices.clear();
+      buffers.clear();
+      requests.clear();
+
+      // setup communication pattern
+      std::vector<unsigned int> owning_ranks_of_ghosts(
+        indexset_want.n_elements());
+
+      // set up dictionary
+      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+        process(indexset_has,
+                indexset_want,
+                communicator,
+                owning_ranks_of_ghosts,
+                true);
+
+      Utilities::MPI::ConsensusAlgorithms::Selector<
+        std::pair<types::global_dof_index, types::global_dof_index>,
+        unsigned int>
+        consensus_algorithm(process, communicator);
+      consensus_algorithm.run();
+
+      // setup map of processes from which this rank will receive values
+      {
+        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;
+
+        for (const auto &owner : owning_ranks_of_ghosts)
+          recv_map[owner] = std::vector<types::global_dof_index>();
+
+        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
+             i++)
+          recv_map[owning_ranks_of_ghosts[i]].push_back(i);
+
+        recv_ptr.push_back(recv_indices.size() /*=0*/);
+        for (const auto &target_with_indexset : recv_map)
+          {
+            recv_ranks.push_back(target_with_indexset.first);
+
+            for (const auto cell_index : target_with_indexset.second)
+              recv_indices.push_back(cell_index);
+
+            recv_ptr.push_back(recv_indices.size());
+          }
+      }
+
+      {
+        const auto targets_with_indexset = process.get_requesters();
+
+        send_ptr.push_back(recv_ptr.back());
+        for (const auto &target_with_indexset : targets_with_indexset)
+          {
+            send_ranks.push_back(target_with_indexset.first);
+
+            for (const auto cell_index : target_with_indexset.second)
+              send_indices.push_back(indexset_has.index_within_set(cell_index));
+
+            send_ptr.push_back(send_indices.size() + recv_ptr.back());
+          }
+      }
+    }
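
A hedged usage sketch of the IndexSet-based interface just shown; the global size and ranges are made up for illustration:

    // Each rank describes the global indices it has and the ones it wants;
    // the overlap across ranks defines the communication pattern.
    IndexSet index_set_has(100);
    index_set_has.add_range(0, 50); // this rank stores indices [0, 50)

    IndexSet index_set_want(100);
    index_set_want.add_range(25, 75); // ... and wants values for [25, 75)

    Utilities::MPI::NoncontiguousPartitioner partitioner(index_set_has,
                                                         index_set_want,
                                                         MPI_COMM_WORLD);

    // first: number of ranks this rank sends to; second: receives from
    const std::pair<unsigned int, unsigned int> targets =
      partitioner.n_targets();
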
+
+
+
+    void
+    NoncontiguousPartitioner::reinit(
+      const std::vector<types::global_dof_index> &indices_has,
+      const std::vector<types::global_dof_index> &indices_want,
+      const MPI_Comm &                            communicator)
+    {
+      // step 0a) remove numbers::invalid_dof_index entries (which indicate
+      //          padding) from the vectors
+      std::vector<types::global_dof_index> indices_has_clean;
+      indices_has_clean.reserve(indices_has.size());
+
+      for (const auto i : indices_has)
+        if (i != numbers::invalid_dof_index)
+          indices_has_clean.push_back(i);
+
+      std::vector<types::global_dof_index> indices_want_clean;
+      indices_want_clean.reserve(indices_want.size());
+
+      for (const auto i : indices_want)
+        if (i != numbers::invalid_dof_index)
+          indices_want_clean.push_back(i);
+
+      // step 0b) determine the "number of degrees of freedom" needed for the
+      //          IndexSet
+      const types::global_dof_index local_n_dofs_has =
+        indices_has_clean.empty() ?
+          0 :
+          (*std::max_element(indices_has_clean.begin(),
+                             indices_has_clean.end()) +
+           1);
+
+      const types::global_dof_index local_n_dofs_want =
+        indices_want_clean.empty() ?
+          0 :
+          (*std::max_element(indices_want_clean.begin(),
+                             indices_want_clean.end()) +
+           1);
+
+      const types::global_dof_index n_dofs =
+        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
+                            communicator);
+
+      // step 1) convert vectors to indexsets (sorted!)
+      IndexSet index_set_has(n_dofs);
+      index_set_has.add_indices(indices_has_clean.begin(),
+                                indices_has_clean.end());
+
+      IndexSet index_set_want(n_dofs);
+      index_set_want.add_indices(indices_want_clean.begin(),
+                                 indices_want_clean.end());
+
+      // step 2) setup internal data structures with indexset
+      this->reinit(index_set_has, index_set_want, communicator);
+
+      // step 3) fix the internal data structures so that they are sorted as
+      //         in the original vectors
+      {
+        std::vector<types::global_dof_index> temp_map_send(
+          index_set_has.n_elements());
+
+        for (types::global_dof_index i = 0; i < indices_has.size(); i++)
+          if (indices_has[i] != numbers::invalid_dof_index)
+            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;
+
+        for (auto &i : send_indices)
+          i = temp_map_send[i];
+      }
+
+      {
+        std::vector<types::global_dof_index> temp_map_recv(
+          index_set_want.n_elements());
+
+        for (types::global_dof_index i = 0; i < indices_want.size(); i++)
+          if (indices_want[i] != numbers::invalid_dof_index)
+            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;
+
+        for (auto &i : recv_indices)
+          i = temp_map_recv[i];
+      }
+    }
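
And a sketch of the vector-based variant with padding, which is what the invalid_dof_index handling above enables (the index values are hypothetical):

    // Padded entries (numbers::invalid_dof_index) are skipped when building
    // the pattern, but send_indices/recv_indices still address positions in
    // the original padded vectors after step 3.
    std::vector<types::global_dof_index> indices_has =
      {4, 6, numbers::invalid_dof_index};
    std::vector<types::global_dof_index> indices_want =
      {6, numbers::invalid_dof_index, 2};

    Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
                                                         indices_want,
                                                         MPI_COMM_WORLD);
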
+  } // namespace MPI
+} // namespace Utilities
+
 #include "mpi_noncontiguous_partitioner.inst"
 
 DEAL_II_NAMESPACE_CLOSE
index 6c6d291b71fe3d1dfde35e0c4505104048491eb9..eaa4626115630fb0f0b20494c00f42abd2bf0b69 100644 (file)
@@ -24,6 +24,115 @@ DEAL_II_NAMESPACE_OPEN
 // these specializations do not fit into the dimension templates of the inst
 // file
 namespace FETools
 {
+  // Specializations for FE_Q.
+  template <>
+  std::unique_ptr<FiniteElement<1, 1>>
+  FEFactory<FE_Q<1, 1>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q<1>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<2, 2>>
+  FEFactory<FE_Q<2, 2>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q<2>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<3, 3>>
+  FEFactory<FE_Q<3, 3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q<3>>(quad);
+  }
+
+  // Specializations for FE_Q_DG0.
+  template <>
+  std::unique_ptr<FiniteElement<1, 1>>
+  FEFactory<FE_Q_DG0<1, 1>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_DG0<1>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<2, 2>>
+  FEFactory<FE_Q_DG0<2, 2>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_DG0<2>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<3, 3>>
+  FEFactory<FE_Q_DG0<3, 3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_DG0<3>>(quad);
+  }
+
+  // Specializations for FE_Q_Bubbles.
+  template <>
+  std::unique_ptr<FiniteElement<1, 1>>
+  FEFactory<FE_Q_Bubbles<1, 1>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_Bubbles<1>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<2, 2>>
+  FEFactory<FE_Q_Bubbles<2, 2>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_Bubbles<2>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<3, 3>>
+  FEFactory<FE_Q_Bubbles<3, 3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_Q_Bubbles<3>>(quad);
+  }
+
+  // Specializations for FE_DGQArbitraryNodes.
+  template <>
+  std::unique_ptr<FiniteElement<1, 1>>
+  FEFactory<FE_DGQ<1>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<1>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<1, 2>>
+  FEFactory<FE_DGQ<1, 2>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<1, 2>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<1, 3>>
+  FEFactory<FE_DGQ<1, 3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<1, 3>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<2, 2>>
+  FEFactory<FE_DGQ<2>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<2>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<2, 3>>
+  FEFactory<FE_DGQ<2, 3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<2, 3>>(quad);
+  }
+
+  template <>
+  std::unique_ptr<FiniteElement<3, 3>>
+  FEFactory<FE_DGQ<3>>::get(const Quadrature<1> &quad) const
+  {
+    return std::make_unique<FE_DGQArbitraryNodes<3>>(quad);
+  }
+
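
A hypothetical use of the specializations above; note that the FE_DGQ factory deliberately returns an FE_DGQArbitraryNodes element built on the given 1d quadrature points:

    FETools::FEFactory<FE_DGQ<2>> factory;
    std::unique_ptr<FiniteElement<2, 2>> fe =
      factory.get(QGaussLobatto<1>(3)); // FE_DGQArbitraryNodes<2> with nodes
                                        // at the Gauss-Lobatto points
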
   template void
   hierarchic_to_lexicographic_numbering<0>(unsigned int,
                                            std::vector<unsigned int> &);
index 3b7e462fa1feacd9a080d6b89b1323dbf2746f83..4b2d8b925b83bd0d55cf5f7051fda94b93ee2409 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
+namespace internal
+{
+  namespace MatrixFreeFunctions
+  {
+    DoFInfo::DoFInfo()
+    {
+      clear();
+    }
+
+
+
+    void
+    DoFInfo::clear()
+    {
+      row_starts.clear();
+      dof_indices.clear();
+      constraint_indicator.clear();
+      vector_partitioner.reset();
+      ghost_dofs.clear();
+      dofs_per_cell.clear();
+      dofs_per_face.clear();
+      vectorization_length       = 1;
+      dimension                  = 2;
+      global_base_element_offset = 0;
+      n_base_elements            = 0;
+      n_components.clear();
+      start_components.clear();
+      row_starts_plain_indices.clear();
+      plain_dof_indices.clear();
+      dof_indices_interleaved.clear();
+      for (unsigned int i = 0; i < 3; ++i)
+        {
+          index_storage_variants[i].clear();
+          dof_indices_contiguous[i].clear();
+          dof_indices_interleave_strides[i].clear();
+          n_vectorization_lanes_filled[i].clear();
+        }
+      store_plain_indices = false;
+      cell_active_fe_index.clear();
+      max_fe_index = 0;
+      fe_index_conversion.clear();
+    }
+
+
+
+    void
+    DoFInfo::get_dof_indices_on_cell_batch(std::vector<unsigned int> &my_rows,
+                                           const unsigned int         cell,
+                                           const bool apply_constraints) const
+    {
+      const unsigned int n_fe_components = start_components.back();
+      const unsigned int fe_index =
+        dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell];
+      const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
+
+      const unsigned int n_vectorization  = vectorization_length;
+      constexpr auto     dof_access_index = dof_access_cell;
+      AssertIndexRange(cell,
+                       n_vectorization_lanes_filled[dof_access_index].size());
+      const unsigned int n_vectorization_actual =
+        n_vectorization_lanes_filled[dof_access_index][cell];
+
+      // We might have constraints, so the final number of indices is not
+      // known a priori. Conservatively reserve the maximum assuming no
+      // constraints.
+      my_rows.reserve(n_vectorization * dofs_this_cell);
+      my_rows.resize(0);
+      unsigned int total_size = 0;
+      for (unsigned int v = 0; v < n_vectorization_actual; ++v)
+        {
+          const unsigned int ib =
+            (cell * n_vectorization + v) * n_fe_components;
+          const unsigned int ie =
+            (cell * n_vectorization + v + 1) * n_fe_components;
+
+          // detect constraints by checking whether the constraint_indicator
+          // offset changes between this cell and the next one within the
+          // batch
+          const bool has_constraints =
+            row_starts[ib].second != row_starts[ib + n_fe_components].second;
+
+          auto do_copy = [&](const unsigned int *begin,
+                             const unsigned int *end) {
+            const unsigned int shift = total_size;
+            total_size += (end - begin);
+            my_rows.resize(total_size);
+            std::copy(begin, end, my_rows.begin() + shift);
+          };
+
+          if (!has_constraints || apply_constraints)
+            {
+              const unsigned int *begin =
+                dof_indices.data() + row_starts[ib].first;
+              const unsigned int *end =
+                dof_indices.data() + row_starts[ie].first;
+              do_copy(begin, end);
+            }
+          else
+            {
+              Assert(row_starts_plain_indices[cell * n_vectorization + v] !=
+                       numbers::invalid_unsigned_int,
+                     ExcNotInitialized());
+              const unsigned int *begin =
+                plain_dof_indices.data() +
+                row_starts_plain_indices[cell * n_vectorization + v];
+              const unsigned int *end = begin + dofs_this_cell;
+              do_copy(begin, end);
+            }
+        }
+    }
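
A short usage sketch, assuming dof_info is an initialized DoFInfo object and using a hypothetical batch number:

    std::vector<unsigned int> my_rows;
    // Local dof indices of all filled lanes of cell batch 7; with
    // apply_constraints == false, plain (unconstrained) indices are
    // returned instead wherever constraints are present.
    dof_info.get_dof_indices_on_cell_batch(my_rows,
                                           /*cell=*/7,
                                           /*apply_constraints=*/true);
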
+
+
+
+    void
+    DoFInfo::assign_ghosts(const std::vector<unsigned int> &boundary_cells,
+                           const MPI_Comm &                 communicator_sm,
+                           const bool use_vector_data_exchanger_full)
+    {
+      Assert(boundary_cells.size() < row_starts.size(), ExcInternalError());
+
+      // sort ghost dofs and compress out duplicates
+      const unsigned int n_owned  = (vector_partitioner->local_range().second -
+                                    vector_partitioner->local_range().first);
+      const std::size_t  n_ghosts = ghost_dofs.size();
+#ifdef DEBUG
+      for (const auto dof_index : dof_indices)
+        AssertIndexRange(dof_index, n_owned + n_ghosts);
+#endif
+
+      const unsigned int        n_components = start_components.back();
+      std::vector<unsigned int> ghost_numbering(n_ghosts);
+      IndexSet                  ghost_indices(vector_partitioner->size());
+      if (n_ghosts > 0)
+        {
+          unsigned int n_unique_ghosts = 0;
+          // Since we need to go back to the local_to_global indices and
+          // replace the temporary numbering of ghosts by the real position
+          // in the index set, we store the original position alongside each
+          // ghost index.
+          std::vector<std::pair<types::global_dof_index, unsigned int>>
+            ghost_origin(n_ghosts);
+          for (std::size_t i = 0; i < n_ghosts; ++i)
+            {
+              ghost_origin[i].first  = ghost_dofs[i];
+              ghost_origin[i].second = i;
+            }
+          std::sort(ghost_origin.begin(), ghost_origin.end());
+
+          types::global_dof_index last_contiguous_start = ghost_origin[0].first;
+          ghost_numbering[ghost_origin[0].second]       = 0;
+          for (std::size_t i = 1; i < n_ghosts; i++)
+            {
+              if (ghost_origin[i].first > ghost_origin[i - 1].first + 1)
+                {
+                  ghost_indices.add_range(last_contiguous_start,
+                                          ghost_origin[i - 1].first + 1);
+                  last_contiguous_start = ghost_origin[i].first;
+                }
+              if (ghost_origin[i].first > ghost_origin[i - 1].first)
+                ++n_unique_ghosts;
+              ghost_numbering[ghost_origin[i].second] = n_unique_ghosts;
+            }
+          ++n_unique_ghosts;
+          ghost_indices.add_range(last_contiguous_start,
+                                  ghost_origin.back().first + 1);
+          ghost_indices.compress();
+
+          // make sure that we got the correct local numbering of the ghost
+          // dofs: the ghost index set should yield the same positions
+          {
+            AssertDimension(n_unique_ghosts, ghost_indices.n_elements());
+            for (std::size_t i = 0; i < n_ghosts; ++i)
+              Assert(ghost_numbering[i] ==
+                       ghost_indices.index_within_set(ghost_dofs[i]),
+                     ExcInternalError());
+          }
+
+          // apply the correct numbering for ghost indices: we previously
+          // just enumerated them according to their appearance in the
+          // local_to_global structure. Above, we derived a relation between
+          // this enumeration and the actual position in the ghost index set
+          const unsigned int n_boundary_cells = boundary_cells.size();
+          for (unsigned int i = 0; i < n_boundary_cells; ++i)
+            {
+              unsigned int *data_ptr =
+                dof_indices.data() +
+                row_starts[boundary_cells[i] * n_components].first;
+              const unsigned int *row_end =
+                dof_indices.data() +
+                row_starts[(boundary_cells[i] + 1) * n_components].first;
+              for (; data_ptr != row_end; ++data_ptr)
+                *data_ptr = ((*data_ptr < n_owned) ?
+                               *data_ptr :
+                               n_owned + ghost_numbering[*data_ptr - n_owned]);
+
+              // now the same procedure for plain indices
+              if (store_plain_indices == true)
+                {
+                  if (row_starts[boundary_cells[i] * n_components].second !=
+                      row_starts[(boundary_cells[i] + 1) * n_components].second)
+                    {
+                      unsigned int *data_ptr =
+                        plain_dof_indices.data() +
+                        row_starts_plain_indices[boundary_cells[i]];
+                      const unsigned int fe_index =
+                        (cell_active_fe_index.size() == 0 ||
+                         dofs_per_cell.size() == 1) ?
+                          0 :
+                          cell_active_fe_index[boundary_cells[i]];
+                      AssertIndexRange(fe_index, dofs_per_cell.size());
+                      const unsigned int *row_end =
+                        data_ptr + dofs_per_cell[fe_index];
+                      for (; data_ptr != row_end; ++data_ptr)
+                        *data_ptr =
+                          ((*data_ptr < n_owned) ?
+                             *data_ptr :
+                             n_owned + ghost_numbering[*data_ptr - n_owned]);
+                    }
+                }
+            }
+        }
+
+      std::vector<types::global_dof_index> empty;
+      ghost_dofs.swap(empty);
+
+      // set the ghost indices now. We need to cast away constness here, but
+      // that is harmless since we reset the Partitioner in the same
+      // initialize call as this call here.
+      Utilities::MPI::Partitioner *vec_part =
+        const_cast<Utilities::MPI::Partitioner *>(vector_partitioner.get());
+      vec_part->set_ghost_indices(ghost_indices);
+
+      if (use_vector_data_exchanger_full == false)
+        vector_exchanger = std::make_shared<
+          MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+          vector_partitioner);
+      else
+        vector_exchanger =
+          std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+            vector_partitioner, communicator_sm);
+    }
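
A worked example (hypothetical values) of the temporary-to-final ghost renumbering established above:

    // ghost_dofs, in order of first appearance:      {42, 17, 99, 42}
    // sorted and deduplicated ghost index set:       {17, 42, 99}
    // resulting ghost_numbering (per original slot): { 1,  0,  2,  1}
    //
    // A cell-local index n_owned + k stored in dof_indices is thus rewritten
    // to n_owned + ghost_numbering[k], which coincides with
    // ghost_indices.index_within_set(ghost_dofs[k]).
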
+
+
+
+    void
+    DoFInfo::reorder_cells(
+      const TaskInfo &                  task_info,
+      const std::vector<unsigned int> & renumbering,
+      const std::vector<unsigned int> & constraint_pool_row_index,
+      const std::vector<unsigned char> &irregular_cells)
+    {
+      (void)constraint_pool_row_index;
+
+      // first reorder the active FE index.
+      const bool have_hp = dofs_per_cell.size() > 1;
+      if (cell_active_fe_index.size() > 0)
+        {
+          std::vector<unsigned int> new_active_fe_index;
+          new_active_fe_index.reserve(task_info.cell_partition_data.back());
+          unsigned int position_cell = 0;
+          for (unsigned int cell = 0;
+               cell < task_info.cell_partition_data.back();
+               ++cell)
+            {
+              const unsigned int n_comp =
+                (irregular_cells[cell] > 0 ? irregular_cells[cell] :
+                                             vectorization_length);
+
+              // take maximum FE index among the ones present (we might have
+              // lumped some lower indices into higher ones)
+              unsigned int fe_index =
+                cell_active_fe_index[renumbering[position_cell]];
+              for (unsigned int j = 1; j < n_comp; ++j)
+                fe_index = std::max(
+                  fe_index,
+                  cell_active_fe_index[renumbering[position_cell + j]]);
+
+              new_active_fe_index.push_back(fe_index);
+              position_cell += n_comp;
+            }
+          std::swap(new_active_fe_index, cell_active_fe_index);
+        }
+      if (have_hp)
+        AssertDimension(cell_active_fe_index.size(),
+                        task_info.cell_partition_data.back());
+
+      const unsigned int n_components = start_components.back();
+
+      std::vector<std::pair<unsigned int, unsigned int>> new_row_starts(
+        vectorization_length * n_components *
+          task_info.cell_partition_data.back() +
+        1);
+      std::vector<unsigned int> new_dof_indices;
+      std::vector<std::pair<unsigned short, unsigned short>>
+                                new_constraint_indicator;
+      std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
+      unsigned int              position_cell = 0;
+      new_dof_indices.reserve(dof_indices.size());
+      new_constraint_indicator.reserve(constraint_indicator.size());
+      if (store_plain_indices == true)
+        {
+          new_rowstart_plain.resize(vectorization_length *
+                                        task_info.cell_partition_data.back() +
+                                      1,
+                                    numbers::invalid_unsigned_int);
+          new_plain_indices.reserve(plain_dof_indices.size());
+        }
+
+      // Copy the indices and the constraint indicators to the new data
+      // fields, going through the cells in the renumbered order. In case the
+      // vectorization length does not exactly match up, we fill invalid
+      // numbers into the rowstart data. For contiguous cell indices, we skip
+      // the rowstarts field completely and directly go into the
+      // new_dof_indices field (this layout is used in FEEvaluation).
+      for (unsigned int i = 0; i < task_info.cell_partition_data.back(); ++i)
+        {
+          const unsigned int n_vect =
+            (irregular_cells[i] > 0 ? irregular_cells[i] :
+                                      vectorization_length);
+          const unsigned int dofs_per_cell =
+            have_hp ? this->dofs_per_cell[cell_active_fe_index[i]] :
+                      this->dofs_per_cell[0];
+
+          for (unsigned int j = 0; j < n_vect; ++j)
+            {
+              const unsigned int cell_no =
+                renumbering[position_cell + j] * n_components;
+              for (unsigned int comp = 0; comp < n_components; ++comp)
+                {
+                  new_row_starts[(i * vectorization_length + j) * n_components +
+                                 comp]
+                    .first = new_dof_indices.size();
+                  new_row_starts[(i * vectorization_length + j) * n_components +
+                                 comp]
+                    .second = new_constraint_indicator.size();
+
+                  new_dof_indices.insert(
+                    new_dof_indices.end(),
+                    dof_indices.data() + row_starts[cell_no + comp].first,
+                    dof_indices.data() + row_starts[cell_no + comp + 1].first);
+                  for (unsigned int index = row_starts[cell_no + comp].second;
+                       index != row_starts[cell_no + comp + 1].second;
+                       ++index)
+                    new_constraint_indicator.push_back(
+                      constraint_indicator[index]);
+                }
+              if (store_plain_indices &&
+                  row_starts[cell_no].second !=
+                    row_starts[cell_no + n_components].second)
+                {
+                  new_rowstart_plain[i * vectorization_length + j] =
+                    new_plain_indices.size();
+                  new_plain_indices.insert(
+                    new_plain_indices.end(),
+                    plain_dof_indices.data() +
+                      row_starts_plain_indices[cell_no / n_components],
+                    plain_dof_indices.data() +
+                      row_starts_plain_indices[cell_no / n_components] +
+                      dofs_per_cell);
+                }
+            }
+          for (unsigned int j = n_vect; j < vectorization_length; ++j)
+            for (unsigned int comp = 0; comp < n_components; ++comp)
+              {
+                new_row_starts[(i * vectorization_length + j) * n_components +
+                               comp]
+                  .first = new_dof_indices.size();
+                new_row_starts[(i * vectorization_length + j) * n_components +
+                               comp]
+                  .second = new_constraint_indicator.size();
+              }
+          position_cell += n_vect;
+        }
+      AssertDimension(position_cell * n_components + 1, row_starts.size());
+
+      AssertDimension(dof_indices.size(), new_dof_indices.size());
+      new_row_starts[task_info.cell_partition_data.back() *
+                     vectorization_length * n_components]
+        .first = new_dof_indices.size();
+      new_row_starts[task_info.cell_partition_data.back() *
+                     vectorization_length * n_components]
+        .second = new_constraint_indicator.size();
+
+      AssertDimension(constraint_indicator.size(),
+                      new_constraint_indicator.size());
+
+      new_row_starts.swap(row_starts);
+      new_dof_indices.swap(dof_indices);
+      new_constraint_indicator.swap(constraint_indicator);
+      new_plain_indices.swap(plain_dof_indices);
+      new_rowstart_plain.swap(row_starts_plain_indices);
+
+#ifdef DEBUG
+      // sanity check 1: all indices should be smaller than the number of dofs
+      // locally owned plus the number of ghosts
+      const unsigned int index_range =
+        (vector_partitioner->local_range().second -
+         vector_partitioner->local_range().first) +
+        vector_partitioner->ghost_indices().n_elements();
+      for (const auto dof_index : dof_indices)
+        AssertIndexRange(dof_index, index_range);
+
+      // sanity check 2: for the constraint indicators, the first index should
+      // be smaller than the number of indices in the row, and the second
+      // index should be smaller than the number of constraints in the
+      // constraint pool.
+      for (unsigned int row = 0; row < task_info.cell_partition_data.back();
+           ++row)
+        {
+          const unsigned int row_length_ind =
+            row_starts[(row * vectorization_length + 1) * n_components].first -
+            row_starts[row * vectorization_length * n_components].first;
+          AssertIndexRange(
+            row_starts[(row * vectorization_length + 1) * n_components].second,
+            constraint_indicator.size() + 1);
+          const std::pair<unsigned short, unsigned short> *
+            con_it =
+             constraint_indicator.data() +
+             row_starts[row * vectorization_length * n_components].second,
+           *end_con =
+             constraint_indicator.data() +
+             row_starts[(row * vectorization_length + 1) * n_components].second;
+          for (; con_it != end_con; ++con_it)
+            {
+              AssertIndexRange(con_it->first, row_length_ind + 1);
+              AssertIndexRange(con_it->second,
+                               constraint_pool_row_index.size() - 1);
+            }
+        }
+
+      // sanity check 3: check the number of cells once again
+      unsigned int n_active_cells = 0;
+      for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
+           ++c)
+        if (irregular_cells[c] > 0)
+          n_active_cells += irregular_cells[c];
+        else
+          n_active_cells += vectorization_length;
+      AssertDimension(n_active_cells, task_info.n_active_cells);
+#endif
+
+      compute_cell_index_compression(irregular_cells);
+    }
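
The layout produced by reorder_cells can be summarized by how a single entry is addressed; a sketch with hypothetical loop variables batch, lane, and comp:

    // row_starts has one entry per (batch, lane, component) plus a final
    // sentinel; .first indexes into dof_indices, .second into
    // constraint_indicator. Unfilled lanes yield empty ranges.
    const unsigned int entry =
      (batch * vectorization_length + lane) * n_components + comp;
    const unsigned int dof_begin = row_starts[entry].first;
    const unsigned int dof_end   = row_starts[entry + 1].first;
    const bool has_constraints =
      row_starts[entry].second != row_starts[entry + 1].second;
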
+
+
+
+    void
+    DoFInfo::compute_cell_index_compression(
+      const std::vector<unsigned char> &irregular_cells)
+    {
+      const bool         have_hp      = dofs_per_cell.size() > 1;
+      const unsigned int n_components = start_components.back();
+
+      Assert(vectorization_length == 1 ||
+               row_starts.size() % vectorization_length == 1,
+             ExcInternalError());
+      if (vectorization_length > 1)
+        AssertDimension(row_starts.size() / vectorization_length / n_components,
+                        irregular_cells.size());
+      index_storage_variants[dof_access_cell].resize(
+        irregular_cells.size(), IndexStorageVariants::full);
+      n_vectorization_lanes_filled[dof_access_cell].resize(
+        irregular_cells.size());
+      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+        if (irregular_cells[i] > 0)
+          n_vectorization_lanes_filled[dof_access_cell][i] = irregular_cells[i];
+        else
+          n_vectorization_lanes_filled[dof_access_cell][i] =
+            vectorization_length;
+
+      dof_indices_contiguous[dof_access_cell].resize(
+        irregular_cells.size() * vectorization_length,
+        numbers::invalid_unsigned_int);
+      dof_indices_interleaved.resize(dof_indices.size(),
+                                     numbers::invalid_unsigned_int);
+      dof_indices_interleave_strides[dof_access_cell].resize(
+        irregular_cells.size() * vectorization_length,
+        numbers::invalid_unsigned_int);
+
+      std::vector<unsigned int> index_kinds(
+        static_cast<unsigned int>(
+          IndexStorageVariants::interleaved_contiguous_mixed_strides) +
+        1);
+      std::vector<unsigned int> offsets(vectorization_length);
+      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+        {
+          const unsigned int ndofs =
+            dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
+          const unsigned int n_comp =
+            n_vectorization_lanes_filled[dof_access_cell][i];
+
+          // check 1: Check if there are constraints -> no compression possible
+          bool has_constraints = false;
+          for (unsigned int j = 0; j < n_comp; ++j)
+            {
+              const unsigned int cell_no = i * vectorization_length + j;
+              if (row_starts[cell_no * n_components].second !=
+                  row_starts[(cell_no + 1) * n_components].second)
+                {
+                  has_constraints = true;
+                  break;
+                }
+            }
+          if (has_constraints)
+            index_storage_variants[dof_access_cell][i] =
+              IndexStorageVariants::full;
+          else
+            {
+              bool indices_are_contiguous = true;
+              for (unsigned int j = 0; j < n_comp; ++j)
+                {
+                  const unsigned int  cell_no = i * vectorization_length + j;
+                  const unsigned int *dof_indices =
+                    this->dof_indices.data() +
+                    row_starts[cell_no * n_components].first;
+                  AssertDimension(
+                    ndofs,
+                    row_starts[(cell_no + 1) * n_components].first -
+                      row_starts[cell_no * n_components].first);
+                  for (unsigned int i = 1; i < ndofs; ++i)
+                    if (dof_indices[i] != dof_indices[0] + i)
+                      {
+                        indices_are_contiguous = false;
+                        break;
+                      }
+                }
+
+              bool indices_are_interleaved_and_contiguous =
+                (ndofs > 1 && n_comp == vectorization_length);
+
+              {
+                const unsigned int *dof_indices =
+                  this->dof_indices.data() +
+                  row_starts[i * vectorization_length * n_components].first;
+                for (unsigned int k = 0; k < ndofs; ++k)
+                  for (unsigned int j = 0; j < n_comp; ++j)
+                    if (dof_indices[j * ndofs + k] !=
+                        dof_indices[0] + k * n_comp + j)
+                      {
+                        indices_are_interleaved_and_contiguous = false;
+                        break;
+                      }
+              }
+
+              if (indices_are_contiguous ||
+                  indices_are_interleaved_and_contiguous)
+                {
+                  for (unsigned int j = 0; j < n_comp; ++j)
+                    dof_indices_contiguous
+                      [dof_access_cell][i * vectorization_length + j] =
+                        this->dof_indices[row_starts[(i * vectorization_length +
+                                                      j) *
+                                                     n_components]
+                                            .first];
+                }
+
+              if (indices_are_interleaved_and_contiguous)
+                {
+                  Assert(n_comp == vectorization_length, ExcInternalError());
+                  index_storage_variants[dof_access_cell][i] =
+                    IndexStorageVariants::interleaved_contiguous;
+                  for (unsigned int j = 0; j < n_comp; ++j)
+                    dof_indices_interleave_strides
+                      [dof_access_cell][i * vectorization_length + j] = n_comp;
+                }
+              else if (indices_are_contiguous)
+                {
+                  index_storage_variants[dof_access_cell][i] =
+                    IndexStorageVariants::contiguous;
+                  for (unsigned int j = 0; j < n_comp; ++j)
+                    dof_indices_interleave_strides
+                      [dof_access_cell][i * vectorization_length + j] = 1;
+                }
+              else
+                {
+                  int                 indices_are_interleaved_and_mixed = 2;
+                  const unsigned int *dof_indices =
+                    &this->dof_indices[row_starts[i * vectorization_length *
+                                                  n_components]
+                                         .first];
+                  for (unsigned int j = 0; j < n_comp; ++j)
+                    offsets[j] =
+                      dof_indices[j * ndofs + 1] - dof_indices[j * ndofs];
+                  for (unsigned int k = 0; k < ndofs; ++k)
+                    for (unsigned int j = 0; j < n_comp; ++j)
+                      // the first check rejects decreasing indices, whose
+                      // unsigned offset would have wrapped around (invalid)
+                      if (dof_indices[j * ndofs + 1] < dof_indices[j * ndofs] ||
+                          dof_indices[j * ndofs + k] !=
+                            dof_indices[j * ndofs] + k * offsets[j])
+                        {
+                          indices_are_interleaved_and_mixed = 0;
+                          break;
+                        }
+                  if (indices_are_interleaved_and_mixed == 2)
+                    {
+                      for (unsigned int j = 0; j < n_comp; ++j)
+                        dof_indices_interleave_strides
+                          [dof_access_cell][i * vectorization_length + j] =
+                            offsets[j];
+                      for (unsigned int j = 0; j < n_comp; ++j)
+                        dof_indices_contiguous[dof_access_cell]
+                                              [i * vectorization_length + j] =
+                                                dof_indices[j * ndofs];
+                      for (unsigned int j = 0; j < n_comp; ++j)
+                        if (offsets[j] != vectorization_length)
+                          {
+                            indices_are_interleaved_and_mixed = 1;
+                            break;
+                          }
+                      if (indices_are_interleaved_and_mixed == 1 ||
+                          n_comp != vectorization_length)
+                        index_storage_variants[dof_access_cell][i] =
+                          IndexStorageVariants::
+                            interleaved_contiguous_mixed_strides;
+                      else
+                        index_storage_variants[dof_access_cell][i] =
+                          IndexStorageVariants::interleaved_contiguous_strided;
+                    }
+                  else
+                    {
+                      const unsigned int *dof_indices =
+                        this->dof_indices.data() +
+                        row_starts[i * vectorization_length * n_components]
+                          .first;
+                      if (n_comp == vectorization_length)
+                        index_storage_variants[dof_access_cell][i] =
+                          IndexStorageVariants::interleaved;
+                      else
+                        index_storage_variants[dof_access_cell][i] =
+                          IndexStorageVariants::full;
+
+                      // do not use interleaved storage if two vectorized
+                      // lanes point to the same vector entry (a scatter
+                      // would not be possible)
+                      for (unsigned int k = 0; k < ndofs; ++k)
+                        for (unsigned int l = 0; l < n_comp; ++l)
+                          for (unsigned int j = l + 1; j < n_comp; ++j)
+                            if (dof_indices[j * ndofs + k] ==
+                                dof_indices[l * ndofs + k])
+                              {
+                                index_storage_variants[dof_access_cell][i] =
+                                  IndexStorageVariants::full;
+                                break;
+                              }
+                    }
+                }
+            }
+          index_kinds[static_cast<unsigned int>(
+            index_storage_variants[dof_access_cell][i])]++;
+        }
+
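+      // At this point, index_kinds[v] holds the number of cell batches that
+      // were classified with storage variant v.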
+      // Cleanup phase: avoid single cells with properties that differ from
+      // the bulk of the domain, since those would require extra checks in
+      // the face identification.
+
+      // Step 1: check whether the mixed-stride interleaved storage was only
+      // assigned to batches with a single cell in the vectorized array and,
+      // if so, demote them to the given simpler variant.
+      auto fix_single_interleaved_indices =
+        [&](const IndexStorageVariants variant) {
+          if (index_kinds[static_cast<unsigned int>(
+                IndexStorageVariants::interleaved_contiguous_mixed_strides)] >
+                0 &&
+              index_kinds[static_cast<unsigned int>(variant)] > 0)
+            for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+              {
+                if (index_storage_variants[dof_access_cell][i] ==
+                      IndexStorageVariants::
+                        interleaved_contiguous_mixed_strides &&
+                    n_vectorization_lanes_filled[dof_access_cell][i] == 1 &&
+                    (variant != IndexStorageVariants::contiguous ||
+                     dof_indices_interleave_strides[dof_access_cell]
+                                                   [i * vectorization_length] ==
+                       1))
+                  {
+                    index_storage_variants[dof_access_cell][i] = variant;
+                    index_kinds[static_cast<unsigned int>(
+                      IndexStorageVariants::
+                        interleaved_contiguous_mixed_strides)]--;
+                    index_kinds[static_cast<unsigned int>(variant)]++;
+                  }
+              }
+        };
+
+      fix_single_interleaved_indices(IndexStorageVariants::full);
+      fix_single_interleaved_indices(IndexStorageVariants::contiguous);
+      fix_single_interleaved_indices(IndexStorageVariants::interleaved);
+
+      unsigned int n_interleaved =
+        index_kinds[static_cast<unsigned int>(
+          IndexStorageVariants::interleaved_contiguous)] +
+        index_kinds[static_cast<unsigned int>(
+          IndexStorageVariants::interleaved_contiguous_strided)] +
+        index_kinds[static_cast<unsigned int>(
+          IndexStorageVariants::interleaved_contiguous_mixed_strides)];
+
+      // Step 2: promote cells with contiguous storage that sit among cells
+      // with interleaved storage to the mixed-stride interleaved variant
+      if (n_interleaved > 0 && index_kinds[static_cast<unsigned int>(
+                                 IndexStorageVariants::contiguous)] > 0)
+        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+          if (index_storage_variants[dof_access_cell][i] ==
+              IndexStorageVariants::contiguous)
+            {
+              index_storage_variants[dof_access_cell][i] =
+                IndexStorageVariants::interleaved_contiguous_mixed_strides;
+              index_kinds[static_cast<unsigned int>(
+                IndexStorageVariants::contiguous)]--;
+              index_kinds[static_cast<unsigned int>(
+                IndexStorageVariants::interleaved_contiguous_mixed_strides)]++;
+            }
+
+      // Step 3: interleaved-contiguous cells remain next to cells with full
+      // or plain interleaved storage -> demote them to interleaved or full
+      // storage
+      if (n_interleaved > 0 &&
+          index_kinds[static_cast<unsigned int>(IndexStorageVariants::full)] +
+              index_kinds[static_cast<unsigned int>(
+                IndexStorageVariants::interleaved)] >
+            0)
+        for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+          if (index_storage_variants[dof_access_cell][i] >
+              IndexStorageVariants::contiguous)
+            {
+              index_kinds[static_cast<unsigned int>(
+                index_storage_variants[dof_access_cell][i])]--;
+              if (n_vectorization_lanes_filled[dof_access_cell][i] ==
+                  vectorization_length)
+                index_storage_variants[dof_access_cell][i] =
+                  IndexStorageVariants::interleaved;
+              else
+                index_storage_variants[dof_access_cell][i] =
+                  IndexStorageVariants::full;
+              index_kinds[static_cast<unsigned int>(
+                index_storage_variants[dof_access_cell][i])]++;
+            }
+
+      // Step 4: Copy the interleaved indices into their own data structure
+      for (unsigned int i = 0; i < irregular_cells.size(); ++i)
+        if (index_storage_variants[dof_access_cell][i] ==
+            IndexStorageVariants::interleaved)
+          {
+            if (n_vectorization_lanes_filled[dof_access_cell][i] <
+                vectorization_length)
+              {
+                index_storage_variants[dof_access_cell][i] =
+                  IndexStorageVariants::full;
+                continue;
+              }
+            const unsigned int ndofs =
+              dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
+            const unsigned int *dof_indices =
+              &this->dof_indices
+                 [row_starts[i * vectorization_length * n_components].first];
+            unsigned int *interleaved_dof_indices =
+              &this->dof_indices_interleaved
+                 [row_starts[i * vectorization_length * n_components].first];
+            AssertDimension(this->dof_indices.size(),
+                            this->dof_indices_interleaved.size());
+            AssertDimension(n_vectorization_lanes_filled[dof_access_cell][i],
+                            vectorization_length);
+            AssertIndexRange(
+              row_starts[i * vectorization_length * n_components].first,
+              this->dof_indices_interleaved.size());
+            AssertIndexRange(
+              row_starts[i * vectorization_length * n_components].first +
+                ndofs * vectorization_length,
+              this->dof_indices_interleaved.size() + 1);
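+            // transpose the per-lane storage: with 2 lanes and 3 dofs per
+            // cell, {a0, a1, a2, b0, b1, b2} becomes {a0, b0, a1, b1, a2, b2}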
+            for (unsigned int k = 0; k < ndofs; ++k)
+              for (unsigned int j = 0; j < vectorization_length; ++j)
+                interleaved_dof_indices[k * vectorization_length + j] =
+                  dof_indices[j * ndofs + k];
+          }
+    }
+
+
+
+    void
+    DoFInfo::compute_tight_partitioners(
+      const Table<2, ShapeInfo<double>> &       shape_info,
+      const unsigned int                        n_owned_cells,
+      const unsigned int                        n_lanes,
+      const std::vector<FaceToCellTopology<1>> &inner_faces,
+      const std::vector<FaceToCellTopology<1>> &ghosted_faces,
+      const bool                                fill_cell_centric,
+      const MPI_Comm &                          communicator_sm,
+      const bool                                use_vector_data_exchanger_full)
+    {
+      const Utilities::MPI::Partitioner &part = *vector_partitioner;
+
+      // partitioner 0: no face integrals, simply use the indices present
+      // on the cells
+      std::vector<types::global_dof_index> ghost_indices;
+      {
+        const unsigned int n_components = start_components.back();
+        for (unsigned int cell = 0; cell < n_owned_cells; ++cell)
+          {
+            for (unsigned int i = row_starts[cell * n_components].first;
+                 i < row_starts[(cell + 1) * n_components].first;
+                 ++i)
+              if (dof_indices[i] >= part.local_size())
+                ghost_indices.push_back(part.local_to_global(dof_indices[i]));
+
+            const unsigned int fe_index =
+              dofs_per_cell.size() == 1 ? 0 :
+                                          cell_active_fe_index[cell / n_lanes];
+            const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
+
+            for (unsigned int i = row_starts_plain_indices[cell];
+                 i < row_starts_plain_indices[cell] + dofs_this_cell;
+                 ++i)
+              if (plain_dof_indices[i] >= part.local_size())
+                ghost_indices.push_back(
+                  part.local_to_global(plain_dof_indices[i]));
+          }
+        std::sort(ghost_indices.begin(), ghost_indices.end());
+        ghost_indices.erase(std::unique(ghost_indices.begin(),
+                                        ghost_indices.end()),
+                            ghost_indices.end());
+        IndexSet compressed_set(part.size());
+        compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
+        compressed_set.subtract_set(part.locally_owned_range());
+        const bool all_ghosts_equal =
+          Utilities::MPI::min<int>(static_cast<int>(
+                                     compressed_set.n_elements() ==
+                                     part.ghost_indices().n_elements()),
+                                   part.get_mpi_communicator()) != 0;
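+        // If the ghosts appearing on the cells already make up the full
+        // ghost set on every rank, reuse the given partitioner; otherwise
+        // build a tighter one restricted to the indices actually accessed.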
+
+        std::shared_ptr<const Utilities::MPI::Partitioner> temp_0;
+
+        if (all_ghosts_equal)
+          temp_0 = vector_partitioner;
+        else
+          {
+            temp_0 = std::make_shared<Utilities::MPI::Partitioner>(
+              part.locally_owned_range(), part.get_mpi_communicator());
+            const_cast<Utilities::MPI::Partitioner *>(temp_0.get())
+              ->set_ghost_indices(compressed_set, part.ghost_indices());
+          }
+
+        if (use_vector_data_exchanger_full == false)
+          vector_exchanger_face_variants[0] = std::make_shared<
+            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+            temp_0);
+        else
+          vector_exchanger_face_variants[0] =
+            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+              temp_0, communicator_sm);
+      }
+
+      // construct a numbering of faces
+      std::vector<FaceToCellTopology<1>> all_faces(inner_faces);
+      all_faces.insert(all_faces.end(),
+                       ghosted_faces.begin(),
+                       ghosted_faces.end());
+      Table<2, unsigned int> cell_and_face_to_faces(
+        (row_starts.size() - 1) / start_components.back(),
+        2 * shape_info(0, 0).n_dimensions);
+      cell_and_face_to_faces.fill(numbers::invalid_unsigned_int);
+      for (unsigned int f = 0; f < all_faces.size(); ++f)
+        {
+          cell_and_face_to_faces(all_faces[f].cells_interior[0],
+                                 all_faces[f].interior_face_no) = f;
+          Assert(all_faces[f].cells_exterior[0] !=
+                   numbers::invalid_unsigned_int,
+                 ExcInternalError());
+          cell_and_face_to_faces(all_faces[f].cells_exterior[0],
+                                 all_faces[f].exterior_face_no) = f;
+        }
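+      // Example: in 3d, each cell row of this table has 2 * 3 = 6 slots, one
+      // per local face number; slot (c, d) holds the index of the face seen
+      // from cell c through its local face d, or invalid_unsigned_int.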
+
+      // lambda to visit the exterior cell of each locally stored inner
+      // face, i.e., the neighbors whose data is needed for face integrals
+      const auto loop_over_faces =
+        [&](const std::function<
+            void(const unsigned int, const unsigned int, const bool)> &fu) {
+          for (const auto &face : inner_faces)
+            {
+              AssertIndexRange(face.cells_interior[0], n_owned_cells);
+              fu(face.cells_exterior[0], face.exterior_face_no, false /*flag*/);
+            }
+        };
+
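+      // lambda to visit, for each cell and each of its faces, the
+      // neighboring cell and the neighbor's local face number; this covers
+      // all neighbors whose data is accessed in cell-centric loops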
+      const auto loop_over_all_faces =
+        [&](const std::function<
+            void(const unsigned int, const unsigned int, const bool)> &fu) {
+          for (unsigned int c = 0; c < cell_and_face_to_faces.size(0); ++c)
+            for (unsigned int d = 0; d < cell_and_face_to_faces.size(1); ++d)
+              {
+                const unsigned int f = cell_and_face_to_faces(c, d);
+                if (f == numbers::invalid_unsigned_int)
+                  continue;
+
+                const unsigned int cell_m = all_faces[f].cells_interior[0];
+                const unsigned int cell_p = all_faces[f].cells_exterior[0];
+
+                const bool ext = c == cell_m;
+
+                if (ext && cell_p == numbers::invalid_unsigned_int)
+                  continue;
+
+                const unsigned int p       = ext ? cell_p : cell_m;
+                const unsigned int face_no = ext ?
+                                               all_faces[f].exterior_face_no :
+                                               all_faces[f].interior_face_no;
+
+                fu(p, face_no, true);
+              }
+        };
+
+      const auto process_values =
+        [&](
+          std::shared_ptr<const Utilities::MPI::Partitioner>
+            &vector_partitioner_values,
+          const std::function<void(
+            const std::function<void(
+              const unsigned int, const unsigned int, const bool)> &)> &loop) {
+          bool all_nodal_and_tensorial = true;
+          for (unsigned int c = 0; c < n_base_elements; ++c)
+            {
+              const auto &si =
+                shape_info(global_base_element_offset + c, 0).data.front();
+              if (!si.nodal_at_cell_boundaries ||
+                  (si.element_type ==
+                   MatrixFreeFunctions::ElementType::tensor_none))
+                all_nodal_and_tensorial = false;
+            }
+          if (all_nodal_and_tensorial == false)
+            vector_partitioner_values = vector_partitioner;
+          else
+            {
+              bool has_noncontiguous_cell = false;
+
+              loop([&](const unsigned int cell_no,
+                       const unsigned int face_no,
+                       const bool         flag) {
+                const unsigned int index =
+                  dof_indices_contiguous[dof_access_cell][cell_no];
+                if (flag || (index != numbers::invalid_unsigned_int &&
+                             index >= part.local_size()))
+                  {
+                    const unsigned int stride =
+                      dof_indices_interleave_strides[dof_access_cell][cell_no];
+                    unsigned int i = 0;
+                    for (unsigned int e = 0; e < n_base_elements; ++e)
+                      for (unsigned int c = 0; c < n_components[e]; ++c)
+                        {
+                          const ShapeInfo<double> &shape =
+                            shape_info(global_base_element_offset + e, 0);
+                          for (unsigned int j = 0;
+                               j < shape.dofs_per_component_on_face;
+                               ++j)
+                            ghost_indices.push_back(part.local_to_global(
+                              index + i +
+                              shape.face_to_cell_index_nodal(face_no, j) *
+                                stride));
+                          i += shape.dofs_per_component_on_cell * stride;
+                        }
+                    AssertDimension(i, dofs_per_cell[0] * stride);
+                  }
+                else if (index == numbers::invalid_unsigned_int)
+                  has_noncontiguous_cell = true;
+              });
+              has_noncontiguous_cell =
+                Utilities::MPI::min<int>(static_cast<int>(
+                                           has_noncontiguous_cell),
+                                         part.get_mpi_communicator()) != 0;
+
+              std::sort(ghost_indices.begin(), ghost_indices.end());
+              ghost_indices.erase(std::unique(ghost_indices.begin(),
+                                              ghost_indices.end()),
+                                  ghost_indices.end());
+              IndexSet compressed_set(part.size());
+              compressed_set.add_indices(ghost_indices.begin(),
+                                         ghost_indices.end());
+              compressed_set.subtract_set(part.locally_owned_range());
+              const bool all_ghosts_equal =
+                Utilities::MPI::min<int>(static_cast<int>(
+                                           compressed_set.n_elements() ==
+                                           part.ghost_indices().n_elements()),
+                                         part.get_mpi_communicator()) != 0;
+              if (all_ghosts_equal || has_noncontiguous_cell)
+                vector_partitioner_values = vector_partitioner;
+              else
+                {
+                  vector_partitioner_values =
+                    std::make_shared<Utilities::MPI::Partitioner>(
+                      part.locally_owned_range(), part.get_mpi_communicator());
+                  const_cast<Utilities::MPI::Partitioner *>(
+                    vector_partitioner_values.get())
+                    ->set_ghost_indices(compressed_set, part.ghost_indices());
+                }
+            }
+        };
+
+
+      const auto process_gradients =
+        [&](
+          const std::shared_ptr<const Utilities::MPI::Partitioner>
+            &vector_partitioner_values,
+          std::shared_ptr<const Utilities::MPI::Partitioner>
+            &vector_partitioner_gradients,
+          const std::function<void(
+            const std::function<void(
+              const unsigned int, const unsigned int, const bool)> &)> &loop) {
+          bool all_hermite = true;
+          for (unsigned int c = 0; c < n_base_elements; ++c)
+            if (shape_info(global_base_element_offset + c, 0).element_type !=
+                MatrixFreeFunctions::tensor_symmetric_hermite)
+              all_hermite = false;
+          if (all_hermite == false ||
+              vector_partitioner_values.get() == vector_partitioner.get())
+            vector_partitioner_gradients = vector_partitioner;
+          else
+            {
+              loop([&](const unsigned int cell_no,
+                       const unsigned int face_no,
+                       const bool         flag) {
+                const unsigned int index =
+                  dof_indices_contiguous[dof_access_cell][cell_no];
+                if (flag || (index != numbers::invalid_unsigned_int &&
+                             index >= part.local_size()))
+                  {
+                    const unsigned int stride =
+                      dof_indices_interleave_strides[dof_access_cell][cell_no];
+                    unsigned int i = 0;
+                    for (unsigned int e = 0; e < n_base_elements; ++e)
+                      for (unsigned int c = 0; c < n_components[e]; ++c)
+                        {
+                          const ShapeInfo<double> &shape =
+                            shape_info(global_base_element_offset + e, 0);
+                          for (unsigned int j = 0;
+                               j < 2 * shape.dofs_per_component_on_face;
+                               ++j)
+                            ghost_indices.push_back(part.local_to_global(
+                              index + i +
+                              shape.face_to_cell_index_hermite(face_no, j) *
+                                stride));
+                          i += shape.dofs_per_component_on_cell * stride;
+                        }
+                    AssertDimension(i, dofs_per_cell[0] * stride);
+                  }
+              });
+              std::sort(ghost_indices.begin(), ghost_indices.end());
+              ghost_indices.erase(std::unique(ghost_indices.begin(),
+                                              ghost_indices.end()),
+                                  ghost_indices.end());
+              IndexSet compressed_set(part.size());
+              compressed_set.add_indices(ghost_indices.begin(),
+                                         ghost_indices.end());
+              compressed_set.subtract_set(part.locally_owned_range());
+              const bool all_ghosts_equal =
+                Utilities::MPI::min<int>(static_cast<int>(
+                                           compressed_set.n_elements() ==
+                                           part.ghost_indices().n_elements()),
+                                         part.get_mpi_communicator()) != 0;
+              if (all_ghosts_equal)
+                vector_partitioner_gradients = vector_partitioner;
+              else
+                {
+                  vector_partitioner_gradients =
+                    std::make_shared<Utilities::MPI::Partitioner>(
+                      part.locally_owned_range(), part.get_mpi_communicator());
+                  const_cast<Utilities::MPI::Partitioner *>(
+                    vector_partitioner_gradients.get())
+                    ->set_ghost_indices(compressed_set, part.ghost_indices());
+                }
+            }
+        };
+
+      std::shared_ptr<const Utilities::MPI::Partitioner> temp_1, temp_2, temp_3,
+        temp_4;
+
+      // partitioner 1: values on faces
+      process_values(temp_1, loop_over_faces);
+
+      // partitioner 2: values and gradients on faces
+      process_gradients(temp_1, temp_2, loop_over_faces);
+
+      if (fill_cell_centric)
+        {
+          ghost_indices.clear();
+          // partitioner 3: values on all faces
+          process_values(temp_3, loop_over_all_faces);
+          // partitioner 4: values and gradients on all faces
+          process_gradients(temp_3, temp_4, loop_over_all_faces);
+        }
+      else
+        {
+          temp_3 = std::make_shared<Utilities::MPI::Partitioner>(
+            part.locally_owned_range(), part.get_mpi_communicator());
+          temp_4 = std::make_shared<Utilities::MPI::Partitioner>(
+            part.locally_owned_range(), part.get_mpi_communicator());
+        }
+
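+      // The five exchanger variants correspond to:
+      //   [0] cell integrals only (set up above),
+      //   [1] values on faces,
+      //   [2] values and gradients on faces,
+      //   [3] values on all faces (cell-centric loops),
+      //   [4] values and gradients on all faces (cell-centric loops).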
+      if (use_vector_data_exchanger_full == false)
+        {
+          vector_exchanger_face_variants[1] = std::make_shared<
+            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+            temp_1);
+          vector_exchanger_face_variants[2] = std::make_shared<
+            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+            temp_2);
+          vector_exchanger_face_variants[3] = std::make_shared<
+            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+            temp_3);
+          vector_exchanger_face_variants[4] = std::make_shared<
+            MatrixFreeFunctions::VectorDataExchange::PartitionerWrapper>(
+            temp_4);
+        }
+      else
+        {
+          vector_exchanger_face_variants[1] =
+            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+              temp_1, communicator_sm);
+          vector_exchanger_face_variants[2] =
+            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+              temp_2, communicator_sm);
+          vector_exchanger_face_variants[3] =
+            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+              temp_3, communicator_sm);
+          vector_exchanger_face_variants[4] =
+            std::make_shared<MatrixFreeFunctions::VectorDataExchange::Full>(
+              temp_4, communicator_sm);
+        }
+    }
+
+
+
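+    // Translate the (rank, cell index) pairs within the shared-memory
+    // domain into (rank, DoF offset) pairs; the offset is simply
+    // cell * dofs_per_cell[0], which assumes a single element type (see the
+    // assertion below).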
+    void
+    DoFInfo::compute_shared_memory_contiguous_indices(
+      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+        &cell_indices_contiguous_sm)
+    {
+      AssertDimension(dofs_per_cell.size(), 1);
+
+      for (unsigned int i = 0; i < 3; ++i)
+        {
+          dof_indices_contiguous_sm[i].resize(
+            cell_indices_contiguous_sm[i].size());
+
+          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
+               ++j)
+            if (cell_indices_contiguous_sm[i][j].first !=
+                numbers::invalid_unsigned_int)
+              dof_indices_contiguous_sm[i][j] = {
+                cell_indices_contiguous_sm[i][j].first,
+                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
+            else
+              dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
+                                                 numbers::invalid_unsigned_int};
+        }
+    }
+
+
+
+    namespace internal
+    {
+      // We construct the connectivity graph in parallel. We use one lock
+      // per 256 degrees of freedom to keep the number of locks down to a
+      // reasonable level and to reduce the cost of locking.
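+      // Example: DoF indices 0..255 map to mutexes[0], indices 256..511 to
+      // mutexes[1], and so on.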
+      static constexpr unsigned int bucket_size_threading = 256;
+
+
+
+      void
+      compute_row_lengths(const unsigned int         begin,
+                          const unsigned int         end,
+                          const DoFInfo &            dof_info,
+                          std::vector<std::mutex> &  mutexes,
+                          std::vector<unsigned int> &row_lengths)
+      {
+        std::vector<unsigned int> scratch;
+        const unsigned int n_components = dof_info.start_components.back();
+        for (unsigned int block = begin; block < end; ++block)
+          {
+            scratch.clear();
+            scratch.insert(
+              scratch.end(),
+              dof_info.dof_indices.data() +
+                dof_info.row_starts[block * n_components].first,
+              dof_info.dof_indices.data() +
+                dof_info.row_starts[(block + 1) * n_components].first);
+            std::sort(scratch.begin(), scratch.end());
+            std::vector<unsigned int>::const_iterator end_unique =
+              std::unique(scratch.begin(), scratch.end());
+            std::vector<unsigned int>::const_iterator it = scratch.begin();
+            while (it != end_unique)
+              {
+                // In this code, the procedure is that we insert all elements
+                // that are within the range of one lock at once
+                const unsigned int next_bucket =
+                  (*it / bucket_size_threading + 1) * bucket_size_threading;
+                std::lock_guard<std::mutex> lock(
+                  mutexes[*it / bucket_size_threading]);
+                for (; it != end_unique && *it < next_bucket; ++it)
+                  {
+                    AssertIndexRange(*it, row_lengths.size());
+                    row_lengths[*it]++;
+                  }
+              }
+          }
+      }
+
+      void
+      fill_connectivity_dofs(const unsigned int               begin,
+                             const unsigned int               end,
+                             const DoFInfo &                  dof_info,
+                             const std::vector<unsigned int> &row_lengths,
+                             std::vector<std::mutex> &        mutexes,
+                             dealii::SparsityPattern &        connectivity_dof)
+      {
+        std::vector<unsigned int> scratch;
+        const unsigned int n_components = dof_info.start_components.back();
+        for (unsigned int block = begin; block < end; ++block)
+          {
+            scratch.clear();
+            scratch.insert(
+              scratch.end(),
+              dof_info.dof_indices.data() +
+                dof_info.row_starts[block * n_components].first,
+              dof_info.dof_indices.data() +
+                dof_info.row_starts[(block + 1) * n_components].first);
+            std::sort(scratch.begin(), scratch.end());
+            std::vector<unsigned int>::const_iterator end_unique =
+              std::unique(scratch.begin(), scratch.end());
+            std::vector<unsigned int>::const_iterator it = scratch.begin();
+            while (it != end_unique)
+              {
+                const unsigned int next_bucket =
+                  (*it / bucket_size_threading + 1) * bucket_size_threading;
+                std::lock_guard<std::mutex> lock(
+                  mutexes[*it / bucket_size_threading]);
+                for (; it != end_unique && *it < next_bucket; ++it)
+                  if (row_lengths[*it] > 0)
+                    connectivity_dof.add(*it, block);
+              }
+          }
+      }
+
+
+
+      void
+      fill_connectivity(const unsigned int               begin,
+                        const unsigned int               end,
+                        const DoFInfo &                  dof_info,
+                        const std::vector<unsigned int> &renumbering,
+                        const dealii::SparsityPattern &  connectivity_dof,
+                        DynamicSparsityPattern &         connectivity)
+      {
+        ordered_vector     row_entries;
+        const unsigned int n_components = dof_info.start_components.back();
+        for (unsigned int block = begin; block < end; ++block)
+          {
+            row_entries.clear();
+
+            const unsigned int
+              *it = dof_info.dof_indices.data() +
+                    dof_info.row_starts[block * n_components].first,
+              *end_cell = dof_info.dof_indices.data() +
+                          dof_info.row_starts[(block + 1) * n_components].first;
+            for (; it != end_cell; ++it)
+              {
+                SparsityPattern::iterator sp = connectivity_dof.begin(*it);
+                std::vector<types::global_dof_index>::iterator insert_pos =
+                  row_entries.begin();
+                for (; sp != connectivity_dof.end(*it); ++sp)
+                  if (sp->column() != block)
+                    row_entries.insert(renumbering[sp->column()], insert_pos);
+              }
+            connectivity.add_entries(renumbering[block],
+                                     row_entries.begin(),
+                                     row_entries.end());
+          }
+      }
+
+    } // namespace internal
+
+    void
+    DoFInfo::make_connectivity_graph(
+      const TaskInfo &                 task_info,
+      const std::vector<unsigned int> &renumbering,
+      DynamicSparsityPattern &         connectivity) const
+    {
+      unsigned int n_rows = (vector_partitioner->local_range().second -
+                             vector_partitioner->local_range().first) +
+                            vector_partitioner->ghost_indices().n_elements();
+
+      // Avoid square sparsity patterns that allocate the diagonal entry
+      if (n_rows == task_info.n_active_cells)
+        ++n_rows;
+
+      // first determine row lengths
+      std::vector<unsigned int> row_lengths(n_rows);
+      std::vector<std::mutex> mutexes(n_rows / internal::bucket_size_threading +
+                                      1);
+      dealii::parallel::apply_to_subranges(
+        0,
+        task_info.n_active_cells,
+        [this, &mutexes, &row_lengths](const unsigned int begin,
+                                       const unsigned int end) {
+          internal::compute_row_lengths(
+            begin, end, *this, mutexes, row_lengths);
+        },
+        20);
+
+      // disregard dofs that only sit on a single cell because they cannot
+      // couple
+      for (unsigned int row = 0; row < n_rows; ++row)
+        if (row_lengths[row] <= 1)
+          row_lengths[row] = 0;
+
+      // Create a temporary sparsity pattern that stores, for each degree of
+      // freedom, the cells on which it appears, i.e., the connectivity
+      // between cells and dofs
+      SparsityPattern connectivity_dof(n_rows,
+                                       task_info.n_active_cells,
+                                       row_lengths);
+      dealii::parallel::apply_to_subranges(
+        0,
+        task_info.n_active_cells,
+        [this, &row_lengths, &mutexes, &connectivity_dof](
+          const unsigned int begin, const unsigned int end) {
+          internal::fill_connectivity_dofs(
+            begin, end, *this, row_lengths, mutexes, connectivity_dof);
+        },
+        20);
+      connectivity_dof.compress();
+
+
+      // Invert renumbering for use in fill_connectivity.
+      std::vector<unsigned int> reverse_numbering(task_info.n_active_cells);
+      reverse_numbering = Utilities::invert_permutation(renumbering);
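+      // Example: renumbering = {2, 0, 1} yields reverse_numbering =
+      // {1, 2, 0}, i.e., reverse_numbering[renumbering[i]] == i.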
+
+      // From the above connectivity between dofs and cells, we can finally
+      // create a connectivity list between cells. The connectivity graph
+      // should apply the renumbering, i.e., the entry for cell j is the entry
+      // for cell renumbering[j] in the original ordering.
+      dealii::parallel::apply_to_subranges(
+        0,
+        task_info.n_active_cells,
+        [this, &reverse_numbering, &connectivity_dof, &connectivity](
+          const unsigned int begin, const unsigned int end) {
+          internal::fill_connectivity(begin,
+                                      end,
+                                      *this,
+                                      reverse_numbering,
+                                      connectivity_dof,
+                                      connectivity);
+        },
+        20);
+    }
+
+
+
+    void
+    DoFInfo::compute_dof_renumbering(
+      std::vector<types::global_dof_index> &renumbering)
+    {
+      const unsigned int locally_owned_size =
+        vector_partitioner->locally_owned_size();
+      renumbering.resize(0);
+      renumbering.resize(locally_owned_size, numbers::invalid_dof_index);
+
+      types::global_dof_index counter      = 0;
+      const unsigned int      n_components = start_components.back();
+      const unsigned int      n_cell_batches =
+        n_vectorization_lanes_filled[dof_access_cell].size();
+      Assert(n_cell_batches <=
+               (row_starts.size() - 1) / vectorization_length / n_components,
+             ExcInternalError());
+      for (unsigned int cell_no = 0; cell_no < n_cell_batches; ++cell_no)
+        {
+          // do not renumber in case we have constraints
+          if (row_starts[cell_no * n_components * vectorization_length]
+                .second ==
+              row_starts[(cell_no + 1) * n_components * vectorization_length]
+                .second)
+            {
+              const unsigned int ndofs =
+                dofs_per_cell.size() == 1 ?
+                  dofs_per_cell[0] :
+                  (dofs_per_cell[cell_active_fe_index.size() > 0 ?
+                                   cell_active_fe_index[cell_no] :
+                                   0]);
+              const unsigned int *dof_ind =
+                dof_indices.data() +
+                row_starts[cell_no * n_components * vectorization_length].first;
+              for (unsigned int i = 0; i < ndofs; ++i)
+                for (unsigned int j = 0;
+                     j < n_vectorization_lanes_filled[dof_access_cell][cell_no];
+                     ++j)
+                  if (dof_ind[j * ndofs + i] < locally_owned_size)
+                    if (renumbering[dof_ind[j * ndofs + i]] ==
+                        numbers::invalid_dof_index)
+                      renumbering[dof_ind[j * ndofs + i]] = counter++;
+            }
+        }
+
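+      // DoFs not reached above (e.g., all indices on cells with constraints)
+      // keep their relative order and are appended behind the renumbered
+      // ones.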
+      AssertIndexRange(counter, locally_owned_size + 1);
+      for (types::global_dof_index &dof_index : renumbering)
+        if (dof_index == numbers::invalid_dof_index)
+          dof_index = counter++;
+
+      // transform indices to global index space
+      for (types::global_dof_index &dof_index : renumbering)
+        dof_index = vector_partitioner->local_to_global(dof_index);
+
+      AssertDimension(counter, renumbering.size());
+    }
+
+
+
+    std::size_t
+    DoFInfo::memory_consumption() const
+    {
+      std::size_t memory = sizeof(*this);
+      memory +=
+        (row_starts.capacity() * sizeof(std::pair<unsigned int, unsigned int>));
+      memory += MemoryConsumption::memory_consumption(dof_indices);
+      memory += MemoryConsumption::memory_consumption(row_starts_plain_indices);
+      memory += MemoryConsumption::memory_consumption(plain_dof_indices);
+      memory += MemoryConsumption::memory_consumption(constraint_indicator);
+      memory += MemoryConsumption::memory_consumption(*vector_partitioner);
+      return memory;
+    }
+  } // namespace MatrixFreeFunctions
+} // namespace internal
+
 namespace internal
 {
   namespace MatrixFreeFunctions
index 92e0574c932824d4ee85a3e153356b75df6c8396..812d9236d51ef7f1022fa279cff91f21222d1ac0 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
+namespace MGTransferGlobalCoarseningTools
+{
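+  // Given the previous (finer) polynomial degree, return the next coarser
+  // degree: bisect maps, e.g., 7 -> 3, decrease_by_one maps 7 -> 6, and
+  // go_to_one jumps directly to 1; the result never drops below 1.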
+  unsigned int
+  create_next_polynomial_coarsening_degree(
+    const unsigned int                      previous_fe_degree,
+    const PolynomialCoarseningSequenceType &p_sequence)
+  {
+    switch (p_sequence)
+      {
+        case PolynomialCoarseningSequenceType::bisect:
+          return std::max(previous_fe_degree / 2, 1u);
+        case PolynomialCoarseningSequenceType::decrease_by_one:
+          return std::max(previous_fe_degree - 1, 1u);
+        case PolynomialCoarseningSequenceType::go_to_one:
+          return 1u;
+        default:
+          Assert(false, StandardExceptions::ExcNotImplemented());
+          return 1u;
+      }
+  }
+
+
+
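+  // Example: create_polynomial_coarsening_sequence(7, bisect) produces the
+  // degrees 7 -> 3 -> 1 and returns them in ascending order as {1, 3, 7}.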
+  std::vector<unsigned int>
+  create_polynomial_coarsening_sequence(
+    const unsigned int                      max_degree,
+    const PolynomialCoarseningSequenceType &p_sequence)
+  {
+    std::vector<unsigned int> degrees{max_degree};
+
+    while (degrees.back() > 1)
+      degrees.push_back(
+        create_next_polynomial_coarsening_degree(degrees.back(), p_sequence));
+
+    std::reverse(degrees.begin(), degrees.end());
+
+    return degrees;
+  }
+} // namespace MGTransferGlobalCoarseningTools
+
 #include "mg_transfer_global_coarsening.inst"
 
 DEAL_II_NAMESPACE_CLOSE
