https://gitweb.dealii.org/ - dealii.git/commitdiff
Introduce face loop functionality in MatrixFree. Rework DoFInfo.
author    Martin Kronbichler <kronbichler@lnm.mw.tum.de>
          Mon, 23 Apr 2018 18:08:19 +0000 (20:08 +0200)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
          Fri, 27 Apr 2018 09:58:13 +0000 (11:58 +0200)
include/deal.II/matrix_free/dof_info.h
include/deal.II/matrix_free/dof_info.templates.h
include/deal.II/matrix_free/fe_evaluation.h
include/deal.II/matrix_free/matrix_free.h
include/deal.II/matrix_free/matrix_free.templates.h
source/matrix_free/CMakeLists.txt
source/matrix_free/mapping_info.cc [new file with mode: 0644]
source/matrix_free/mapping_info.inst.in [new file with mode: 0644]
source/matrix_free/matrix_free.inst.in
tests/matrix_free/thread_correctness_hp.cc

index 29baceeab7fa4166cd3de20ebc587fd22cdd5fba..914b045e0a3cbd080c5dcbf0110d1715476b21cd 100644 (file)
@@ -24,7 +24,8 @@
 #include <deal.II/lac/constraint_matrix.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/dofs/dof_handler.h>
-#include <deal.II/matrix_free/helper_functions.h>
+#include <deal.II/matrix_free/task_info.h>
+#include <deal.II/matrix_free/face_info.h>
 
 #include <array>
 #include <memory>
@@ -56,10 +57,19 @@ namespace internal
      *
      * @ingroup matrixfree
      *
-     * @author Katharina Kormann and Martin Kronbichler, 2010, 2011
+     * @author Katharina Kormann and Martin Kronbichler, 2010, 2011, 2018
      */
     struct DoFInfo
     {
+      /**
+       * This value is used to define subranges in the vectors which we can
+       * zero inside the MatrixFree::loop() call. The goal is to only clear a
+       * part of the vector at a time (8192 doubles correspond to 64 kB, which
+       * comfortably fits into CPU caches), so that the zeroed entries are
+       * still in cache when the results are written, saving one full sweep
+       * over the vector compared to an up-front `vector = 0.;`.
+       */
+      static const unsigned int chunk_size_zero_vector = 8192;
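The idea behind this constant, sketched here with illustrative names (this is not the code added by the patch; the real logic lives in compute_vector_zero_access_pattern() and MatrixFree::loop()): rather than an up-front `vector = 0.;`, each 8192-entry block of the destination vector is cleared right before the cell work that first writes into it. A minimal, self-contained C++ sketch of clearing one such chunk:

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

// Mirrors chunk_size_zero_vector above; clears one block of the destination
// vector just before it is first written, instead of one big "dst = 0" sweep.
constexpr std::size_t chunk_size = 8192;

void zero_chunk(std::vector<double> &dst, const std::size_t chunk)
{
  const std::size_t begin = chunk * chunk_size;
  if (begin >= dst.size())
    return;
  const std::size_t end = std::min(dst.size(), begin + chunk_size);
  std::memset(dst.data() + begin, 0, (end - begin) * sizeof(double));
}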
+
       /**
        * Default empty constructor.
        */
@@ -75,72 +85,13 @@ namespace internal
        */
       void clear ();
 
-
-      /**
-       * Return a pointer to the first index in the DoF row @p row.
-       */
-      const unsigned int *begin_indices (const unsigned int row) const;
-
-      /**
-       * Return a pointer to the one past the last DoF index in the row @p
-       * row.
-       */
-      const unsigned int *end_indices (const unsigned int row) const;
-
-      /**
-       * Return the number of entries in the indices field for the given row.
-       */
-      unsigned int row_length_indices (const unsigned int row) const;
-
-      /**
-       * Return a pointer to the first constraint indicator in the row @p
-       * row.
-       */
-      const std::pair<unsigned short,unsigned short> *
-      begin_indicators (const unsigned int row) const;
-
-      /**
-       * Return a pointer to the one past the last constraint indicator in
-       * the row @p row.
-       */
-      const std::pair<unsigned short,unsigned short> *
-      end_indicators (const unsigned int row) const;
-
-      /**
-       * Return the number of entries in the constraint indicator field for
-       * the given row.
-       */
-      unsigned int row_length_indicators (const unsigned int row) const;
-
-      /**
-       * Return a pointer to the first index in the DoF row @p row for plain
-       * indices (i.e., the entries where constraints are not embedded).
-       */
-      const unsigned int *begin_indices_plain (const unsigned int row) const;
-
-      /**
-       * Return a pointer to the one past the last DoF index in the row @p
-       * row (i.e., the entries where constraints are not embedded).
-       */
-      const unsigned int *end_indices_plain (const unsigned int row) const;
-
       /**
        * Return the FE index for a given finite element degree. If not in hp
        * mode, this function always returns index 0. If an index is not found
-       * in hp mode, it returns max_fe_degree, i.e., one index past the last
-       * valid one.
+       * in hp mode, it returns numbers::invalid_unsigned_int.
        */
-      unsigned int fe_index_from_degree (const unsigned int fe_degree) const;
-
-
-      /**
-       * Return the FE index for a given finite element degree. If not in hp
-       * mode or if the index is not found, this function always returns index
-       * 0. Hence, this function does not check whether the given degree is
-       * actually present.
-       */
-      unsigned int
-      fe_index_from_dofs_per_cell (const unsigned int dofs_per_cell) const;
+      unsigned int fe_index_from_degree (const unsigned int first_selected_component,
+                                         const unsigned int fe_degree) const;
 
       /**
        * This internal method takes the local indices on a cell and fills them
@@ -154,7 +105,7 @@ namespace internal
                              const std::vector<unsigned int> &lexicographic_inv,
                              const ConstraintMatrix          &constraints,
                              const unsigned int               cell_number,
-                             ConstraintValues<double> &constraint_values,
+                             ConstraintValues<double>        &constraint_values,
                              bool                            &cell_at_boundary);
 
       /**
@@ -166,46 +117,31 @@ namespace internal
        */
       void assign_ghosts(const std::vector<unsigned int> &boundary_cells);
 
-      /**
-       * Reorganizes cells for serial (non-thread-parallelized) such that
-       * boundary cells are places in the middle. This way, computations and
-       * communication can be overlapped. Should only be called by one DoFInfo
-       * object when used on a system of several DoFHandlers.
-       */
-      void compute_renumber_serial (const std::vector<unsigned int> &boundary_cells,
-                                    const SizeInfo                  &size_info,
-                                    std::vector<unsigned int>       &renumbering);
-
-      /**
-       * Reorganizes cells in the hp case without parallelism such that all
-       * cells with the same FE index are placed consecutively. Should only be
-       * called by one DoFInfo object when used on a system of several
-       * DoFHandlers.
-       */
-      void compute_renumber_hp_serial (SizeInfo                  &size_info,
-                                       std::vector<unsigned int> &renumbering,
-                                       std::vector<unsigned int> &irregular_cells);
-
-      /**
-       * Compute the initial renumbering of cells such that all cells with
-       * ghosts are put first. This is the first step before building the
-       * thread graph and used to overlap computations and communication.
-       */
-      void compute_renumber_parallel (const std::vector<unsigned int> &boundary_cells,
-                                      SizeInfo                        &size_info,
-                                      std::vector<unsigned int>       &renumbering);
-
       /**
        * This method reorders the way cells are gone through based on a given
        * renumbering of the cells. It also takes @p vectorization_length cells
        * together and interprets them as one cell only, as is needed for
        * vectorization.
        */
-      void reorder_cells (const SizeInfo                   &size_info,
+      void reorder_cells (const TaskInfo                   &task_info,
                           const std::vector<unsigned int>  &renumbering,
                           const std::vector<unsigned int>  &constraint_pool_row_index,
-                          const std::vector<unsigned char> &irregular_cells,
-                          const unsigned int                vectorization_length);
+                          const std::vector<unsigned char> &irregular_cells);
+
+      /**
+       * Finds possible compression for the cell indices that we can apply for
+       * increased efficiency. Run at the end of reorder_cells.
+       */
+      void
+      compute_cell_index_compression(const std::vector<unsigned char> &irregular_cells);
+
+      /**
+       * Finds possible compression for the face indices that we can apply for
+       * increased efficiency. Run at the end of reorder_cells.
+       */
+      template <int length>
+      void
+      compute_face_index_compression(const std::vector<FaceToCellTopology<length> > &faces);
 
       /**
        * This function computes the connectivity of the currently stored
@@ -218,9 +154,30 @@ namespace internal
                                DynamicSparsityPattern          &connectivity) const;
 
       /**
-       * Renumbers the degrees of freedom to give good access for this class.
+       * Compute a renumbering of the degrees of freedom to improve the data
+       * access patterns for this class that can be utilized by the categories
+       * in the IndexStorageVariants enum. For example, the index ordering can
+       * be improved for typical DG elements by interleaving the degrees of
+       * freedom from batches of cells, which avoids the explicit data
+       * transposition in IndexStorageVariants::contiguous. Currently, these
+       * more advanced features are not implemented, so there is only limited
+       * value of this function.
        */
-      void renumber_dofs (std::vector<types::global_dof_index> &renumbering);
+      void compute_dof_renumbering (std::vector<types::global_dof_index> &renumbering);
+
+      /**
+       * Fills the array that defines how to zero selected ranges in the result
+       * vector within the cell loop, filling the two member variables @p
+       * vector_zero_range_list_index and @p vector_zero_range_list.
+       *
+       * The intent of this pattern is to zero the vector entries in close
+       * temporal proximity to their first access, thus keeping the vector
+       * entries in cache.
+       */
+      template <int length>
+      void
+      compute_vector_zero_access_pattern (const TaskInfo                                 &task_info,
+                                          const std::vector<FaceToCellTopology<length> > &faces);
 
       /**
        * Return the memory consumption in bytes of this class.
@@ -233,7 +190,7 @@ namespace internal
        */
       template <typename StreamType>
       void print_memory_consumption(StreamType     &out,
-                                    const SizeInfo &size_info) const;
+                                    const TaskInfo &size_info) const;
 
       /**
        * Prints a representation of the indices in the class to the given
@@ -244,17 +201,84 @@ namespace internal
                   const std::vector<unsigned int> &constraint_pool_row_index,
                   std::ostream                    &out) const;
 
+      /**
+       * Enum for various storage variants of the indices. This storage format
+       * is used to implement more efficient indexing schemes in case the
+       * underlying data structures allow for them, and to inform the access
+       * functions in FEEvaluationBase::read_write_operation() on which array
+       * to get the data from. One example of more efficient storage is the
+       * enum value IndexStorageVariants::contiguous, which means that one can
+       * get the indices to all degrees of freedom of a cell by reading only
+       * the first index for each cell, whereas all subsequent indices are
+         * merely offsets from the first index.
+       */
+      enum class IndexStorageVariants : unsigned char
+      {
+        /**
+         * This value indicates that no index compression was found and the
+         * only valid storage is to access all indices present on the cell,
+         * possibly including constraints. For a cell/face of this index type,
+         * the data access in FEEvaluationBase is directed to the array @p
+         * dof_indices with the index
+         * `row_starts[cell_index*n_vectorization*n_components].first`.
+         */
+        full,
+        /**
+         * This value indicates that the indices are interleaved for access
+         * with vectorized gather and scatter operation. This storage variant
+         * is possible in case there are no constraints on the cell and the
+         * indices in the batch of cells are not pointing to the same global
+         * index in different slots of a vectorized array (in order to support
+         * scatter operations). For a cell/face of this index type, the data
+         * access in FEEvaluationBase is directed to the array
+         * `dof_indices_interleaved` with the index
+         * `row_starts[cell_index*n_vectorization*n_components].first`.
+         */
+        interleaved,
+        /**
+         * This value indicates that the indices within a cell are all
+         * contiguous, and one can get the index to the cell by reading that
+         * single value for each of the cells in the cell batch. For a
+         * cell/face of this index type, the data access in FEEvaluationBase
+         * is directed to the array `dof_indices_contiguous` with the index
+         * `cell_index*n_vectorization*n_components`.
+         */
+        contiguous
+      };
+
+      /**
+       * Stores the dimension of the underlying DoFHandler. Since the indices
+       * are not templated, this is the variable that makes the dimension
+       * accessible in the (rare) cases it is needed inside this class.
+       */
+      unsigned int dimension;
+
+      /**
+       * For efficiency reasons, always keep a fixed number of cells with
+       * similar properties together. This variable controls the number of
+       * cells batched together. As opposed to the other classes which are
+       * templated on the number type, this class as a pure index container is
+       * not templated, so we need to keep the information otherwise contained
+       * in VectorizedArray<Number>::n_array_elements.
+       */
+      unsigned int vectorization_length;
+
+      /**
+       * Stores the index storage variant of all cell batches.
+       *
+       * The three arrays given here address the types for the faces decorated
+       * as interior (0), the faces decorated as exterior (1), and the
+       * cells (2).
+       */
+      std::vector<IndexStorageVariants> index_storage_variants[3];
+
       /**
        * Stores the rowstart indices of the compressed row storage in the @p
        * dof_indices and @p constraint_indicator fields. These two fields are
        * always accessed together, so it is simpler to keep just one variable
-       * for them. This also obviates keeping two rowstart vectors in synch.
-       *
-       * In addition, the third field stores whether a particular cell has a
-       * certain structure in the indices, like indices for vector-valued
-       * problems or for cells where not all vector components are filled.
+       * for them. This also obviates keeping two rowstart vectors in sync.
        */
-      std::vector<std::array<unsigned int, 3> > row_starts;
+      std::vector<std::pair<unsigned int, unsigned int> > row_starts;
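Throughout the patch, this pair-based array is addressed as row_starts[(cell*vectorization_length + lane)*n_components + component] (see reorder_cells in the second file below). A hedged sketch, with illustrative names, of how the index ranges for one lane and component of a cell batch can be extracted:

#include <utility>
#include <vector>

// Entry r of row_starts holds the begin offsets into dof_indices (.first) and
// constraint_indicator (.second); entry r+1 holds the corresponding ends.
struct DofRange
{
  unsigned int dof_begin, dof_end;             // range within dof_indices
  unsigned int indicator_begin, indicator_end; // range within constraint_indicator
};

DofRange
cell_component_range(const std::vector<std::pair<unsigned int, unsigned int>> &row_starts,
                     const unsigned int cell_batch,
                     const unsigned int lane,
                     const unsigned int n_lanes,      // vectorization_length
                     const unsigned int n_components, // start_components.back()
                     const unsigned int component)
{
  const unsigned int row =
    (cell_batch * n_lanes + lane) * n_components + component;
  return {row_starts[row].first, row_starts[row + 1].first,
          row_starts[row].second, row_starts[row + 1].second};
}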
 
       /**
        * Stores the indices of the degrees of freedom for each cell. These
@@ -284,6 +308,32 @@ namespace internal
        */
       std::vector<std::pair<unsigned short,unsigned short> > constraint_indicator;
 
+      /**
+       * Reordered index storage for `IndexStorageVariants::interleaved`.
+       */
+      std::vector<unsigned int> dof_indices_interleaved;
+
+      /**
+       * Compressed index storage for faster access than through @p
+       * dof_indices, used according to the description in IndexStorageVariants.
+       *
+       * The three arrays given here address the types for the faces decorated
+       * as interior (0), the faces decorated as exterior (1), and the
+       * cells (2).
+       */
+      std::vector<unsigned int> dof_indices_contiguous[3];
+
+      /**
+       * Caches the number of indices filled when vectorizing. This
+       * information can be implicitly deduced from the row_starts data fields,
+       * but this field allows for faster access.
+       *
+       * The three arrays given here address the types for the faces decorated
+       * as interior (0), the faces decorated as exterior (1), and the
+       * cells (2).
+       */
+      std::vector<unsigned char> n_vectorization_lanes_filled[3];
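To illustrate what the compressed arrays buy, here is a hedged sketch (not the FEEvaluationBase code) of how the global index of local dof k on lane j of a cell batch is recovered for the interleaved and contiguous variants; the full variant instead walks the per-lane ranges given by row_starts as in the sketch further up.

#include <vector>

// interleaved: dof k of lane j sits at offset k*n_lanes+j behind the batch's
// first entry of dof_indices_interleaved, so reading one dof across all lanes
// touches n_lanes consecutive entries (good for vectorized gather/scatter).
inline unsigned int
interleaved_index(const std::vector<unsigned int> &dof_indices_interleaved,
                  const unsigned int batch_first, // row_starts[batch*n_lanes*n_components].first
                  const unsigned int n_lanes,
                  const unsigned int k,
                  const unsigned int j)
{
  return dof_indices_interleaved[batch_first + k * n_lanes + j];
}

// contiguous: only the first index of each lane is stored (in the cell slot
// [2] of dof_indices_contiguous); all further indices follow consecutively.
inline unsigned int
contiguous_index(const unsigned int first_index_of_lane, // dof_indices_contiguous[2][batch*n_lanes+j]
                 const unsigned int k)
{
  return first_index_of_lane + k;
}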
+
       /**
        * This stores the parallel partitioning that can be used to set up
        * vectors. The partitioner includes the description of the local range
@@ -292,6 +342,19 @@ namespace internal
        */
       std::shared_ptr<const Utilities::MPI::Partitioner> vector_partitioner;
 
+      /**
+       * This partitioning selects a subset of the ghost indices of the full
+       * vector partitioner stored in @p vector_partitioner. These
+       * partitioners are used in specialized loops that only import parts of
+       * the ghosted region for reducing the amount of communication. There
+       * are three variants of the partitioner initialized, one that queries
+       * only the cell values, one that additionally describes the indices for
+       * evaluating the function values on the faces, and one that describes
+       * the indices for evaluating both the function values and the gradients
+       * on the faces adjacent to the locally owned cells.
+       */
+      std::array<std::shared_ptr<const Utilities::MPI::Partitioner>, 3> vector_partitioner_face_variants;
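A minimal sketch of selecting one of the three reduced partitioners, purely to illustrate the index meanings; the actual choice is made inside MatrixFree::loop() based on the data-access hints passed by the user, and the enum below is a stand-in, not a deal.II type:

#include <array>
#include <memory>

// Index 0: only cell values are exchanged, 1: additionally the values needed
// on faces, 2: values and gradients on faces adjacent to owned cells.
enum class GhostDataAccess
{
  cell_values               = 0,
  face_values               = 1,
  face_values_and_gradients = 2
};

template <typename PartitionerType>
const std::shared_ptr<const PartitionerType> &
select_ghost_partitioner(
  const std::array<std::shared_ptr<const PartitionerType>, 3> &face_variants,
  const GhostDataAccess                                        requested)
{
  return face_variants[static_cast<unsigned int>(requested)];
}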
+
       /**
        * This stores a (sorted) list of all locally owned degrees of freedom
        * that are constrained.
@@ -315,17 +378,47 @@ namespace internal
       std::vector<unsigned int> plain_dof_indices;
 
       /**
-       * Stores the dimension of the underlying DoFHandler. Since the indices
-       * are not templated, this is the variable that makes the dimension
-       * accessible in the (rare) cases it is needed inside this class.
+       * Stores the offset in terms of the number of base elements over all
+       * DoFInfo objects.
        */
-      unsigned int dimension;
+      unsigned int global_base_element_offset;
 
       /**
-       * Stores the number of components in the DoFHandler where the indices
-       * have been read from.
+       * Stores the number of base elements in the DoFHandler where the
+       * indices have been read from.
        */
-      unsigned int n_components;
+      unsigned int n_base_elements;
+
+      /**
+       * Stores the number of components of each base element in the finite
+       * element where the indices have been read from.
+       */
+      std::vector<unsigned int> n_components;
+
+      /**
+       * The ith entry of this vector stores the component number of the given
+       * base element.
+       */
+      std::vector<unsigned int> start_components;
+
+      /**
+       * For a given component in an FESystem, this variable tells which base
+       * element the index belongs to.
+       */
+      std::vector<unsigned int> component_to_base_index;
+
+      /**
+       * For a vector-valued element, this gives the constant offset in the
+       * cell's list of degrees of freedom at which the degrees of freedom of
+       * the given component start. This data structure does not take possible
+       * constraints, and thus shorter or longer index lists, into
+       * account. That information is encoded in the row_starts variables
+       * directly.
+       *
+       * The outer vector goes through the various fe indices in the hp case,
+       * similarly to the @p dofs_per_cell variable.
+       */
+      std::vector<std::vector<unsigned int> > component_dof_indices_offset;
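As a concrete, hypothetical example of how the component bookkeeping above fits together for a single fe index, consider a system with a 3-component base element followed by a scalar one. The helper below only illustrates the documented layout; it is not the setup code of this patch:

#include <vector>

struct ComponentLayout
{
  std::vector<unsigned int> n_components;             // per base element
  std::vector<unsigned int> start_components;         // first component of each base element, plus total
  std::vector<unsigned int> component_to_base_index;  // component -> base element
  std::vector<unsigned int> component_dof_indices_offset; // start of each component in the cell's dof list
};

ComponentLayout
make_layout(const std::vector<unsigned int> &components_per_base,
            const std::vector<unsigned int> &dofs_per_component)
{
  ComponentLayout layout;
  layout.n_components = components_per_base;
  layout.start_components.push_back(0);
  for (unsigned int base = 0; base < components_per_base.size(); ++base)
    {
      for (unsigned int c = 0; c < components_per_base[base]; ++c)
        layout.component_to_base_index.push_back(base);
      layout.start_components.push_back(layout.start_components.back() +
                                        components_per_base[base]);
    }
  layout.component_dof_indices_offset.push_back(0);
  for (unsigned int comp = 0; comp < layout.component_to_base_index.size(); ++comp)
    layout.component_dof_indices_offset.push_back(
      layout.component_dof_indices_offset.back() + dofs_per_component[comp]);
  return layout;
}

For instance, make_layout({3, 1}, {N, N, N, M}) yields start_components = {0, 3, 4}, component_to_base_index = {0, 0, 0, 1}, and component_dof_indices_offset = {0, N, 2N, 3N, 3N+M}.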
 
       /**
        * Stores the number of degrees of freedom per cell.
@@ -354,11 +447,11 @@ namespace internal
       unsigned int max_fe_index;
 
       /**
-       * This variable stores the dofs per cell and the finite element degree
-       * associated for all fe indices in the underlying element for easier
-       * access to data in the hp case.
+       * For each of the slots in an hp-adaptive case, the inner vector stores
+       * the corresponding element degree for each component. This is used by
+       * the constructor of FEEvaluationBase to identify the correct data slot
+       * in the hp case.
        */
-      std::vector<std::pair<unsigned int,unsigned int> > fe_index_conversion;
+      std::vector<std::vector<unsigned int> > fe_index_conversion;
 
       /**
        * Temporarily stores the numbers of ghosts during setup. Cleared when
@@ -366,6 +459,18 @@ namespace internal
        * partitioner.
        */
       std::vector<types::global_dof_index> ghost_dofs;
+
+      /**
+       * Stores an integer for each partition in TaskInfo that indicates
+       * whether to clear certain parts of the result vector if the user
+       * requested it with the respective argument of MatrixFree::loop().
+       */
+      std::vector<unsigned int> vector_zero_range_list_index;
+
+      /**
+       * Stores the actual ranges in the vector to be cleared.
+       */
+      std::vector<unsigned int> vector_zero_range_list;
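A hedged sketch of how these two arrays could be consumed, assuming a CRS-style layout in which the first array points into the second and each stored entry names one chunk of chunk_size_zero_vector entries; the exact convention is established in compute_vector_zero_access_pattern(), and all names here are illustrative:

#include <algorithm>
#include <cstddef>
#include <cstring>
#include <vector>

void
zero_ranges_for_partition(std::vector<double>             &dst,
                          const std::vector<unsigned int> &range_list_index, // vector_zero_range_list_index
                          const std::vector<unsigned int> &range_list,       // vector_zero_range_list
                          const unsigned int               partition,
                          const std::size_t                chunk_size = 8192) // chunk_size_zero_vector
{
  // clear exactly the chunks listed for this partition, right before its
  // cell work touches them
  for (unsigned int j = range_list_index[partition];
       j < range_list_index[partition + 1]; ++j)
    {
      const std::size_t begin = range_list[j] * chunk_size;
      const std::size_t end   = std::min(dst.size(), begin + chunk_size);
      if (begin < end)
        std::memset(dst.data() + begin, 0, (end - begin) * sizeof(double));
    }
}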
     };
 
 
@@ -373,138 +478,19 @@ namespace internal
 
 #ifndef DOXYGEN
 
-    inline
-    const unsigned int *
-    DoFInfo::begin_indices (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      const unsigned int index = row_starts[row][0];
-      AssertIndexRange(index, dof_indices.size()+1);
-      return dof_indices.empty() ?
-             nullptr :
-             dof_indices.data()+index;
-    }
-
-
-
-    inline
-    const unsigned int *
-    DoFInfo::end_indices (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      const unsigned int index = row_starts[row+1][0];
-      AssertIndexRange(index, dof_indices.size()+1);
-      return dof_indices.empty() ?
-             nullptr :
-             dof_indices.data()+index;
-    }
-
-
 
     inline
     unsigned int
-    DoFInfo::row_length_indices (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      return (row_starts[row+1][0] - row_starts[row][0]);
-    }
-
-
-
-    inline
-    const std::pair<unsigned short,unsigned short> *
-    DoFInfo::begin_indicators (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      const unsigned int index = row_starts[row][1];
-      AssertIndexRange (index, constraint_indicator.size()+1);
-      return constraint_indicator.empty() ?
-             nullptr :
-             constraint_indicator.data()+index;
-    }
-
-
-
-    inline
-    const std::pair<unsigned short,unsigned short> *
-    DoFInfo::end_indicators (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      const unsigned int index = row_starts[row+1][1];
-      AssertIndexRange (index, constraint_indicator.size()+1);
-      return constraint_indicator.empty() ?
-             nullptr :
-             constraint_indicator.data()+index;
-    }
-
-
-
-    inline
-    unsigned int
-    DoFInfo::row_length_indicators (const unsigned int row) const
-    {
-      AssertIndexRange (row, row_starts.size()-1);
-      return (row_starts[row+1][1] - row_starts[row][1]);
-    }
-
-
-
-    inline
-    const unsigned int *
-    DoFInfo::begin_indices_plain (const unsigned int row) const
-    {
-      // if we have no constraints, should take the data from dof_indices
-      if (row_length_indicators(row) == 0)
-        {
-          Assert (row_starts_plain_indices[row]==numbers::invalid_unsigned_int,
-                  ExcInternalError());
-          return begin_indices(row);
-        }
-      else
-        {
-          AssertDimension (row_starts.size(), row_starts_plain_indices.size());
-          const unsigned int index = row_starts_plain_indices[row];
-          AssertIndexRange(index, plain_dof_indices.size()+1);
-          return plain_dof_indices.empty() ?
-                 nullptr :
-                 plain_dof_indices.data()+index;
-        }
-    }
-
-
-
-    inline
-    const unsigned int *
-    DoFInfo::end_indices_plain (const unsigned int row) const
-    {
-      return begin_indices_plain(row) +
-             dofs_per_cell[(cell_active_fe_index.size()==0)?
-                           0:cell_active_fe_index[row]];
-    }
-
-
-
-    inline
-    unsigned int
-    DoFInfo::fe_index_from_degree (const unsigned int fe_degree) const
+    DoFInfo::fe_index_from_degree (const unsigned int first_selected_component,
+                                   const unsigned int fe_degree) const
     {
       const unsigned int n_indices = fe_index_conversion.size();
+      if (n_indices <= 1)
+        return 0;
       for (unsigned int i=0; i<n_indices; ++i)
-        if (fe_index_conversion[i].first == fe_degree)
-          return i;
-      return n_indices;
-    }
-
-
-
-    inline
-    unsigned int
-    DoFInfo::fe_index_from_dofs_per_cell (const unsigned int dofs_per_cell) const
-    {
-      for (unsigned int i=0; i<fe_index_conversion.size(); ++i)
-        if (fe_index_conversion[i].second == dofs_per_cell)
+        if (fe_index_conversion[i][first_selected_component] == fe_degree)
           return i;
-      return 0;
+      return numbers::invalid_unsigned_int;
     }
 
   } // end of namespace MatrixFreeFunctions
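Tying together row_starts, dof_indices, constraint_indicator, and the constraint pool: each (distance, pool row) pair of a cell contributes `first` plain indices followed by one constrained degree of freedom whose stored indices span one row of the constraint pool, and any trailing unconstrained dofs store one index each. A small sketch along the lines of the pointer walk removed from reorder_cells, with illustrative names, counts the indices stored for one cell:

#include <utility>
#include <vector>

unsigned int
count_stored_indices(const std::pair<unsigned short, unsigned short> *indicator,
                     const std::pair<unsigned short, unsigned short> *indicator_end,
                     const std::vector<unsigned int>                 &constraint_pool_row_index,
                     const unsigned int                               dofs_per_cell)
{
  unsigned int stored = 0, resolved = 0;
  for (; indicator != indicator_end; ++indicator)
    {
      // 'first' unconstrained dofs precede the next constrained one; each of
      // them stores exactly one entry in dof_indices
      stored   += indicator->first;
      // the constrained dof expands into one constraint-pool row of weights
      // and the same number of stored indices
      stored   += constraint_pool_row_index[indicator->second + 1] -
                  constraint_pool_row_index[indicator->second];
      resolved += indicator->first + 1;
    }
  // dofs after the last constraint are unconstrained and store one index each
  return stored + (dofs_per_cell - resolved);
}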
index 217c9b70fd5b98d6eefb72e1ae1f958fceb8736e..518b93eb7ddabbcf8b2057f7f37978a0b5927cb2 100644 (file)
 
 #include <deal.II/base/memory_consumption.h>
 #include <deal.II/base/multithread_info.h>
+#include <deal.II/base/thread_management.h>
+#include <deal.II/base/parallel.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
 #include <deal.II/lac/sparsity_pattern.h>
 #include <deal.II/matrix_free/dof_info.h>
-#include <deal.II/matrix_free/helper_functions.h>
+#include <deal.II/matrix_free/task_info.h>
 #include <deal.II/matrix_free/mapping_info.h>
 
 DEAL_II_NAMESPACE_OPEN
@@ -142,8 +144,12 @@ namespace internal
       ghost_dofs.clear();
       dofs_per_cell.clear();
       dofs_per_face.clear();
+      vectorization_length = 1;
       dimension = 2;
-      n_components = 0;
+      global_base_element_offset = 0;
+      n_base_elements = 0;
+      n_components.clear();
+      start_components.clear();
       row_starts_plain_indices.clear();
       plain_dof_indices.clear();
       store_plain_indices = false;
@@ -155,12 +161,13 @@ namespace internal
 
 
     void
-    DoFInfo::read_dof_indices (const std::vector<types::global_dof_index> &local_indices,
-                               const std::vector<unsigned int> &lexicographic_inv,
-                               const ConstraintMatrix          &constraints,
-                               const unsigned int               cell_number,
-                               ConstraintValues<double> &constraint_values,
-                               bool                            &cell_at_boundary)
+    DoFInfo
+    ::read_dof_indices (const std::vector<types::global_dof_index> &local_indices,
+                        const std::vector<unsigned int> &lexicographic_inv,
+                        const ConstraintMatrix          &constraints,
+                        const unsigned int               cell_number,
+                        ConstraintValues<double>        &constraint_values,
+                        bool                            &cell_at_subdomain_boundary)
     {
       Assert (vector_partitioner.get() != nullptr, ExcInternalError());
       const unsigned int n_mpi_procs = vector_partitioner->n_mpi_processes();
@@ -170,120 +177,127 @@ namespace internal
               ExcMessage("The size local range of owned indices must not "
                          "exceed the size of unsigned int"));
       const unsigned int n_owned     = last_owned - first_owned;
-      std::pair<unsigned short,unsigned short> constraint_iterator (0,0);
 
-      unsigned int dofs_this_cell = (cell_active_fe_index.empty()) ?
-                                    dofs_per_cell[0] : dofs_per_cell[cell_active_fe_index[cell_number]];
-      for (unsigned int i=0; i<dofs_this_cell; i++)
+      Assert(dofs_per_cell.size() == 1 || cell_number < cell_active_fe_index.size(),
+             ExcInternalError());
+      const unsigned int fe_index = dofs_per_cell.size() == 1 ? 0 : cell_active_fe_index[cell_number];
+      const unsigned int dofs_this_cell = dofs_per_cell[fe_index];
+      const unsigned int n_components = start_components.back();
+      for (unsigned int comp=0; comp<n_components; ++comp)
         {
-          types::global_dof_index current_dof =
-            local_indices[lexicographic_inv[i]];
-          const std::vector<std::pair<types::global_dof_index,double> >
-          *entries_ptr =
-            constraints.get_constraint_entries(current_dof);
-
-          // dof is constrained
-          if (entries_ptr != nullptr)
+          std::pair<unsigned short,unsigned short> constraint_iterator (0,0);
+          for (unsigned int i=component_dof_indices_offset[fe_index][comp];
+               i<component_dof_indices_offset[fe_index][comp+1]; i++)
             {
-              // in case we want to access plain indices, we need to know
-              // about the location of constrained indices as well (all the
-              // other indices are collected by the cases below)
-              if (current_dof < first_owned || current_dof >= last_owned)
-                {
-                  ghost_dofs.push_back (current_dof);
-                  cell_at_boundary = true;
-                }
-
-              // check whether this dof is identity constrained to another
-              // dof. then we can simply insert that dof and there is no need
-              // to actually resolve the constraint entries
+              types::global_dof_index current_dof =
+                local_indices[lexicographic_inv[i]];
               const std::vector<std::pair<types::global_dof_index,double> >
-              &entries = *entries_ptr;
-              const types::global_dof_index n_entries = entries.size();
-              if (n_entries == 1 && std::fabs(entries[0].second-1.)<1e-14)
+              *entries_ptr =
+                constraints.get_constraint_entries(current_dof);
+
+              // dof is constrained
+              if (entries_ptr != nullptr)
                 {
-                  current_dof = entries[0].first;
-                  goto no_constraint;
-                }
+                  // in case we want to access plain indices, we need to know
+                  // about the location of constrained indices as well (all the
+                  // other indices are collected by the cases below)
+                  if (current_dof < first_owned || current_dof >= last_owned)
+                    {
+                      ghost_dofs.push_back (current_dof);
+                      cell_at_subdomain_boundary = true;
+                    }
+
+                  // check whether this dof is identity constrained to another
+                  // dof. then we can simply insert that dof and there is no need
+                  // to actually resolve the constraint entries
+                  const std::vector<std::pair<types::global_dof_index,double> >
+                  &entries = *entries_ptr;
+                  const types::global_dof_index n_entries = entries.size();
+                  if (n_entries == 1 && std::abs(entries[0].second-1.)<
+                      100*std::numeric_limits<double>::epsilon())
+                    {
+                      current_dof = entries[0].first;
+                      goto no_constraint;
+                    }
 
-              // append a new index to the indicators
-              constraint_indicator.push_back (constraint_iterator);
-              constraint_indicator.back().second =
-                constraint_values.insert_entries (entries);
+                  // append a new index to the indicators
+                  constraint_indicator.push_back (constraint_iterator);
+                  constraint_indicator.back().second =
+                    constraint_values.insert_entries (entries);
 
-              // reset constraint iterator for next round
-              constraint_iterator.first = 0;
+                  // reset constraint iterator for next round
+                  constraint_iterator.first = 0;
 
-              // add the local_to_global indices computed in the
-              // insert_entries function. transform the index to local index
-              // space or mark it as ghost if necessary
-              if (n_entries > 0)
-                {
-                  const std::vector<types::global_dof_index> &constraint_indices =
-                    constraint_values.constraint_indices;
-                  for (unsigned int j=0; j<n_entries; ++j)
+                  // add the local_to_global indices computed in the
+                  // insert_entries function. transform the index to local index
+                  // space or mark it as ghost if necessary
+                  if (n_entries > 0)
                     {
-                      if (n_mpi_procs > 1 &&
-                          (constraint_indices[j] < first_owned ||
-                           constraint_indices[j] >= last_owned))
+                      const std::vector<types::global_dof_index> &constraint_indices =
+                        constraint_values.constraint_indices;
+                      for (unsigned int j=0; j<n_entries; ++j)
                         {
-                          dof_indices.push_back (n_owned + ghost_dofs.size());
-
-                          // collect ghosts so that we can later construct an
-                          // IndexSet for them. also store whether the current
-                          // cell is on the boundary
-                          ghost_dofs.push_back(constraint_indices[j]);
-                          cell_at_boundary = true;
+                          if (n_mpi_procs > 1 &&
+                              (constraint_indices[j] < first_owned ||
+                               constraint_indices[j] >= last_owned))
+                            {
+                              dof_indices.push_back (n_owned + ghost_dofs.size());
+
+                              // collect ghosts so that we can later construct an
+                              // IndexSet for them. also store whether the current
+                              // cell is on the boundary
+                              ghost_dofs.push_back(constraint_indices[j]);
+                              cell_at_subdomain_boundary = true;
+                            }
+                          else
+                            // not ghost, so transform to the local index space
+                            // directly
+                            dof_indices.push_back
+                            (static_cast<unsigned int>(constraint_indices[j] -
+                                                       first_owned));
                         }
-                      else
-                        // not ghost, so transform to the local index space
-                        // directly
-                        dof_indices.push_back
-                        (static_cast<unsigned int>(constraint_indices[j] -
-                                                   first_owned));
                     }
                 }
-            }
-          else
-            {
-no_constraint:
-              // Not constrained, we simply have to add the local index to the
-              // indices_local_to_global list and increment constraint
-              // iterator. transform to local index space/mark as ghost
-              if (n_mpi_procs > 1 &&
-                  (current_dof < first_owned ||
-                   current_dof >= last_owned))
-                {
-                  ghost_dofs.push_back(current_dof);
-                  current_dof = n_owned + ghost_dofs.size()-1;
-                  cell_at_boundary = true;
-                }
               else
-                current_dof -= first_owned;
+                {
+no_constraint:
+                  // Not constrained, we simply have to add the local index to the
+                  // indices_local_to_global list and increment constraint
+                  // iterator. transform to local index space/mark as ghost
+                  if (n_mpi_procs > 1 &&
+                      (current_dof < first_owned ||
+                       current_dof >= last_owned))
+                    {
+                      ghost_dofs.push_back(current_dof);
+                      current_dof = n_owned + ghost_dofs.size()-1;
+                      cell_at_subdomain_boundary = true;
+                    }
+                  else
+                    current_dof -= first_owned;
 
-              dof_indices.push_back (static_cast<unsigned int>(current_dof));
+                  dof_indices.push_back (static_cast<unsigned int>(current_dof));
 
-              // make sure constraint_iterator.first is always within the
-              // bounds of unsigned short
-              Assert (constraint_iterator.first <
-                      (1<<(8*sizeof(unsigned short)))-1,
-                      ExcInternalError());
-              constraint_iterator.first++;
+                  // make sure constraint_iterator.first is always within the
+                  // bounds of unsigned short
+                  Assert (constraint_iterator.first <
+                          (1<<(8*sizeof(unsigned short)))-1,
+                          ExcInternalError());
+                  constraint_iterator.first++;
+                }
             }
+          row_starts[cell_number*n_components+comp+1].first = dof_indices.size();
+          row_starts[cell_number*n_components+comp+1].second = constraint_indicator.size();
         }
-      row_starts[cell_number+1][0] = dof_indices.size();
-      row_starts[cell_number+1][1] = constraint_indicator.size();
-      row_starts[cell_number+1][2] = 0;
 
       // now to the plain indices: in case we have constraints on this cell,
       // store the indices without the constraints resolve once again
       if (store_plain_indices == true)
         {
           if (cell_number == 0)
-            row_starts_plain_indices.resize (row_starts.size());
+            row_starts_plain_indices.resize ((row_starts.size()-1)/n_components+1);
           row_starts_plain_indices[cell_number] = plain_dof_indices.size();
-          bool cell_has_constraints = (row_starts[cell_number+1][1] >
-                                       row_starts[cell_number][1]);
+          const bool cell_has_constraints = (row_starts[(cell_number+1)*n_components].second >
+                                             row_starts[cell_number*n_components].second);
           if (cell_has_constraints == true)
             {
               for (unsigned int i=0; i<dofs_this_cell; ++i)
@@ -296,7 +310,7 @@ no_constraint:
                     {
                       ghost_dofs.push_back(current_dof);
                       current_dof = n_owned + ghost_dofs.size()-1;
-                      cell_at_boundary = true;
+                      cell_at_subdomain_boundary = true;
                     }
                   else
                     current_dof -= first_owned;
@@ -310,7 +324,8 @@ no_constraint:
 
 
     void
-    DoFInfo::assign_ghosts (const std::vector<unsigned int> &boundary_cells)
+    DoFInfo
+    ::assign_ghosts (const std::vector<unsigned int> &boundary_cells)
     {
       Assert (boundary_cells.size() < row_starts.size(), ExcInternalError());
 
@@ -324,6 +339,7 @@ no_constraint:
         AssertIndexRange (*dof, n_owned+n_ghosts);
 #endif
 
+      const unsigned int n_components = start_components.back();
       std::vector<unsigned int> ghost_numbering (n_ghosts);
       IndexSet ghost_indices (vector_partitioner->size());
       if (n_ghosts > 0)
@@ -376,9 +392,8 @@ no_constraint:
           const unsigned int n_boundary_cells = boundary_cells.size();
           for (unsigned int i=0; i<n_boundary_cells; ++i)
             {
-              unsigned int *data_ptr = const_cast<unsigned int *> (begin_indices(boundary_cells[i]));
-
-              const unsigned int *row_end = end_indices(boundary_cells[i]);
+              unsigned int *data_ptr = const_cast<unsigned int *> (&dof_indices[row_starts[boundary_cells[i]*n_components].first]);
+              const unsigned int *row_end = &dof_indices[row_starts[(boundary_cells[i]+1)*n_components].first];
               for ( ; data_ptr != row_end; ++data_ptr)
                 *data_ptr = ((*data_ptr < n_owned)
                              ?
@@ -390,10 +405,13 @@ no_constraint:
               // now the same procedure for plain indices
               if (store_plain_indices == true)
                 {
-                  if (row_length_indicators(boundary_cells[i]) > 0)
+                  if (row_starts[boundary_cells[i]*n_components].second !=
+                      row_starts[(boundary_cells[i]+1)*n_components].second)
                     {
-                      unsigned int *data_ptr = const_cast<unsigned int *> (begin_indices_plain(boundary_cells[i]));
-                      const unsigned int *row_end = end_indices_plain(boundary_cells[i]);
+                      unsigned int *data_ptr = const_cast<unsigned int *> (&plain_dof_indices[row_starts_plain_indices[boundary_cells[i]]]);
+                      const unsigned int *row_end = data_ptr +
+                                                    dofs_per_cell[cell_active_fe_index.size() == 0 ?
+                                                                  0 : cell_active_fe_index[i]];
                       for ( ; data_ptr != row_end; ++data_ptr)
                         *data_ptr = ((*data_ptr < n_owned)
                                      ?
@@ -420,154 +438,119 @@ no_constraint:
 
 
     void
-    DoFInfo::reorder_cells (const TaskInfo                   &task_info,
-                            const std::vector<unsigned int>  &renumbering,
-                            const std::vector<unsigned int>  &constraint_pool_row_index,
-                            const std::vector<unsigned char> &irregular_cells,
-                            const unsigned int                vectorization_length)
+    DoFInfo
+    ::reorder_cells (const TaskInfo                  &task_info,
+                     const std::vector<unsigned int> &renumbering,
+                     const std::vector<unsigned int> &constraint_pool_row_index,
+                     const std::vector<unsigned char> &irregular_cells)
     {
+      (void)constraint_pool_row_index;
+
       // first reorder the active fe index.
+      const bool have_hp = dofs_per_cell.size() > 1;
       if (cell_active_fe_index.size() > 0)
         {
           std::vector<unsigned int> new_active_fe_index;
           new_active_fe_index.reserve (task_info.cell_partition_data.back());
-          std::vector<unsigned int> fe_indices(vectorization_length);
           unsigned int position_cell = 0;
           for (unsigned int cell=0; cell<task_info.cell_partition_data.back(); ++cell)
             {
               const unsigned int n_comp = (irregular_cells[cell] > 0 ?
                                            irregular_cells[cell] : vectorization_length);
-              for (unsigned int j=0; j<n_comp; ++j)
-                fe_indices[j]=cell_active_fe_index[renumbering[position_cell+j]];
 
-              // by construction, all cells should have the same fe index.
+              // take maximum FE index among the ones present (we might have
+              // lumped some lower indices into higher ones)
+              unsigned int fe_index = cell_active_fe_index[renumbering[position_cell]];
               for (unsigned int j=1; j<n_comp; ++j)
-                Assert (fe_indices[j] == fe_indices[0], ExcInternalError());
+                fe_index=std::max(fe_index,
+                                  cell_active_fe_index[renumbering[position_cell+j]]);
 
-              new_active_fe_index.push_back(fe_indices[0]);
+              new_active_fe_index.push_back(fe_index);
               position_cell += n_comp;
             }
           std::swap (new_active_fe_index, cell_active_fe_index);
         }
+      if (have_hp)
+        AssertDimension(cell_active_fe_index.size(),
+                        task_info.cell_partition_data.back());
 
-      std::vector<std::array<unsigned int, 3> > new_row_starts;
+      const unsigned int n_components = start_components.back();
+
+      std::vector<std::pair<unsigned int, unsigned int> >
+      new_row_starts(vectorization_length * n_components * task_info.cell_partition_data.back()+1);
       std::vector<unsigned int> new_dof_indices;
       std::vector<std::pair<unsigned short,unsigned short> >
       new_constraint_indicator;
       std::vector<unsigned int> new_plain_indices, new_rowstart_plain;
       unsigned int position_cell = 0;
-      new_row_starts.resize(task_info.cell_partition_data.back()+1);
       new_dof_indices.reserve (dof_indices.size());
       new_constraint_indicator.reserve (constraint_indicator.size());
       if (store_plain_indices == true)
         {
-          new_rowstart_plain.resize (task_info.cell_partition_data.back()+1,
+          new_rowstart_plain.resize (vectorization_length *
+                                     task_info.cell_partition_data.back() + 1,
                                      numbers::invalid_unsigned_int);
           new_plain_indices.reserve (plain_dof_indices.size());
         }
 
-      // copy the indices and the constraint indicators to the new data field:
-      // Store the indices in a way so that adjacent data fields in local
-      // vectors are adjacent, i.e., first dof index 0 for all vectors, then
-      // dof index 1 for all vectors, and so on. This involves some extra
-      // resorting.
-      std::vector<const unsigned int *> glob_indices (vectorization_length);
-      std::vector<const unsigned int *> plain_glob_indices (vectorization_length);
-      std::vector<const std::pair<unsigned short,unsigned short>*>
-      constr_ind(vectorization_length), constr_end(vectorization_length);
-      std::vector<unsigned int> index(vectorization_length);
+      // copy the indices and the constraint indicators to the new data field,
+      // where we will go through the cells in the renumbered way. in case the
+      // vectorization length does not exactly match up, we fill invalid
+      // numbers into the rowstart data. for contiguous cell indices, we skip
+      // the rowstarts field completely and directly go into the
+      // new_dof_indices field (this layout is used in FEEvaluation).
       for (unsigned int i=0; i<task_info.cell_partition_data.back(); ++i)
         {
-          const unsigned int dofs_mcell =
-            dofs_per_cell[cell_active_fe_index.size() == 0 ? 0 :
-                          cell_active_fe_index[i]] * vectorization_length;
-          new_row_starts[i][0] = new_dof_indices.size();
-          new_row_starts[i][1] = new_constraint_indicator.size();
-          new_row_starts[i][2] = irregular_cells[i];
-
-          const unsigned int n_comp = (irregular_cells[i]>0 ?
+          const unsigned int n_vect = (irregular_cells[i]>0 ?
                                        irregular_cells[i] : vectorization_length);
+          const unsigned int dofs_per_cell = have_hp ?
+                                             this->dofs_per_cell[cell_active_fe_index[i]] : this->dofs_per_cell[0];
 
-          for (unsigned int j=0; j<n_comp; ++j)
+          for (unsigned int j=0; j<n_vect; ++j)
             {
-              glob_indices[j] = begin_indices(renumbering[position_cell+j]);
-              constr_ind[j] = begin_indicators(renumbering[position_cell+j]);
-              constr_end[j] = end_indicators(renumbering[position_cell+j]);
-              index[j] = 0;
-            }
-
-          bool has_constraints = false;
-          if (store_plain_indices == true)
-            {
-              for (unsigned int j=0; j<n_comp; ++j)
-                if (begin_indicators(renumbering[position_cell+j]) <
-                    end_indicators(renumbering[position_cell+j]))
-                  {
-                    plain_glob_indices[j] =
-                      begin_indices_plain (renumbering[position_cell+j]);
-                    has_constraints = true;
-                  }
-                else
-                  plain_glob_indices[j] =
-                    begin_indices (renumbering[position_cell+j]);
-              if (has_constraints == true)
-                new_rowstart_plain[i] = new_plain_indices.size();
+              const unsigned int cell_no = renumbering[position_cell+j]*n_components;
+              for (unsigned int comp=0; comp<n_components; ++comp)
+                {
+                  new_row_starts[(i*vectorization_length+j)*n_components+comp].first
+                    = new_dof_indices.size();
+                  new_row_starts[(i*vectorization_length+j)*n_components+comp].second
+                    = new_constraint_indicator.size();
+
+                  new_dof_indices.insert(new_dof_indices.end(),
+                                         &dof_indices[row_starts[cell_no+comp].first],
+                                         &dof_indices[row_starts[cell_no+comp+1].first]);
+                  for (unsigned int index = row_starts[cell_no+comp].second;
+                       index != row_starts[cell_no+comp+1].second; ++index)
+                    new_constraint_indicator.push_back(constraint_indicator[index]);
+                }
+              if (store_plain_indices && row_starts[cell_no].second !=
+                  row_starts[cell_no+n_components].second)
+                {
+                  new_rowstart_plain[i*vectorization_length+j] =
+                    new_plain_indices.size();
+                  new_plain_indices.insert(new_plain_indices.end(),
+                                           &plain_dof_indices[row_starts_plain_indices[cell_no/n_components]],
+                                           &plain_dof_indices[row_starts_plain_indices[cell_no/n_components]]+dofs_per_cell);
+                }
             }
-
-          unsigned int m_ind_local = 0, m_index = 0;
-          while (m_ind_local < dofs_mcell)
-            for (unsigned int j=0; j<vectorization_length; ++j)
+          for (unsigned int j=n_vect; j<vectorization_length; ++j)
+            for (unsigned int comp=0; comp<n_components; ++comp)
               {
-                // last cell: nothing to do
-                if (j >= n_comp)
-                  {
-                    ++m_ind_local;
-                    continue;
-                  }
-
-                // otherwise, check if we are a constrained dof. The dof is
-                // not constrained if we are at the end of the row for the
-                // constraints (indi[j] == n_indi[j]) or if the local index[j]
-                // is smaller than the next position for a constraint. Then,
-                // just copy it. otherwise, copy all the entries that come
-                // with this dof
-                if (constr_ind[j] == constr_end[j] ||
-                    index[j] < constr_ind[j]->first)
-                  {
-                    new_dof_indices.push_back (*glob_indices[j]);
-                    ++m_index;
-                    ++index[j];
-                    ++glob_indices[j];
-                  }
-                else
-                  {
-                    const unsigned short constraint_loc = constr_ind[j]->second;
-                    new_constraint_indicator.emplace_back (m_index, constraint_loc);
-                    for (unsigned int k=constraint_pool_row_index[constraint_loc];
-                         k<constraint_pool_row_index[constraint_loc+1];
-                         ++k, ++glob_indices[j])
-                      new_dof_indices.push_back (*glob_indices[j]);
-                    ++constr_ind[j];
-                    m_index = 0;
-                    index[j] = 0;
-                  }
-                if (store_plain_indices==true && has_constraints==true)
-                  new_plain_indices.push_back (*plain_glob_indices[j]++);
-                ++m_ind_local;
+                new_row_starts[(i*vectorization_length+j)*n_components+comp].first
+                  = new_dof_indices.size();
+                new_row_starts[(i*vectorization_length+j)*n_components+comp].second
+                  = new_constraint_indicator.size();
               }
-
-          for (unsigned int j=0; j<n_comp; ++j)
-            Assert (glob_indices[j]==end_indices(renumbering[position_cell+j]),
-                    ExcInternalError());
-          position_cell += n_comp;
+          position_cell += n_vect;
         }
-      AssertDimension (position_cell+1, row_starts.size());
-
-      new_row_starts[task_info.cell_partition_data.back()][0] = new_dof_indices.size();
-      new_row_starts[task_info.cell_partition_data.back()][1] = new_constraint_indicator.size();
-      new_row_starts[task_info.cell_partition_data.back()][2] = 0;
+      AssertDimension (position_cell*n_components+1, row_starts.size());
 
       AssertDimension(dof_indices.size(), new_dof_indices.size());
+      new_row_starts[task_info.cell_partition_data.back()*vectorization_length*n_components].first
+        = new_dof_indices.size();
+      new_row_starts[task_info.cell_partition_data.back()*vectorization_length*n_components].second
+        = new_constraint_indicator.size();
+
       AssertDimension(constraint_indicator.size(),
                       new_constraint_indicator.size());
 
@@ -592,9 +575,11 @@ no_constraint:
       // constraint pool.
       for (unsigned int row=0; row<task_info.cell_partition_data.back(); ++row)
         {
-          const unsigned int row_length_ind = row_length_indices(row);
+          const unsigned int row_length_ind = row_starts[(row*vectorization_length+1)*n_components].first -
+                                              row_starts[row*vectorization_length*n_components].first;
           const std::pair<unsigned short,unsigned short>
-          *con_it = begin_indicators(row), * end_con = end_indicators(row);
+          *con_it = &constraint_indicator[row_starts[row*vectorization_length*n_components].second],
+           * end_con = &constraint_indicator[row_starts[(row*vectorization_length+1)*n_components].second];
           for ( ; con_it != end_con; ++con_it)
             {
               AssertIndexRange (con_it->first, row_length_ind+1);
@@ -612,6 +597,251 @@ no_constraint:
           n_active_cells += vectorization_length;
       AssertDimension(n_active_cells, task_info.n_active_cells);
 #endif
+
+      compute_cell_index_compression(irregular_cells);
+    }
+
+
+
+    void
+    DoFInfo::compute_cell_index_compression
+    (const std::vector<unsigned char> &irregular_cells)
+    {
+      const bool have_hp = dofs_per_cell.size() > 1;
+      const unsigned int n_components = start_components.back();
+
+      Assert(row_starts.size() % vectorization_length == 1, ExcInternalError());
+      if (vectorization_length > 1)
+        AssertDimension(row_starts.size()/vectorization_length/n_components,
+                        irregular_cells.size());
+      index_storage_variants[2].resize(irregular_cells.size(),
+                                       IndexStorageVariants::full);
+      n_vectorization_lanes_filled[2].resize(irregular_cells.size());
+      for (unsigned int i=0; i<irregular_cells.size(); ++i)
+        if (irregular_cells[i] > 0)
+          n_vectorization_lanes_filled[2][i] = irregular_cells[i];
+        else
+          n_vectorization_lanes_filled[2][i] = vectorization_length;
+
+      dof_indices_contiguous[2].resize(irregular_cells.size()*vectorization_length,
+                                       numbers::invalid_unsigned_int);
+      dof_indices_interleaved.resize(dof_indices.size(), numbers::invalid_unsigned_int);
+
+      std::vector<unsigned int> index_kinds(static_cast<unsigned int>(IndexStorageVariants::contiguous)+1);
+      std::vector<unsigned int> offsets(vectorization_length);
+      for (unsigned int i=0; i<irregular_cells.size(); ++i)
+        {
+          const unsigned int ndofs = dofs_per_cell[have_hp ? cell_active_fe_index[i] : 0];
+          const unsigned int n_comp = n_vectorization_lanes_filled[2][i];
+
+          // check 1: Check if there are constraints -> no compression possible
+          bool has_constraints = false;
+          for (unsigned int j=0; j<n_comp; ++j)
+            {
+              const unsigned int cell_no = i*vectorization_length+j;
+              if (row_starts[cell_no*n_components].second != row_starts[(cell_no+1)*n_components].second)
+                {
+                  has_constraints = true;
+                  break;
+                }
+            }
+          if (has_constraints)
+            index_storage_variants[2][i] = IndexStorageVariants::full;
+          else
+            {
+              bool indices_are_contiguous = true;
+              for (unsigned int j=0; j<n_comp; ++j)
+                {
+                  const unsigned int cell_no = i*vectorization_length+j;
+                  const unsigned int *dof_indices = &this->dof_indices[row_starts[cell_no*n_components].first];
+                  AssertDimension(ndofs, row_starts[(cell_no+1)*n_components].first-
+                                  row_starts[cell_no*n_components].first);
+                  for (unsigned int i=1; i<ndofs; ++i)
+                    if (dof_indices[i] != dof_indices[0]+i)
+                      {
+                        indices_are_contiguous = false;
+                        break;
+                      }
+                }
+              bool indices_are_interleaved_and_contiguous = (ndofs > 1 &&
+                                                             n_comp == vectorization_length);
+              {
+                const unsigned int *dof_indices =
+                  &this->dof_indices[row_starts[i*vectorization_length*n_components].first];
+                for (unsigned int k=0; k<ndofs; ++k)
+                  for (unsigned int j=0; j<n_comp; ++j)
+                    if (dof_indices[j*ndofs+k] != dof_indices[0] + k*n_comp + j)
+                      {
+                        indices_are_interleaved_and_contiguous = false;
+                        break;
+                      }
+              }
+              if (indices_are_contiguous ||
+                  indices_are_interleaved_and_contiguous)
+                {
+                  for (unsigned int j=0; j<n_comp; ++j)
+                    dof_indices_contiguous[2][i*vectorization_length+j] =
+                      this->dof_indices[row_starts[(i*vectorization_length+j)*n_components].first];
+                }
+
+              if (indices_are_contiguous)
+                {
+                  index_storage_variants[2][i] = IndexStorageVariants::contiguous;
+                }
+              else
+                {
+                  const unsigned int *dof_indices =
+                    &this->dof_indices[row_starts[i*vectorization_length*n_components].first];
+                  if (n_comp == vectorization_length)
+                    index_storage_variants[2][i] = IndexStorageVariants::interleaved;
+                  else
+                    index_storage_variants[2][i] = IndexStorageVariants::full;
+                  // interleaved storage is only kept if no two lanes share an
+                  // index for the same local DoF (otherwise a vectorized
+                  // scatter could write the same entry from two lanes)
+                  for (unsigned int k=0; k<ndofs; ++k)
+                    for (unsigned int j=0; j<n_comp; ++j)
+                      for (unsigned int l=j+1; l<n_comp; ++l)
+                        if (dof_indices[j*ndofs+k] == dof_indices[l*ndofs+k])
+                          {
+                            index_storage_variants[2][i] = IndexStorageVariants::full;
+                            break;
+                          }
+                  if (index_storage_variants[2][i] != IndexStorageVariants::full)
+                    {
+                      unsigned int *interleaved_dof_indices =
+                        &this->dof_indices_interleaved[row_starts[i*vectorization_length*n_components].first];
+                      for (unsigned int k=0; k<ndofs; ++k)
+                        for (unsigned int j=0; j<n_comp; ++j)
+                          interleaved_dof_indices[k*n_comp+j] = dof_indices[j*ndofs+k];
+                    }
+                }
+            }
+          index_kinds[static_cast<unsigned int>(index_storage_variants[2][i])]++;
+        }
+    }
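
To make the classification in compute_cell_index_compression() easier to follow, here is a simplified standalone sketch (not part of the patch; the enum and function are invented, and the real code additionally requires all lanes to be filled for interleaved storage and records the interleaved index ordering):

  #include <vector>

  enum class StorageVariant { full, contiguous, interleaved };

  // lane_indices[v] holds the DoF indices of lane v of one cell batch,
  // assumed unconstrained and of equal length in every lane
  inline StorageVariant
  classify_cell_batch(const std::vector<std::vector<unsigned int>> &lane_indices)
  {
    // contiguous: every lane stores first, first+1, first+2, ...
    bool contiguous = true;
    for (const auto &lane : lane_indices)
      for (unsigned int i = 1; i < lane.size(); ++i)
        if (lane[i] != lane[0] + i)
          contiguous = false;
    if (contiguous)
      return StorageVariant::contiguous;

    // interleaved: only if no two lanes share an index for the same local DoF
    for (unsigned int k = 0; k < lane_indices[0].size(); ++k)
      for (unsigned int v = 0; v < lane_indices.size(); ++v)
        for (unsigned int w = v + 1; w < lane_indices.size(); ++w)
          if (lane_indices[v][k] == lane_indices[w][k])
            return StorageVariant::full;
    return StorageVariant::interleaved;
  }
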
+
+
+
+    template <int length>
+    void
+    DoFInfo::compute_face_index_compression
+    (const std::vector<FaceToCellTopology<length> > &faces)
+    {
+      AssertDimension(length, vectorization_length);
+
+      index_storage_variants[0].resize(faces.size(), IndexStorageVariants::full);
+      dof_indices_contiguous[0].resize(faces.size()*length, numbers::invalid_unsigned_int);
+      n_vectorization_lanes_filled[0].resize(faces.size());
+
+      // all interior faces come before the boundary faces
+      unsigned int n_plus_faces = 0;
+      for (; n_plus_faces < faces.size(); ++n_plus_faces)
+        if (faces[n_plus_faces].cells_exterior[0] == numbers::invalid_unsigned_int)
+          break;
+      index_storage_variants[1].resize(n_plus_faces, IndexStorageVariants::full);
+      dof_indices_contiguous[1].resize(n_plus_faces*length, numbers::invalid_unsigned_int);
+      n_vectorization_lanes_filled[1].resize(n_plus_faces);
+
+      for (unsigned int face=0; face<faces.size(); ++face)
+        {
+          auto face_computation = [&](const unsigned int  face_index,
+                                      const unsigned int *cell_indices_face)
+          {
+            bool is_contiguous = false;
+            bool needs_full_storage = false;
+            for (unsigned int v=0; v<length && cell_indices_face[v] !=
+                 numbers::invalid_unsigned_int; ++v)
+              {
+                n_vectorization_lanes_filled[face_index][face]++;
+                if (index_storage_variants[2][cell_indices_face[v]/length] ==
+                    IndexStorageVariants::contiguous)
+                  is_contiguous = true;
+                if (index_storage_variants[2][cell_indices_face[v]/length] <
+                    IndexStorageVariants::contiguous)
+                  needs_full_storage = true;
+              }
+            if (is_contiguous)
+              for (unsigned int v=0; v<n_vectorization_lanes_filled[face_index][face]; ++v)
+                dof_indices_contiguous[face_index][face*length+v] =
+                  dof_indices_contiguous[2][cell_indices_face[v]];
+            if (is_contiguous && !needs_full_storage)
+              index_storage_variants[face_index][face] = IndexStorageVariants::contiguous;
+            else
+              index_storage_variants[face_index][face] = IndexStorageVariants::full;
+          };
+
+          face_computation(0, faces[face].cells_interior);
+          if (face < n_plus_faces)
+            face_computation(1, faces[face].cells_exterior);
+        }
+    }
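
As a small standalone illustration of the face_computation lambda above (not part of the patch; names invented): a face can only use contiguous index storage if every adjacent cell in the filled lanes stores its indices contiguously, and it needs at least one such cell.

  #include <vector>

  enum class FaceStorage { full, contiguous };

  inline FaceStorage
  classify_face(const std::vector<bool> &adjacent_cell_is_contiguous)
  {
    bool any_contiguous = false;
    bool any_other      = false;
    for (const bool c : adjacent_cell_is_contiguous)
      if (c)
        any_contiguous = true;
      else
        any_other = true;
    return (any_contiguous && !any_other) ? FaceStorage::contiguous
                                          : FaceStorage::full;
  }
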
+
+
+
+    template <int length>
+    void
+    DoFInfo::compute_vector_zero_access_pattern
+    (const TaskInfo                                 &task_info,
+     const std::vector<FaceToCellTopology<length> > &faces)
+    {
+      // for each block of chunk_size_zero_vector degrees of freedom, record
+      // the first partition chunk of the loop that touches it
+      AssertDimension(length, vectorization_length);
+      const unsigned int n_components = start_components.back();
+      const unsigned int n_dofs = vector_partitioner->local_size() +
+                                  vector_partitioner->n_ghost_indices();
+      std::vector<unsigned int> touched_by((n_dofs+chunk_size_zero_vector-1)/
+                                           chunk_size_zero_vector,
+                                           numbers::invalid_unsigned_int);
+      for (unsigned int part = 0; part < task_info.partition_row_index.size()-2; ++part)
+        for (unsigned int chunk = task_info.partition_row_index[part];
+             chunk < task_info.partition_row_index[part+1]; ++chunk)
+          {
+            for (unsigned int cell = task_info.cell_partition_data[chunk];
+                 cell < task_info.cell_partition_data[chunk+1]; ++cell)
+              {
+                for (unsigned int it = row_starts[cell*vectorization_length*n_components].first;
+                     it != row_starts[(cell+1)*vectorization_length*n_components].first; ++it)
+                  {
+                    const unsigned int myindex = dof_indices[it]/chunk_size_zero_vector;
+                    if (touched_by[myindex]==numbers::invalid_unsigned_int)
+                      touched_by[myindex] = chunk;
+                  }
+              }
+            if (faces.size() > 0)
+              for (unsigned int face = task_info.face_partition_data[chunk];
+                   face < task_info.face_partition_data[chunk+1]; ++face)
+                for (unsigned int v=0; v<length &&
+                     faces[face].cells_exterior[v] != numbers::invalid_unsigned_int; ++v)
+                  {
+                    const unsigned int cell=faces[face].cells_exterior[v];
+                    for (unsigned int it = row_starts[cell*n_components].first;
+                         it != row_starts[(cell+1)*n_components].first; ++it)
+                      {
+                        const unsigned int myindex = dof_indices[it]/chunk_size_zero_vector;
+                        if (touched_by[myindex]==numbers::invalid_unsigned_int)
+                          touched_by[myindex] = chunk;
+                      }
+                  }
+          }
+
+      vector_zero_range_list_index.resize(1 + task_info.partition_row_index
+                                          [task_info.partition_row_index.size()-2],
+                                          numbers::invalid_unsigned_int);
+      std::map<unsigned int, std::vector<unsigned int> > chunk_must_zero_vector;
+      for (unsigned int i=0; i<touched_by.size(); ++i)
+        chunk_must_zero_vector[touched_by[i]].push_back(i);
+      vector_zero_range_list.clear();
+      vector_zero_range_list_index[0] = 0;
+      for (unsigned int chunk=0; chunk<vector_zero_range_list_index.size()-1; ++chunk)
+        {
+          auto it = chunk_must_zero_vector.find(chunk);
+          if (it != chunk_must_zero_vector.end())
+            {
+              for (unsigned int i : it->second)
+                vector_zero_range_list.push_back(i);
+              vector_zero_range_list_index[chunk+1] = vector_zero_range_list.size();
+            }
+          else
+            vector_zero_range_list_index[chunk+1] = vector_zero_range_list_index[chunk];
+        }
     }
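
A standalone sketch (not part of the patch; plain std::vector<double> data and invented names) of how vector_zero_range_list_index and vector_zero_range_list could be consumed: when partition chunk 'chunk' is reached, exactly those blocks of chunk_size_zero_vector entries whose first touch happens in this chunk are set to zero.

  #include <algorithm>
  #include <cstddef>
  #include <vector>

  constexpr std::size_t block_size = 8192; // plays the role of chunk_size_zero_vector

  inline void
  zero_blocks_of_chunk(const unsigned int               chunk,
                       const std::vector<unsigned int> &range_list_index,
                       const std::vector<unsigned int> &range_list,
                       std::vector<double>             &vec)
  {
    for (unsigned int i = range_list_index[chunk]; i < range_list_index[chunk + 1]; ++i)
      {
        const std::size_t begin = range_list[i] * block_size;
        const std::size_t end   = std::min(begin + block_size, vec.size());
        std::fill(vec.begin() + begin, vec.begin() + end, 0.);
      }
  }
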
 
 
@@ -660,6 +890,8 @@ no_constraint:
         }
       };
 
+
+
       // We construct the connectivity graph in parallel. We use one lock per
       // 256 degrees of freedom to keep the number of locks at a reasonable
       // level and to reduce the cost of locking to some extent.
@@ -672,13 +904,13 @@ no_constraint:
                                std::vector<unsigned int>   &row_lengths)
       {
         std::vector<unsigned int> scratch;
-        constexpr unsigned int n_components = 1;
+        const unsigned int n_components = dof_info.start_components.back();
         for (unsigned int block=begin; block<end; ++block)
           {
             scratch.clear();
             scratch.insert(scratch.end(),
-                           &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
-                           &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]]);
+                           &dof_info.dof_indices[dof_info.row_starts[block*n_components].first],
+                           &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components].first]);
             std::sort(scratch.begin(), scratch.end());
             std::vector<unsigned int>::const_iterator end_unique =
               std::unique(scratch.begin(), scratch.end());
@@ -707,13 +939,13 @@ no_constraint:
                                   dealii::SparsityPattern         &connectivity_dof)
       {
         std::vector<unsigned int> scratch;
-        const unsigned int n_components = 1;
+        const unsigned int n_components = dof_info.start_components.back();
         for (unsigned int block=begin; block<end; ++block)
           {
             scratch.clear();
             scratch.insert(scratch.end(),
-                           &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
-                           &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]]);
+                           &dof_info.dof_indices[dof_info.row_starts[block*n_components].first],
+                           &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components].first]);
             std::sort(scratch.begin(), scratch.end());
             std::vector<unsigned int>::const_iterator end_unique =
               std::unique(scratch.begin(), scratch.end());
@@ -738,14 +970,14 @@ no_constraint:
                              DynamicSparsityPattern          &connectivity)
       {
         ordered_vector row_entries;
-        const unsigned int n_components = 1;
+        const unsigned int n_components = dof_info.start_components.back();
         for (unsigned int block=begin; block < end; ++block)
           {
             row_entries.clear();
 
             const unsigned int
-            *it = &dof_info.dof_indices[dof_info.row_starts[block*n_components][0]],
-             *end_cell = &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components][0]];
+            *it = &dof_info.dof_indices[dof_info.row_starts[block*n_components].first],
+             *end_cell = &dof_info.dof_indices[dof_info.row_starts[(block+1)*n_components].first];
             for ( ; it != end_cell; ++it)
               {
                 SparsityPattern::iterator sp = connectivity_dof.begin(*it);
@@ -760,6 +992,7 @@ no_constraint:
     }
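
As a standalone illustration of what the row-length and connectivity helpers above compute (not part of the patch; the function is invented): two cell blocks end up connected in the sparsity pattern exactly when their sorted, de-duplicated DoF index lists share at least one entry.

  #include <algorithm>
  #include <iterator>
  #include <vector>

  inline bool
  cells_are_connected(std::vector<unsigned int> a, std::vector<unsigned int> b)
  {
    std::sort(a.begin(), a.end());
    std::sort(b.begin(), b.end());
    std::vector<unsigned int> shared;
    std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                          std::back_inserter(shared));
    return !shared.empty();
  }
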
 
 
+
     void
     DoFInfo::make_connectivity_graph
     (const TaskInfo                  &task_info,
@@ -828,25 +1061,33 @@ no_constraint:
 
 
 
-    void DoFInfo::renumber_dofs (std::vector<types::global_dof_index> &renumbering)
+    void DoFInfo
+    ::compute_dof_renumbering (std::vector<types::global_dof_index> &renumbering)
     {
-      // first renumber all locally owned degrees of freedom
-      AssertDimension (vector_partitioner->local_size(),
-                       vector_partitioner->size());
       const unsigned int local_size = vector_partitioner->local_size();
       renumbering.resize (0);
       renumbering.resize (local_size, numbers::invalid_dof_index);
 
       types::global_dof_index counter = 0;
-      std::vector<unsigned int>::iterator dof_ind = dof_indices.begin(),
-                                          end_ind = dof_indices.end();
-      for ( ; dof_ind != end_ind; ++dof_ind)
+      const unsigned int n_components = start_components.back();
+      const unsigned int n_macro_cells = n_vectorization_lanes_filled[2].size();
+      Assert(n_macro_cells <= (row_starts.size()-1)/vectorization_length/n_components,
+             ExcInternalError());
+      for (unsigned int cell_no=0; cell_no<n_macro_cells; ++cell_no)
         {
-          if (*dof_ind < local_size)
+          // do not renumber in case we have constraints
+          if (row_starts[cell_no*n_components*vectorization_length].second ==
+              row_starts[(cell_no+1)*n_components*vectorization_length].second)
             {
-              if (renumbering[*dof_ind] == numbers::invalid_dof_index)
-                renumbering[*dof_ind] = counter++;
-              *dof_ind = renumbering[*dof_ind];
+              const unsigned int ndofs = dofs_per_cell.size() == 1 ? dofs_per_cell[0] :
+                                         (dofs_per_cell[cell_active_fe_index.size()>0 ?
+                                                        cell_active_fe_index[cell_no] : 0]);
+              const unsigned int *dof_ind = &dof_indices[row_starts[cell_no*n_components*vectorization_length].first];
+              for (unsigned int i=0; i<ndofs; ++i)
+                for (unsigned int j=0; j<n_vectorization_lanes_filled[2][cell_no]; ++j)
+                  if (dof_ind[j*ndofs+i]<local_size)
+                    if (renumbering[dof_ind[j*ndofs+i]] == numbers::invalid_dof_index)
+                      renumbering[dof_ind[j*ndofs+i]] = counter++;
             }
         }
 
@@ -855,19 +1096,6 @@ no_constraint:
         if (renumbering[i] == numbers::invalid_dof_index)
           renumbering[i] = counter++;
 
-      // adjust the constrained DoFs
-      std::vector<unsigned int> new_constrained_dofs (constrained_dofs.size());
-      for (std::size_t i=0; i<constrained_dofs.size(); ++i)
-        new_constrained_dofs[i] = renumbering[constrained_dofs[i]];
-
-      // the new constrained DoFs should be sorted already as they are not
-      // contained in dof_indices and then get contiguous numbers
-#ifdef DEBUG
-      for (std::size_t i=1; i<new_constrained_dofs.size(); ++i)
-        Assert (new_constrained_dofs[i] > new_constrained_dofs[i-1], ExcInternalError());
-#endif
-      std::swap (constrained_dofs, new_constrained_dofs);
-
       // transform indices to global index space
       for (std::size_t i=0; i<renumbering.size(); ++i)
         renumbering[i] = vector_partitioner->local_to_global(renumbering[i]);
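
The loop above assigns new numbers in the order in which the cell loop first encounters an index; a compact standalone sketch of this first-touch renumbering (not part of the patch, ignoring the constraint and vectorization handling of the real code):

  #include <vector>

  inline std::vector<unsigned int>
  first_touch_renumbering(const std::vector<std::vector<unsigned int>> &cell_dofs,
                          const unsigned int                            n_dofs)
  {
    const unsigned int invalid = static_cast<unsigned int>(-1);
    std::vector<unsigned int> renumbering(n_dofs, invalid);
    unsigned int counter = 0;
    for (const auto &dofs : cell_dofs)     // cells in loop order
      for (const unsigned int dof : dofs)  // local DoFs of the cell
        if (renumbering[dof] == invalid)
          renumbering[dof] = counter++;
    for (unsigned int &r : renumbering)    // indices never touched by a cell
      if (r == invalid)
        r = counter++;
    return renumbering;
  }
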
@@ -881,7 +1109,7 @@ no_constraint:
     DoFInfo::memory_consumption () const
     {
       std::size_t memory = sizeof(*this);
-      memory += (row_starts.capacity()*sizeof(std::array<unsigned int,3>));
+      memory += (row_starts.capacity()*sizeof(std::pair<unsigned int,unsigned int>));
       memory += MemoryConsumption::memory_consumption (dof_indices);
       memory += MemoryConsumption::memory_consumption (row_starts_plain_indices);
       memory += MemoryConsumption::memory_consumption (plain_dof_indices);
@@ -899,7 +1127,7 @@ no_constraint:
     {
       out << "       Memory row starts indices:    ";
       task_info.print_memory_statistics
-      (out, (row_starts.capacity()*sizeof(std::array<unsigned int, 3>)));
+      (out, (row_starts.capacity()*sizeof(*row_starts.begin())));
       out << "       Memory dof indices:           ";
       task_info.print_memory_statistics
       (out, MemoryConsumption::memory_consumption (dof_indices));
@@ -926,16 +1154,18 @@ no_constraint:
       const unsigned int n_rows = row_starts.size() - 1;
       for (unsigned int row=0 ; row<n_rows ; ++row)
         {
+          if (row_starts[row].first == row_starts[row+1].first)
+            continue;
           out << "Entries row " << row << ": ";
-          const unsigned int *glob_indices = begin_indices(row),
-                              *end_row = end_indices(row);
+          const unsigned int *glob_indices = &dof_indices[row_starts[row].first],
+                              *end_row = &dof_indices[row_starts[row+1].first];
           unsigned int index = 0;
           const std::pair<unsigned short,unsigned short>
-          *con_it = begin_indicators(row),
-           * end_con = end_indicators(row);
+          *con_it = &constraint_indicator[row_starts[row].second],
+           * end_con = &constraint_indicator[row_starts[row+1].second];
           for ( ; con_it != end_con; ++con_it)
             {
-              for ( ; index<con_it->first; index++)
+              for (unsigned int j=0; j<con_it->first; ++j, ++index)
                 {
                   Assert (glob_indices+index != end_row, ExcInternalError());
                   out << glob_indices[index] << " ";
@@ -948,9 +1178,7 @@ no_constraint:
                 {
                   Assert (glob_indices+index != end_row, ExcInternalError());
                   out << glob_indices[index] << "/"
-                      << constraint_pool_data[k];
-                  if (k<constraint_pool_row_index[con_it->second+1]-1)
-                    out << " ";
+                      << constraint_pool_data[k] << " ";
                 }
               out << "] ";
             }
index 2001868d46467e04dec32acd7dea38397cd7b911..38ab4098030ed090c854415458cec6a177dd287b 100644 (file)
@@ -758,18 +758,22 @@ protected:
    * operation for several vectors at a time.
    */
   template <typename VectorType, typename VectorOperation>
-  void read_write_operation (const VectorOperation &operation,
-                             VectorType            *vectors[]) const;
+  void
+  read_write_operation (const VectorOperation &operation,
+                        VectorType            *vectors[],
+                        const bool             apply_constraints = true) const;
 
   /**
-   * For a collection of several vector @p src, read out the values on the
-   * degrees of freedom of the current cell for @p n_components (template
-   * argument), and store them internally. Similar functionality as the
-   * function DoFAccessor::read_dof_values. Note that if vectorization is
-   * enabled, the DoF values for several cells are set.
+   * A unified function to read from and write into vectors based on the given
+   * template operation for the case when we do not have an underlying
+   * MatrixFree object. It can perform the operation for @p read_dof_values,
+   * @p distribute_local_to_global, and @p set_dof_values. It performs the
+   * operation for several vectors at a time, depending on n_components.
    */
-  template <typename VectorType>
-  void read_dof_values_plain (const VectorType *src_data[]);
+  template <typename VectorType, typename VectorOperation>
+  void
+  read_write_operation_global (const VectorOperation &operation,
+                               VectorType            *vectors[]) const;
 
   /**
    * This is the general array for all data fields.
@@ -2338,9 +2342,10 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
   :
   scratch_data_array (data_in.acquire_scratch_data()),
   quad_no            (quad_no_in),
-  n_fe_components    (data_in.get_dof_info(dof_no).n_components),
+  n_fe_components    (data_in.get_dof_info(dof_no).start_components.back()),
   active_fe_index    (fe_degree != numbers::invalid_unsigned_int ?
-                      data_in.get_dof_info(dof_no).fe_index_from_degree(fe_degree)
+                      data_in.get_dof_info(dof_no).fe_index_from_degree
+                      (first_selected_component, fe_degree)
                       :
                       0),
   active_quad_index  (fe_degree != numbers::invalid_unsigned_int ?
@@ -2363,8 +2368,9 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
   dof_info           (&data_in.get_dof_info(dof_no)),
   mapping_data       (internal::MatrixFreeFunctions::MappingInfoCellsOrFaces<dim,Number,is_face>::get(data_in.get_mapping_info(), quad_no)),
   data               (&data_in.get_shape_info
-                      (dof_no, quad_no_in, active_fe_index,
-                       active_quad_index)),
+                      (dof_no, quad_no_in,
+                       dof_info->component_to_base_index[first_selected_component],
+                       active_fe_index, active_quad_index)),
   jacobian           (nullptr),
   J_value            (nullptr),
   normal_vectors     (nullptr),
@@ -2390,13 +2396,17 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
                    n_quadrature_points);
   AssertDimension (n_quadrature_points,
                    mapping_data->descriptor[active_quad_index].n_q_points);
-  Assert (n_fe_components == 1 ||
-          n_components == 1 ||
-          n_components == n_fe_components,
-          ExcMessage ("The underlying FE is vector-valued. In this case, the "
-                      "template argument n_components must be a the same "
-                      "as the number of underlying vector components."));
-
+  Assert(dof_info->start_components.back() == 1 ||
+         (int)n_components_ <=
+         (int)dof_info->start_components[dof_info->component_to_base_index[first_selected_component]+1] - first_selected_component,
+         ExcMessage("You tried to construct a vector-valued evaluator with " +
+                    Utilities::to_string(n_components) + " components. However, "
+                    "the current base element has only " +
+                    Utilities::to_string(dof_info->start_components[dof_info->component_to_base_index[first_selected_component]+1] - first_selected_component)
+                    + " components left when starting from local element index " +
+                    Utilities::to_string(first_selected_component-dof_info->start_components[dof_info->component_to_base_index[first_selected_component]])
+                    + " (global index " + Utilities::to_string(first_selected_component)
+                    + ")"));
 
   // do not check for correct dimensions of data fields here, should be done
   // in derived classes
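
The assertion above combines two DoFInfo arrays: start_components[b] holds the first component of base element b (with a final entry equal to the total number of components), and component_to_base_index[c] maps a component to its base element. A standalone sketch (not part of the patch; the free function is invented) of the quantity the message reports:

  #include <vector>

  // number of components still available in the base element that contains
  // 'first_selected_component'
  inline unsigned int
  components_left_in_base(const std::vector<unsigned int> &start_components,
                          const std::vector<unsigned int> &component_to_base_index,
                          const unsigned int               first_selected_component)
  {
    const unsigned int base = component_to_base_index[first_selected_component];
    return start_components[base + 1] - first_selected_component;
  }
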
@@ -3020,9 +3030,9 @@ namespace internal
       write_pos = sum;
     }
 
-    void process_empty (Number &res) const
+    void process_empty (VectorizedArray<Number> &res) const
     {
-      res = Number();
+      res = VectorizedArray<Number>();
     }
   };
 
@@ -3100,7 +3110,7 @@ namespace internal
     {
     }
 
-    void process_empty (Number &) const
+    void process_empty (VectorizedArray<Number> &) const
     {
     }
   };
@@ -3163,7 +3173,7 @@ namespace internal
     {
     }
 
-    void process_empty (Number &) const
+    void process_empty (VectorizedArray<Number> &) const
     {
     }
   };
@@ -3247,350 +3257,293 @@ inline
 void
 FEEvaluationBase<dim,n_components_,Number,is_face>
 ::read_write_operation (const VectorOperation &operation,
-                        VectorType            *src[]) const
+                        VectorType            *src[],
+                        const bool             apply_constraints) const
 {
-  // This functions processes all the functions read_dof_values,
-  // distribute_local_to_global, and set_dof_values with the same code. The
-  // distinction between these three cases is made by the input
-  // VectorOperation that either reads values from a vector and puts the data
-  // into the local data field or write local data into the vector. Certain
-  // operations are no-ops for the given use case.
-
   // Case 1: No MatrixFree object given, simple case because we do not need to
   // process constraints and need not care about vectorization
   if (matrix_info == nullptr)
     {
-      Assert (!local_dof_indices.empty(), ExcNotInitialized());
-
-      unsigned int index = first_selected_component * this->data->dofs_per_component_on_cell;
-      for (unsigned int comp = 0; comp<n_components; ++comp)
-        {
-          for (unsigned int i=0; i<this->data->dofs_per_component_on_cell; ++i, ++index)
-            {
-              operation.process_dof_global(local_dof_indices[this->data->lexicographic_numbering[index]],
-                                           *src[0], values_dofs[comp][i][0]);
-              for (unsigned int v=1; v<VectorizedArray<Number>::n_array_elements; ++v)
-                operation.process_empty(values_dofs[comp][i][v]);
-            }
-        }
+      read_write_operation_global(operation, src);
       return;
     }
 
-  // Some standard checks
   Assert (dof_info != nullptr, ExcNotInitialized());
   Assert (matrix_info->indices_initialized() == true,
           ExcNotInitialized());
-  Assert (cell != numbers::invalid_unsigned_int, ExcNotInitialized());
 
-  // loop over all local dofs. ind_local holds local number on cell, index
-  // iterates over the elements of index_local_to_global and dof_indices
-  // points to the global indices stored in index_local_to_global
-  const unsigned int *dof_indices = dof_info->begin_indices(cell);
-  const std::pair<unsigned short,unsigned short> *indicators =
-    dof_info->begin_indicators(cell);
-  const std::pair<unsigned short,unsigned short> *indicators_end =
-    dof_info->end_indicators(cell);
-  unsigned int ind_local = 0;
+  constexpr unsigned int face_vector_access_index = 2;
+
+  const unsigned int n_vectorization = VectorizedArray<Number>::n_array_elements;
   const unsigned int dofs_per_component = this->data->dofs_per_component_on_cell;
+  if (dof_info->index_storage_variants[is_face ? face_vector_access_index : 2][cell] ==
+      internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::interleaved)
+    {
+      const unsigned int *dof_indices =
+        &dof_info->dof_indices_interleaved[dof_info->row_starts[cell*n_vectorization*n_fe_components+first_selected_component].first];
+      if (n_components == 1 || n_fe_components == 1)
+        for (unsigned int i=0; i<dofs_per_component; ++i, dof_indices += n_vectorization)
+          for (unsigned int comp=0; comp<n_components; ++comp)
+            operation.process_dof_gather (dof_indices, *src[comp],
+                                          values_dofs[comp][i],
+                                          std::integral_constant<bool, std::is_same<typename VectorType::value_type,Number>::value>());
+      else
+        for (unsigned int comp=0; comp<n_components; ++comp)
+          for (unsigned int i=0; i<dofs_per_component; ++i, dof_indices += n_vectorization)
+            operation.process_dof_gather (dof_indices,
+                                          *src[0], values_dofs[comp][i],
+                                          std::integral_constant<bool, std::is_same<typename VectorType::value_type,Number>::value>());
+      return;
+    }
 
-  const unsigned int n_irreg_components_filled = dof_info->row_starts[cell][2];
-  const bool at_irregular_cell = n_irreg_components_filled > 0;
+  const unsigned int *dof_indices[n_vectorization];
+  VectorizedArray<Number> **values_dofs =
+    const_cast<VectorizedArray<Number> * *>(&this->values_dofs[0]);
 
-  // scalar case (or case when all components have the same degrees of freedom
-  // and sit on a different vector each)
-  if (n_fe_components == 1)
+  unsigned int cells_copied[VectorizedArray<Number>::n_array_elements];
+  const unsigned int *cells;
+  unsigned int n_vectorization_actual =
+    dof_info->n_vectorization_lanes_filled[face_vector_access_index][cell];
+  bool has_constraints = false;
+  if (is_face)
     {
-      for (unsigned int c=0; c<n_components; ++c)
-        Assert(src[c] != nullptr,
-               ExcMessage("The finite element underlying this FEEvaluation "
-                          "object is scalar, but you requested " +
-                          std::to_string(n_components) +
-                          " components via the template argument in "
-                          "FEEvaluation. In that case, you must pass an "
-                          "std::vector<VectorType> or a BlockVector to " +
-                          "read_dof_values and distribute_local_to_global."));
-
-      const unsigned int n_local_dofs =
-        VectorizedArray<Number>::n_array_elements * dofs_per_component;
-      for (unsigned int comp=0; comp<n_components; ++comp)
-        internal::check_vector_compatibility (*src[comp], *dof_info);
-      Number *local_data [n_components];
-      for (unsigned int comp=0; comp<n_components; ++comp)
-        local_data[comp] =
-          const_cast<Number *>(&values_dofs[comp][0][0]);
-
-      // standard case where there are sufficiently many cells to fill all
-      // vectors
-      if (at_irregular_cell == false)
+      if (face_vector_access_index == 2)
+        for (unsigned int v=0; v<n_vectorization_actual; ++v)
+          cells_copied[v] = cell*VectorizedArray<Number>::n_array_elements+v;
+      cells =
+        face_vector_access_index == 2 ?
+        &cells_copied[0]
+        :
+        (is_interior_face ?
+         &this->matrix_info->get_face_info(cell).cells_interior[0] :
+         &this->matrix_info->get_face_info(cell).cells_exterior[0]);
+      for (unsigned int v=0; v<n_vectorization_actual; ++v)
         {
-          // check whether there is any constraint on the current cell
-          if (indicators != indicators_end)
-            {
-              for ( ; indicators != indicators_end; ++indicators)
-                {
-                  // run through values up to next constraint
-                  for (unsigned int j=0; j<indicators->first; ++j)
-                    for (unsigned int comp=0; comp<n_components; ++comp)
-                      operation.process_dof (dof_indices[j], *src[comp],
-                                             local_data[comp][ind_local+j]);
-
-                  ind_local += indicators->first;
-                  dof_indices   += indicators->first;
-
-                  // constrained case: build the local value as a linear
-                  // combination of the global value according to constraints
-                  Number value [n_components];
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.pre_constraints (local_data[comp][ind_local],
-                                               value[comp]);
-
-                  const Number *data_val =
-                    matrix_info->constraint_pool_begin(indicators->second);
-                  const Number *end_pool =
-                    matrix_info->constraint_pool_end(indicators->second);
-                  for ( ; data_val != end_pool; ++data_val, ++dof_indices)
-                    for (unsigned int comp=0; comp<n_components; ++comp)
-                      operation.process_constraint (*dof_indices, *data_val,
-                                                    *src[comp], value[comp]);
+          Assert(cells[v] < dof_info->row_starts.size()-1, ExcInternalError());
+          has_constraints = has_constraints ||
+                            dof_info->row_starts[cells[v]*n_fe_components+first_selected_component+n_components].second !=
+                            dof_info->row_starts[cells[v]*n_fe_components+first_selected_component].second;
+          dof_indices[v] = &dof_info->dof_indices[dof_info->row_starts[cells[v]*n_fe_components+first_selected_component].first];
+        }
+    }
+  else
+    {
+      AssertIndexRange((cell+1)*n_vectorization*n_fe_components, dof_info->row_starts.size());
+      const unsigned int n_components_read = n_fe_components > 1 ? n_components : 1;
+      for (unsigned int v=0; v<n_vectorization_actual; ++v)
+        {
+          if (dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component+n_components_read].second !=
+              dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component].second)
+            has_constraints = true;
+          dof_indices[v] = &dof_info->dof_indices[dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component].first];
+        }
+    }
 
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.post_constraints (value[comp],
-                                                local_data[comp][ind_local]);
+  // Case where we have no constraints throughout the whole cell: Can go
+  // through the list of DoFs directly
+  if (!has_constraints)
+    {
+      if (n_vectorization_actual < n_vectorization)
+        for (unsigned int comp=0; comp<n_components; ++comp)
+          for (unsigned int i=0; i<dofs_per_component; ++i)
+            operation.process_empty(values_dofs[comp][i]);
+      if (n_components == 1 || n_fe_components == 1)
+        {
+          for (unsigned int v=0; v<n_vectorization_actual; ++v)
+            for (unsigned int i=0; i<dofs_per_component; ++i)
+              for (unsigned int comp=0; comp<n_components; ++comp)
+                operation.process_dof (dof_indices[v][i], *src[comp],
+                                       values_dofs[comp][i][v]);
+        }
+      else
+        {
+          for (unsigned int comp=0; comp<n_components; ++comp)
+            for (unsigned int v=0; v<n_vectorization_actual; ++v)
+              for (unsigned int i=0; i<dofs_per_component; ++i)
+                operation.process_dof (dof_indices[v][comp*dofs_per_component+i],
+                                       *src[0], values_dofs[comp][i][v]);
+        }
+      return;
+    }
 
-                  ind_local++;
-                }
+  // In the case where there are constraints to be resolved, loop over all
+  // filled vectorization lanes and then over the local dofs. ind_local holds
+  // the local number on the cell and dof_indices[v] points to the global
+  // indices of the current lane.
+  if (n_vectorization_actual < n_vectorization)
+    for (unsigned int comp=0; comp<n_components; ++comp)
+      for (unsigned int i=0; i<dofs_per_component; ++i)
+        operation.process_empty(values_dofs[comp][i]);
+  for (unsigned int v=0; v<n_vectorization_actual; ++v)
+    {
+      unsigned int index_indicators, next_index_indicators;
+      const unsigned int n_components_read = n_fe_components > 1 ? n_components : 1;
+      if (is_face)
+        {
+          index_indicators = dof_info->row_starts[cells[v]*n_fe_components+first_selected_component].second;
+          next_index_indicators = dof_info->row_starts[cells[v]*n_fe_components+first_selected_component+1].second;
+        }
+      else
+        {
+          index_indicators = dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component].second;
+          next_index_indicators = dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component+1].second;
+        }
 
-              // get the dof values past the last constraint
-              for (; ind_local < n_local_dofs; ++dof_indices, ++ind_local)
-                {
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.process_dof (*dof_indices, *src[comp],
-                                           local_data[comp][ind_local]);
-                }
-            }
-          else
-            {
-              // no constraint at all: compiler can unroll at least the
-              // vectorization loop
-              AssertDimension (dof_info->end_indices(cell)-dof_indices,
-                               static_cast<int>(n_local_dofs));
-              for (unsigned int j=0, ind=0; j<dofs_per_component; ++j, ind += VectorizedArray<Number>::n_array_elements)
-                for (unsigned int comp=0; comp<n_components; ++comp)
-                  operation.process_dof_gather(dof_indices+ind,
-                                               *src[comp], values_dofs[comp][j],
-                                               std::integral_constant<bool, std::is_same<typename VectorType::value_type,Number>::value>());
-            }
+      if (apply_constraints == false &&
+          dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component].second !=
+          dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component+n_components_read].second)
+        {
+          Assert(dof_info->row_starts_plain_indices[cell*n_vectorization+v]
+                 != numbers::invalid_unsigned_int,
+                 ExcNotInitialized());
+          dof_indices[v] = is_face ?
+                           &dof_info->plain_dof_indices[dof_info->row_starts_plain_indices[cells[v]]]
+                           :
+                           &dof_info->plain_dof_indices[dof_info->row_starts_plain_indices[cell*n_vectorization+v]];
+          dof_indices[v] += dof_info->component_dof_indices_offset[active_fe_index][first_selected_component];
+          next_index_indicators = index_indicators;
         }
 
-      // non-standard case: need to fill in zeros for those components that
-      // are not present (a bit more expensive), but there is not more than
-      // one such cell
-      else
+      if (n_components == 1 || n_fe_components == 1)
         {
-          Assert (n_irreg_components_filled > 0, ExcInternalError());
-          for ( ; indicators != indicators_end; ++indicators)
+          for (unsigned int c=0; c<n_components; ++c)
+            Assert(src[c] != nullptr,
+                   ExcMessage("The finite element underlying this FEEvaluation "
+                              "object is scalar, but you requested " +
+                              std::to_string(n_components) +
+                              " components via the template argument in "
+                              "FEEvaluation. In that case, you must pass an "
+                              "std::vector<VectorType> or a BlockVector to " +
+                              "read_dof_values and distribute_local_to_global."));
+
+          unsigned int ind_local = 0;
+          for ( ; index_indicators != next_index_indicators; ++index_indicators)
             {
-              for (unsigned int j=0; j<indicators->first; ++j)
-                {
-                  // non-constrained case: copy the data from the global
-                  // vector, src, to the local one, local_src.
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.process_dof (dof_indices[j], *src[comp],
-                                           local_data[comp][ind_local]);
-
-                  // here we jump over all the components that are artificial
-                  ++ind_local;
-                  while (ind_local % VectorizedArray<Number>::n_array_elements
-                         >= n_irreg_components_filled)
-                    {
-                      for (unsigned int comp=0; comp<n_components; ++comp)
-                        operation.process_empty (local_data[comp][ind_local]);
-                      ++ind_local;
-                    }
-                }
-              dof_indices += indicators->first;
+              std::pair<unsigned short,unsigned short> indicator =
+                dof_info->constraint_indicator[index_indicators];
+              // run through values up to next constraint
+              for (unsigned int j=0; j<indicator.first; ++j)
+                for (unsigned int comp=0; comp<n_components; ++comp)
+                  operation.process_dof (dof_indices[v][j], *src[comp],
+                                         values_dofs[comp][ind_local+j][v]);
+
+              ind_local += indicator.first;
+              dof_indices[v] += indicator.first;
 
               // constrained case: build the local value as a linear
-              // combination of the global value according to constraint
+              // combination of the global value according to constraints
               Number value [n_components];
               for (unsigned int comp=0; comp<n_components; ++comp)
-                operation.pre_constraints (local_data[comp][ind_local],
+                operation.pre_constraints (values_dofs[comp][ind_local][v],
                                            value[comp]);
 
               const Number *data_val =
-                matrix_info->constraint_pool_begin(indicators->second);
+                matrix_info->constraint_pool_begin(indicator.second);
               const Number *end_pool =
-                matrix_info->constraint_pool_end(indicators->second);
-
-              for ( ; data_val != end_pool; ++data_val, ++dof_indices)
+                matrix_info->constraint_pool_end(indicator.second);
+              for ( ; data_val != end_pool; ++data_val, ++dof_indices[v])
                 for (unsigned int comp=0; comp<n_components; ++comp)
-                  operation.process_constraint (*dof_indices, *data_val,
+                  operation.process_constraint (*dof_indices[v], *data_val,
                                                 *src[comp], value[comp]);
 
               for (unsigned int comp=0; comp<n_components; ++comp)
                 operation.post_constraints (value[comp],
-                                            local_data[comp][ind_local]);
+                                            values_dofs[comp][ind_local][v]);
               ind_local++;
-              while (ind_local % VectorizedArray<Number>::n_array_elements
-                     >= n_irreg_components_filled)
-                {
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.process_empty (local_data[comp][ind_local]);
-                  ++ind_local;
-                }
             }
-          for (; ind_local<n_local_dofs; ++dof_indices)
-            {
-              Assert (dof_indices != dof_info->end_indices(cell),
-                      ExcInternalError());
 
-              // non-constrained case: copy the data from the global vector,
-              // src, to the local one, local_dst.
-              for (unsigned int comp=0; comp<n_components; ++comp)
-                operation.process_dof (*dof_indices, *src[comp],
-                                       local_data[comp][ind_local]);
-              ++ind_local;
-              while (ind_local % VectorizedArray<Number>::n_array_elements
-                     >= n_irreg_components_filled)
-                {
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    operation.process_empty(local_data[comp][ind_local]);
-                  ++ind_local;
-                }
-            }
+          AssertIndexRange(ind_local, dofs_per_component+1);
+
+          for (; ind_local < dofs_per_component; ++dof_indices[v], ++ind_local)
+            for (unsigned int comp=0; comp<n_components; ++comp)
+              operation.process_dof (*dof_indices[v], *src[comp],
+                                     values_dofs[comp][ind_local][v]);
         }
-    }
-  else
-    // case with vector-valued finite elements where all components are
-    // included in one single vector. Assumption: first come all entries to
-    // the first component, then all entries to the second one, and so
-    // on. This is ensured by the way MatrixFree reads out the indices.
-    {
-      internal::check_vector_compatibility (*src[0], *dof_info);
-      Assert (n_fe_components == n_components_, ExcNotImplemented());
-      const unsigned int n_local_dofs =
-        dofs_per_component*VectorizedArray<Number>::n_array_elements * n_components;
-      Number   *local_data =
-        const_cast<Number *>(&values_dofs[0][0][0]);
-      if (at_irregular_cell == false)
+      else
         {
-          // check whether there is any constraint on the current cell
-          if (indicators != indicators_end)
+          // case with vector-valued finite elements where all components are
+          // included in one single vector. Assumption: first come all entries
+          // to the first component, then all entries to the second one, and
+          // so on. This is ensured by the way MatrixFree reads out the
+          // indices.
+          for (unsigned int comp=0; comp<n_components; ++comp)
             {
-              for ( ; indicators != indicators_end; ++indicators)
+              unsigned int ind_local = 0;
+
+              // check whether there is any constraint on the current cell
+              for ( ; index_indicators != next_index_indicators; ++index_indicators)
                 {
+                  std::pair<unsigned short,unsigned short> indicator =
+                    dof_info->constraint_indicator[index_indicators];
+
                   // run through values up to next constraint
-                  for (unsigned int j=0; j<indicators->first; ++j)
-                    operation.process_dof (dof_indices[j], *src[0],
-                                           local_data[ind_local+j]);
-                  ind_local += indicators->first;
-                  dof_indices   += indicators->first;
+                  for (unsigned int j=0; j<indicator.first; ++j)
+                    operation.process_dof (dof_indices[v][j], *src[0],
+                                           values_dofs[comp][ind_local+j][v]);
+                  ind_local      += indicator.first;
+                  dof_indices[v] += indicator.first;
 
                   // constrained case: build the local value as a linear
                   // combination of the global value according to constraints
                   Number value;
-                  operation.pre_constraints (local_data[ind_local], value);
+                  operation.pre_constraints (values_dofs[comp][ind_local][v], value);
 
                   const Number *data_val =
-                    matrix_info->constraint_pool_begin(indicators->second);
+                    matrix_info->constraint_pool_begin(indicator.second);
                   const Number *end_pool =
-                    matrix_info->constraint_pool_end(indicators->second);
+                    matrix_info->constraint_pool_end(indicator.second);
 
-                  for ( ; data_val != end_pool; ++data_val, ++dof_indices)
-                    operation.process_constraint (*dof_indices, *data_val,
+                  for ( ; data_val != end_pool; ++data_val, ++dof_indices[v])
+                    operation.process_constraint (*dof_indices[v], *data_val,
                                                   *src[0], value);
 
-                  operation.post_constraints (value, local_data[ind_local]);
+                  operation.post_constraints (value, values_dofs[comp][ind_local][v]);
                   ind_local++;
                 }
 
+              AssertIndexRange(ind_local, dofs_per_component+1);
+
               // get the dof values past the last constraint
-              for (; ind_local<n_local_dofs; ++dof_indices, ++ind_local)
-                operation.process_dof (*dof_indices, *src[0],
-                                       local_data[ind_local]);
-              Assert (dof_indices == dof_info->end_indices(cell),
-                      ExcInternalError());
-            }
-          else
-            {
-              // no constraint at all: compiler can unroll at least the
-              // vectorization loop
-              AssertDimension (dof_info->end_indices(cell)-dof_indices,
-                               static_cast<int>(n_local_dofs));
-              for (unsigned int comp=0, ind=0; comp<n_components; ++comp)
-                for (unsigned int j=0; j<dofs_per_component; ++j, ind += VectorizedArray<Number>::n_array_elements)
-                  operation.process_dof_gather(dof_indices+ind,
-                                               *src[0], values_dofs[comp][j],
-                                               std::integral_constant<bool, std::is_same<typename VectorType::value_type,Number>::value>());
-            }
-        }
+              for (; ind_local<dofs_per_component; ++dof_indices[v], ++ind_local)
+                {
+                  AssertIndexRange(*dof_indices[v], src[0]->size());
+                  operation.process_dof (*dof_indices[v], *src[0],
+                                         values_dofs[comp][ind_local][v]);
+                }
 
-      // non-standard case: need to fill in zeros for those components that
-      // are not present (a bit more expensive), but there is not more than
-      // one such cell
-      else
-        {
-          Assert (n_irreg_components_filled > 0, ExcInternalError());
-          for ( ; indicators != indicators_end; ++indicators)
-            {
-              for (unsigned int j=0; j<indicators->first; ++j)
+              if (apply_constraints == true)
                 {
-                  // non-constrained case: copy the data from the global
-                  // vector, src, to the local one, local_src.
-                  operation.process_dof (dof_indices[j], *src[0],
-                                         local_data[ind_local]);
-
-                  // here we jump over all the components that are artificial
-                  ++ind_local;
-                  while (ind_local % VectorizedArray<Number>::n_array_elements
-                         >= n_irreg_components_filled)
-                    {
-                      operation.process_empty (local_data[ind_local]);
-                      ++ind_local;
-                    }
+                  if (is_face)
+                    next_index_indicators = dof_info->row_starts[cells[v]*n_fe_components+first_selected_component+comp+2].second;
+                  else
+                    next_index_indicators = dof_info->row_starts[(cell*n_vectorization+v)*n_fe_components+first_selected_component+comp+2].second;
                 }
-              dof_indices += indicators->first;
+            }
+        }
+    }
+}
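
To summarize how the constrained branch above works for a single lane, here is a standalone sketch (not part of the patch; it mirrors what a VectorReader-style operation does for read_dof_values, with invented names and plain double data): each (number of plain DoFs, constraint-pool row) indicator drives the loop, and a constrained local value is a weighted sum of the following global entries.

  #include <utility>
  #include <vector>

  inline std::vector<double>
  read_cell_values(const std::vector<double> &global_vector,
                   const unsigned int        *dof_indices,
                   const std::vector<std::pair<unsigned short, unsigned short>> &indicators,
                   const std::vector<std::vector<double>> &constraint_pool,
                   const unsigned int         dofs_per_cell)
  {
    std::vector<double> local(dofs_per_cell, 0.);
    unsigned int ind_local = 0;
    for (const auto &indicator : indicators)
      {
        // plain entries up to the next constrained DoF
        for (unsigned int j = 0; j < indicator.first; ++j, ++ind_local)
          local[ind_local] = global_vector[*dof_indices++];
        // constrained entry: weighted sum over pool row 'indicator.second'
        double value = 0.;
        for (const double weight : constraint_pool[indicator.second])
          value += weight * global_vector[*dof_indices++];
        local[ind_local++] = value;
      }
    // remaining unconstrained entries
    for (; ind_local < dofs_per_cell; ++ind_local)
      local[ind_local] = global_vector[*dof_indices++];
    return local;
  }
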
 
-              // constrained case: build the local value as a linear
-              // combination of the global value according to constraint
-              Number value;
-              operation.pre_constraints (local_data[ind_local], value);
 
-              const Number *data_val =
-                matrix_info->constraint_pool_begin(indicators->second);
-              const Number *end_pool =
-                matrix_info->constraint_pool_end(indicators->second);
 
-              for ( ; data_val != end_pool; ++data_val, ++dof_indices)
-                operation.process_constraint (*dof_indices, *data_val,
-                                              *src[0], value);
+template <int dim, int n_components_, typename Number, bool is_face>
+template <typename VectorType, typename VectorOperation>
+inline
+void
+FEEvaluationBase<dim,n_components_,Number,is_face>
+::read_write_operation_global (const VectorOperation &operation,
+                               VectorType            *src[]) const
+{
+  Assert (!local_dof_indices.empty(), ExcNotInitialized());
 
-              operation.post_constraints (value, local_data[ind_local]);
-              ind_local++;
-              while (ind_local % VectorizedArray<Number>::n_array_elements
-                     >= n_irreg_components_filled)
-                {
-                  operation.process_empty (local_data[ind_local]);
-                  ++ind_local;
-                }
-            }
-          for (; ind_local<n_local_dofs; ++dof_indices)
-            {
-              Assert (dof_indices != dof_info->end_indices(cell),
-                      ExcInternalError());
-
-              // non-constrained case: copy the data from the global vector,
-              // src, to the local one, local_dst.
-              operation.process_dof (*dof_indices, *src[0],
-                                     local_data[ind_local]);
-              ++ind_local;
-              while (ind_local % VectorizedArray<Number>::n_array_elements
-                     >= n_irreg_components_filled)
-                {
-                  operation.process_empty (local_data[ind_local]);
-                  ++ind_local;
-                }
-            }
+  unsigned int index = first_selected_component * data->dofs_per_component_on_cell;
+  for (unsigned int comp = 0; comp<n_components; ++comp)
+    {
+      for (unsigned int i=0; i<data->dofs_per_component_on_cell; ++i, ++index)
+        {
+          operation.process_empty(values_dofs[comp][i]);
+          operation.process_dof_global(local_dof_indices[data->lexicographic_numbering[index]],
+                                       *src[0], values_dofs[comp][i][0]);
         }
     }
 }
@@ -3613,7 +3566,7 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
     src_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(const_cast<VectorType &>(src), d+first_index);
 
   internal::VectorReader<Number> reader;
-  read_write_operation (reader, src_data);
+  read_write_operation (reader, src_data, true);
 
 #ifdef DEBUG
   dof_values_initialized = true;
@@ -3632,12 +3585,17 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
 {
   // select between block vectors and non-block vectors. Note that the number
   // of components is checked in the internal data
-  const typename internal::BlockVectorSelector<VectorType,
-        IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
+  typename internal::BlockVectorSelector<VectorType,
+           IsBlockVector<VectorType>::value>::BaseVectorType *src_data[n_components];
   for (unsigned int d=0; d<n_components; ++d)
     src_data[d] = internal::BlockVectorSelector<VectorType, IsBlockVector<VectorType>::value>::get_vector_component(const_cast<VectorType &>(src), d+first_index);
 
-  read_dof_values_plain (src_data);
+  internal::VectorReader<Number> reader;
+  read_write_operation (reader, src_data, false);
+
+#ifdef DEBUG
+  dof_values_initialized = true;
+#endif
 }
 
 
@@ -3690,141 +3648,6 @@ FEEvaluationBase<dim,n_components_,Number,is_face>
 
 
 
-template <int dim, int n_components_, typename Number, bool is_face>
-template <typename VectorType>
-inline
-void
-FEEvaluationBase<dim,n_components_,Number,is_face>
-::read_dof_values_plain (const VectorType *src[])
-{
-  // Case without MatrixFree initialization object
-  if (matrix_info == nullptr)
-    {
-      internal::VectorReader<Number> reader;
-      read_write_operation (reader, src);
-      return;
-    }
-
-  // this is different from the other three operations because we do not use
-  // constraints here, so this is a separate function.
-  Assert (dof_info != nullptr, ExcNotInitialized());
-  Assert (matrix_info->indices_initialized() == true,
-          ExcNotInitialized());
-  Assert (cell != numbers::invalid_unsigned_int, ExcNotInitialized());
-  Assert (dof_info->store_plain_indices == true, ExcNotInitialized());
-
-  // loop over all local dofs. ind_local holds local number on cell, index
-  // iterates over the elements of index_local_to_global and dof_indices
-  // points to the global indices stored in index_local_to_global
-  const unsigned int *dof_indices = dof_info->begin_indices_plain(cell);
-  const unsigned int dofs_per_component = this->data->dofs_per_component_on_cell;
-
-  const unsigned int n_irreg_components_filled = dof_info->row_starts[cell][2];
-  const bool at_irregular_cell = n_irreg_components_filled > 0;
-
-  // scalar case (or case when all components have the same degrees of freedom
-  // and sit on a different vector each)
-  if (n_fe_components == 1)
-    {
-      for (unsigned int c=0; c<n_components; ++c)
-        Assert(src[c] != nullptr,
-               ExcMessage("The finite element underlying this FEEvaluation "
-                          "object is scalar, but you requested " +
-                          std::to_string(n_components) +
-                          " components via the template argument in "
-                          "FEEvaluation. In that case, you must pass an "
-                          "std::vector<VectorType> or a BlockVector to " +
-                          "read_dof_values_plain."));
-
-      const unsigned int n_local_dofs =
-        VectorizedArray<Number>::n_array_elements * dofs_per_component;
-      for (unsigned int comp=0; comp<n_components; ++comp)
-        internal::check_vector_compatibility (*src[comp], *dof_info);
-      Number *local_src_number [n_components];
-      for (unsigned int comp=0; comp<n_components; ++comp)
-        local_src_number[comp] = &values_dofs[comp][0][0];
-
-      // standard case where there are sufficiently many cells to fill all
-      // vectors
-      if (at_irregular_cell == false)
-        {
-          for (unsigned int j=0; j<n_local_dofs; ++j)
-            for (unsigned int comp=0; comp<n_components; ++comp)
-              local_src_number[comp][j] =
-                internal::vector_access (*src[comp], dof_indices[j]);
-        }
-
-      // non-standard case: need to fill in zeros for those components that
-      // are not present (a bit more expensive), but there is not more than
-      // one such cell
-      else
-        {
-          Assert (n_irreg_components_filled > 0, ExcInternalError());
-          for (unsigned int ind_local=0; ind_local<n_local_dofs;
-               ++dof_indices)
-            {
-              // non-constrained case: copy the data from the global vector,
-              // src, to the local one, local_dst.
-              for (unsigned int comp=0; comp<n_components; ++comp)
-                local_src_number[comp][ind_local] =
-                  internal::vector_access (*src[comp], *dof_indices);
-              ++ind_local;
-              while (ind_local % VectorizedArray<Number>::n_array_elements >= n_irreg_components_filled)
-                {
-                  for (unsigned int comp=0; comp<n_components; ++comp)
-                    local_src_number[comp][ind_local] = 0.;
-                  ++ind_local;
-                }
-            }
-        }
-    }
-  else
-    // case with vector-valued finite elements where all components are
-    // included in one single vector. Assumption: first come all entries to
-    // the first component, then all entries to the second one, and so
-    // on. This is ensured by the way MatrixFree reads out the indices.
-    {
-      internal::check_vector_compatibility (*src[0], *dof_info);
-      Assert (n_fe_components == n_components_, ExcNotImplemented());
-      const unsigned int n_local_dofs =
-        dofs_per_component * VectorizedArray<Number>::n_array_elements * n_components;
-      Number *local_src_number = &values_dofs[0][0][0];
-      if (at_irregular_cell == false)
-        {
-          for (unsigned int j=0; j<n_local_dofs; ++j)
-            local_src_number[j] =
-              internal::vector_access (*src[0], dof_indices[j]);
-        }
-
-      // non-standard case: need to fill in zeros for those components that
-      // are not present (a bit more expensive), but there is not more than
-      // one such cell
-      else
-        {
-          Assert (n_irreg_components_filled > 0, ExcInternalError());
-          for (unsigned int ind_local=0; ind_local<n_local_dofs; ++dof_indices)
-            {
-              // non-constrained case: copy the data from the global vector,
-              // src, to the local one, local_dst.
-              local_src_number[ind_local] =
-                internal::vector_access (*src[0], *dof_indices);
-              ++ind_local;
-              while (ind_local % VectorizedArray<Number>::n_array_elements >= n_irreg_components_filled)
-                {
-                  local_src_number[ind_local] = 0.;
-                  ++ind_local;
-                }
-            }
-        }
-    }
-
-#ifdef DEBUG
-  dof_values_initialized = true;
-#endif
-}
-
-
-
 
 /*------------------------------ access to data fields ----------------------*/
 
@@ -5693,13 +5516,14 @@ FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>
             }
           else
             for (unsigned int no=0; no<this->matrix_info->n_components(); ++no)
-              if (this->matrix_info->get_shape_info(no,0,this->active_fe_index,0).fe_degree
-                  == static_cast<unsigned int>(fe_degree))
-                {
-                  proposed_dof_comp = no;
-                  proposed_fe_comp = 0;
-                  break;
-                }
+              for (unsigned int nf=0; nf<this->matrix_info->n_base_elements(no); ++nf)
+                if (this->matrix_info->get_shape_info(no,0,nf,this->active_fe_index,0).fe_degree
+                    == static_cast<unsigned int>(fe_degree))
+                  {
+                    proposed_dof_comp = no;
+                    proposed_fe_comp = nf;
+                    break;
+                  }
           if (n_q_points ==
               this->mapping_data->descriptor[this->active_quad_index].n_q_points)
             proposed_quad_comp = this->quad_no;
@@ -5778,12 +5602,8 @@ FEEvaluation<dim,fe_degree,n_q_points_1d,n_components_,Number>
               ExcMessage(message));
     }
   if (dof_no != numbers::invalid_unsigned_int)
-    {
-      AssertDimension (n_q_points,
-                       this->mapping_data->descriptor[this->active_quad_index].n_q_points);
-      AssertDimension (this->data->dofs_per_component_on_cell * this->n_fe_components,
-                       this->dof_info->dofs_per_cell[this->active_fe_index]);
-    }
+    AssertDimension (n_q_points,
+                     this->mapping_data->descriptor[this->active_quad_index].n_q_points);
 #endif
 }
 
index 68d57544c974c1f59e426022951a4a74add0b8da..9f0e1beee62b10e9cf847289e1d92121d67e4d2b 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <deal.II/base/aligned_vector.h>
 #include <deal.II/base/exceptions.h>
-#include <deal.II/base/parallel.h>
 #include <deal.II/base/quadrature.h>
 #include <deal.II/base/vectorization.h>
 #include <deal.II/base/thread_local_storage.h>
@@ -32,6 +31,7 @@
 #include <deal.II/lac/block_vector_base.h>
 #include <deal.II/lac/constraint_matrix.h>
 #include <deal.II/dofs/dof_handler.h>
+#include <deal.II/grid/grid_tools.h>
 #include <deal.II/hp/dof_handler.h>
 #include <deal.II/hp/q_collection.h>
 #include <deal.II/matrix_free/task_info.h>
@@ -104,6 +104,16 @@ template <int dim, typename Number=double>
 class MatrixFree : public Subscriptor
 {
 public:
+  /**
+   * A typedef for the underlying number type specified by the template
+   * argument.
+   */
+  typedef Number            value_type;
+
+  /**
+   * The dimension set by the template argument `dim`.
+   */
+  static const unsigned int dimension = dim;
 
   /**
    * Collects the options for initialization of the MatrixFree class. The
@@ -125,13 +135,31 @@ public:
    * class should also allow for access to vectors without resolving
    * constraints.
    *
-   * The last two parameters allow the user to disable some of the
-   * initialization processes. For example, if only the scheduling that avoids
-   * touching the same vector/matrix indices simultaneously is to be found,
-   * the mapping needs not be initialized. Likewise, if the mapping has
-   * changed from one iteration to the next but the topology has not (like
-   * when using a deforming mesh with MappingQEulerian), it suffices to
-   * initialize the mapping only.
+   * The two parameters `initialize_indices` and `initialize_mapping` allow
+   * the user to disable some of the initialization processes. For example, if
+   * only the scheduling that avoids touching the same vector/matrix indices
+   * simultaneously is to be found, the mapping need not be
+   * initialized. Likewise, if the mapping has changed from one iteration to
+   * the next but the topology has not (like when using a deforming mesh with
+   * MappingQEulerian), it suffices to initialize the mapping only.
+   *
+   * The two fields `cell_vectorization_category` and
+   * `cell_vectorization_categories_strict` control the formation of batches
+   * for vectorization over several cells. This mechanism is used implicitly
+   * when working with hp adaptivity but can also be useful in other contexts,
+   * such as in local time stepping where one would like to control which
+   * elements together form a batch of cells. The array
+   * `cell_vectorization_category` is accessed by the number given by
+   * cell->active_cell_index() when working on the active cells with
+   * `level_mg_handler` set to `-1` and by cell->index() for the level
+   * cells. By default, the different categories in
+   * `cell_vectorization_category` can be mixed, and the algorithm is allowed
+   * to merge lower category numbers with the next higher categories if
+   * necessary inside the algorithm, in order to avoid partially filled SIMD
+   * lanes as much as possible. This gives a better utilization of the
+   * vectorization but might need special treatment, in particular for face
+   * integrals. If `cell_vectorization_categories_strict` is set to @p true,
+   * the algorithm will instead keep different categories separate and not mix
+   * them in a single vectorized array.
    */
   struct AdditionalData
   {
@@ -145,20 +173,20 @@ public:
       /**
        * Perform application in serial.
        */
-      none,
+      none = internal::MatrixFreeFunctions::TaskInfo::none,
       /**
        * Partition the cells into two levels and afterwards form chunks.
        */
-      partition_partition,
+      partition_partition = internal::MatrixFreeFunctions::TaskInfo::partition_partition,
       /**
        * Partition on the global level and color cells within the partitions.
        */
-      partition_color,
+      partition_color = internal::MatrixFreeFunctions::TaskInfo::partition_color,
       /**
        * Use the traditional coloring algorithm: this is like
        * TasksParallelScheme::partition_color, but only uses one partition.
        */
-      color
+      color = internal::MatrixFreeFunctions::TaskInfo::color
     };
 
     /**
@@ -167,21 +195,32 @@ public:
     AdditionalData (const TasksParallelScheme tasks_parallel_scheme = partition_partition,
                     const unsigned int        tasks_block_size   = 0,
                     const UpdateFlags         mapping_update_flags  = update_gradients | update_JxW_values,
-                    const unsigned int level_mg_handler = numbers::invalid_unsigned_int,
+                    const UpdateFlags         mapping_update_flags_boundary_faces = update_default,
+                    const UpdateFlags         mapping_update_flags_inner_faces = update_default,
+                    const UpdateFlags         mapping_update_flags_faces_by_cells = update_default,
+                    const unsigned int        level_mg_handler = numbers::invalid_unsigned_int,
                     const bool                store_plain_indices = true,
                     const bool                initialize_indices = true,
-                    const bool                initialize_mapping = true)
+                    const bool                initialize_mapping = true,
+                    const bool                overlap_communication_computation = true,
+                    const bool                hold_all_faces_to_owned_cells = false,
+                    const bool                cell_vectorization_categories_strict = false)
       :
       tasks_parallel_scheme (tasks_parallel_scheme),
       tasks_block_size      (tasks_block_size),
       mapping_update_flags  (mapping_update_flags),
+      mapping_update_flags_boundary_faces (mapping_update_flags_boundary_faces),
+      mapping_update_flags_inner_faces (mapping_update_flags_inner_faces),
+      mapping_update_flags_faces_by_cells (mapping_update_flags_faces_by_cells),
       level_mg_handler      (level_mg_handler),
       store_plain_indices   (store_plain_indices),
       initialize_indices    (initialize_indices),
-      initialize_mapping    (initialize_mapping)
+      initialize_mapping    (initialize_mapping),
+      overlap_communication_computation(overlap_communication_computation),
+      hold_all_faces_to_owned_cells(hold_all_faces_to_owned_cells),
+      cell_vectorization_categories_strict(cell_vectorization_categories_strict)
     {};
 
-
     /**
      * Set the scheme for task parallelism. There are four options available.
      * If set to @p none, the operator application is done in serial without
@@ -224,7 +263,7 @@ public:
     unsigned int        tasks_block_size;
 
     /**
-     * This flag is used to determine which quantities should be cached. This
+     * This flag determines the mapping data on cells that is cached. This
      * class can cache data needed for gradient computations (inverse
      * Jacobians), Jacobian determinants (JxW), quadrature points as well as
      * data for Hessians (derivative of Jacobians). By default, only data for
@@ -236,6 +275,66 @@ public:
      */
     UpdateFlags         mapping_update_flags;
 
+    /**
+     * This flag determines the mapping data on boundary faces to be
+     * cached. Note that MatrixFree uses a separate loop layout for face
+     * integrals in order to vectorize effectively, also in the case of
+     * hanging nodes (which require different subface settings on the two
+     * sides) or when some cells in the batch of a VectorizedArray of cells
+     * are adjacent to the boundary and others are not.
+     *
+     * If set to a value different from update_general (the default), the face
+     * information is explicitly built. Currently, MatrixFree supports caching
+     * the following data on faces: inverse Jacobians, Jacobian determinants
+     * (JxW), quadrature points, data for Hessians (derivatives of Jacobians),
+     * and normal vectors.
+     */
+    UpdateFlags         mapping_update_flags_boundary_faces;
+
+    /**
+     * This flag determines the mapping data on interior faces to be
+     * cached. Note that MatrixFree uses a separate loop layout for face
+     * integrals in order to vectorize effectively, also in the case of
+     * hanging nodes (which require different subface settings on the two
+     * sides) or when some cells in the batch of a VectorizedArray of cells
+     * are adjacent to the boundary and others are not.
+     *
+     * If set to a value different from update_general (the default), the face
+     * information is explicitly built. Currently, MatrixFree supports caching
+     * the following data on faces: inverse Jacobians, Jacobian determinants
+     * (JxW), quadrature points, data for Hessians (derivatives of Jacobians),
+     * and normal vectors.
+     */
+    UpdateFlags         mapping_update_flags_inner_faces;
+
+    /**
+     * This flag determines the mapping data for faces stored in a different
+     * layout with respect to vectorization. Whereas
+     * `mapping_update_flags_inner_faces` and
+     * `mapping_update_flags_boundary_faces` trigger building the data in a
+     * face-centric way with proper vectorization, the current data field
+     * attaches the face information to the cells and their way of
+     * vectorization. This is only needed in special situations, for example
+     * for block-Jacobi methods where the full operator on a cell including
+     * its faces is evaluated. This data is accessed by
+     * <code>FEFaceEvaluation::reinit(cell_batch_index,
+     * face_number)</code>. However, currently no coupling terms to neighbors
+     * can be computed with this approach because the neighbors are not laid
+     * out in the VectorizedArray data layout with its
+     * array-of-struct-of-array-type data structure.
+     *
+     * Note that you should only compute this data field if you really need
+     * it, as it more than doubles the memory required by the mapping data on
+     * faces.
+     *
+     * If set to a value different from update_general (the default), the face
+     * information is explicitly built. Currently, MatrixFree supports caching
+     * the following data on faces: inverse Jacobians, Jacobian determinants
+     * (JxW), quadrature points, data for Hessians (derivatives of Jacobians),
+     * and normal vectors.
+     */
+    UpdateFlags         mapping_update_flags_faces_by_cells;
+
     /**
      * This option can be used to define whether we work on a certain level of
      * the mesh, and not the active cells. If set to invalid_unsigned_int
@@ -255,22 +354,76 @@ public:
     bool                store_plain_indices;
 
     /**
-     * Option to control whether the indices stored in the DoFHandler should
-     * be read and the pattern for task parallelism should be set up in the
-     * initialize method of MatrixFree. Defaults to true. Can be disabled in
-     * case the mapping should be recomputed (e.g. when using a deforming mesh
-     * described through MappingEulerian) but the topology of cells has
-     * remained the same.
+     * Option to control whether the indices stored in the DoFHandler
+     * should be read and the pattern for task parallelism should be
+     * set up in the initialize method of MatrixFree. The default
+     * value is true. Can be disabled in case the mapping should be
+     * recomputed (e.g. when using a deforming mesh described through
+     * MappingEulerian) but the topology of cells has remained the
+     * same.
      */
     bool                initialize_indices;
 
     /**
-     * Option to control whether the mapping information should be computed in
-     * the initialize method of MatrixFree. Defaults to true. Can be disabled
-     * when only some indices should be set up (e.g. when only a set of
-     * independent cells should be computed).
+     * Option to control whether the mapping information should be
+     * computed in the initialize method of MatrixFree. The default
+     * value is true. Can be disabled when only some indices should be
+     * set up (e.g. when only a set of independent cells should be
+     * computed).
      */
     bool                initialize_mapping;
+
+    /**
+     * Option to control whether the loops should overlap communications and
+     * computations as far as possible in case the vectors passed to the loops
+     * support non-blocking data exchange. In most situations, overlapping is
+     * faster in case the amount of data to be sent is more than a few
+     * kilobytes. If less data is sent, the communication is latency bound on
+     * most clusters (point-to-point latency is around 1 microsecond on good
+     * clusters by 2016 standards). Depending on the MPI implementation and
+     * the fabric, it may be faster to not overlap and wait for the data to
+     * arrive. The default is true, i.e., communication and computation are
+     * overlapped.
+     **/
+    bool                overlap_communication_computation;
+
+    /**
+     * By default, the face part will only hold those faces (and ghost
+     * elements behind faces) that are going to be processed locally. In case
+     * MatrixFree should have access to all neighbors on locally owned cells,
+     * this option enables adding the respective faces at the end of the face
+     * range.
+     **/
+    bool                hold_all_faces_to_owned_cells;
+
+    /**
+     * This data structure allows assigning cells to different categories
+     * when building the information for vectorization. It is used
+     * implicitly when working with hp adaptivity but can also be useful in
+     * other contexts, such as in local time stepping where one would like to
+     * control which elements together form a batch of cells.
+     *
+     * This array is accessed by the number given by cell->active_cell_index()
+     * when working on the active cells with @p level_mg_handler set to -1 and
+     * by cell->index() for the level cells.
+     *
+     * @note This field is empty upon construction of AdditionalData. It is
+     * the responsibility of the user to resize this field to
+     * `triangulation.n_active_cells()` or `triangulation.n_cells(level)` when
+     * filling data.
+     */
+    std::vector<unsigned int> cell_vectorization_category;
+
+    /**
+     * By default, the different categories in @p cell_vectorization_category
+     * can be mixed and the algorithm is allowed to merge lower categories with
+     * the next higher categories if it is necessary inside the algorithm. This
+     * gives a better utilization of the vectorization but might need special
+     * treatment, in particular for face integrals. If set to @p true, the
+     * algorithm will instead keep different categories separate and not mix
+     * them in a single vectorized array.
+     */
+    bool cell_vectorization_categories_strict;
   };
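
To illustrate how the new face-related flags and the vectorization categories are meant to be combined, the following sketch sets up an AdditionalData object for a loop with face integrals; the variable names (`tria`), the chosen update flags, and the use of material ids as categories are assumptions of this example, not prescribed by the patch.

  // Sketch (assumed context: a Triangulation<dim> named tria exists):
  typename MatrixFree<dim, double>::AdditionalData additional_data;
  additional_data.mapping_update_flags = update_gradients | update_JxW_values;
  additional_data.mapping_update_flags_inner_faces =
    update_values | update_JxW_values | update_normal_vectors;
  additional_data.mapping_update_flags_boundary_faces =
    update_values | update_JxW_values | update_normal_vectors;

  // Optionally group cells into categories, here by material id (an
  // arbitrary choice made only for this sketch):
  additional_data.cell_vectorization_category.resize(tria.n_active_cells());
  for (const auto &cell : tria.active_cell_iterators())
    if (cell->is_locally_owned())
      additional_data.cell_vectorization_category[cell->active_cell_index()] =
        cell->material_id();
  additional_data.cell_vectorization_categories_strict = false;
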
 
   /**
@@ -361,7 +514,7 @@ public:
                const std::vector<const DoFHandlerType *>   &dof_handler,
                const std::vector<const ConstraintMatrix *> &constraint,
                const std::vector<QuadratureType>           &quad,
-               const AdditionalData                        additional_data = AdditionalData());
+               const AdditionalData                         additional_data = AdditionalData());
 
   /**
    * Initializes the data structures. Same as above, but  using a $Q_1$
@@ -371,7 +524,7 @@ public:
   void reinit (const std::vector<const DoFHandlerType *>   &dof_handler,
                const std::vector<const ConstraintMatrix *> &constraint,
                const std::vector<QuadratureType>           &quad,
-               const AdditionalData                        additional_data = AdditionalData());
+               const AdditionalData                         additional_data = AdditionalData());
 
   /**
    * Same as above.
@@ -401,7 +554,7 @@ public:
                const std::vector<const DoFHandlerType *>   &dof_handler,
                const std::vector<const ConstraintMatrix *> &constraint,
                const QuadratureType                        &quad,
-               const AdditionalData                        additional_data = AdditionalData());
+               const AdditionalData                         additional_data = AdditionalData());
 
   /**
    * Initializes the data structures. Same as above, but  using a $Q_1$
@@ -411,7 +564,7 @@ public:
   void reinit (const std::vector<const DoFHandlerType *>   &dof_handler,
                const std::vector<const ConstraintMatrix *> &constraint,
                const QuadratureType                        &quad,
-               const AdditionalData                        additional_data = AdditionalData());
+               const AdditionalData                         additional_data = AdditionalData());
 
   /**
    * Copy function. Creates a deep copy of all data structures. It is usually
@@ -434,16 +587,40 @@ public:
   //@{
   /**
    * This method runs the loop over all cells (in parallel) and performs the
-   * MPI data exchange on the source vector and destination vector. The first
-   * argument indicates a function object that has the following signature:
-   * <code>cell_operation (const MatrixFree<dim,Number> &, OutVector &,
-   * InVector &, std::pair<unsigned int,unsigned int> &)</code>, where the
-   * first argument passes the data of the calling class and the last argument
-   * defines the range of cells which should be worked on (typically more than
-   * one cell should be worked on in order to reduce overheads).  One can pass
-   * a pointer to an object in this place if it has an <code>operator()</code>
-   * with the correct set of arguments since such a pointer can be converted
-   * to the function object.
+   * MPI data exchange on the source vector and destination vector.
+   *
+   * @param cell_operation `std::function` with the signature `cell_operation
+   * (const MatrixFree<dim,Number> &, OutVector &, InVector &,
+   * std::pair<unsigned int,unsigned int> &)` where the first argument passes
+   * the data of the calling class and the last argument defines the range of
+   * cells which should be worked on (typically more than one cell should be
+   * worked on in order to reduce overheads).  One can pass a pointer to an
+   * object in this place if it has an <code>operator()</code> with the
+   * correct set of arguments since such a pointer can be converted to the
+   * function object.
+   *
+   * @param dst Destination vector holding the result. If the vector is of
+   * type LinearAlgebra::distributed::Vector (or composite objects thereof
+   * such as LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::compress() at the end of the call
+   * internally.
+   *
+   * @param src Input vector. If the vector is of type
+   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
+   * LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::update_ghost_values() at the start of
+   * the call internally to make sure all necessary data is locally
+   * available. Note, however, that the vector is reset to its original state
+   * at the end of the loop, i.e., if the vector was not ghosted upon entry of
+   * the loop, it will not be ghosted upon finishing the loop.
+   *
+   * @param zero_dst_vector If this flag is set to `true`, the vector `dst`
+   * will be set to zero inside the loop. Use this mode when you perform a
+   * typical `vmult()` operation on a matrix object, as it will typically be
+   * faster than calling `dst = 0;` before the loop separately. This is
+   * because the vector entries are set to zero only on subranges of the
+   * vector, making sure that the vector entries stay in caches as much as
+   * possible.
    */
   template <typename OutVector, typename InVector>
   void cell_loop (const std::function<void (const MatrixFree<dim,Number> &,
@@ -452,39 +629,352 @@ public:
                                             const std::pair<unsigned int,
                                             unsigned int> &)> &cell_operation,
                   OutVector      &dst,
-                  const InVector &src) const;
+                  const InVector &src,
+                  const bool      zero_dst_vector = false) const;
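
A minimal usage sketch for this signature, assuming a scalar Laplace-type kernel, an already initialized MatrixFree object `matrix_free`, compile-time constants `dim` and `fe_degree`, and LinearAlgebra::distributed::Vector<double> objects `dst` and `src`; all of these names are assumptions of the example.

  typedef LinearAlgebra::distributed::Vector<double> VectorType;
  const std::function<void(const MatrixFree<dim, double> &,
                           VectorType &,
                           const VectorType &,
                           const std::pair<unsigned int, unsigned int> &)>
  cell_operation = [](const MatrixFree<dim, double> &data,
                      VectorType &dst,
                      const VectorType &src,
                      const std::pair<unsigned int, unsigned int> &cell_range)
  {
    FEEvaluation<dim, fe_degree> phi(data);
    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
      {
        phi.reinit(cell);
        phi.read_dof_values(src);
        phi.evaluate(false, true); // no values, gradients only
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          phi.submit_gradient(phi.get_gradient(q), q);
        phi.integrate(false, true);
        phi.distribute_local_to_global(dst);
      }
  };

  // With zero_dst_vector = true, dst is cleared in chunks inside the loop,
  // replacing a separate `dst = 0;` before the call.
  matrix_free.cell_loop(cell_operation, dst, src, true);
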
 
   /**
    * This is the second variant to run the loop over all cells, now providing
-   * a function pointer to a member function of class @p CLASS with the
-   * signature <code>cell_operation (const MatrixFree<dim,Number> &, OutVector
-   * &, InVector &, std::pair<unsigned int,unsigned int>&)const</code>. This
-   * method obviates the need to call std::bind to bind the class into
-   * the given function in case the local function needs to access data in the
-   * class (i.e., it is a non-static member function).
+   * a function pointer to a member function of class `CLASS`. This method
+   * obviates the need to call std::bind to bind the class into the given
+   * function in case the local function needs to access data in the class
+   * (i.e., it is a non-static member function).
+   *
+   * @param cell_operation Pointer to member function of `CLASS` with the
+   * signature `cell_operation (const MatrixFree<dim,Number> &, OutVector &,
+   * InVector &, std::pair<unsigned int,unsigned int> &)` where the first
+   * argument passes the data of the calling class and the last argument
+   * defines the range of cells which should be worked on (typically more than
+   * one cell should be worked on in order to reduce overheads).
+   *
+   * @param owning_class The object which provides the `cell_operation`
+   * call. To be compatible with this interface, the class must allow calling
+   * `owning_class->cell_operation(...)`.
+   *
+   * @param dst Destination vector holding the result. If the vector is of
+   * type LinearAlgebra::distributed::Vector (or composite objects thereof
+   * such as LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::compress() at the end of the call
+   * internally.
+   *
+   * @param src Input vector. If the vector is of type
+   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
+   * LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::update_ghost_values() at the start of
+   * the call internally to make sure all necessary data is locally
+   * available. Note, however, that the vector is reset to its original state
+   * at the end of the loop, i.e., if the vector was not ghosted upon entry of
+   * the loop, it will not be ghosted upon finishing the loop.
+   *
+   * @param zero_dst_vector If this flag is set to `true`, the vector `dst`
+   * will be set to zero inside the loop. Use this mode when you perform a
+   * typical `vmult()` operation on a matrix object, as it will typically be
+   * faster than calling `dst = 0;` before the loop separately. This is
+   * because the vector entries are set to zero only on subranges of the
+   * vector, making sure that the vector entries stay in caches as much as
+   * possible.
    */
   template <typename CLASS, typename OutVector, typename InVector>
-  void cell_loop (void (CLASS::*function_pointer)(const MatrixFree &,
-                                                  OutVector &,
-                                                  const InVector &,
-                                                  const std::pair<unsigned int,
-                                                  unsigned int> &)const,
+  void cell_loop (void (CLASS::*cell_operation)(const MatrixFree &,
+                                                OutVector &,
+                                                const InVector &,
+                                                const std::pair<unsigned int,
+                                                unsigned int> &)const,
                   const CLASS    *owning_class,
                   OutVector      &dst,
-                  const InVector &src) const;
+                  const InVector &src,
+                  const bool      zero_dst_vector = false) const;
 
   /**
    * Same as above, but for class member functions which are non-const.
    */
   template <typename CLASS, typename OutVector, typename InVector>
-  void cell_loop (void (CLASS::*function_pointer)(const MatrixFree &,
-                                                  OutVector &,
-                                                  const InVector &,
-                                                  const std::pair<unsigned int,
-                                                  unsigned int> &),
+  void cell_loop (void (CLASS::*cell_operation)(const MatrixFree &,
+                                                OutVector &,
+                                                const InVector &,
+                                                const std::pair<unsigned int,
+                                                unsigned int> &),
                   CLASS          *owning_class,
                   OutVector      &dst,
-                  const InVector &src) const;
+                  const InVector &src,
+                  const bool      zero_dst_vector = false) const;
+
+  /**
+   * This class defines the type of data access for face integrals that is
+   * passed on to the `update_ghost_values` and `compress` functions of the
+   * parallel vectors, with the purpose of being able to reduce the amount of
+   * data that must be exchanged. The data exchange is a real bottleneck in
+   * particular for high-degree DG methods, therefore a more restrictive way
+   * of exchange is clearly beneficial. Note that this selection applies to
+   * FEFaceEvaluation objects assigned to the exterior side of cells accessing
+   * `FaceToCellTopology::exterior_cells` only; all <i>interior</i> objects
+   * are available in any case.
+   */
+  enum class DataAccessOnFaces
+  {
+    /**
+     * The loop does not involve any FEFaceEvaluation access into neighbors,
+     * as is the case with only boundary integrals (but no interior face
+     * integrals) or when doing mass matrices in a MatrixFree::cell_loop()
+     * like setup.
+     */
+    none,
+
+    /**
+     * The loop only involves FEFaceEvaluation access into neighbors by
+     * function values, such as `FEFaceEvaluation::gather_evaluate(src, true,
+     * false);`, but no access to shape function derivatives (which typically
+     * need to access more data). For FiniteElement types where only some of
+     * the shape functions have support on a face, such as an FE_DGQ element
+     * with Lagrange polynomials with nodes on the element surface, the data
+     * exchange is reduced from `(k+1)^dim` to `(k+1)^(dim-1)`.
+     */
+    values,
+
+    /**
+     * The loop involves FEFaceEvaluation access into neighbors by
+     * function values and gradients, but no second derivatives, such as
+     * `FEFaceEvaluation::gather_evaluate(src, true, true);`. For
+     * FiniteElement types where only some of the shape functions have
+     * non-zero value and first derivative on a face, such as an FE_DGQHermite
+     * element, the data exchange is reduced, e.g. from `(k+1)^dim` to
+     * `2(k+1)^(dim-1)`. Note that for bases that do not have this special
+     * property, the full neighboring data is sent anyway.
+     */
+    gradients,
+
+    /**
+     * General setup where the user does not want to make a restriction. This
+     * is typically more expensive than the other options, but also the most
+     * conservative one because the full data of elements behind the faces to
+     * be computed locally will be exchanged.
+     */
+    unspecified
+  };
+
+  /**
+   * This method runs a loop over all cells (in parallel) and performs the MPI
+   * data exchange on the source vector and destination vector. As opposed to
+   * the other variants that only run a function on cells, this method also
+   * takes as arguments a function for the interior faces and for the boundary
+   * faces, respectively.
+   *
+   * @param cell_operation `std::function` with the signature `cell_operation
+   * (const MatrixFree<dim,Number> &, OutVector &, InVector &,
+   * std::pair<unsigned int,unsigned int> &)` where the first argument passes
+   * the data of the calling class and the last argument defines the range of
+   * cells which should be worked on (typically more than one cell should be
+   * worked on in order to reduce overheads).  One can pass a pointer to an
+   * object in this place if it has an <code>operator()</code> with the
+   * correct set of arguments since such a pointer can be converted to the
+   * function object.
+   *
+   * @param face_operation `std::function` with the signature `face_operation
+   * (const MatrixFree<dim,Number> &, OutVector &, InVector &,
+   * std::pair<unsigned int,unsigned int> &)` in analogy to `cell_operation`,
+   * but now the part associated to the work on interior faces. Note that the
+   * MatrixFree framework treats periodic faces as interior ones, so they will
+   * be assigned their correct neighbor after applying periodicity constraints
+   * within the face_operation calls.
+   *
+   * @param boundary_operation `std::function` with the signature
+   * `boundary_operation (const MatrixFree<dim,Number> &, OutVector &,
+   * InVector &, std::pair<unsigned int,unsigned int> &)` in analogy to
+   * `cell_operation` and `face_operation`, but now the part associated to the
+   * work on boundary faces. Boundary faces are separated by their
+   * `boundary_id` and it is possible to query that id using
+   * MatrixFree::get_boundary_id(). Note that interior and boundary faces use the
+   * same numbering, and faces in the interior are assigned lower numbers than
+   * the boundary faces.
+   *
+   * @param dst Destination vector holding the result. If the vector is of
+   * type LinearAlgebra::distributed::Vector (or composite objects thereof
+   * such as LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::compress() at the end of the call
+   * internally.
+   *
+   * @param src Input vector. If the vector is of type
+   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
+   * LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::update_ghost_values() at the start of
+   * the call internally to make sure all necessary data is locally
+   * available. Note, however, that the vector is reset to its original state
+   * at the end of the loop, i.e., if the vector was not ghosted upon entry of
+   * the loop, it will not be ghosted upon finishing the loop.
+   *
+   * @param zero_dst_vector If this flag is set to `true`, the vector `dst`
+   * will be set to zero inside the loop. Use this mode when you perform a
+   * typical `vmult()` operation on a matrix object, as it will typically be
+   * faster than calling `dst = 0;` before the loop separately. This is
+   * because the vector entries are set to zero only on subranges of the
+   * vector, making sure that the vector entries stay in caches as much as
+   * possible.
+   *
+   * @param dst_vector_face_access Set the type of access into the vector
+   * `dst` that will happen inside the body of the @p face_operation
+   * function. As explained in the description of the DataAccessOnFaces
+   * struct, the purpose of this selection is to reduce the amount of data
+   * that must be exchanged over the MPI network (or via `memcpy` if within
+   * the shared memory region of a node) to gain performance. Note that there
+   * is no way to communicate this setting with the FEFaceEvaluation class,
+   * therefore this selection must be made at this site in addition to what is
+   * implemented inside the `face_operation` function. As a consequence, there
+   * is also no way to check that the setting passed to this call is
+   * consistent with what is later done by `FEFaceEvaluation`, and it is the
+   * user's responsibility to ensure correctness of data.
+   *
+   * @param src_vector_face_access Set the type of access into the vector
+   * `src` that will happen inside the body of the @p face_operation function,
+   * in analogy to `dst_vector_face_access`.
+   */
+  template <typename OutVector, typename InVector>
+  void loop (const std::function<void (const MatrixFree<dim,Number> &,
+                                       OutVector &,
+                                       const InVector &,
+                                       const std::pair<unsigned int,
+                                       unsigned int> &)> &cell_operation,
+             const std::function<void (const MatrixFree<dim,Number> &,
+                                       OutVector &,
+                                       const InVector &,
+                                       const std::pair<unsigned int,
+                                       unsigned int> &)> &face_operation,
+             const std::function<void (const MatrixFree<dim,Number> &,
+                                       OutVector &,
+                                       const InVector &,
+                                       const std::pair<unsigned int,
+                                       unsigned int> &)> &boundary_operation,
+             OutVector      &dst,
+             const InVector &src,
+             const bool      zero_dst_vector = false,
+             const DataAccessOnFaces dst_vector_face_access = DataAccessOnFaces::unspecified,
+             const DataAccessOnFaces src_vector_face_access = DataAccessOnFaces::unspecified) const;
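
A hedged call-site sketch for this variant: it assumes that `cell_operation` and `face_operation` objects of the same std::function type have already been built like the cell kernel shown earlier, and that the face kernels access only function values of neighbors, which justifies DataAccessOnFaces::values; all names are assumptions of the example, not part of the patch.

  typedef LinearAlgebra::distributed::Vector<double> VectorType;
  const std::function<void(const MatrixFree<dim, double> &,
                           VectorType &,
                           const VectorType &,
                           const std::pair<unsigned int, unsigned int> &)>
  boundary_operation = [](const MatrixFree<dim, double> &data,
                          VectorType &dst,
                          const VectorType &src,
                          const std::pair<unsigned int, unsigned int> &face_range)
  {
    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        // The boundary id of this face batch can be used to select which
        // boundary condition to apply.
        const types::boundary_id id = data.get_boundary_id(face);
        (void)id;
        // ... evaluate with FEFaceEvaluation and accumulate into dst ...
      }
  };

  matrix_free.loop(cell_operation, face_operation, boundary_operation,
                   dst, src,
                   /*zero_dst_vector*/ true,
                   MatrixFree<dim, double>::DataAccessOnFaces::values,
                   MatrixFree<dim, double>::DataAccessOnFaces::values);
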
+
+  /**
+   * This is the second variant to run the loop over all cells, interior
+   * faces, and boundary faces, now providing three function pointers to
+   * member functions of class @p CLASS with the signature <code>operation
+   * (const MatrixFree<dim,Number> &, OutVector &, InVector &,
+   * std::pair<unsigned int,unsigned int>&)const</code>. This method obviates
+   * the need to call std::bind to bind the class into the given
+   * function in case the local function needs to access data in the class
+   * (i.e., it is a non-static member function).
+   *
+   * @param cell_operation Pointer to member function of `CLASS` with the
+   * signature `cell_operation (const MatrixFree<dim,Number> &, OutVector &,
+   * InVector &, std::pair<unsigned int,unsigned int> &)` where the first
+   * argument passes the data of the calling class and the last argument
+   * defines the range of cells which should be worked on (typically more than
+   * one cell should be worked on in order to reduce overheads). Note that the
+   * loop will typically split the `cell_range` into smaller pieces and work
+   * on `cell_operation`, `face_operation`, and `boundary_operation`
+   * alternately, in order to increase the potential reuse of vector entries
+   * in caches.
+   *
+   * @param face_operation Pointer to member function of `CLASS` with the
+   * signature `face_operation (const MatrixFree<dim,Number> &, OutVector &,
+   * InVector &, std::pair<unsigned int,unsigned int> &)` in analogy to
+   * `cell_operation`, but now the part associated to the work on interior
+   * faces. Note that the MatrixFree framework treats periodic faces as
+   * interior ones, so they will be assigned their correct neighbor after
+   * applying periodicity constraints within the face_operation calls.
+   *
+   * @param boundary_operation Pointer to member function of `CLASS` with the
+   * signature `boundary_operation (const MatrixFree<dim,Number> &, OutVector
+   * &, InVector &, std::pair<unsigned int,unsigned int> &)` in analogy to
+   * `cell_operation` and `face_operation`, but now the part associated to the
+   * work on boundary faces. Boundary faces are separated by their
+   * `boundary_id` and it is possible to query that id using
+   * MatrixFree::get_boundary_id(). Note that interior and boundary faces use the
+   * same numbering, and faces in the interior are assigned lower numbers than
+   * the boundary faces.
+   *
+   * @param dst Destination vector holding the result. If the vector is of
+   * type LinearAlgebra::distributed::Vector (or composite objects thereof
+   * such as LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::compress() at the end of the call
+   * internally.
+   *
+   * @param src Input vector. If the vector is of type
+   * LinearAlgebra::distributed::Vector (or composite objects thereof such as
+   * LinearAlgebra::distributed::BlockVector), the loop calls
+   * LinearAlgebra::distributed::Vector::update_ghost_values() at the start of
+   * the call internally to make sure all necessary data is locally
+   * available. Note, however, that the vector is reset to its original state
+   * at the end of the loop, i.e., if the vector was not ghosted upon entry of
+   * the loop, it will not be ghosted upon finishing the loop.
+   *
+   * @param zero_dst_vector If this flag is set to `true`, the vector `dst`
+   * will be set to zero inside the loop. Use this mode when you perform a
+   * typical `vmult()` operation on a matrix object, as it will typically be
+   * faster than calling `dst = 0;` before the loop separately. This is
+   * because the vector entries are set to zero only on subranges of the
+   * vector, making sure that the vector entries stay in caches as much as
+   * possible.
+   *
+   * @param dst_vector_face_access Set the type of access into the vector
+   * `dst` that will happen inside the body of the @p face_operation
+   * function. As explained in the description of the DataAccessOnFaces
+   * struct, the purpose of this selection is to reduce the amount of data
+   * that must be exchanged over the MPI network (or via `memcpy` if within
+   * the shared memory region of a node) to gain performance. Note that there
+   * is no way to communicate this setting with the FEFaceEvaluation class,
+   * therefore this selection must be made at this site in addition to what is
+   * implemented inside the `face_operation` function. As a consequence, there
+   * is also no way to check that the setting passed to this call is
+   * consistent with what is later done by `FEFaceEvaluation`, and it is the
+   * user's responsibility to ensure correctness of data.
+   *
+   * @param src_vector_face_access Set the type of access into the vector
+   * `src` that will happen inside the body of the @p face_operation function,
+   * in analogy to `dst_vector_face_access`.
+   */
+  template <typename CLASS, typename OutVector, typename InVector>
+  void loop (void (CLASS::*cell_operation)(const MatrixFree &,
+                                           OutVector &,
+                                           const InVector &,
+                                           const std::pair<unsigned int,
+                                           unsigned int> &)const,
+             void (CLASS::*face_operation)(const MatrixFree &,
+                                           OutVector &,
+                                           const InVector &,
+                                           const std::pair<unsigned int,
+                                           unsigned int> &)const,
+             void (CLASS::*boundary_operation)(const MatrixFree &,
+                                               OutVector &,
+                                               const InVector &,
+                                               const std::pair<unsigned int,
+                                               unsigned int> &)const,
+             const CLASS    *owning_class,
+             OutVector      &dst,
+             const InVector &src,
+             const bool      zero_dst_vector = false,
+             const DataAccessOnFaces dst_vector_face_access = DataAccessOnFaces::unspecified,
+             const DataAccessOnFaces src_vector_face_access = DataAccessOnFaces::unspecified) const;
+
+  /**
+   * Same as above, but for class member functions which are non-const.
+   */
+  template <typename CLASS, typename OutVector, typename InVector>
+  void loop (void (CLASS::*cell_operation)(const MatrixFree &,
+                                           OutVector &,
+                                           const InVector &,
+                                           const std::pair<unsigned int,
+                                           unsigned int> &),
+             void (CLASS::*face_operation)(const MatrixFree &,
+                                           OutVector &,
+                                           const InVector &,
+                                           const std::pair<unsigned int,
+                                           unsigned int> &),
+             void (CLASS::*boundary_operation)(const MatrixFree &,
+                                               OutVector &,
+                                               const InVector &,
+                                               const std::pair<unsigned int,
+                                               unsigned int> &),
+             CLASS          *owning_class,
+             OutVector      &dst,
+             const InVector &src,
+             const bool      zero_dst_vector = false,
+             const DataAccessOnFaces dst_vector_face_access = DataAccessOnFaces::unspecified,
+             const DataAccessOnFaces src_vector_face_access = DataAccessOnFaces::unspecified) const;
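
For the member-function-pointer variants, the call site typically looks like the following sketch; the operator class `MyOperator` and its `local_apply_*` member functions are hypothetical names used only for illustration.

  // Inside a member function of a hypothetical class MyOperator that owns
  // a MatrixFree<dim, double> object named data:
  data.loop(&MyOperator::local_apply_cell,
            &MyOperator::local_apply_face,
            &MyOperator::local_apply_boundary,
            this, dst, src,
            /*zero_dst_vector*/ true,
            MatrixFree<dim, double>::DataAccessOnFaces::gradients,
            MatrixFree<dim, double>::DataAccessOnFaces::gradients);
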
 
   /**
    * In the hp adaptive case, a subrange of cells as computed during the cell
@@ -496,7 +986,7 @@ public:
   std::pair<unsigned int,unsigned int>
   create_cell_subrange_hp (const std::pair<unsigned int,unsigned int> &range,
                            const unsigned int fe_degree,
-                           const unsigned int vector_component = 0) const;
+                           const unsigned int dof_handler_index = 0) const;
 
   /**
    * In the hp adaptive case, a subrange of cells as computed during the cell
@@ -507,7 +997,7 @@ public:
   std::pair<unsigned int,unsigned int>
   create_cell_subrange_hp_by_index (const std::pair<unsigned int,unsigned int> &range,
                                     const unsigned int fe_index,
-                                    const unsigned int vector_component = 0) const;
+                                    const unsigned int dof_handler_index = 0) const;
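
A hedged sketch of how such a subrange is typically consumed inside the cell operation of an hp computation; the fixed degree 2 chosen here is an arbitrary assumption of the example.

  // Inside a cell_operation that received cell_range in an hp context:
  const std::pair<unsigned int, unsigned int> subrange =
    data.create_cell_subrange_hp(cell_range, 2);
  if (subrange.second > subrange.first)
    {
      // All cell batches in [subrange.first, subrange.second) use degree 2,
      // so a fixed-degree FEEvaluation can safely be used on them.
      FEEvaluation<dim, 2> phi(data);
      for (unsigned int cell = subrange.first; cell < subrange.second; ++cell)
        {
          phi.reinit(cell);
          // ... evaluate and integrate as in the non-hp case ...
        }
    }
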
 
   //@}
 
@@ -537,7 +1027,7 @@ public:
    */
   template <typename VectorType>
   void initialize_dof_vector(VectorType &vec,
-                             const unsigned int vector_component=0) const;
+                             const unsigned int dof_handler_index=0) const;
 
   /**
    * Initialize function for a distributed vector. The length of the vector is
@@ -561,7 +1051,7 @@ public:
    */
   template <typename Number2>
   void initialize_dof_vector(LinearAlgebra::distributed::Vector<Number2> &vec,
-                             const unsigned int vector_component=0) const;
+                             const unsigned int dof_handler_index=0) const;
 
   /**
    * Return the partitioner that represents the locally owned data and the
@@ -574,19 +1064,19 @@ public:
    * be reused from one vector to another.
    */
   const std::shared_ptr<const Utilities::MPI::Partitioner> &
-  get_vector_partitioner (const unsigned int vector_component=0) const;
+  get_vector_partitioner (const unsigned int dof_handler_index=0) const;
 
   /**
    * Return the set of degrees of freedom that are owned by the processor.
    */
   const IndexSet &
-  get_locally_owned_set (const unsigned int fe_component = 0) const;
+  get_locally_owned_set (const unsigned int dof_handler_index=0) const;
 
   /**
    * Return the set of ghost degrees of freedom needed but not owned by the processor.
    */
   const IndexSet &
-  get_ghost_set (const unsigned int fe_component = 0) const;
+  get_ghost_set (const unsigned int dof_handler_index=0) const;
 
   /**
    * Return a list of all degrees of freedom that are constrained. The list
@@ -598,14 +1088,20 @@ public:
    * that are owned locally, not for ghosts.
    */
   const std::vector<unsigned int> &
-  get_constrained_dofs (const unsigned int fe_component = 0) const;
+  get_constrained_dofs (const unsigned int dof_handler_index=0) const;
 
   /**
-   * Calls renumber_dofs function in dof_info which renumbers the degrees of
-   * freedom according to the ordering for parallelization.
+   * Computes a renumbering of degrees of freedom that better fits the data
+   * layout in MatrixFree. Note that this function does not re-arrange the
+   * information stored in this class, but rather creates a renumbering for
+   * consumption by DoFHandler::renumber_dofs. To have any effect, the
+   * MatrixFree object must be set up again using the renumbered DoFHandler
+   * and ConstraintMatrix. Note that if the DoFHandler calls
+   * DoFHandler::renumber_dofs, all information in MatrixFree becomes invalid.
    */
   void renumber_dofs (std::vector<types::global_dof_index> &renumbering,
-                      const unsigned int vector_component = 0);
+                      const unsigned int dof_handler_index=0);
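
A hedged sketch of the intended workflow; the surrounding objects (`dof_handler`, `constraints`, `quadrature`, `additional_data`) are assumptions of the example, and the constraints must be rebuilt in whatever way they were built originally.

  std::vector<types::global_dof_index> renumbering;
  matrix_free.renumber_dofs(renumbering);   // only computes the new order
  dof_handler.renumber_dofs(renumbering);   // invalidates matrix_free

  // Constraints store global indices, so rebuild them before reinitializing.
  constraints.clear();
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  constraints.close();
  matrix_free.reinit(dof_handler, constraints, quadrature, additional_data);
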
 
   //@}
 
@@ -626,33 +1122,97 @@ public:
   unsigned int n_components () const;
 
   /**
-   * Return the number of cells this structure is based on. If you are using
-   * a usual DoFHandler, it corresponds to the number of (locally owned)
-   * active cells. Note that most data structures in this class do not
-   * directly act on this number but rather on n_macro_cells() which gives the
-   * number of cells as seen when lumping several cells together with
-   * vectorization.
+   * For the finite element underlying the DoFHandler specified by @p
+   * dof_handler_index, return the number of base elements.
+   */
+  unsigned int n_base_elements (const unsigned int dof_handler_index) const;
+
+  /**
+   * Return the number of cells this structure is based on. If you are using a
+   * usual DoFHandler, it corresponds to the number of (locally owned) active
+   * cells. Note that most data structures in this class do not directly act
+   * on this number but rather on n_cell_batches() which gives the number of
+   * cells as seen when lumping several cells together with vectorization.
    */
   unsigned int n_physical_cells () const;
 
   /**
-   * Return the number of macro cells that this structure works on, i.e., the
-   * number of cell chunks that are worked on after the application of
-   * vectorization which in general works on several cells at once. The cell
-   * range in @p cell_loop runs from zero to n_macro_cells() (exclusive), so
-   * this is the appropriate size if you want to store arrays of data for all
-   * cells to be worked on. This number is approximately
+   * Return the number of cell batches that this structure works on.  The
+   * batches are formed by application of vectorization over several cells in
+   * general. The cell range in @p cell_loop runs from zero to n_cell_batches()
+   * (exclusive), so this is the appropriate size if you want to store arrays
+   * of data for all cells to be worked on. This number is approximately
    * n_physical_cells()/VectorizedArray::n_array_elements (depending on how
    * many cell chunks do not get filled up completely).
    */
   unsigned int n_macro_cells () const;
 
+  /**
+   * Return the number of cell batches that this structure works on. The
+   * batches are formed by application of vectorization over several cells in
+   * general. The cell range in @p cell_loop runs from zero to
+   * n_cell_batches() (exclusive), so this is the appropriate size if you want
+   * to store arrays of data for all cells to be worked on. This number is
+   * approximately n_physical_cells()/VectorizedArray::n_array_elements
+   * (depending on how many cell chunks do not get filled up completely).
+   */
+  unsigned int n_cell_batches () const;
+
+  /**
+   * Returns the number of additional cell batches that this structure keeps
+   * for face integration. Note that not all cells that are ghosted in the
+   * triangulation are kept in this data structure, but only the ones which
+   * are necessary for evaluating face integrals from both sides.
+   */
+  unsigned int n_ghost_cell_batches () const;
+
+  /**
+   * Returns the number of interior face batches that this structure works on.
+   * The batches are formed by application of vectorization over several faces
+   * in general. The face range in @p loop runs from zero to
+   * n_inner_face_batches() (exclusive), so this is the appropriate size if
+   * you want to store arrays of data for all interior faces to be worked on.
+   */
+  unsigned int n_inner_face_batches () const;
+
+  /**
+   * Returns the number of boundary face batches that this structure works on.
+   * The batches are formed by application of vectorization over several faces
+   * in general. The face range in @p loop runs from n_inner_face_batches() to
+   * n_inner_face_batches()+n_boundary_face_batches() (exclusive), so if you
+   * need to store arrays that hold data for all boundary faces but not the
+   * interior ones, this number gives the appropriate size.
+   */
+  unsigned int n_boundary_face_batches () const;
+
+  /**
+   * Returns the number of faces that are not processed locally but belong to
+   * locally owned faces.
+   */
+  unsigned int n_ghost_inner_face_batches() const;
+
+  /**
+   * In order to apply different operators to different parts of the boundary,
+   * this method can be used to query the boundary id of a given face in the
+   * faces' own sorting by lanes in a VectorizedArray. Only valid for an index
+   * indicating a boundary face.
+   */
+  types::boundary_id get_boundary_id (const unsigned int macro_face) const;
+
+  /**
+   * Returns the boundary ids of the faces with the given face number within
+   * the cells of a cell batch, using the cells' sorting into lanes of the
+   * VectorizedArray.
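+   *
+   * A minimal sketch (assuming a MatrixFree<dim,double> object @p data set up
+   * with the faces-by-cells information and a valid cell batch index @p cell;
+   * both are assumptions of this example) could read:
+   * @code
+   * for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
+   *   {
+   *     const auto ids = data.get_faces_by_cells_boundary_id(cell, f);
+   *     // lanes not backed by a real cell hold numbers::invalid_boundary_id
+   *   }
+   * @endcode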
+   */
+  std::array<types::boundary_id, VectorizedArray<Number>::n_array_elements>
+  get_faces_by_cells_boundary_id (const unsigned int macro_cell,
+                                  const unsigned int face_number) const;
+
   /**
    * In case this structure was built based on a DoFHandler, this returns the
    * DoFHandler.
    */
   const DoFHandler<dim> &
-  get_dof_handler (const unsigned int fe_component = 0) const;
+  get_dof_handler (const unsigned int dof_handler_index = 0) const;
 
   /**
    * This returns the cell iterator in deal.II speak to a given cell in the
@@ -683,7 +1243,7 @@ public:
   typename hp::DoFHandler<dim>::active_cell_iterator
   get_hp_cell_iterator (const unsigned int macro_cell_number,
                         const unsigned int vector_number,
-                        const unsigned int fe_component = 0) const;
+                        const unsigned int dof_handler_index = 0) const;
 
   /**
    * Since this class uses vectorized data types with usually more than one
@@ -694,27 +1254,50 @@ public:
    * mixed with deal.II access to cells, care needs to be taken. This function
    * returns @p true if not all @p vectorization_length cells for the given @p
    * macro_cell are real cells. To find out how many cells are actually used,
-   * use the function @p n_components_filled.
+   * use the function @p n_active_entries_per_cell_batch.
    */
   bool
   at_irregular_cell (const unsigned int macro_cell_number) const;
 
   /**
-   * Use this function to find out how many cells over the length of
-   * vectorization data types correspond to real cells in the mesh. For most
-   * given @p macro_cells, this is just @p vectorization_length many, but
-   * there might be one or a few meshes (where the numbers do not add up)
-   * where there are less such components filled, indicated by the function @p
+   * This query returns how many lanes over the length of the vectorization
+   * data types correspond to actual cells in the mesh. For most values of @p
+   * cell_batch_number, this is just @p vectorization_length many, but there
+   * might be one or a few batches (where the numbers do not add up) with
+   * fewer lanes filled, as indicated by the function @p
+   * at_irregular_cell.
+   */
+  unsigned int
+  n_components_filled (const unsigned int cell_batch_number) const;
+
+  /**
+   * This query returns how many lanes over the length of the vectorization
+   * data types correspond to actual cells in the mesh. For most cell batches
+   * in n_cell_batches(), this is just @p vectorization_length many, but there
+   * might be one or a few batches (where the numbers do not add up) with
+   * fewer lanes filled, as indicated by the function @p
    * at_irregular_cell.
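+   *
+   * A minimal sketch (assuming a fully initialized MatrixFree<dim,double>
+   * object @p data, a name chosen only for this example) that only visits
+   * lanes backed by real cells could read:
+   * @code
+   * for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
+   *   for (unsigned int lane = 0;
+   *        lane < data.n_active_entries_per_cell_batch(cell); ++lane)
+   *     {
+   *       // only these lanes correspond to real cells, accessible e.g.
+   *       // through get_cell_iterator(cell, lane)
+   *     }
+   * @endcode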
    */
   unsigned int
-  n_components_filled (const unsigned int macro_cell_number) const;
+  n_active_entries_per_cell_batch (const unsigned int cell_batch_number) const;
+
+  /**
+   * Use this function to find out how many faces over the length of
+   * vectorization data types correspond to real faces (both interior and
+   * boundary faces, as those use the same indexing but with different ranges)
+   * in the mesh. For most given indices in n_inner_face_batches() and
+   * n_boundary_face_batches(), this is just @p vectorization_length many, but
+   * there might be one or a few batches (where the numbers do not add up)
+   * with fewer lanes filled.
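+   *
+   * A minimal sketch (assuming a fully initialized MatrixFree<dim,double>
+   * object @p data, a name chosen only for this example) for the interior
+   * face range could read:
+   * @code
+   * for (unsigned int face = 0; face < data.n_inner_face_batches(); ++face)
+   *   for (unsigned int lane = 0;
+   *        lane < data.n_active_entries_per_face_batch(face); ++lane)
+   *     {
+   *       // only these lanes correspond to real faces of the mesh
+   *     }
+   * @endcode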
+   */
+  unsigned int
+  n_active_entries_per_face_batch (const unsigned int face_batch_number) const;
 
   /**
    * Return the number of degrees of freedom per cell for a given hp index.
    */
   unsigned int
-  get_dofs_per_cell (const unsigned int fe_component = 0,
+  get_dofs_per_cell (const unsigned int dof_handler_index = 0,
                      const unsigned int hp_active_fe_index = 0) const;
 
   /**
@@ -754,6 +1337,21 @@ public:
   get_face_quadrature (const unsigned int quad_index = 0,
                        const unsigned int hp_active_fe_index = 0) const;
 
+  /**
+   * Return the category the current batch of cells was assigned to. For a
+   * non-hp DoFHandler, the categories correspond to the values provided in
+   * the field AdditionalData::cell_vectorization_category, whereas in the
+   * hp-adaptive case the category equals the active FE index of the batch.
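+   *
+   * For illustration, a sketch (assuming a MatrixFree<dim,double> object @p
+   * data that was set up with AdditionalData::cell_vectorization_category
+   * filled; both name and setup are assumptions of this example) could read:
+   * @code
+   * for (unsigned int cell = 0; cell < data.n_cell_batches(); ++cell)
+   *   if (data.get_cell_category(cell) == 1)
+   *     {
+   *       // treat the cells that were put into category 1
+   *     }
+   * @endcode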
+   */
+  unsigned int get_cell_category (const unsigned int macro_cell) const;
+
+  /**
+   * Return the categories of the cells on the two sides of the current batch
+   * of faces.
+   */
+  std::pair<unsigned int,unsigned int>
+  get_face_category (const unsigned int macro_face) const;
+
   /**
    * Queries whether or not the indexation has been set.
    */
@@ -788,7 +1386,8 @@ public:
   //@}
 
   /**
-   * @name 5: Access of internal data structure (expert mode)
+   * @name 5: Access of internal data structure (expert mode, interface not
+   * stable between releases)
    */
   //@{
   /**
@@ -814,7 +1413,7 @@ public:
    * Return information on indexation degrees of freedom.
    */
   const internal::MatrixFreeFunctions::DoFInfo &
-  get_dof_info (const unsigned int fe_component = 0) const;
+  get_dof_info (const unsigned int dof_handler_index_component = 0) const;
 
   /**
    * Return the number of weights in the constraint pool.
@@ -839,11 +1438,18 @@ public:
   /**
    * Return the unit cell information for given hp index.
    */
-  const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number>> &
-      get_shape_info (const unsigned int fe_component = 0,
-                      const unsigned int quad_index   = 0,
-                      const unsigned int hp_active_fe_index = 0,
-                      const unsigned int hp_active_quad_index = 0) const;
+  const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number> > &
+  get_shape_info (const unsigned int dof_handler_index_component = 0,
+                  const unsigned int quad_index                  = 0,
+                  const unsigned int fe_base_element             = 0,
+                  const unsigned int hp_active_fe_index          = 0,
+                  const unsigned int hp_active_quad_index        = 0) const;
+
+  /**
+   * Return the connectivity information of a face.
+   */
+  const internal::MatrixFreeFunctions::FaceToCellTopology<VectorizedArray<Number>::n_array_elements> &
+  get_face_info (const unsigned int face_batch_number) const;
 
   /**
    * Obtains a scratch data object for internal use. Make sure to release it
@@ -865,6 +1471,22 @@ public:
    */
   void release_scratch_data(const AlignedVector<VectorizedArray<Number> > *memory) const;
 
+  /**
+   * Obtains a scratch data object for internal use. Make sure to release it
+   * afterwards by passing the pointer you obtain from this object to the
+   * release_scratch_data_non_threadsafe() function. Note that, as opposed to
+   * acquire_scratch_data(), this method may only be called by a single thread
+   * at a time; in contrast to acquire_scratch_data(), however, the thread
+   * releasing the scratch data may be different from the one that acquired
+   * it.
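+   *
+   * A minimal sketch of the intended acquire/release pairing (assuming a
+   * MatrixFree<dim,double> object @p data and a user-chosen size
+   * @p n_temporary_entries; both names are assumptions of this example)
+   * could read:
+   * @code
+   * AlignedVector<double> *scratch = data.acquire_scratch_data_non_threadsafe();
+   * scratch->resize_fast(n_temporary_entries);
+   * // ... use the scratch memory ...
+   * data.release_scratch_data_non_threadsafe(scratch);
+   * @endcode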
+   */
+  AlignedVector<Number> *acquire_scratch_data_non_threadsafe() const;
+
+  /**
+   * Makes the scratch data object available again.
+   */
+  void release_scratch_data_non_threadsafe(const AlignedVector<Number> *memory) const;
+
   //@}
 
 private:
@@ -878,7 +1500,7 @@ private:
                         const std::vector<const ConstraintMatrix *> &constraint,
                         const std::vector<IndexSet>       &locally_owned_set,
                         const std::vector<hp::QCollection<1> > &quad,
-                        const AdditionalData               additional_data);
+                        const AdditionalData              &additional_data);
 
   /**
    * Same as before but for hp::DoFHandler instead of generic DoFHandler type.
@@ -888,7 +1510,7 @@ private:
                         const std::vector<const ConstraintMatrix *> &constraint,
                         const std::vector<IndexSet>      &locally_owned_set,
                         const std::vector<hp::QCollection<1> > &quad,
-                        const AdditionalData              additional_data);
+                        const AdditionalData             &additional_data);
 
   /**
    * Initializes the fields in DoFInfo together with the constraint pool that
@@ -898,19 +1520,26 @@ private:
    */
   void
   initialize_indices (const std::vector<const ConstraintMatrix *> &constraint,
-                      const std::vector<IndexSet> &locally_owned_set);
+                      const std::vector<IndexSet> &locally_owned_set,
+                      const AdditionalData        &additional_data);
 
   /**
    * Initializes the DoFHandlers based on a DoFHandler<dim> argument.
    */
   void initialize_dof_handlers (const std::vector<const DoFHandler<dim>*> &dof_handlers,
-                                const unsigned int                         level);
+                                const AdditionalData &additional_data);
 
   /**
    * Initializes the DoFHandlers based on a hp::DoFHandler<dim> argument.
    */
   void initialize_dof_handlers (const std::vector<const hp::DoFHandler<dim>*> &dof_handlers,
-                                const unsigned int                             level);
+                                const AdditionalData &additional_data);
+
+  /**
+   * Set up the connectivity graph with information on the dependencies
+   * between blocks of cells due to shared faces.
+   */
+  void make_connectivity_graph_faces (DynamicSparsityPattern &connectivity);
 
   /**
    * This struct defines which DoFHandler has actually been given at
@@ -987,6 +1616,7 @@ private:
    */
   std::vector<std::pair<unsigned int,unsigned int> > cell_level_index;
 
+
   /**
    * For discontinuous Galerkin, the cell_level_index includes cells that are
    * not on the local processor but that are needed to evaluate the cell
@@ -996,13 +1626,18 @@ private:
   unsigned int cell_level_index_end_local;
 
   /**
-   * Stores how many cells we have, how many cells that we see after applying
-   * vectorization (i.e., the number of macro cells), MPI-related stuff, and,
-   * if threads are enabled, information regarding the shared memory
-   * parallelization.
+   * Stores the basic layout of the cells and faces to be treated, including
+   * the task layout for the shared memory parallelization and possible
+   * overlaps between communications and computations with MPI.
    */
   internal::MatrixFreeFunctions::TaskInfo task_info;
 
+  /**
+   * Vector holding face information. Only initialized if
+   * build_face_info=true.
+   */
+  internal::MatrixFreeFunctions::FaceInfo<VectorizedArray<Number>::n_array_elements> face_info;
+
   /**
    * Stores whether indices have been initialized.
    */
@@ -1021,6 +1656,12 @@ private:
    * objects.
    */
   mutable Threads::ThreadLocalStorage<std::list<std::pair<bool, AlignedVector<VectorizedArray<Number> > > > > scratch_pad;
+
+  /**
+   * Scratchpad memory for use in evaluation and other contexts (non-thread-safe
+   * variant).
+   */
+  mutable std::list<std::pair<bool, AlignedVector<Number> > > scratch_pad_non_threadsafe;
 };
 
 
@@ -1030,6 +1671,7 @@ private:
 #ifndef DOXYGEN
 
 
+
 template <int dim, typename Number>
 template <typename VectorType>
 inline
@@ -1037,7 +1679,7 @@ void
 MatrixFree<dim,Number>::initialize_dof_vector(VectorType &vec,
                                               const unsigned int comp) const
 {
-  AssertIndexRange (comp, n_components());
+  AssertIndexRange(comp, n_components());
   vec.reinit(dof_info[comp].vector_partitioner->size());
 }
 
@@ -1050,7 +1692,7 @@ void
 MatrixFree<dim,Number>::initialize_dof_vector(LinearAlgebra::distributed::Vector<Number2> &vec,
                                               const unsigned int comp) const
 {
-  AssertIndexRange (comp, n_components());
+  AssertIndexRange(comp, n_components());
   vec.reinit(dof_info[comp].vector_partitioner);
 }
 
@@ -1061,7 +1703,7 @@ inline
 const std::shared_ptr<const Utilities::MPI::Partitioner> &
 MatrixFree<dim,Number>::get_vector_partitioner (const unsigned int comp) const
 {
-  AssertIndexRange (comp, n_components());
+  AssertIndexRange(comp, n_components());
   return dof_info[comp].vector_partitioner;
 }
 
@@ -1072,7 +1714,7 @@ inline
 const std::vector<unsigned int> &
 MatrixFree<dim,Number>::get_constrained_dofs (const unsigned int comp) const
 {
-  AssertIndexRange (comp, n_components());
+  AssertIndexRange(comp, n_components());
   return dof_info[comp].constrained_dofs;
 }
 
@@ -1089,6 +1731,18 @@ MatrixFree<dim,Number>::n_components () const
 
 
 
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_base_elements (const unsigned int dof_no) const
+{
+  AssertDimension (dof_handlers.n_dof_handlers, dof_info.size());
+  AssertIndexRange(dof_no, dof_handlers.n_dof_handlers);
+  return dof_handlers.dof_handler[dof_no]->get_fe().n_base_elements();
+}
+
+
+
 template <int dim, typename Number>
 inline
 const internal::MatrixFreeFunctions::TaskInfo &
@@ -1129,6 +1783,97 @@ MatrixFree<dim,Number>::n_physical_cells () const
 
 
 
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_cell_batches () const
+{
+  return *(task_info.cell_partition_data.end()-2);
+}
+
+
+
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_ghost_cell_batches () const
+{
+  return *(task_info.cell_partition_data.end()-1)-
+         *(task_info.cell_partition_data.end()-2);
+}
+
+
+
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_inner_face_batches () const
+{
+  if (task_info.face_partition_data.size() == 0)
+    return 0;
+  return task_info.face_partition_data.back();
+}
+
+
+
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_boundary_face_batches () const
+{
+  if (task_info.face_partition_data.size() == 0)
+    return 0;
+  return task_info.boundary_partition_data.back()-task_info.face_partition_data.back();
+}
+
+
+
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::n_ghost_inner_face_batches () const
+{
+  if (task_info.face_partition_data.size() == 0)
+    return 0;
+  return face_info.faces.size() - task_info.boundary_partition_data.back();
+}
+
+
+
+template <int dim, typename Number>
+inline
+types::boundary_id
+MatrixFree<dim,Number>::get_boundary_id(const unsigned int macro_face) const
+{
+  Assert(macro_face >= task_info.boundary_partition_data[0] &&
+         macro_face < task_info.boundary_partition_data.back(),
+         ExcIndexRange(macro_face,
+                       task_info.boundary_partition_data[0],
+                       task_info.boundary_partition_data.back()));
+  return types::boundary_id(face_info.faces[macro_face].exterior_face_no);
+}
+
+
+
+template <int dim, typename Number>
+inline
+std::array<types::boundary_id, VectorizedArray<Number>::n_array_elements>
+MatrixFree<dim,Number>::get_faces_by_cells_boundary_id (const unsigned int macro_cell,
+                                                        const unsigned int face_number) const
+{
+  AssertIndexRange(macro_cell, n_macro_cells());
+  AssertIndexRange(face_number, GeometryInfo<dim>::faces_per_cell);
+  Assert(face_info.cell_and_face_boundary_id.size(0)>=n_macro_cells(),
+         ExcNotInitialized());
+  std::array<types::boundary_id, VectorizedArray<Number>::n_array_elements> result;
+  result.fill(numbers::invalid_boundary_id);
+  for (unsigned int v=0; v<n_active_entries_per_cell_batch(macro_cell); ++v)
+    result[v] = face_info.cell_and_face_boundary_id(macro_cell, face_number, v);
+  return result;
+}
+
+
+
 template <int dim, typename Number>
 inline
 const internal::MatrixFreeFunctions::MappingInfo<dim,Number> &
@@ -1142,7 +1887,7 @@ MatrixFree<dim,Number>::get_mapping_info () const
 template <int dim, typename Number>
 inline
 const internal::MatrixFreeFunctions::DoFInfo &
-MatrixFree<dim,Number>::get_dof_info (unsigned int dof_index) const
+MatrixFree<dim,Number>::get_dof_info (const unsigned int dof_index) const
 {
   AssertIndexRange (dof_index, n_components());
   return dof_info[dof_index];
@@ -1190,204 +1935,92 @@ std::pair<unsigned int,unsigned int>
 MatrixFree<dim,Number>::create_cell_subrange_hp
 (const std::pair<unsigned int,unsigned int> &range,
  const unsigned int degree,
- const unsigned int vector_component) const
+ const unsigned int dof_handler_component) const
 {
-  AssertIndexRange (vector_component, dof_info.size());
-  if (dof_info[vector_component].cell_active_fe_index.empty())
+  if (dof_info[dof_handler_component].cell_active_fe_index.empty())
     {
-      AssertDimension (dof_info[vector_component].fe_index_conversion.size(),1);
-      if (dof_info[vector_component].fe_index_conversion[0].first == degree)
+      AssertDimension (dof_info[dof_handler_component].fe_index_conversion.size(),1);
+      AssertDimension (dof_info[dof_handler_component].fe_index_conversion[0].size(), 1);
+      if (dof_info[dof_handler_component].fe_index_conversion[0][0] == degree)
         return range;
       else
         return std::pair<unsigned int,unsigned int> (range.second,range.second);
     }
 
   const unsigned int fe_index =
-    dof_info[vector_component].fe_index_from_degree(degree);
-  if (fe_index >= dof_info[vector_component].max_fe_index)
+    dof_info[dof_handler_component].fe_index_from_degree(0, degree);
+  if (fe_index >= dof_info[dof_handler_component].max_fe_index)
     return std::pair<unsigned int,unsigned int>(range.second, range.second);
   else
-    return create_cell_subrange_hp_by_index (range, fe_index, vector_component);
+    return create_cell_subrange_hp_by_index (range, fe_index, dof_handler_component);
 }
 
 
 
 template <int dim, typename Number>
 inline
-std::pair<unsigned int,unsigned int>
-MatrixFree<dim,Number>::create_cell_subrange_hp_by_index
-(const std::pair<unsigned int,unsigned int> &range,
- const unsigned int fe_index,
- const unsigned int vector_component) const
+bool
+MatrixFree<dim,Number>::at_irregular_cell (const unsigned int macro_cell) const
 {
-  AssertIndexRange (fe_index, dof_info[vector_component].max_fe_index);
-  const std::vector<unsigned int> &fe_indices =
-    dof_info[vector_component].cell_active_fe_index;
-  if (fe_indices.size() == 0)
-    return range;
-  else
-    {
-      // the range over which we are searching must be ordered, otherwise we
-      // got a range that spans over too many cells
-#ifdef DEBUG
-      for (unsigned int i=range.first+1; i<range.second; ++i)
-        Assert (fe_indices[i] >= fe_indices[i-1],
-                ExcMessage ("Cell range must be over sorted range of fe indices in hp case!"));
-      AssertIndexRange(range.first,fe_indices.size()+1);
-      AssertIndexRange(range.second,fe_indices.size()+1);
-#endif
-      std::pair<unsigned int,unsigned int> return_range;
-      return_range.first =
-        std::lower_bound(fe_indices.data() + range.first,
-                         fe_indices.data() + range.second, fe_index)
-        -fe_indices.data() ;
-      return_range.second =
-        std::lower_bound(fe_indices.data() + return_range.first,
-                         fe_indices.data() + range.second,
-                         fe_index + 1)-fe_indices.data();
-      Assert(return_range.first >= range.first &&
-             return_range.second <= range.second, ExcInternalError());
-      return return_range;
-    }
+  AssertIndexRange (macro_cell, task_info.cell_partition_data.back());
+  return VectorizedArray<Number>::n_array_elements > 1 &&
+         cell_level_index[(macro_cell+1)*VectorizedArray<Number>::n_array_elements-1] ==
+         cell_level_index[(macro_cell+1)*VectorizedArray<Number>::n_array_elements-2];
 }
 
 
 
 template <int dim, typename Number>
 inline
-void
-MatrixFree<dim,Number>::renumber_dofs (std::vector<types::global_dof_index> &renumbering,
-                                       const unsigned int vector_component)
+unsigned int
+MatrixFree<dim,Number>::n_components_filled (const unsigned int cell_batch_number) const
 {
-  AssertIndexRange(vector_component, dof_info.size());
-  dof_info[vector_component].renumber_dofs (renumbering);
+  return n_active_entries_per_cell_batch(cell_batch_number);
 }
 
 
 
 template <int dim, typename Number>
 inline
-const DoFHandler<dim> &
-MatrixFree<dim,Number>::get_dof_handler (const unsigned int dof_index) const
+unsigned int
+MatrixFree<dim,Number>::n_active_entries_per_cell_batch(const unsigned int cell_batch_number) const
 {
-  AssertIndexRange (dof_index, n_components());
-  if (dof_handlers.active_dof_handler == DoFHandlers::usual)
-    {
-      AssertDimension (dof_handlers.dof_handler.size(),
-                       dof_handlers.n_dof_handlers);
-      return *dof_handlers.dof_handler[dof_index];
-    }
-  else
-    {
-      Assert (false, ExcNotImplemented());
-      // put pseudo return argument to avoid compiler error, but trigger a
-      // segfault in case this is only run in optimized mode
-      return *dof_handlers.dof_handler[numbers::invalid_unsigned_int];
-    }
+  AssertIndexRange (cell_batch_number, task_info.cell_partition_data.back());
+  unsigned int n_components = VectorizedArray<Number>::n_array_elements;
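+  // A partially filled cell batch repeats its last valid cell in
+  // cell_level_index, so reduce the count while the last two entries coincide.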
+  while (n_components > 1 &&
+         cell_level_index[cell_batch_number*VectorizedArray<Number>::n_array_elements+n_components-1] ==
+         cell_level_index[cell_batch_number*VectorizedArray<Number>::n_array_elements+n_components-2])
+    --n_components;
+  AssertIndexRange(n_components-1, VectorizedArray<Number>::n_array_elements);
+  return n_components;
 }
 
 
 
 template <int dim, typename Number>
 inline
-typename DoFHandler<dim>::cell_iterator
-MatrixFree<dim,Number>::get_cell_iterator(const unsigned int macro_cell_number,
-                                          const unsigned int vector_number,
-                                          const unsigned int dof_index) const
+unsigned int
+MatrixFree<dim,Number>::n_active_entries_per_face_batch(const unsigned int face_batch_number) const
 {
-  const unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
-#ifdef DEBUG
-  AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
-  AssertIndexRange (macro_cell_number, n_macro_cells());
-  AssertIndexRange (vector_number, vectorization_length);
-  const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2];
-  if (irreg_filled > 0)
-    AssertIndexRange (vector_number, irreg_filled);
-#endif
-
-  const DoFHandler<dim> *dofh = nullptr;
-  if (dof_handlers.active_dof_handler == DoFHandlers::usual)
-    {
-      AssertDimension (dof_handlers.dof_handler.size(),
-                       dof_handlers.n_dof_handlers);
-      dofh = dof_handlers.dof_handler[dof_index];
-    }
-  else
-    {
-      Assert (false, ExcMessage ("Cannot return DoFHandler<dim>::cell_iterator "
-                                 "for underlying DoFHandler!"));
-    }
-
-  std::pair<unsigned int,unsigned int> index =
-    cell_level_index[macro_cell_number*vectorization_length+vector_number];
-  return typename DoFHandler<dim>::cell_iterator
-         (&dofh->get_triangulation(), index.first, index.second, dofh);
+  AssertIndexRange (face_batch_number, face_info.faces.size());
+  unsigned int n_components = VectorizedArray<Number>::n_array_elements;
+  while (n_components > 1 &&
+         face_info.faces[face_batch_number].cells_interior[n_components-1] ==
+         numbers::invalid_unsigned_int)
+    --n_components;
+  AssertIndexRange(n_components-1, VectorizedArray<Number>::n_array_elements);
+  return n_components;
 }
 
 
 
 template <int dim, typename Number>
 inline
-typename hp::DoFHandler<dim>::active_cell_iterator
-MatrixFree<dim,Number>::get_hp_cell_iterator(const unsigned int macro_cell_number,
-                                             const unsigned int vector_number,
-                                             const unsigned int dof_index) const
+unsigned int
+MatrixFree<dim,Number>::get_dofs_per_cell(const unsigned int dof_handler_index,
+                                          const unsigned int active_fe_index) const
 {
-  const unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
-#ifdef DEBUG
-  AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
-  AssertIndexRange (macro_cell_number, n_macro_cells());
-  AssertIndexRange (vector_number, vectorization_length);
-  const unsigned int irreg_filled = dof_info[dof_index].row_starts[macro_cell_number][2];
-  if (irreg_filled > 0)
-    AssertIndexRange (vector_number, irreg_filled);
-#endif
-
-  Assert (dof_handlers.active_dof_handler == DoFHandlers::hp,
-          ExcNotImplemented());
-  const hp::DoFHandler<dim> *dofh = dof_handlers.hp_dof_handler[dof_index];
-  std::pair<unsigned int,unsigned int> index =
-    cell_level_index[macro_cell_number*vectorization_length+vector_number];
-  return typename hp::DoFHandler<dim>::cell_iterator
-         (&dofh->get_triangulation(), index.first, index.second, dofh);
-}
-
-
-
-template <int dim, typename Number>
-inline
-bool
-MatrixFree<dim,Number>::at_irregular_cell (const unsigned int macro_cell) const
-{
-  AssertIndexRange (macro_cell, n_macro_cells());
-  return dof_info[0].row_starts[macro_cell][2] > 0;
-}
-
-
-
-template <int dim, typename Number>
-inline
-unsigned int
-MatrixFree<dim,Number>::n_components_filled (const unsigned int macro_cell) const
-{
-  AssertIndexRange (macro_cell, n_macro_cells());
-  const unsigned int n_filled = dof_info[0].row_starts[macro_cell][2];
-  if (n_filled == 0)
-    return VectorizedArray<Number>::n_array_elements;
-  else
-    return n_filled;
-}
-
-
-
-template <int dim, typename Number>
-inline
-unsigned int
-MatrixFree<dim,Number>::get_dofs_per_cell(const unsigned int dof_index,
-                                          const unsigned int active_fe_index) const
-{
-  AssertIndexRange (dof_index, dof_info.size());
-  return dof_info[dof_index].dofs_per_cell[active_fe_index];
+  return dof_info[dof_handler_index].dofs_per_cell[active_fe_index];
 }
 
 
@@ -1407,11 +2040,10 @@ MatrixFree<dim,Number>::get_n_q_points(const unsigned int quad_index,
 template <int dim, typename Number>
 inline
 unsigned int
-MatrixFree<dim,Number>::get_dofs_per_face(const unsigned int dof_index,
+MatrixFree<dim,Number>::get_dofs_per_face(const unsigned int dof_handler_index,
                                           const unsigned int active_fe_index) const
 {
-  AssertIndexRange (dof_index, dof_info.size());
-  return dof_info[dof_index].dofs_per_face[active_fe_index];
+  return dof_info[dof_handler_index].dofs_per_face[active_fe_index];
 }
 
 
@@ -1431,10 +2063,9 @@ MatrixFree<dim,Number>::get_n_q_points_face(const unsigned int quad_index,
 template <int dim, typename Number>
 inline
 const IndexSet &
-MatrixFree<dim,Number>::get_locally_owned_set(const unsigned int dof_index) const
+MatrixFree<dim,Number>::get_locally_owned_set(const unsigned int dof_handler_index) const
 {
-  AssertIndexRange (dof_index, dof_info.size());
-  return dof_info[dof_index].vector_partitioner->locally_owned_range();
+  return dof_info[dof_handler_index].vector_partitioner->locally_owned_range();
 }
 
 
@@ -1442,32 +2073,45 @@ MatrixFree<dim,Number>::get_locally_owned_set(const unsigned int dof_index) cons
 template <int dim, typename Number>
 inline
 const IndexSet &
-MatrixFree<dim,Number>::get_ghost_set(const unsigned int dof_index) const
+MatrixFree<dim,Number>::get_ghost_set(const unsigned int dof_handler_index) const
 {
-  AssertIndexRange (dof_index, dof_info.size());
-  return dof_info[dof_index].vector_partitioner->ghost_indices();
+  return dof_info[dof_handler_index].vector_partitioner->ghost_indices();
 }
 
 
 
 template <int dim, typename Number>
 inline
-const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number>> &
-    MatrixFree<dim,Number>::get_shape_info (const unsigned int index_fe,
-                                            const unsigned int index_quad,
-                                            const unsigned int active_fe_index,
-                                            const unsigned int active_quad_index) const
+const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number> > &
+MatrixFree<dim,Number>::get_shape_info (const unsigned int dof_handler_index,
+                                        const unsigned int index_quad,
+                                        const unsigned int index_fe,
+                                        const unsigned int active_fe_index,
+                                        const unsigned int active_quad_index) const
 {
-  AssertIndexRange (index_fe, shape_info.size(0));
+  AssertIndexRange(dof_handler_index, dof_info.size());
+  const unsigned int ind = dof_info[dof_handler_index].global_base_element_offset+index_fe;
+  AssertIndexRange (ind, shape_info.size(0));
   AssertIndexRange (index_quad, shape_info.size(1));
   AssertIndexRange (active_fe_index, shape_info.size(2));
   AssertIndexRange (active_quad_index, shape_info.size(3));
-  return shape_info(index_fe, index_quad,
+  return shape_info(ind, index_quad,
                     active_fe_index, active_quad_index);
 }
 
 
 
+template <int dim, typename Number>
+inline
+const internal::MatrixFreeFunctions::FaceToCellTopology<VectorizedArray<Number>::n_array_elements> &
+MatrixFree<dim,Number>::get_face_info (const unsigned int macro_face) const
+{
+  AssertIndexRange(macro_face, face_info.faces.size());
+  return face_info.faces[macro_face];
+}
+
+
+
 template <int dim, typename Number>
 inline
 const Quadrature<dim> &
@@ -1492,6 +2136,47 @@ MatrixFree<dim,Number>::get_face_quadrature (const unsigned int quad_index,
 
 
 
+template <int dim, typename Number>
+inline
+unsigned int
+MatrixFree<dim,Number>::get_cell_category (const unsigned int macro_cell) const
+{
+  AssertIndexRange(0, dof_info.size());
+  // without hp adaptivity there is only a single category; check for the
+  // empty fe index vector before asserting the index range on it
+  if (dof_info[0].cell_active_fe_index.empty())
+    return 0;
+  AssertIndexRange(macro_cell, dof_info[0].cell_active_fe_index.size());
+  return dof_info[0].cell_active_fe_index[macro_cell];
+}
+
+
+
+template <int dim, typename Number>
+inline
+std::pair<unsigned int,unsigned int>
+MatrixFree<dim,Number>::get_face_category (const unsigned int macro_face) const
+{
+  AssertIndexRange(macro_face, face_info.faces.size());
+  if (dof_info[0].cell_active_fe_index.empty())
+    return std::make_pair(0U, 0U);
+
+  std::pair<unsigned int,unsigned int> result;
+  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements &&
+       face_info.faces[macro_face].cells_interior[v] != numbers::invalid_unsigned_int; ++v)
+    result.first = std::max(result.first,
+                            dof_info[0].cell_active_fe_index[face_info.faces[macro_face].cells_interior[v]]);
+  if (face_info.faces[macro_face].cells_exterior[0] != numbers::invalid_unsigned_int)
+    for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements &&
+         face_info.faces[macro_face].cells_exterior[v] != numbers::invalid_unsigned_int; ++v)
+      result.second = std::max(result.second,
+                               dof_info[0].cell_active_fe_index[face_info.faces[macro_face].cells_exterior[v]]);
+  else
+    result.second = numbers::invalid_unsigned_int;
+  return result;
+}
+
+
+
 template <int dim, typename Number>
 inline
 bool
@@ -1548,6 +2233,40 @@ MatrixFree<dim,Number>::release_scratch_data(const AlignedVector<VectorizedArray
 
 
 
+template <int dim,typename Number>
+AlignedVector<Number> *
+MatrixFree<dim,Number>::acquire_scratch_data_non_threadsafe() const
+{
+  for (typename std::list<std::pair<bool, AlignedVector<Number> > >::iterator
+       it=scratch_pad_non_threadsafe.begin(); it!=scratch_pad_non_threadsafe.end(); ++it)
+    if (it->first == false)
+      {
+        it->first = true;
+        return &it->second;
+      }
+  scratch_pad_non_threadsafe.push_front(std::make_pair(true,AlignedVector<Number>()));
+  return &scratch_pad_non_threadsafe.front().second;
+}
+
+
+
+template <int dim, typename Number>
+void
+MatrixFree<dim,Number>::release_scratch_data_non_threadsafe(const AlignedVector<Number> *scratch) const
+{
+  for (typename std::list<std::pair<bool, AlignedVector<Number> > >::iterator
+       it=scratch_pad_non_threadsafe.begin(); it!=scratch_pad_non_threadsafe.end(); ++it)
+    if (&it->second == scratch)
+      {
+        Assert(it->first == true, ExcInternalError());
+        it->first = false;
+        return;
+      }
+  AssertThrow(false, ExcMessage("Tried to release invalid scratch pad"));
+}
+
+
+
 // ------------------------------ reinit functions ---------------------------
 
 namespace internal
@@ -1748,125 +2467,499 @@ reinit(const Mapping<dim>                                    &mapping,
 
 
 
-// ------------------------------ implementation of cell_loop ---------------
+// ------------------------------ implementation of loops --------------------
 
 // internal helper functions that define how to call MPI data exchange
 // functions: for generic vectors, do nothing at all. For distributed vectors,
 // call update_ghost_values_start function and so on. If we have collections
 // of vectors, just do the individual functions of the components. In order to
-// keep ghost values consistent (whether we are in read or write mode). the
-// whole situation is a bit complicated by the fact that we need to treat
-// block vectors differently, which use some additional helper functions to
-// select the blocks and template magic.
+// keep ghost values consistent (whether we are in read or write mode), we
+// also reset the values at the end. The whole situation is a bit complicated
+// by the fact that we need to treat block vectors differently, which use some
+// additional helper functions to select the blocks and template magic.
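+//
+// As an illustrative sketch (a simplified view of the helpers below, not an
+// interface defined in this file), the exchange pattern for a single
+// LinearAlgebra::distributed::Vector<double> vec roughly corresponds to
+//
+//   vec.update_ghost_values_start(channel);  // start the MPI exchange
+//   ... do work that does not read ghost entries ...
+//   vec.update_ghost_values_finish();        // wait for the exchange to finish
+//   ... do work that reads ghost entries ...
+//   vec.zero_out_ghosts();                   // reset ghosts if they were not
+//                                            // set before entering the loop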
 namespace internal
 {
+  template <int dim, typename Number>
+  struct VectorDataExchange
+  {
+    // An arbitrary shift for communication to reduce the risk of accidental
+    // interaction with other open communications that a user program might
+    // set up
+    static constexpr unsigned int channel_shift = 103;
+
+    VectorDataExchange (const dealii::MatrixFree<dim,Number> &matrix_free,
+                        const typename dealii::MatrixFree<dim,Number>::DataAccessOnFaces vector_face_access,
+                        const unsigned int n_components)
+      :
+      matrix_free (matrix_free),
+      vector_face_access (matrix_free.get_task_info().face_partition_data.empty() ?
+                          dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified :
+                          vector_face_access),
+      ghosts_were_set (false)
+#ifdef DEAL_II_WITH_MPI
+      , tmp_data(n_components),
+      requests(n_components)
+#endif
+    {
+      (void)n_components;
+      if (this->vector_face_access != dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified)
+        for (unsigned int c=0; c<matrix_free.n_components(); ++c)
+          AssertDimension(matrix_free.get_dof_info(c).vector_partitioner_face_variants.size(), 3);
+    }
+
+    ~VectorDataExchange ()
+    {
+#ifdef DEAL_II_WITH_MPI
+      for (unsigned int i=0; i<tmp_data.size(); ++i)
+        if (tmp_data[i] != nullptr)
+          matrix_free.release_scratch_data_non_threadsafe(tmp_data[i]);
+#endif
+    }
+
+    unsigned int find_vector_in_mf (const LinearAlgebra::distributed::Vector<Number> &vec,
+                                    const bool check_global_compatibility = true) const
+    {
+      unsigned int mf_component = numbers::invalid_unsigned_int;
+      (void)check_global_compatibility;
+      for (unsigned int c=0; c<matrix_free.n_components(); ++c)
+        if (
+#ifdef DEBUG
+          check_global_compatibility
+          ?
+          vec.get_partitioner()->is_globally_compatible(*matrix_free.get_dof_info(c).vector_partitioner)
+          :
+#endif
+          vec.get_partitioner()->is_compatible(*matrix_free.get_dof_info(c).vector_partitioner))
+          {
+            mf_component = c;
+            break;
+          }
+      return mf_component;
+    }
+
+    const Utilities::MPI::Partitioner &
+    get_partitioner(const unsigned int mf_component) const
+    {
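+      // Pick among the three partitioner variants precomputed in DoFInfo:
+      // entry 0 for DataAccessOnFaces::none, entry 1 for
+      // DataAccessOnFaces::values, and entry 2 for the remaining cases.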
+      AssertDimension(matrix_free.get_dof_info(mf_component).vector_partitioner_face_variants.size(),3);
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::none)
+        return *matrix_free.get_dof_info(mf_component).vector_partitioner_face_variants[0];
+      else if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::values)
+        return *matrix_free.get_dof_info(mf_component).vector_partitioner_face_variants[1];
+      else
+        return *matrix_free.get_dof_info(mf_component).vector_partitioner_face_variants[2];
+    }
+
+    void update_ghost_values_start(const unsigned int component_in_block_vector,
+                                   const LinearAlgebra::distributed::Vector<Number> &vec)
+    {
+      (void)component_in_block_vector;
+      bool ghosts_set = vec.has_ghost_elements();
+      if (ghosts_set)
+        ghosts_were_set = true;
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.update_ghost_values_start(component_in_block_vector + channel_shift);
+      else
+        {
+#ifdef DEAL_II_WITH_MPI
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          if (&get_partitioner(mf_component) == matrix_free.get_dof_info(mf_component)
+              .vector_partitioner.get())
+            {
+              vec.update_ghost_values_start(component_in_block_vector + channel_shift);
+              return;
+            }
+
+          const Utilities::MPI::Partitioner &part = get_partitioner(mf_component);
+          if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+            return;
+
+          tmp_data[component_in_block_vector] = matrix_free.acquire_scratch_data_non_threadsafe();
+          tmp_data[component_in_block_vector]->resize_fast(part.n_import_indices());
+          AssertDimension(requests.size(), tmp_data.size());
+
+          part.export_to_ghosted_array_start
+          (component_in_block_vector+channel_shift,
+           ArrayView<const Number>(vec.begin(), part.local_size()),
+           ArrayView<Number>(tmp_data[component_in_block_vector]->begin(),
+                             part.n_import_indices()),
+           ArrayView<Number>(const_cast<Number *>(vec.begin()) +
+                             vec.get_partitioner()->local_size(),
+                             vec.get_partitioner()->n_ghost_indices()),
+           this->requests[component_in_block_vector]);
+#endif
+        }
+    }
+
+    void update_ghost_values_finish (const unsigned int component_in_block_vector,
+                                     const LinearAlgebra::distributed::Vector<Number> &vec)
+    {
+      (void)component_in_block_vector;
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.update_ghost_values_finish();
+      else
+        {
+#ifdef DEAL_II_WITH_MPI
+
+          AssertIndexRange(component_in_block_vector, tmp_data.size());
+          AssertDimension(requests.size(), tmp_data.size());
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          const Utilities::MPI::Partitioner &part = get_partitioner(mf_component);
+          if (&part == matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            {
+              vec.update_ghost_values_finish();
+              return;
+            }
+
+          if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+            return;
+
+          part.export_to_ghosted_array_finish
+          (ArrayView<Number>(const_cast<Number *>(vec.begin()) +
+                             vec.get_partitioner()->local_size(),
+                             vec.get_partitioner()->n_ghost_indices()),
+           this->requests[component_in_block_vector]);
+
+          matrix_free.release_scratch_data_non_threadsafe(tmp_data[component_in_block_vector]);
+          tmp_data[component_in_block_vector] = nullptr;
+#endif
+        }
+    }
+
+    void compress_start(const unsigned int component_in_block_vector,
+                        LinearAlgebra::distributed::Vector<Number> &vec)
+    {
+      (void)component_in_block_vector;
+      Assert(vec.has_ghost_elements() == false, ExcNotImplemented());
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.compress_start(component_in_block_vector + channel_shift);
+      else
+        {
+#ifdef DEAL_II_WITH_MPI
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          const Utilities::MPI::Partitioner &part = get_partitioner(mf_component);
+          if (&part == matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            {
+              vec.compress_start(component_in_block_vector + channel_shift);
+              return;
+            }
+
+          if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+            return;
+
+          tmp_data[component_in_block_vector] = matrix_free.acquire_scratch_data_non_threadsafe();
+          tmp_data[component_in_block_vector]->resize_fast(part.n_import_indices());
+          AssertDimension(requests.size(), tmp_data.size());
+
+          part.import_from_ghosted_array_start
+          (dealii::VectorOperation::add,
+           component_in_block_vector+channel_shift,
+           ArrayView<Number>(vec.begin()+vec.get_partitioner()->local_size(),
+                             vec.get_partitioner()->n_ghost_indices()),
+           ArrayView<Number>(tmp_data[component_in_block_vector]->begin(),
+                             part.n_import_indices()),
+           this->requests[component_in_block_vector]);
+#endif
+        }
+    }
+
+    void compress_finish (const unsigned int component_in_block_vector,
+                          LinearAlgebra::distributed::Vector<Number> &vec)
+    {
+      (void)component_in_block_vector;
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.compress_finish(dealii::VectorOperation::add);
+      else
+        {
+#ifdef DEAL_II_WITH_MPI
+          AssertIndexRange(component_in_block_vector, tmp_data.size());
+          AssertDimension(requests.size(), tmp_data.size());
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+
+          const Utilities::MPI::Partitioner &part = get_partitioner(mf_component);
+          if (&part == matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            {
+              vec.compress_finish(dealii::VectorOperation::add);
+              return;
+            }
+
+          if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+            return;
+
+          part.import_from_ghosted_array_finish
+          (VectorOperation::add,
+           ArrayView<const Number>(tmp_data[component_in_block_vector]->begin(),
+                                   part.n_import_indices()),
+           ArrayView<Number>(vec.begin(), part.local_size()),
+           ArrayView<Number>(vec.begin()+vec.get_partitioner()->local_size(),
+                             vec.get_partitioner()->n_ghost_indices()),
+           this->requests[component_in_block_vector]);
+
+          matrix_free.release_scratch_data_non_threadsafe(tmp_data[component_in_block_vector]);
+          tmp_data[component_in_block_vector] = nullptr;
+#endif
+        }
+    }
+
+    void reset_ghost_values(const LinearAlgebra::distributed::Vector<Number> &vec) const
+    {
+      if (ghosts_were_set == true)
+        return;
+
+      if (vector_face_access == dealii::MatrixFree<dim,Number>::DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec).zero_out_ghosts();
+      else
+        {
+#ifdef DEAL_II_WITH_MPI
+          AssertDimension(requests.size(), tmp_data.size());
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          const Utilities::MPI::Partitioner &part = get_partitioner(mf_component);
+          if (&part == matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec).zero_out_ghosts();
+          else if (part.n_ghost_indices() > 0)
+            {
+              for (std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+                   my_ghosts = part.ghost_indices_within_larger_ghost_set().begin();
+                   my_ghosts != part.ghost_indices_within_larger_ghost_set().end();
+                   ++my_ghosts)
+                for (unsigned int j=my_ghosts->first; j<my_ghosts->second; j++)
+                  {
+                    const_cast<LinearAlgebra::distributed::Vector<Number> &>(vec)
+                    .local_element(j+part.local_size()) = 0.;
+                  }
+            }
+#endif
+        }
+    }
+
+    void zero_vector_region(const unsigned int range_index,
+                            LinearAlgebra::distributed::Vector<Number> &vec) const
+    {
+      if (range_index == numbers::invalid_unsigned_int)
+        vec = 0;
+      else
+        {
+          const unsigned int mf_component = find_vector_in_mf(vec, false);
+          const internal::MatrixFreeFunctions::DoFInfo &dof_info =
+            matrix_free.get_dof_info(mf_component);
+          Assert(dof_info.vector_zero_range_list_index.empty() == false,
+                 ExcNotInitialized());
+
+          Assert(vec.partitioners_are_compatible(*dof_info.vector_partitioner),
+                 ExcInternalError());
+          AssertIndexRange(range_index, dof_info.vector_zero_range_list_index.size()-1);
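+          // Zero only the chunks assigned to this range_index: every entry in
+          // vector_zero_range_list denotes a chunk of chunk_size_zero_vector
+          // values, clipped to the locally stored range including ghosts.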
+          for (unsigned int id=dof_info.vector_zero_range_list_index[range_index];
+               id != dof_info.vector_zero_range_list_index[range_index+1]; ++id)
+            {
+              const unsigned int start_pos = dof_info.vector_zero_range_list[id]*
+                                             internal::MatrixFreeFunctions::DoFInfo::chunk_size_zero_vector;
+              const unsigned int end_pos = std::min((dof_info.vector_zero_range_list[id]+1)*
+                                                    internal::MatrixFreeFunctions::DoFInfo::chunk_size_zero_vector,
+                                                    dof_info.vector_partitioner->local_size()+
+                                                    dof_info.vector_partitioner->n_ghost_indices());
+              std::memset(vec.begin()+start_pos, 0, (end_pos-start_pos)*sizeof(Number));
+            }
+        }
+    }
+
+    const dealii::MatrixFree<dim,Number> &matrix_free;
+    const typename dealii::MatrixFree<dim,Number>::DataAccessOnFaces vector_face_access;
+    bool ghosts_were_set;
+#ifdef DEAL_II_WITH_MPI
+    std::vector<AlignedVector<Number> *> tmp_data;
+    std::vector<std::vector<MPI_Request> > requests;
+#endif
+  };
+
   template <typename VectorStruct>
-  bool update_ghost_values_start_block (const VectorStruct &vec,
-                                        const unsigned int channel,
-                                        std::integral_constant<bool, true>);
-  template <typename VectorStruct>
-  void reset_ghost_values_block (const VectorStruct &vec,
-                                 const bool          zero_out_ghosts,
-                                 std::integral_constant<bool, true>);
+  unsigned int n_components (const VectorStruct &vec);
+
   template <typename VectorStruct>
-  void update_ghost_values_finish_block (const VectorStruct &vec,
-                                         std::integral_constant<bool, true>);
+  unsigned int n_components_block (const VectorStruct &vec,
+                                   std::integral_constant<bool,true>)
+  {
+    unsigned int components = 0;
+    for (unsigned int bl=0; bl<vec.n_blocks(); ++bl)
+      components += n_components(vec.block(bl));
+    return components;
+  }
+
   template <typename VectorStruct>
-  void compress_start_block (VectorStruct       &vec,
-                             const unsigned int channel,
-                             std::integral_constant<bool, true>);
+  unsigned int n_components_block (const VectorStruct &,
+                                   std::integral_constant<bool,false>)
+  {
+    return 1;
+  }
+
   template <typename VectorStruct>
-  void compress_finish_block (VectorStruct &vec,
-                              std::integral_constant<bool, true>);
+  unsigned int n_components (const VectorStruct &vec)
+  {
+    return n_components_block(vec, std::integral_constant<bool,IsBlockVector<VectorStruct>::value>());
+  }
 
   template <typename VectorStruct>
-  bool update_ghost_values_start_block (const VectorStruct &,
-                                        const unsigned int,
-                                        std::integral_constant<bool, false>)
+  inline
+  unsigned int n_components (const std::vector<VectorStruct> &vec)
   {
-    return false;
+    unsigned int components = 0;
+    for (unsigned int comp=0; comp<vec.size(); comp++)
+      components += n_components_block(vec[comp], std::integral_constant<bool,IsBlockVector<VectorStruct>::value>());
+    return components;
   }
+
   template <typename VectorStruct>
+  inline
+  unsigned int n_components (const std::vector<VectorStruct *> &vec)
+  {
+    unsigned int components = 0;
+    for (unsigned int comp=0; comp<vec.size(); comp++)
+      components += n_components_block(*vec[comp], std::integral_constant<bool,IsBlockVector<VectorStruct>::value>());
+    return components;
+  }
+
+  template <int dim, typename VectorStruct, typename Number>
+  void update_ghost_values_start_block (const VectorStruct &vec,
+                                        const unsigned int channel,
+                                        std::integral_constant<bool,true>,
+                                        VectorDataExchange<dim,Number> &exchanger);
+  template <int dim, typename VectorStruct, typename Number>
+  void reset_ghost_values_block (const VectorStruct &vec,
+                                 std::integral_constant<bool,true>,
+                                 VectorDataExchange<dim,Number> &exchanger);
+  template <int dim, typename VectorStruct, typename Number>
+  void update_ghost_values_finish_block (const VectorStruct &vec,
+                                         const unsigned int channel,
+                                         std::integral_constant<bool,true>,
+                                         VectorDataExchange<dim,Number> &exchanger);
+  template <int dim, typename VectorStruct, typename Number>
+  void compress_start_block (const VectorStruct &vec,
+                             const unsigned int channel,
+                             std::integral_constant<bool,true>,
+                             VectorDataExchange<dim,Number> &exchanger);
+  template <int dim, typename VectorStruct, typename Number>
+  void compress_finish_block (const VectorStruct &vec,
+                              const unsigned int channel,
+                              std::integral_constant<bool,true>,
+                              VectorDataExchange<dim,Number> &exchanger);
+  template <int dim, typename VectorStruct, typename Number>
+  void zero_vector_region_block (const unsigned int range_index,
+                                 VectorStruct &,
+                                 std::integral_constant<bool,true>,
+                                 VectorDataExchange<dim,Number> &);
+
+  template <int dim, typename VectorStruct, typename Number>
+  void update_ghost_values_start_block (const VectorStruct &,
+                                        const unsigned int ,
+                                        std::integral_constant<bool,false>,
+                                        VectorDataExchange<dim,Number> &)
+  {}
+  template <int dim, typename VectorStruct, typename Number>
   void reset_ghost_values_block (const VectorStruct &,
-                                 const bool,
-                                 std::integral_constant<bool, false>)
+                                 std::integral_constant<bool,false>,
+                                 VectorDataExchange<dim,Number> &)
   {}
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   void update_ghost_values_finish_block (const VectorStruct &,
-                                         std::integral_constant<bool, false>)
+                                         const unsigned int ,
+                                         std::integral_constant<bool,false>,
+                                         VectorDataExchange<dim,Number> &)
   {}
-  template <typename VectorStruct>
-  void compress_start_block (VectorStruct &,
-                             const unsigned int,
-                             std::integral_constant<bool, false>)
+  template <int dim, typename VectorStruct, typename Number>
+  void compress_start_block (const VectorStruct &,
+                             const unsigned int ,
+                             std::integral_constant<bool,false>,
+                             VectorDataExchange<dim,Number> &)
   {}
-  template <typename VectorStruct>
-  void compress_finish_block (VectorStruct &,
-                              std::integral_constant<bool, false>)
+  template <int dim, typename VectorStruct, typename Number>
+  void compress_finish_block (const VectorStruct &,
+                              const unsigned int ,
+                              std::integral_constant<bool,false>,
+                              VectorDataExchange<dim,Number> &)
   {}
+  template <int dim, typename VectorStruct, typename Number>
+  void zero_vector_region_block (const unsigned int range_index,
+                                 VectorStruct &vec,
+                                 std::integral_constant<bool,false>,
+                                 VectorDataExchange<dim,Number> &)
+  {
+    if (range_index == 0 || range_index == numbers::invalid_unsigned_int)
+      vec = 0;
+  }
 
 
 
-  // returns true if the vector was in a state without ghost values before,
-  // i.e., we need to zero out ghosts in the very end
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  bool update_ghost_values_start (const VectorStruct &vec,
+  void update_ghost_values_start (const VectorStruct &vec,
+                                  VectorDataExchange<dim,Number> &exchanger,
                                   const unsigned int channel = 0)
   {
-    return
-      update_ghost_values_start_block(vec, channel,
-                                      std::integral_constant<bool, IsBlockVector<VectorStruct>::value>());
+    update_ghost_values_start_block(vec, channel,
+                                    std::integral_constant<bool,
+                                    IsBlockVector<VectorStruct>::value>(),
+                                    exchanger);
   }
 
 
 
-  template <typename Number>
+  template <int dim, typename Number, typename Number2>
   inline
-  bool update_ghost_values_start (const LinearAlgebra::distributed::Vector<Number> &vec,
-                                  const unsigned int                  channel = 0)
+  void update_ghost_values_start (const LinearAlgebra::distributed::Vector<Number> &vec,
+                                  VectorDataExchange<dim,Number2> &exchanger,
+                                  const unsigned int channel = 0)
   {
-    bool return_value = !vec.has_ghost_elements();
-    vec.update_ghost_values_start(channel);
-    return return_value;
+    exchanger.update_ghost_values_start(channel, vec);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  bool update_ghost_values_start (const std::vector<VectorStruct> &vec)
+  void update_ghost_values_start (const std::vector<VectorStruct> &vec,
+                                  VectorDataExchange<dim,Number> &exchanger)
   {
-    bool return_value = false;
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      return_value = update_ghost_values_start(vec[comp], comp);
-    return return_value;
+      {
+        update_ghost_values_start(vec[comp], exchanger, component_index);
+        component_index += n_components(vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  bool update_ghost_values_start (const std::vector<VectorStruct *> &vec)
+  void update_ghost_values_start (const std::vector<VectorStruct *> &vec,
+                                  VectorDataExchange<dim,Number> &exchanger)
   {
-    bool return_value = false;
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      return_value = update_ghost_values_start(*vec[comp], comp);
-    return return_value;
+      {
+        update_ghost_values_start(*vec[comp], exchanger, component_index);
+        component_index += n_components(*vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  bool update_ghost_values_start_block (const VectorStruct &vec,
+  void update_ghost_values_start_block (const VectorStruct &vec,
                                         const unsigned int channel,
-                                        std::integral_constant<bool, true>)
+                                        std::integral_constant<bool,true>,
+                                        VectorDataExchange<dim,Number> &exchanger)
   {
-    bool return_value = false;
     for (unsigned int i=0; i<vec.n_blocks(); ++i)
-      return_value = update_ghost_values_start(vec.block(i), channel+509*i);
-    return return_value;
+      update_ghost_values_start(vec.block(i), exchanger, channel+i);
   }
 
 
@@ -1874,215 +2967,328 @@ namespace internal
   // if the input vector did not have ghosts imported, clear them here again
   // so that subsequent operations, e.g. in linear solvers, do not keep
   // working with ghosts all the time
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void reset_ghost_values (const VectorStruct &vec,
-                           const bool          zero_out_ghosts)
+                           VectorDataExchange<dim,Number> &exchanger)
   {
-    reset_ghost_values_block(vec, zero_out_ghosts,
-                             std::integral_constant<bool, IsBlockVector<VectorStruct>::value>());
+    reset_ghost_values_block(vec,
+                             std::integral_constant<bool,
+                             IsBlockVector<VectorStruct>::value>(),
+                             exchanger);
   }
 
 
 
-  template <typename Number>
+  template <int dim, typename Number, typename Number2>
   inline
   void reset_ghost_values (const LinearAlgebra::distributed::Vector<Number> &vec,
-                           const bool zero_out_ghosts)
+                           VectorDataExchange<dim,Number2> &exchanger)
   {
-    if (zero_out_ghosts)
-      const_cast<LinearAlgebra::distributed::Vector<Number>&>(vec).zero_out_ghosts();
+    exchanger.reset_ghost_values(vec);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void reset_ghost_values (const std::vector<VectorStruct> &vec,
-                           const bool zero_out_ghosts)
+                           VectorDataExchange<dim,Number> &exchanger)
   {
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      reset_ghost_values(vec[comp], zero_out_ghosts);
+      reset_ghost_values(vec[comp], exchanger);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void reset_ghost_values (const std::vector<VectorStruct *> &vec,
-                           const bool zero_out_ghosts)
+                           VectorDataExchange<dim,Number> &exchanger)
   {
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      reset_ghost_values(*vec[comp], zero_out_ghosts);
+      reset_ghost_values(*vec[comp], exchanger);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void reset_ghost_values_block (const VectorStruct &vec,
-                                 const bool          zero_out_ghosts,
-                                 std::integral_constant<bool, true>)
+                                 std::integral_constant<bool,true>,
+                                 VectorDataExchange<dim,Number> &exchanger)
   {
     for (unsigned int i=0; i<vec.n_blocks(); ++i)
-      reset_ghost_values(vec.block(i), zero_out_ghosts);
+      reset_ghost_values(vec.block(i), exchanger);
   }
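 
Taken together, update_ghost_values_start/finish and reset_ghost_values reproduce, through the exchanger, the usual ghost life cycle of a parallel vector: import ghost values before the kernels run and restore the previous ghost state afterwards, so that downstream code does not silently keep working with ghosted data. A small standalone sketch of that cycle on a LinearAlgebra::distributed::Vector; the index layout is made up for illustration and MatrixFree is not involved.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/la_parallel_vector.h>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // each rank owns four entries and ghosts the first entry of the next rank
  IndexSet owned(4 * n_ranks), ghosted(4 * n_ranks);
  owned.add_range(4 * rank, 4 * (rank + 1));
  if (rank + 1 < n_ranks)
    ghosted.add_index(4 * (rank + 1));

  LinearAlgebra::distributed::Vector<double> src(owned, ghosted, MPI_COMM_WORLD);

  // this is the sequence the loop performs around the cell/face kernels:
  const bool ghosts_were_set = src.has_ghost_elements(); // false at this point
  src.update_ghost_values();   // import ghost entries of the source vector

  // ... kernels read locally owned and ghosted entries of src ...

  if (ghosts_were_set == false)
    src.zero_out_ghosts();     // hand src back in the state it arrived in
}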
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void update_ghost_values_finish (const VectorStruct &vec)
+  void update_ghost_values_finish (const VectorStruct &vec,
+                                   VectorDataExchange<dim,Number> &exchanger,
+                                   const unsigned int channel = 0)
   {
-    update_ghost_values_finish_block(vec,
-                                     std::integral_constant<bool, IsBlockVector<VectorStruct>::value>());
+    update_ghost_values_finish_block(vec, channel,
+                                     std::integral_constant<bool,
+                                     IsBlockVector<VectorStruct>::value>(),
+                                     exchanger);
   }
 
 
 
-  template <typename Number>
+  template <int dim, typename Number, typename Number2>
   inline
-  void update_ghost_values_finish (const LinearAlgebra::distributed::Vector<Number> &vec)
+  void update_ghost_values_finish (const LinearAlgebra::distributed::Vector<Number> &vec,
+                                   VectorDataExchange<dim,Number2> &exchanger,
+                                   const unsigned int channel = 0)
   {
-    vec.update_ghost_values_finish();
+    exchanger.update_ghost_values_finish(channel, vec);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void update_ghost_values_finish (const std::vector<VectorStruct> &vec)
+  void update_ghost_values_finish (const std::vector<VectorStruct> &vec,
+                                   VectorDataExchange<dim,Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      update_ghost_values_finish(vec[comp]);
+      {
+        update_ghost_values_finish(vec[comp], exchanger, component_index);
+        component_index += n_components(vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void update_ghost_values_finish (const std::vector<VectorStruct *> &vec)
+  void update_ghost_values_finish (const std::vector<VectorStruct *> &vec,
+                                   VectorDataExchange<dim,Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      update_ghost_values_finish(*vec[comp]);
+      {
+        update_ghost_values_finish(*vec[comp], exchanger, component_index);
+        component_index += n_components(*vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void update_ghost_values_finish_block (const VectorStruct &vec,
-                                         std::integral_constant<bool, true>)
+                                         const unsigned int channel,
+                                         std::integral_constant<bool,true>,
+                                         VectorDataExchange<dim,Number> &exchanger)
   {
     for (unsigned int i=0; i<vec.n_blocks(); ++i)
-      update_ghost_values_finish(vec.block(i));
+      update_ghost_values_finish(vec.block(i), exchanger, channel+i);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void compress_start (VectorStruct &vec,
+                       VectorDataExchange<dim, Number> &exchanger,
                        const unsigned int channel = 0)
   {
     compress_start_block (vec, channel,
-                          std::integral_constant<bool, IsBlockVector<VectorStruct>::value>());
+                          std::integral_constant<bool,
+                          IsBlockVector<VectorStruct>::value>(),
+                          exchanger);
   }
 
 
 
-  template <typename Number>
+  template <int dim, typename Number, typename Number2>
   inline
   void compress_start (LinearAlgebra::distributed::Vector<Number> &vec,
+                       VectorDataExchange<dim,Number2> &exchanger,
                        const unsigned int           channel = 0)
   {
-    vec.compress_start(channel);
+    exchanger.compress_start(channel, vec);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void compress_start (std::vector<VectorStruct> &vec)
+  void compress_start (std::vector<VectorStruct> &vec,
+                       VectorDataExchange<dim, Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      compress_start (vec[comp], comp);
+      {
+        compress_start(vec[comp], exchanger, component_index);
+        component_index += n_components(vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void compress_start (std::vector<VectorStruct *> &vec)
+  void compress_start (std::vector<VectorStruct *> &vec,
+                       VectorDataExchange<dim, Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      compress_start (*vec[comp], comp);
+      {
+        compress_start(*vec[comp], exchanger, component_index);
+        component_index += n_components(*vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void compress_start_block (VectorStruct      &vec,
                              const unsigned int channel,
-                             std::integral_constant<bool, true>)
+                             std::integral_constant<bool,true>,
+                             VectorDataExchange<dim, Number> &exchanger)
   {
     for (unsigned int i=0; i<vec.n_blocks(); ++i)
-      compress_start(vec.block(i), channel + 500*i);
+      compress_start(vec.block(i), exchanger, channel+i);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void compress_finish (VectorStruct &vec)
+  void compress_finish (VectorStruct &vec,
+                        VectorDataExchange<dim, Number> &exchanger,
+                        const unsigned int channel = 0)
   {
-    compress_finish_block(vec,
-                          std::integral_constant<bool, IsBlockVector<VectorStruct>::value>());
+    compress_finish_block(vec, channel,
+                          std::integral_constant<bool,
+                          IsBlockVector<VectorStruct>::value>(),
+                          exchanger);
   }
 
 
 
-  template <typename Number>
+  template <int dim, typename Number, typename Number2>
   inline
-  void compress_finish (LinearAlgebra::distributed::Vector<Number> &vec)
+  void compress_finish (LinearAlgebra::distributed::Vector<Number> &vec,
+                        VectorDataExchange<dim, Number2> &exchanger,
+                        const unsigned int channel = 0)
   {
-    vec.compress_finish(::dealii::VectorOperation::add);
+    exchanger.compress_finish(channel, vec);
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void compress_finish (std::vector<VectorStruct> &vec)
+  void compress_finish (std::vector<VectorStruct> &vec,
+                        VectorDataExchange<dim, Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      compress_finish(vec[comp]);
+      {
+        compress_finish(vec[comp], exchanger, component_index);
+        component_index += n_components(vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
-  void compress_finish (std::vector<VectorStruct *> &vec)
+  void compress_finish (std::vector<VectorStruct *> &vec,
+                        VectorDataExchange<dim, Number> &exchanger)
   {
+    unsigned int component_index = 0;
     for (unsigned int comp=0; comp<vec.size(); comp++)
-      compress_finish(*vec[comp]);
+      {
+        compress_finish(*vec[comp], exchanger, component_index);
+        component_index += n_components(*vec[comp]);
+      }
   }
 
 
 
-  template <typename VectorStruct>
+  template <int dim, typename VectorStruct, typename Number>
   inline
   void compress_finish_block (VectorStruct &vec,
-                              std::integral_constant<bool, true>)
+                              const unsigned int channel,
+                              std::integral_constant<bool,true>,
+                              VectorDataExchange<dim, Number> &exchanger)
   {
     for (unsigned int i=0; i<vec.n_blocks(); ++i)
-      compress_finish(vec.block(i));
+      compress_finish(vec.block(i), exchanger, channel+i);
+  }
+
+
+
+  template <int dim, typename VectorStruct, typename Number>
+  inline
+  void zero_vector_region (const unsigned int range_index,
+                           VectorStruct &vec,
+                           VectorDataExchange<dim, Number> &exchanger)
+  {
+    zero_vector_region_block(range_index, vec,
+                             std::integral_constant<bool,
+                             IsBlockVector<VectorStruct>::value>(),
+                             exchanger);
+  }
+
+
+
+  template <int dim, typename Number, typename Number2>
+  inline
+  void zero_vector_region (const unsigned int range_index,
+                           LinearAlgebra::distributed::Vector<Number> &vec,
+                           VectorDataExchange<dim, Number2> &exchanger)
+  {
+    exchanger.zero_vector_region(range_index, vec);
+  }
+
+
+
+  template <int dim, typename VectorStruct, typename Number>
+  inline
+  void zero_vector_region (const unsigned int range_index,
+                           std::vector<VectorStruct> &vec,
+                           VectorDataExchange<dim, Number> &exchanger)
+  {
+    for (unsigned int comp=0; comp<vec.size(); comp++)
+      zero_vector_region(range_index, vec[comp], exchanger);
+  }
+
+
+
+  template <int dim, typename VectorStruct, typename Number>
+  inline
+  void zero_vector_region (const unsigned int range_index,
+                           std::vector<VectorStruct *> &vec,
+                           VectorDataExchange<dim, Number> &exchanger)
+  {
+    for (unsigned int comp=0; comp<vec.size(); comp++)
+      zero_vector_region(range_index, *vec[comp], exchanger);
   }
 
 
 
+  template <int dim, typename VectorStruct, typename Number>
+  inline
+  void zero_vector_region_block (const unsigned int range_index,
+                                 VectorStruct &vec,
+                                 std::integral_constant<bool,true>,
+                                 VectorDataExchange<dim, Number> &exchanger)
+  {
+    for (unsigned int i=0; i<vec.n_blocks(); ++i)
+      zero_vector_region(range_index, vec.block(i), exchanger);
+  }
+
+
 
   namespace MatrixFreeFunctions
   {
@@ -2132,7 +3338,11 @@ namespace internal
               const Container &container,
               function_type cell_function,
               function_type face_function,
-              function_type boundary_function)
+              function_type boundary_function,
+              const typename MF::DataAccessOnFaces src_vector_face_access =
+                MF::DataAccessOnFaces::none,
+              const typename MF::DataAccessOnFaces dst_vector_face_access =
+                MF::DataAccessOnFaces::none)
       :
       matrix_free (matrix_free),
       container (const_cast<Container &>(container)),
@@ -2141,13 +3351,16 @@ namespace internal
       boundary_function (boundary_function),
       src (src),
       dst (dst),
-      ghosts_were_set(false),
+      src_data_exchanger (matrix_free, src_vector_face_access,
+                          n_components(src)),
+      dst_data_exchanger (matrix_free, dst_vector_face_access,
+                          n_components(dst)),
       src_and_dst_are_same (PointerComparison::equal(&src, &dst)),
-      zero_dst_vector_setting(zero_dst_vector_setting  &&!src_and_dst_are_same)
+      zero_dst_vector_setting(zero_dst_vector_setting && !src_and_dst_are_same)
     {}
 
     // Runs the cell work. If no function is given, nothing is done
-    virtual void cell(const std::pair<unsigned int,unsigned int> &cell_range) override
+    virtual void cell(const std::pair<unsigned int,unsigned int> &cell_range)
     {
       if (cell_function != nullptr && cell_range.second > cell_range.first)
         (container.*cell_function)(matrix_free, this->dst, this->src, cell_range);
@@ -2155,7 +3368,7 @@ namespace internal
 
     // Runs the assembler on interior faces. If no function is given, nothing
     // is done
-    virtual void face(const std::pair<unsigned int,unsigned int> &face_range) override
+    virtual void face(const std::pair<unsigned int,unsigned int> &face_range)
     {
       if (face_function != nullptr && face_range.second > face_range.first)
         (container.*face_function)(matrix_free, this->dst, this->src, face_range);
@@ -2163,7 +3376,7 @@ namespace internal
 
     // Runs the assembler on boundary faces. If no function is given, nothing
     // is done
-    virtual void boundary(const std::pair<unsigned int,unsigned int> &face_range) override
+    virtual void boundary(const std::pair<unsigned int,unsigned int> &face_range)
     {
       if (boundary_function != nullptr && face_range.second > face_range.first)
         (container.*boundary_function)(matrix_free, this->dst, this->src, face_range);
@@ -2175,38 +3388,38 @@ namespace internal
     // the problem that reading from a vector that we also write to is usually
     // not intended in case there is overlap, but this is up to the
     // application code to decide and we cannot catch this case here).
-    virtual void vector_update_ghosts_start() override
+    virtual void vector_update_ghosts_start()
     {
       if (!src_and_dst_are_same)
-        ghosts_were_set = internal::update_ghost_values_start(src);
+        internal::update_ghost_values_start(src, src_data_exchanger);
     }
 
     // Finishes the communication for the update ghost values operation
-    virtual void vector_update_ghosts_finish() override
+    virtual void vector_update_ghosts_finish()
     {
       if (!src_and_dst_are_same)
-        internal::update_ghost_values_finish(src);
+        internal::update_ghost_values_finish(src, src_data_exchanger);
     }
 
     // Starts the communication for the vector compress operation
-    virtual void vector_compress_start() override
+    virtual void vector_compress_start()
     {
-      internal::compress_start(dst);
+      internal::compress_start(dst, dst_data_exchanger);
     }
 
     // Finishes the communication for the vector compress operation
-    virtual void vector_compress_finish() override
+    virtual void vector_compress_finish()
     {
-      internal::compress_finish(dst);
+      internal::compress_finish(dst, dst_data_exchanger);
       if (!src_and_dst_are_same)
-        internal::reset_ghost_values(src, !ghosts_were_set);
+        internal::reset_ghost_values(src, src_data_exchanger);
     }
 
     // Zeros the given input vector
-    virtual void zero_dst_vector_range(const unsigned int /*range_index*/) override
+    virtual void zero_dst_vector_range(const unsigned int range_index)
     {
-      // currently not implemented
-      (void)zero_dst_vector_setting;
+      if (zero_dst_vector_setting)
+        internal::zero_vector_region(range_index, dst, dst_data_exchanger);
     }
 
   private:
@@ -2218,7 +3431,8 @@ namespace internal
 
     const InVector &src;
     OutVector      &dst;
-    bool            ghosts_were_set;
+    VectorDataExchange<MF::dimension,typename MF::value_type> src_data_exchanger;
+    VectorDataExchange<MF::dimension,typename MF::value_type> dst_data_exchanger;
     const bool      src_and_dst_are_same;
     const bool      zero_dst_vector_setting;
   };
@@ -2285,12 +3499,13 @@ MatrixFree<dim, Number>::cell_loop
                            const std::pair<unsigned int,
                            unsigned int> &)> &cell_operation,
  OutVector       &dst,
- const InVector  &src) const
+ const InVector  &src,
+ const bool       zero_dst_vector) const
 {
   typedef internal::MFClassWrapper<MatrixFree<dim, Number>, InVector, OutVector> Wrapper;
   Wrapper wrap (cell_operation, nullptr, nullptr);
   internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, Wrapper, true>
-  worker(*this, src, dst, false, wrap, &Wrapper::cell_integrator,
+  worker(*this, src, dst, zero_dst_vector, wrap, &Wrapper::cell_integrator,
          &Wrapper::face_integrator, &Wrapper::boundary_integrator);
 
   task_info.loop (worker);
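 
The new zero_dst_vector parameter moves the clearing of the destination into the loop itself. A sketch of the two call patterns, with data a MatrixFree<dim,double> object, cell_operation an std::function of the signature required above, and dst, src matching vectors; it is assumed here that the declaration in matrix_free.h gives zero_dst_vector a default of false, so that existing three-argument calls keep compiling.

// previous pattern: clear the whole destination up front
dst = 0.;
data.cell_loop(cell_operation, dst, src);

// new pattern: cell_loop() clears dst range by range while it runs, which
// keeps the freshly zeroed entries in cache for the subsequent writes
data.cell_loop(cell_operation, dst, src, /*zero_dst_vector=*/true);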
@@ -2298,6 +3513,44 @@ MatrixFree<dim, Number>::cell_loop
 
 
 
+template <int dim, typename Number>
+template <typename OutVector, typename InVector>
+inline
+void
+MatrixFree<dim, Number>::loop
+(const std::function<void (const MatrixFree<dim,Number> &,
+                           OutVector &,
+                           const InVector &,
+                           const std::pair<unsigned int,
+                           unsigned int> &)> &cell_operation,
+ const std::function<void (const MatrixFree<dim,Number> &,
+                           OutVector &,
+                           const InVector &,
+                           const std::pair<unsigned int,
+                           unsigned int> &)> &face_operation,
+ const std::function<void (const MatrixFree<dim,Number> &,
+                           OutVector &,
+                           const InVector &,
+                           const std::pair<unsigned int,
+                           unsigned int> &)> &boundary_operation,
+ OutVector       &dst,
+ const InVector  &src,
+ const bool       zero_dst_vector,
+ const DataAccessOnFaces dst_vector_face_access,
+ const DataAccessOnFaces src_vector_face_access) const
+{
+  typedef internal::MFClassWrapper<MatrixFree<dim, Number>, InVector, OutVector> Wrapper;
+  Wrapper wrap (cell_operation, face_operation, boundary_operation);
+  internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, Wrapper, true>
+  worker(*this, src, dst, zero_dst_vector, wrap, &Wrapper::cell_integrator,
+         &Wrapper::face_integrator, &Wrapper::boundary_integrator,
+         src_vector_face_access, dst_vector_face_access);
+
+  task_info.loop(worker);
+}
+
+
+
 template <int dim, typename Number>
 template <typename CLASS, typename OutVector, typename InVector>
 inline
@@ -2306,14 +3559,46 @@ MatrixFree<dim,Number>::cell_loop
 (void (CLASS::*function_pointer)(const MatrixFree<dim,Number> &,
                                  OutVector &,
                                  const InVector &,
-                                 const std::pair<unsigned int,
-                                 unsigned int> &)const,
+                                 const std::pair<unsigned int, unsigned int> &)const,
  const CLASS    *owning_class,
  OutVector      &dst,
- const InVector &src) const
+ const InVector &src,
+ const bool       zero_dst_vector) const
 {
   internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, true>
-  worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr);
+  worker(*this, src, dst, zero_dst_vector, *owning_class, function_pointer, nullptr, nullptr);
+  task_info.loop(worker);
+}
+
+
+
+template <int dim, typename Number>
+template <typename CLASS, typename OutVector, typename InVector>
+inline
+void
+MatrixFree<dim,Number>::loop
+(void (CLASS::*cell_operation)(const MatrixFree<dim,Number> &,
+                               OutVector &,
+                               const InVector &,
+                               const std::pair<unsigned int, unsigned int> &)const,
+ void (CLASS::*face_operation)(const MatrixFree<dim,Number> &,
+                               OutVector &,
+                               const InVector &,
+                               const std::pair<unsigned int, unsigned int> &)const,
+ void (CLASS::*boundary_operation)(const MatrixFree<dim,Number> &,
+                                   OutVector &,
+                                   const InVector &,
+                                   const std::pair<unsigned int, unsigned int> &)const,
+ const CLASS    *owning_class,
+ OutVector      &dst,
+ const InVector &src,
+ const bool       zero_dst_vector,
+ const DataAccessOnFaces dst_vector_face_access,
+ const DataAccessOnFaces src_vector_face_access) const
+{
+  internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, true>
+  worker(*this, src, dst, zero_dst_vector, *owning_class, cell_operation, face_operation,
+         boundary_operation, src_vector_face_access, dst_vector_face_access);
   task_info.loop(worker);
 }
 
@@ -2327,14 +3612,47 @@ MatrixFree<dim,Number>::cell_loop
 (void(CLASS::*function_pointer)(const MatrixFree<dim,Number> &,
                                 OutVector &,
                                 const InVector &,
-                                const std::pair<unsigned int,
-                                unsigned int> &),
+                                const std::pair<unsigned int, unsigned int> &),
+ CLASS          *owning_class,
+ OutVector      &dst,
+ const InVector &src,
+ const bool       zero_dst_vector) const
+{
+  internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, false>
+  worker(*this, src, dst, zero_dst_vector, *owning_class, function_pointer, nullptr, nullptr);
+  task_info.loop(worker);
+}
+
+
+
+template <int dim, typename Number>
+template <typename CLASS, typename OutVector, typename InVector>
+inline
+void
+MatrixFree<dim,Number>::loop
+(void(CLASS::*cell_operation)(const MatrixFree<dim,Number> &,
+                              OutVector &,
+                              const InVector &,
+                              const std::pair<unsigned int, unsigned int> &),
+ void(CLASS::*face_operation)(const MatrixFree<dim,Number> &,
+                              OutVector &,
+                              const InVector &,
+                              const std::pair<unsigned int, unsigned int> &),
+ void(CLASS::*boundary_operation)(const MatrixFree<dim,Number> &,
+                                  OutVector &,
+                                  const InVector &,
+                                  const std::pair<unsigned int, unsigned int> &),
  CLASS          *owning_class,
  OutVector      &dst,
- const InVector &src) const
+ const InVector &src,
+ const bool       zero_dst_vector,
+ const DataAccessOnFaces dst_vector_face_access,
+ const DataAccessOnFaces src_vector_face_access) const
 {
   internal::MFWorker<MatrixFree<dim, Number>, InVector, OutVector, CLASS, false>
-  worker(*this, src, dst, false, *owning_class, function_pointer, nullptr, nullptr);
+  worker(*this, src, dst, zero_dst_vector, *owning_class, cell_operation,
+         face_operation, boundary_operation,
+         src_vector_face_access, dst_vector_face_access);
   task_info.loop(worker);
 }
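 
With the overloads above in place, a user-side operator can hand cell, inner-face and boundary-face kernels to MatrixFree::loop() and leave the vector zeroing and the (possibly reduced) ghost exchange to the loop. The following is a hedged sketch and not part of this patch: the operator, its kernels and the use of DataAccessOnFaces::values (exchange only what value access on faces requires) are illustrative, and the FEEvaluation/FEFaceEvaluation calls follow the interface of this development cycle.

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>

using namespace dealii;

template <int dim, int fe_degree>
class JumpPenaltyOperator
{
public:
  using VectorType = LinearAlgebra::distributed::Vector<double>;

  JumpPenaltyOperator(const MatrixFree<dim, double> &data) : data(data) {}

  void vmult(VectorType &dst, const VectorType &src) const
  {
    data.loop(&JumpPenaltyOperator::local_apply_cell,
              &JumpPenaltyOperator::local_apply_face,
              &JumpPenaltyOperator::local_apply_boundary,
              this, dst, src,
              /*zero_dst_vector=*/true,
              MatrixFree<dim, double>::DataAccessOnFaces::values,  // dst face access
              MatrixFree<dim, double>::DataAccessOnFaces::values); // src face access
  }

private:
  void local_apply_cell(const MatrixFree<dim, double> &data,
                        VectorType &dst,
                        const VectorType &src,
                        const std::pair<unsigned int, unsigned int> &cell_range) const
  {
    FEEvaluation<dim, fe_degree> phi(data);
    for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
      {
        phi.reinit(cell);
        phi.read_dof_values(src);
        phi.evaluate(true, false);
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          phi.submit_value(phi.get_value(q), q);
        phi.integrate(true, false);
        phi.distribute_local_to_global(dst);
      }
  }

  void local_apply_face(const MatrixFree<dim, double> &data,
                        VectorType &dst,
                        const VectorType &src,
                        const std::pair<unsigned int, unsigned int> &face_range) const
  {
    FEFaceEvaluation<dim, fe_degree> phi_m(data, true);   // interior side of the face
    FEFaceEvaluation<dim, fe_degree> phi_p(data, false);  // exterior side of the face
    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        phi_m.reinit(face);
        phi_p.reinit(face);
        phi_m.read_dof_values(src);
        phi_m.evaluate(true, false);
        phi_p.read_dof_values(src);
        phi_p.evaluate(true, false);
        for (unsigned int q = 0; q < phi_m.n_q_points; ++q)
          {
            // penalize the jump of the solution across the face
            const VectorizedArray<double> u_m = phi_m.get_value(q);
            const VectorizedArray<double> u_p = phi_p.get_value(q);
            phi_m.submit_value(u_m - u_p, q);
            phi_p.submit_value(u_p - u_m, q);
          }
        phi_m.integrate(true, false);
        phi_m.distribute_local_to_global(dst);
        phi_p.integrate(true, false);
        phi_p.distribute_local_to_global(dst);
      }
  }

  void local_apply_boundary(const MatrixFree<dim, double> &data,
                            VectorType &dst,
                            const VectorType &src,
                            const std::pair<unsigned int, unsigned int> &face_range) const
  {
    FEFaceEvaluation<dim, fe_degree> phi(data, true);
    for (unsigned int face = face_range.first; face < face_range.second; ++face)
      {
        phi.reinit(face);
        phi.read_dof_values(src);
        phi.evaluate(true, false);
        for (unsigned int q = 0; q < phi.n_q_points; ++q)
          phi.submit_value(phi.get_value(q), q);
        phi.integrate(true, false);
        phi.distribute_local_to_global(dst);
      }
  }

  const MatrixFree<dim, double> &data;
};

Compared to cell_loop(), the only structural additions on the user side are the two extra kernels and the two DataAccessOnFaces arguments; the zeroing of dst, the ghost import for src and the restoration of its ghost state are handled inside loop() by the MFWorker machinery above.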
 
index 7131b7d3f0193b37a8a6dd8eb296cf45bebbe50c..9cecb44dfb88ee7437cea8a827d6be0ed2b80dd0 100644 (file)
 
 #include <deal.II/matrix_free/matrix_free.h>
 #include <deal.II/matrix_free/shape_info.templates.h>
-#include <deal.II/matrix_free/mapping_info.templates.h>
 #include <deal.II/matrix_free/dof_info.templates.h>
 #include <deal.II/matrix_free/face_info.h>
+#include <deal.II/matrix_free/face_setup_internal.h>
+
+#ifdef DEAL_II_WITH_THREADS
+#include <deal.II/base/parallel.h>
+#include <tbb/concurrent_unordered_map.h>
+#endif
+
+#include <fstream>
 
 
 DEAL_II_NAMESPACE_OPEN
@@ -59,6 +66,133 @@ MatrixFree<dim, Number>::MatrixFree(const MatrixFree<dim,Number> &other)
 
 
 
+template <int dim, typename Number>
+std::pair<unsigned int,unsigned int>
+MatrixFree<dim,Number>::create_cell_subrange_hp_by_index
+(const std::pair<unsigned int,unsigned int> &range,
+ const unsigned int fe_index,
+ const unsigned int vector_component) const
+{
+  AssertIndexRange (fe_index, dof_info[vector_component].max_fe_index);
+  const std::vector<unsigned int> &fe_indices =
+    dof_info[vector_component].cell_active_fe_index;
+  if (fe_indices.empty() == true)
+    return range;
+  else
+    {
+      // the range over which we are searching must be ordered; otherwise we
+      // would get a range that spans too many cells
+#ifdef DEBUG
+      for (unsigned int i=range.first+1; i<range.second; ++i)
+        Assert (fe_indices[i] >= fe_indices[i-1],
+                ExcMessage ("Cell range must be over sorted range of fe indices in hp case!"));
+      AssertIndexRange(range.first,fe_indices.size()+1);
+      AssertIndexRange(range.second,fe_indices.size()+1);
+#endif
+      std::pair<unsigned int,unsigned int> return_range;
+      return_range.first =
+        std::lower_bound(&fe_indices[0] + range.first,
+                         &fe_indices[0] + range.second, fe_index)
+        - &fe_indices[0];
+      return_range.second =
+        std::lower_bound(&fe_indices[0] + return_range.first,
+                         &fe_indices[0] + range.second,
+                         fe_index + 1)
+        - &fe_indices[0];
+      Assert(return_range.first >= range.first &&
+             return_range.second <= range.second, ExcInternalError());
+      return return_range;
+    }
+}
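+
The two lower_bound calls return the subrange of cell batches whose active FE index equals fe_index, which lets hp kernels process one polynomial degree at a time. A small sketch of a helper built on top of it; the function name is a placeholder and the third argument selects DoF handler/component 0.

#include <deal.II/matrix_free/matrix_free.h>

using namespace dealii;

template <int dim>
unsigned int
count_cell_batches_with_fe_index(const MatrixFree<dim, double> &data,
                                 const std::pair<unsigned int, unsigned int> &cell_range,
                                 const unsigned int fe_index)
{
  // restrict the incoming (sorted) range to the cell batches carrying the
  // given active FE index; a cell kernel would run FEEvaluation with the
  // matching polynomial degree on this subrange only
  const std::pair<unsigned int, unsigned int> sub_range =
    data.create_cell_subrange_hp_by_index(cell_range, fe_index, 0);
  return sub_range.second - sub_range.first;
}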
+
+
+
+template <int dim, typename Number>
+void
+MatrixFree<dim,Number>::renumber_dofs (std::vector<types::global_dof_index> &renumbering,
+                                       const unsigned int vector_component)
+{
+  AssertIndexRange(vector_component, dof_info.size());
+  dof_info[vector_component].compute_dof_renumbering (renumbering);
+}
+
+
+
+template <int dim, typename Number>
+const DoFHandler<dim> &
+MatrixFree<dim,Number>::get_dof_handler (const unsigned int dof_index) const
+{
+  AssertIndexRange (dof_index, n_components());
+  if (dof_handlers.active_dof_handler == DoFHandlers::usual)
+    {
+      AssertDimension (dof_handlers.dof_handler.size(),
+                       dof_handlers.n_dof_handlers);
+      return *dof_handlers.dof_handler[dof_index];
+    }
+  else
+    {
+      Assert (false, ExcNotImplemented());
+      // return a pseudo argument to avoid a compiler error about a missing
+      // return statement; if this point is ever reached in optimized mode,
+      // where the assertion above is skipped, it triggers a segfault
+      return *dof_handlers.dof_handler[numbers::invalid_unsigned_int];
+    }
+}
+
+
+
+template <int dim, typename Number>
+typename DoFHandler<dim>::cell_iterator
+MatrixFree<dim,Number>::get_cell_iterator(const unsigned int macro_cell_number,
+                                          const unsigned int vector_number,
+                                          const unsigned int dof_index) const
+{
+  const unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
+  AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
+  AssertIndexRange (macro_cell_number, task_info.cell_partition_data.back());
+  AssertIndexRange (vector_number, n_components_filled(macro_cell_number));
+
+  const DoFHandler<dim> *dofh = nullptr;
+  if (dof_handlers.active_dof_handler == DoFHandlers::usual)
+    {
+      AssertDimension (dof_handlers.dof_handler.size(),
+                       dof_handlers.n_dof_handlers);
+      dofh = dof_handlers.dof_handler[dof_index];
+    }
+  else
+    {
+      Assert (false, ExcMessage ("Cannot return DoFHandler<dim>::cell_iterator "
+                                 "for underlying DoFHandler!"));
+    }
+
+  std::pair<unsigned int,unsigned int> index =
+    cell_level_index[macro_cell_number*vectorization_length+vector_number];
+  return typename DoFHandler<dim>::cell_iterator
+         (&dofh->get_triangulation(), index.first, index.second, dofh);
+}
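+
A sketch of how the (macro cell, lane) to iterator translation above can be used outside of the kernels, e.g. to inspect per-cell data such as the material id; the inspected quantity and the function name are only an example.

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/matrix_free/matrix_free.h>

#include <iostream>

using namespace dealii;

template <int dim>
void print_material_ids(const MatrixFree<dim, double> &data)
{
  for (unsigned int macro_cell = 0; macro_cell < data.n_macro_cells(); ++macro_cell)
    for (unsigned int lane = 0; lane < data.n_components_filled(macro_cell); ++lane)
      {
        const typename DoFHandler<dim>::cell_iterator cell =
          data.get_cell_iterator(macro_cell, lane, 0);
        std::cout << "macro cell " << macro_cell << ", lane " << lane
                  << ": material id " << static_cast<int>(cell->material_id())
                  << std::endl;
      }
}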
+
+
+
+template <int dim, typename Number>
+typename hp::DoFHandler<dim>::active_cell_iterator
+MatrixFree<dim,Number>::get_hp_cell_iterator(const unsigned int macro_cell_number,
+                                             const unsigned int vector_number,
+                                             const unsigned int dof_index) const
+{
+  constexpr unsigned int vectorization_length=VectorizedArray<Number>::n_array_elements;
+  AssertIndexRange (dof_index, dof_handlers.n_dof_handlers);
+  AssertIndexRange (macro_cell_number, task_info.cell_partition_data.back());
+  AssertIndexRange (vector_number, n_components_filled(macro_cell_number));
+
+  Assert (dof_handlers.active_dof_handler == DoFHandlers::hp,
+          ExcNotImplemented());
+  const hp::DoFHandler<dim> *dofh = dof_handlers.hp_dof_handler[dof_index];
+  std::pair<unsigned int,unsigned int> index =
+    cell_level_index[macro_cell_number*vectorization_length+vector_number];
+  return typename hp::DoFHandler<dim>::cell_iterator
+         (&dofh->get_triangulation(), index.first, index.second, dofh);
+}
+
+
+
 template <int dim, typename Number>
 void MatrixFree<dim,Number>::
 copy_from (const MatrixFree<dim,Number> &v)
@@ -85,21 +219,24 @@ internal_reinit(const Mapping<dim>                          &mapping,
                 const std::vector<const ConstraintMatrix *> &constraint,
                 const std::vector<IndexSet>                 &locally_owned_set,
                 const std::vector<hp::QCollection<1> >      &quad,
-                const typename MatrixFree<dim,Number>::AdditionalData additional_data)
+                const typename MatrixFree<dim,Number>::AdditionalData &additional_data)
 {
 
   // Reads out the FE information and stores the shape function values,
   // gradients and Hessians for quadrature points.
   {
-    const unsigned int n_fe   = dof_handler.size();
+    unsigned int n_fe = 0;
+    for (unsigned int no=0; no<dof_handler.size(); ++no)
+      n_fe += dof_handler[no]->get_fe().n_base_elements();
     const unsigned int n_quad = quad.size();
     shape_info.reinit (TableIndices<4>(n_fe, n_quad, 1, 1));
-    for (unsigned int no=0; no<n_fe; no++)
-      for (unsigned int nq =0; nq<n_quad; nq++)
-        {
-          AssertDimension (quad[nq].size(), 1);
-          shape_info(no,nq,0,0).reinit(quad[nq][0], dof_handler[no]->get_fe());
-        }
+    for (unsigned int no=0, c=0; no<dof_handler.size(); no++)
+      for (unsigned int b=0; b<dof_handler[no]->get_fe().n_base_elements(); ++b, ++c)
+        for (unsigned int nq =0; nq<n_quad; nq++)
+          {
+            AssertDimension (quad[nq].size(), 1);
+            shape_info(c,nq,0,0).reinit(quad[nq][0], dof_handler[no]->get_fe(), b);
+          }
   }
 
   if (additional_data.initialize_indices == true)
@@ -130,9 +267,12 @@ internal_reinit(const Mapping<dim>                          &mapping,
           task_info.n_procs = 1;
         }
 
-      initialize_dof_handlers (dof_handler, additional_data.level_mg_handler);
+      initialize_dof_handlers (dof_handler, additional_data);
       for (unsigned int no=0; no<dof_handler.size(); ++no)
-        dof_info[no].store_plain_indices = additional_data.store_plain_indices;
+        {
+          dof_info[no].store_plain_indices = additional_data.store_plain_indices;
+          dof_info[no].global_base_element_offset = no > 0 ? dof_info[no-1].global_base_element_offset + dof_handler[no-1]->get_fe().n_base_elements() : 0;
+        }
 
       // initialize the basic multithreading information that needs to be
       // passed to the DoFInfo structure
@@ -151,13 +291,13 @@ internal_reinit(const Mapping<dim>                          &mapping,
       // constraint_pool_data. It also reorders the way cells are gone through
       // (to separate cells with overlap to other processors from others
       // without).
-      initialize_indices (constraint, locally_owned_set);
+      initialize_indices (constraint, locally_owned_set, additional_data);
     }
 
   // initialize bare structures
   else if (dof_info.size() != dof_handler.size())
     {
-      initialize_dof_handlers(dof_handler, additional_data.level_mg_handler);
+      initialize_dof_handlers(dof_handler, additional_data);
       std::vector<unsigned int> dummy;
       std::vector<unsigned char> dummy2;
       task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(),
@@ -166,11 +306,17 @@ internal_reinit(const Mapping<dim>                          &mapping,
       for (unsigned int i=0; i<dof_info.size(); ++i)
         {
           dof_info[i].dimension    = dim;
-          dof_info[i].n_components = dof_handler[i]->get_fe().element_multiplicity(0);
+          dof_info[i].n_base_elements = dof_handler[i]->get_fe().n_base_elements();
+          dof_info[i].n_components.resize(dof_info[i].n_base_elements);
+          dof_info[i].start_components.resize(dof_info[i].n_base_elements+1);
+          for (unsigned int c=0; c<dof_info[i].n_base_elements; ++c)
+            {
+              dof_info[i].n_components[c] = dof_handler[i]->get_fe().element_multiplicity(c);
+              for (unsigned int l=0; l<dof_info[i].n_components[c]; ++l)
+                dof_info[i].component_to_base_index.push_back(c);
+              dof_info[i].start_components[c+1] = dof_info[i].start_components[c] + dof_info[i].n_components[c];
+            }
           dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe().dofs_per_cell);
-          dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1);
-          dof_info[i].row_starts.back()[2] =
-            cell_level_index.size() % VectorizedArray<Number>::n_array_elements;
 
           // if indices are not initialized, the cell_level_index might not be
           // divisible by the vectorization length. But it must be for
@@ -191,11 +337,11 @@ internal_reinit(const Mapping<dim>                          &mapping,
     {
       std::vector<unsigned int> dummy;
       mapping_info.initialize (dof_handler[0]->get_triangulation(), cell_level_index,
-                               internal::MatrixFreeFunctions:: FaceInfo
-                               <VectorizedArray<Number>::n_array_elements>(),
-                               dummy, mapping,
+                               face_info, dummy, mapping,
                                quad, additional_data.mapping_update_flags,
-                               update_default, update_default, update_default);
+                               additional_data.mapping_update_flags_boundary_faces,
+                               additional_data.mapping_update_flags_inner_faces,
+                               additional_data.mapping_update_flags_faces_by_cells);
 
       mapping_is_initialized = true;
     }
@@ -210,12 +356,14 @@ internal_reinit(const Mapping<dim>                            &mapping,
                 const std::vector<const ConstraintMatrix *>    &constraint,
                 const std::vector<IndexSet>                   &locally_owned_set,
                 const std::vector<hp::QCollection<1> >        &quad,
-                const typename MatrixFree<dim,Number>::AdditionalData additional_data)
+                const typename MatrixFree<dim,Number>::AdditionalData &additional_data)
 {
   // Reads out the FE information and stores the shape function values,
   // gradients and Hessians for quadrature points.
   {
-    const unsigned int n_components = dof_handler.size();
+    unsigned int n_components = 0;
+    for (unsigned int no=0; no<dof_handler.size(); ++no)
+      n_components += dof_handler[no]->get_fe()[0].n_base_elements();
     const unsigned int n_quad       = quad.size();
     unsigned int n_fe_in_collection = 0;
     for (unsigned int i=0; i<n_components; ++i)
@@ -227,12 +375,14 @@ internal_reinit(const Mapping<dim>                            &mapping,
     shape_info.reinit (TableIndices<4>(n_components, n_quad,
                                        n_fe_in_collection,
                                        n_quad_in_collection));
-    for (unsigned int no=0; no<n_components; no++)
-      for (unsigned int fe_no=0; fe_no<dof_handler[no]->get_fe_collection().size(); ++fe_no)
-        for (unsigned int nq =0; nq<n_quad; nq++)
-          for (unsigned int q_no=0; q_no<quad[nq].size(); ++q_no)
-            shape_info(no,nq,fe_no,q_no).reinit (quad[nq][q_no],
-                                                 dof_handler[no]->get_fe(fe_no));
+    for (unsigned int no=0, c=0; no<dof_handler.size(); no++)
+      for (unsigned int b=0; b<dof_handler[no]->get_fe(0).n_base_elements(); ++b, ++c)
+        for (unsigned int fe_no=0; fe_no<dof_handler[no]->get_fe_collection().size(); ++fe_no)
+          for (unsigned int nq =0; nq<n_quad; nq++)
+            for (unsigned int q_no=0; q_no<quad[nq].size(); ++q_no)
+              shape_info(c,nq,fe_no,q_no).reinit (quad[nq][q_no],
+                                                  dof_handler[no]->get_fe(fe_no),
+                                                  b);
   }
 
   if (additional_data.initialize_indices == true)
@@ -263,9 +413,12 @@ internal_reinit(const Mapping<dim>                            &mapping,
           task_info.n_procs = 1;
         }
 
-      initialize_dof_handlers (dof_handler, additional_data.level_mg_handler);
+      initialize_dof_handlers (dof_handler, additional_data);
       for (unsigned int no=0; no<dof_handler.size(); ++no)
-        dof_info[no].store_plain_indices = additional_data.store_plain_indices;
+        {
+          dof_info[no].store_plain_indices = additional_data.store_plain_indices;
+          dof_info[no].global_base_element_offset = no > 0 ? dof_info[no-1].global_base_element_offset + dof_handler[no-1]->get_fe()[0].n_base_elements() : 0;
+        }
 
       // initialize the basic multithreading information that needs to be
       // passed to the DoFInfo structure
@@ -284,13 +437,13 @@ internal_reinit(const Mapping<dim>                            &mapping,
       // constraint_pool_data. It also reorders the way cells are gone through
       // (to separate cells with overlap to other processors from others
       // without).
-      initialize_indices (constraint, locally_owned_set);
+      initialize_indices (constraint, locally_owned_set, additional_data);
     }
 
   // initialize bare structures
   else if (dof_info.size() != dof_handler.size())
     {
-      initialize_dof_handlers(dof_handler, additional_data.level_mg_handler);
+      initialize_dof_handlers(dof_handler, additional_data);
       std::vector<unsigned int> dummy;
       std::vector<unsigned char> dummy2;
       task_info.collect_boundary_cells (cell_level_index.size(), cell_level_index.size(),
@@ -300,11 +453,17 @@ internal_reinit(const Mapping<dim>                            &mapping,
         {
           Assert(dof_handler[i]->get_fe_collection().size() == 1, ExcNotImplemented());
           dof_info[i].dimension    = dim;
-          dof_info[i].n_components = dof_handler[i]->get_fe(0).element_multiplicity(0);
+          dof_info[i].n_base_elements = dof_handler[i]->get_fe(0).n_base_elements();
+          dof_info[i].n_components.resize(dof_info[i].n_base_elements);
+          dof_info[i].start_components.resize(dof_info[i].n_base_elements+1);
+          for (unsigned int c=0; c<dof_info[i].n_base_elements; ++c)
+            {
+              dof_info[i].n_components[c] = dof_handler[i]->get_fe(0).element_multiplicity(c);
+              for (unsigned int l=0; l<dof_info[i].n_components[c]; ++l)
+                dof_info[i].component_to_base_index.push_back(c);
+              dof_info[i].start_components[c+1] = dof_info[i].start_components[c] + dof_info[i].n_components[c];
+            }
           dof_info[i].dofs_per_cell.push_back(dof_handler[i]->get_fe(0).dofs_per_cell);
-          dof_info[i].row_starts.resize(task_info.cell_partition_data.back()+1);
-          dof_info[i].row_starts.back()[2] =
-            cell_level_index.size() % VectorizedArray<Number>::n_array_elements;
 
           // if indices are not initialized, the cell_level_index might not be
           // divisible by the vectorization length. But it must be for
@@ -321,11 +480,11 @@ internal_reinit(const Mapping<dim>                            &mapping,
   if (additional_data.initialize_mapping == true)
     {
       mapping_info.initialize (dof_handler[0]->get_triangulation(), cell_level_index,
-                               internal::MatrixFreeFunctions::FaceInfo
-                               <VectorizedArray<Number>::n_array_elements>(),
-                               dof_info[0].cell_active_fe_index, mapping,
-                               quad, additional_data.mapping_update_flags,
-                               update_default, update_default, update_default);
+                               face_info, dof_info[0].cell_active_fe_index,
+                               mapping, quad, additional_data.mapping_update_flags,
+                               additional_data.mapping_update_flags_boundary_faces,
+                               additional_data.mapping_update_flags_inner_faces,
+                               additional_data.mapping_update_flags_faces_by_cells);
 
       mapping_is_initialized = true;
     }
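 
Both reinit() paths now hand the three face-related flag sets from AdditionalData to MappingInfo, so face data is only precomputed when the user asks for it. A sketch of the corresponding user-side setup; the concrete update flags and the surrounding mapping, dof_handler, constraints and fe_degree objects are placeholders.

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/mapping.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/matrix_free/matrix_free.h>

using namespace dealii;

template <int dim>
void setup_matrix_free(const Mapping<dim>      &mapping,
                       const DoFHandler<dim>   &dof_handler,
                       const ConstraintMatrix  &constraints,
                       const unsigned int       fe_degree,
                       MatrixFree<dim, double> &data)
{
  typename MatrixFree<dim, double>::AdditionalData additional_data;
  // cell data as before
  additional_data.mapping_update_flags =
    update_values | update_gradients | update_JxW_values;
  // new in this patch: inner-face and boundary-face data must be requested
  // explicitly before reinit()
  additional_data.mapping_update_flags_inner_faces =
    update_values | update_JxW_values | update_normal_vectors;
  additional_data.mapping_update_flags_boundary_faces =
    update_values | update_JxW_values | update_normal_vectors;

  data.reinit(mapping, dof_handler, constraints, QGauss<1>(fe_degree + 1),
              additional_data);
}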
@@ -367,24 +526,25 @@ MatrixFree<dim, Number>::is_supported(const FiniteElement<dim, spacedim> &fe)
 
 namespace internal
 {
-
-  // steps through all children and adds the
-  // active cells recursively
-  template <typename InIterator>
-  void resolve_cell (const InIterator   &cell,
-                     std::vector<std::pair<unsigned int,unsigned int> > &cell_its,
-                     const unsigned int  subdomain_id)
+  namespace MatrixFreeFunctions
   {
-    if (cell->has_children())
-      for (unsigned int child=0; child<cell->n_children(); ++child)
-        resolve_cell (cell->child(child), cell_its,
-                      subdomain_id);
-    else if (subdomain_id == numbers::invalid_subdomain_id
-             || cell->subdomain_id() == subdomain_id)
-      {
-        Assert (cell->active(), ExcInternalError());
-        cell_its.emplace_back (cell->level(), cell->index());
-      }
+    // steps through all children and adds the active cells recursively
+    template <typename InIterator>
+    void resolve_cell (const InIterator   &cell,
+                       std::vector<std::pair<unsigned int,unsigned int> > &cell_its,
+                       const unsigned int  subdomain_id)
+    {
+      if (cell->has_children())
+        for (unsigned int child=0; child<cell->n_children(); ++child)
+          resolve_cell (cell->child(child), cell_its,
+                        subdomain_id);
+      else if (subdomain_id == numbers::invalid_subdomain_id ||
+               cell->subdomain_id() == subdomain_id)
+        {
+          Assert (cell->active(), ExcInternalError());
+          cell_its.emplace_back (cell->level(), cell->index());
+        }
+    }
   }
 }
 
@@ -393,16 +553,19 @@ namespace internal
 template <int dim, typename Number>
 void MatrixFree<dim,Number>::
 initialize_dof_handlers (const std::vector<const DoFHandler<dim>*> &dof_handler,
-                         const unsigned int level)
+                         const AdditionalData &additional_data)
 {
+  cell_level_index.clear();
   dof_handlers.active_dof_handler = DoFHandlers::usual;
-  dof_handlers.level = level;
+  dof_handlers.level = additional_data.level_mg_handler;
   dof_handlers.n_dof_handlers = dof_handler.size();
   dof_handlers.dof_handler.resize (dof_handlers.n_dof_handlers);
   for (unsigned int no=0; no<dof_handlers.n_dof_handlers; ++no)
     dof_handlers.dof_handler[no] = dof_handler[no];
 
   dof_info.resize (dof_handlers.n_dof_handlers);
+  for (unsigned int no=0; no<dof_handlers.n_dof_handlers; ++no)
+    dof_info[no].vectorization_length = VectorizedArray<Number>::n_array_elements;
 
   // Go through cells on zeroth level and then successively step down into
   // children. This gives a z-ordering of the cells, which is beneficial when
@@ -411,6 +574,7 @@ initialize_dof_handlers (const std::vector<const DoFHandler<dim>*> &dof_handler,
   const unsigned int my_pid = task_info.my_pid;
 
   const Triangulation<dim> &tria = dof_handlers.dof_handler[0]->get_triangulation();
+  const unsigned int level = additional_data.level_mg_handler;
   if (level == numbers::invalid_unsigned_int)
     {
       if (n_mpi_procs == 1)
@@ -423,7 +587,7 @@ initialize_dof_handlers (const std::vector<const DoFHandler<dim>*> &dof_handler,
            (&dof_handler[0]->get_triangulation())!=nullptr)
           ? my_pid : numbers::invalid_subdomain_id;
       for ( ; cell != end_cell; ++cell)
-        internal::resolve_cell (cell, cell_level_index, subdomain_id);
+        internal::MatrixFreeFunctions::resolve_cell (cell, cell_level_index, subdomain_id);
 
       Assert(n_mpi_procs>1 || cell_level_index.size()==tria.n_active_cells(),
              ExcInternalError());
@@ -452,15 +616,21 @@ initialize_dof_handlers (const std::vector<const DoFHandler<dim>*> &dof_handler,
 template <int dim, typename Number>
 void MatrixFree<dim,Number>::
 initialize_dof_handlers (const std::vector<const hp::DoFHandler<dim>*> &dof_handler,
-                         const unsigned int)
+                         const AdditionalData &additional_data)
 {
+  cell_level_index.clear();
   dof_handlers.active_dof_handler = DoFHandlers::hp;
+  dof_handlers.level = additional_data.level_mg_handler;
+  Assert(dof_handlers.level == numbers::invalid_unsigned_int,
+         ExcNotImplemented());
   dof_handlers.n_dof_handlers = dof_handler.size();
   dof_handlers.hp_dof_handler.resize (dof_handlers.n_dof_handlers);
   for (unsigned int no=0; no<dof_handlers.n_dof_handlers; ++no)
     dof_handlers.hp_dof_handler[no] = dof_handler[no];
 
   dof_info.resize (dof_handlers.n_dof_handlers);
+  for (unsigned int no=0; no<dof_handlers.n_dof_handlers; ++no)
+    dof_info[no].vectorization_length = VectorizedArray<Number>::n_array_elements;
 
   // go through cells on zeroth level and then successively step down into
   // children. This gives a z-ordering of the cells, which is beneficial when
@@ -484,8 +654,8 @@ initialize_dof_handlers (const std::vector<const hp::DoFHandler<dim>*> &dof_hand
        (&dof_handler[0]->get_triangulation())!=nullptr)
       ? my_pid : numbers::invalid_subdomain_id;
   for ( ; cell != end_cell; ++cell)
-    internal::resolve_cell (cell, cell_level_index,
-                            subdomain_id);
+    internal::MatrixFreeFunctions::resolve_cell (cell, cell_level_index,
+                                                 subdomain_id);
 
   Assert(n_mpi_procs>1 || cell_level_index.size()==tria.n_active_cells(),
          ExcInternalError());
@@ -500,8 +670,19 @@ initialize_dof_handlers (const std::vector<const hp::DoFHandler<dim>*> &dof_hand
 template <int dim, typename Number>
 void MatrixFree<dim,Number>::initialize_indices
 (const std::vector<const ConstraintMatrix *> &constraint,
- const std::vector<IndexSet>                 &locally_owned_set)
+ const std::vector<IndexSet>                 &locally_owned_set,
+ const AdditionalData                        &additional_data)
 {
+  // insert possible ghost cells and construct face topology
+  const bool do_face_integrals = (additional_data.mapping_update_flags_inner_faces |
+                                  additional_data.mapping_update_flags_boundary_faces) != update_default;
+  internal::MatrixFreeFunctions::FaceSetup<dim> face_setup;
+  if (do_face_integrals)
+    face_setup.initialize(dof_handlers.active_dof_handler == DoFHandlers::usual ?
+                          dof_handlers.dof_handler[0]->get_triangulation() :
+                          dof_handlers.hp_dof_handler[0]->get_triangulation(),
+                          additional_data, cell_level_index);
+
   const unsigned int n_fe = dof_handlers.n_dof_handlers;
   const unsigned int n_active_cells = cell_level_index.size();
 
@@ -510,9 +691,12 @@ void MatrixFree<dim,Number>::initialize_indices
   AssertDimension (n_fe, constraint.size());
 
   std::vector<types::global_dof_index> local_dof_indices;
+  std::vector<std::vector<std::vector<unsigned int> > > lexicographic(n_fe);
 
   internal::MatrixFreeFunctions::ConstraintValues<double> constraint_values;
 
+  bool cell_categorization_enabled = !additional_data.cell_vectorization_category.empty();
+
   for (unsigned int no=0; no<n_fe; ++no)
     {
       std::vector<const FiniteElement<dim>*> fes;
@@ -523,41 +707,64 @@ void MatrixFree<dim,Number>::initialize_indices
           for (unsigned int f=0; f<fe.size(); ++f)
             fes.push_back (&fe[f]);
 
-          dof_info[no].max_fe_index = fe.size();
-          dof_info[no].fe_index_conversion.resize (fe.size());
-          for (unsigned int ind=0; ind<hpdof->get_fe_collection().size(); ++ind)
-            dof_info[no].fe_index_conversion[ind] =
-              std::pair<unsigned int,unsigned int>(fe[ind].degree,
-                                                   fe[ind].dofs_per_cell);
           if (fe.size() > 1)
             dof_info[no].cell_active_fe_index.resize(n_active_cells,
                                                      numbers::invalid_unsigned_int);
+
+          Assert(additional_data.cell_vectorization_category.empty(),
+                 ExcNotImplemented());
         }
       else
         {
           const DoFHandler<dim> *dofh =&*dof_handlers.dof_handler[no];
           fes.push_back (&dofh->get_fe());
-          dof_info[no].max_fe_index = 1;
-          dof_info[no].fe_index_conversion.resize (1);
-          dof_info[no].fe_index_conversion[0] =
-            std::pair<unsigned int,unsigned int>(fes.back()->degree,
-                                                 fes.back()->dofs_per_cell);
+          if (cell_categorization_enabled == true)
+            dof_info[no].cell_active_fe_index.resize(n_active_cells,
+                                                     numbers::invalid_unsigned_int);
         }
+      lexicographic[no].resize(fes.size());
+
+      dof_info[no].fe_index_conversion.resize(fes.size());
+      dof_info[no].max_fe_index = fes.size();
 
+      dof_info[no].component_dof_indices_offset.clear();
+      dof_info[no].component_dof_indices_offset.resize(fes.size());
       for (unsigned int fe_index = 0; fe_index<fes.size(); ++fe_index)
         {
           const FiniteElement<dim> &fe = *fes[fe_index];
-          Assert (fe.n_base_elements() == 1,
-                  ExcMessage ("MatrixFree currently only works for DoFHandler with one base element"));
-          const unsigned int n_fe_components = fe.element_multiplicity (0);
-
           // cache number of finite elements and dofs_per_cell
           dof_info[no].dofs_per_cell.push_back (fe.dofs_per_cell);
           dof_info[no].dofs_per_face.push_back (fe.dofs_per_face);
-          dof_info[no].dimension    = dim;
-          dof_info[no].n_components = n_fe_components;
+          dof_info[no].dimension       = dim;
+          dof_info[no].n_base_elements = fe.n_base_elements();
+          dof_info[no].n_components.resize(dof_info[no].n_base_elements);
+          dof_info[no].start_components.resize(dof_info[no].n_base_elements+1);
+          dof_info[no].component_to_base_index.clear();
+          dof_info[no].component_dof_indices_offset[fe_index].push_back(0);
+          dof_info[no].fe_index_conversion[fe_index].clear();
+          for (unsigned int c=0; c<dof_info[no].n_base_elements; ++c)
+            {
+              dof_info[no].n_components[c] = fe.element_multiplicity(c);
+              for (unsigned int l=0; l<dof_info[no].n_components[c]; ++l)
+                {
+                  dof_info[no].component_to_base_index.push_back(c);
+                  dof_info[no].component_dof_indices_offset[fe_index].push_back
+                  (dof_info[no].component_dof_indices_offset[fe_index].back() +
+                   fe.base_element(c).dofs_per_cell);
+                  dof_info[no].fe_index_conversion[fe_index].push_back(fe.base_element(c).degree);
+                }
+              dof_info[no].start_components[c+1] = dof_info[no].start_components[c] + dof_info[no].n_components[c];
+              lexicographic[no][fe_index].
+              insert(lexicographic[no][fe_index].end(),
+                     shape_info(dof_info[no].global_base_element_offset+c,0,fe_index,0).lexicographic_numbering.begin(),
+                     shape_info(dof_info[no].global_base_element_offset+c,0,fe_index,0).lexicographic_numbering.end());
+            }
 
-          AssertDimension (shape_info(no,0,fe_index,0).lexicographic_numbering.size(),
+          AssertDimension (lexicographic[no][fe_index].size(),
+                           dof_info[no].dofs_per_cell[fe_index]);
+          AssertDimension (dof_info[no].component_dof_indices_offset[fe_index].size()-1,
+                           dof_info[no].start_components.back());
+          AssertDimension (dof_info[no].component_dof_indices_offset[fe_index].back(),
                            dof_info[no].dofs_per_cell[fe_index]);
         }
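
The per-component bookkeeping introduced above is easier to see outside the diff. The following standalone sketch (not part of the patch; the concrete FESystem is only an assumed example) reproduces the running sum that ends up in component_dof_indices_offset:

    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/fe_system.h>

    #include <iostream>
    #include <vector>

    template <int dim>
    void print_component_offsets()
    {
      // hypothetical element: dim copies of FE_Q(2) plus one FE_Q(1) component
      dealii::FESystem<dim> fe(dealii::FE_Q<dim>(2), dim,
                               dealii::FE_Q<dim>(1), 1);
      std::vector<unsigned int> offsets(1, 0);
      for (unsigned int b = 0; b < fe.n_base_elements(); ++b)
        for (unsigned int m = 0; m < fe.element_multiplicity(b); ++m)
          // every component of base element b contributes
          // base_element(b).dofs_per_cell consecutive entries
          offsets.push_back(offsets.back() + fe.base_element(b).dofs_per_cell);
      // the final entry equals fe.dofs_per_cell, which is exactly what the
      // AssertDimension on component_dof_indices_offset verifies above
      for (const unsigned int o : offsets)
        std::cout << o << ' ';
      std::cout << std::endl;
    }

    int main()
    {
      print_component_offsets<2>();
    }
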
 
@@ -567,34 +774,38 @@ void MatrixFree<dim,Number>::initialize_indices
       (new Utilities::MPI::Partitioner(locally_owned_set[no], task_info.communicator));
 
       // initialize the arrays for indices
-      dof_info[no].row_starts.resize (n_active_cells+1);
-      dof_info[no].row_starts[0][0] = 0;
-      dof_info[no].row_starts[0][1] = 0;
-      dof_info[no].row_starts[0][2] = 0;
+      const unsigned int n_components_total = dof_info[no].start_components.back();
+      dof_info[no].row_starts.resize (n_active_cells*n_components_total+1);
+      dof_info[no].row_starts[0].first = 0;
+      dof_info[no].row_starts[0].second = 0;
       dof_info[no].dof_indices.reserve
       ((n_active_cells*dof_info[no].dofs_per_cell[0]*3)/2);
 
-      // cache the constrained indices for use in matrix-vector products
-      {
-        const types::global_dof_index
-        start_index = dof_info[no].vector_partitioner->local_range().first,
-        end_index   = dof_info[no].vector_partitioner->local_range().second;
-        for (types::global_dof_index i=start_index; i<end_index; ++i)
-          if (constraint[no]->is_constrained(i)==true)
-            dof_info[no].constrained_dofs.
-            push_back(static_cast<unsigned int>(i-start_index));
-      }
+      // cache the constrained indices for use in matrix-vector products and
+      // the like
+      const types::global_dof_index
+      start_index = dof_info[no].vector_partitioner->local_range().first,
+      end_index   = dof_info[no].vector_partitioner->local_range().second;
+      for (types::global_dof_index i=start_index; i<end_index; ++i)
+        if (constraint[no]->is_constrained(i)==true)
+          dof_info[no].constrained_dofs.
+          push_back(static_cast<unsigned int>(i-start_index));
     }
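
For orientation, the constrained_dofs cached just above are what operator classes typically retrieve later so that a matrix-vector product acts as the identity on constrained rows. A brief sketch of that use (hedged: get_constrained_dofs() and local_element() are the usual accessors and are assumed here; this hunk itself only fills the cache):

    #include <deal.II/lac/la_parallel_vector.h>
    #include <deal.II/matrix_free/matrix_free.h>

    template <int dim>
    void apply_identity_on_constrained_rows(
      const dealii::MatrixFree<dim, double> &data,
      dealii::LinearAlgebra::distributed::Vector<double> &dst,
      const dealii::LinearAlgebra::distributed::Vector<double> &src)
    {
      // the cell loop skips constrained rows, so copy them through explicitly
      for (const unsigned int i : data.get_constrained_dofs())
        dst.local_element(i) = src.local_element(i);
    }
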
 
   // extract all the global indices associated with the computation, and form
   // the ghost indices
-  std::vector<unsigned int> boundary_cells;
+  std::vector<unsigned int> subdomain_boundary_cells;
   for (unsigned int counter = 0 ; counter < n_active_cells ; ++counter)
     {
-      bool cell_at_boundary = false;
+      bool cell_at_subdomain_boundary =
+        (face_setup.at_processor_boundary.size() > counter &&
+         face_setup.at_processor_boundary[counter]) ||
+        (additional_data.overlap_communication_computation == false &&
+         task_info.n_procs > 1);
+
       for (unsigned int no=0; no<n_fe; ++no)
         {
-          // OK, read indices from standard DoFHandler in the usual way
+          // read indices from standard DoFHandler in the usual way
           if (dof_handlers.active_dof_handler == DoFHandlers::usual &&
               dof_handlers.level == numbers::invalid_unsigned_int)
             {
@@ -607,12 +818,19 @@ void MatrixFree<dim,Number>::initialize_indices
               local_dof_indices.resize (dof_info[no].dofs_per_cell[0]);
               cell_it->get_dof_indices(local_dof_indices);
               dof_info[no].read_dof_indices (local_dof_indices,
-                                             shape_info(no,0,0,0).lexicographic_numbering,
+                                             lexicographic[no][0],
                                              *constraint[no], counter,
                                              constraint_values,
-                                             cell_at_boundary);
+                                             cell_at_subdomain_boundary);
+              if (cell_categorization_enabled)
+                {
+                  AssertIndexRange(cell_it->active_cell_index(),
+                                   additional_data.cell_vectorization_category.size());
+                  dof_info[no].cell_active_fe_index[counter] =
+                    additional_data.cell_vectorization_category[cell_it->active_cell_index()];
+                }
             }
-          // ok, now we are requested to use a level in a MG DoFHandler
+          // we are requested to use a multigrid level
           else if (dof_handlers.active_dof_handler == DoFHandlers::usual &&
                    dof_handlers.level != numbers::invalid_unsigned_int)
             {
@@ -626,11 +844,19 @@ void MatrixFree<dim,Number>::initialize_indices
               local_dof_indices.resize (dof_info[no].dofs_per_cell[0]);
               cell_it->get_mg_dof_indices(local_dof_indices);
               dof_info[no].read_dof_indices (local_dof_indices,
-                                             shape_info(no,0,0,0).lexicographic_numbering,
+                                             lexicographic[no][0],
                                              *constraint[no], counter,
                                              constraint_values,
-                                             cell_at_boundary);
+                                             cell_at_subdomain_boundary);
+              if (cell_categorization_enabled)
+                {
+                  AssertIndexRange(cell_it->active_cell_index(),
+                                   additional_data.cell_vectorization_category.size());
+                  dof_info[no].cell_active_fe_index[counter] =
+                    additional_data.cell_vectorization_category[cell_level_index[counter].second];
+                }
             }
+          // hp case where we additionally need the active FE index of each cell
           else if (dof_handlers.active_dof_handler == DoFHandlers::hp)
             {
               const hp::DoFHandler<dim> *dofh =
@@ -646,10 +872,10 @@ void MatrixFree<dim,Number>::initialize_indices
               local_dof_indices.resize (cell_it->get_fe().dofs_per_cell);
               cell_it->get_dof_indices(local_dof_indices);
               dof_info[no].read_dof_indices (local_dof_indices,
-                                             shape_info(no,0,cell_it->active_fe_index(),0).lexicographic_numbering,
+                                             lexicographic[no][cell_it->active_fe_index()],
                                              *constraint[no], counter,
                                              constraint_values,
-                                             cell_at_boundary);
+                                             cell_at_subdomain_boundary);
             }
           else
             {
@@ -659,29 +885,64 @@ void MatrixFree<dim,Number>::initialize_indices
 
       // if we found dofs on some FE component that belong to other
       // processors, the cell is added to the boundary cells.
-      if (cell_at_boundary == true && counter < cell_level_index_end_local)
-        boundary_cells.push_back(counter);
+      if (cell_at_subdomain_boundary == true && counter < cell_level_index_end_local)
+        subdomain_boundary_cells.push_back(counter);
     }
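
The cell_active_fe_index entries filled from additional_data.cell_vectorization_category in the loop above originate from user code along these lines (a sketch under assumptions: the scalar FE_Q setup and the left/right criterion are invented for illustration; only the two AdditionalData fields referenced in this hunk matter):

    #include <deal.II/base/quadrature_lib.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/fe/mapping_q1.h>
    #include <deal.II/grid/grid_generator.h>
    #include <deal.II/grid/tria.h>
    #include <deal.II/lac/constraint_matrix.h>
    #include <deal.II/matrix_free/matrix_free.h>

    template <int dim>
    void setup_categorized_matrix_free()
    {
      dealii::Triangulation<dim> tria;
      dealii::GridGenerator::hyper_cube(tria);
      tria.refine_global(3);

      dealii::FE_Q<dim>       fe(1);
      dealii::DoFHandler<dim> dof_handler(tria);
      dof_handler.distribute_dofs(fe);

      dealii::ConstraintMatrix constraints;
      constraints.close();

      typename dealii::MatrixFree<dim, double>::AdditionalData additional_data;
      additional_data.cell_vectorization_category.resize(tria.n_active_cells());
      for (const auto &cell : tria.active_cell_iterators())
        // e.g. separate the left from the right half of the domain so that no
        // vectorized cell batch mixes the two categories
        additional_data.cell_vectorization_category[cell->active_cell_index()] =
          (cell->center()[0] < 0.5) ? 0 : 1;
      additional_data.cell_vectorization_categories_strict = true;

      dealii::MatrixFree<dim, double> matrix_free;
      matrix_free.reinit(dealii::MappingQ1<dim>(), dof_handler, constraints,
                         dealii::QGauss<1>(2), additional_data);
    }

    int main()
    {
      setup_categorized_matrix_free<2>();
    }
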
 
   const unsigned int vectorization_length =
     VectorizedArray<Number>::n_array_elements;
   task_info.collect_boundary_cells (cell_level_index_end_local,
                                     n_active_cells, vectorization_length,
-                                    boundary_cells);
+                                    subdomain_boundary_cells);
 
-  // finalize the creation of ghosts
-  for (unsigned int no=0; no<n_fe; ++no)
-    dof_info[no].assign_ghosts (boundary_cells);
+  // Finalize the creation of the ghost indices
+  {
+    std::vector<unsigned int> cells_with_ghosts(subdomain_boundary_cells);
+    for (unsigned int c=cell_level_index_end_local; c<n_active_cells; ++c)
+      cells_with_ghosts.push_back(c);
+    for (unsigned int no=0; no<n_fe; ++no)
+      {
+        if (do_face_integrals &&
+            additional_data.level_mg_handler != numbers::invalid_unsigned_int)
+          {
+            // in case of adaptivity, go through the cells on the next finer
+            // level and check whether we need to get read access to some of
+            // those entries for the mg flux matrices
+            const DoFHandler<dim> &dof_handler = *dof_handlers.dof_handler[no];
+            std::vector<types::global_dof_index> dof_indices;
+            if (additional_data.level_mg_handler + 1 <
+                dof_handler.get_triangulation().n_global_levels())
+              for (typename DoFHandler<dim>::cell_iterator cell=
+                     dof_handler.begin(additional_data.level_mg_handler+1);
+                   cell != dof_handler.end(additional_data.level_mg_handler+1); ++cell)
+                if (cell->level_subdomain_id() == task_info.my_pid)
+                  for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+                    if ((cell->at_boundary(f) == false ||
+                         cell->has_periodic_neighbor(f) == true) &&
+                        cell->level() > cell->neighbor_or_periodic_neighbor(f)->level() &&
+                        cell->neighbor_or_periodic_neighbor(f)->level_subdomain_id() != task_info.my_pid)
+                      {
+                        dof_indices.resize(cell->neighbor_or_periodic_neighbor(f)->get_fe().dofs_per_cell);
+                        cell->neighbor_or_periodic_neighbor(f)->get_mg_dof_indices(dof_indices);
+                        for (unsigned int i=0; i<dof_indices.size(); ++i)
+                          dof_info[no].ghost_dofs.push_back(dof_indices[i]);
+                      }
+          }
+        dof_info[no].assign_ghosts (cells_with_ghosts);
+      }
+  }
 
   std::vector<unsigned int> renumbering;
   std::vector<unsigned char> irregular_cells;
   if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none)
     {
-      const bool strict_categories = dof_handlers.active_dof_handler == DoFHandlers::hp;
+      const bool strict_categories = additional_data.cell_vectorization_categories_strict
+                                     || dof_handlers.active_dof_handler == DoFHandlers::hp;
       unsigned int dofs_per_cell = 0;
       for (unsigned int no=0; no<dof_info.size(); ++no)
         dofs_per_cell = std::max(dofs_per_cell, dof_info[no].dofs_per_cell[0]);
-      task_info.create_blocks_serial(boundary_cells, std::vector<unsigned int>(),
+      task_info.create_blocks_serial(subdomain_boundary_cells,
+                                     face_setup.cells_close_to_boundary,
                                      dofs_per_cell,
                                      dof_info[0].cell_active_fe_index,
                                      strict_categories,
@@ -693,7 +954,9 @@ void MatrixFree<dim,Number>::initialize_indices
       // in order to overlap communication in MPI with computations: Place all
       // cells with ghost indices into one chunk. Also reorder cells so that we
       // can parallelize by threads
-      task_info.initial_setup_blocks_tasks(boundary_cells, renumbering,
+      Assert(additional_data.cell_vectorization_category.empty(),
+             ExcNotImplemented());
+      task_info.initial_setup_blocks_tasks(subdomain_boundary_cells, renumbering,
                                            irregular_cells);
       task_info.guess_block_size (dof_info[0].dofs_per_cell[0]);
 
@@ -702,8 +965,8 @@ void MatrixFree<dim,Number>::initialize_indices
                                    n_macro_cells_before;
 
       unsigned int start_nonboundary = numbers::invalid_unsigned_int;
-
-      if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::partition_color)
+      if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::partition_color ||
+          task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::color)
         {
           // set up partitions. if we just use coloring without partitions, do
           // nothing here, assume all cells to belong to the zero partition (that
@@ -788,6 +1051,9 @@ void MatrixFree<dim,Number>::initialize_indices
 
       DynamicSparsityPattern connectivity;
       connectivity.reinit(task_info.n_active_cells, task_info.n_active_cells);
+      if ((additional_data.mapping_update_flags_inner_faces |
+           additional_data.mapping_update_flags_boundary_faces) != update_default)
+        make_connectivity_graph_faces(connectivity);
       if (task_info.n_active_cells > 0)
         dof_info[0].make_connectivity_graph(task_info, renumbering, connectivity);
 
@@ -823,12 +1089,13 @@ void MatrixFree<dim,Number>::initialize_indices
       .push_back(task_info.cell_partition_data.back()+n_ghost_slots);
     }
 
-  // Finally perform the renumbering of the degree of freedom number data. We
-  // also want to group several cells together to one "macro-cell" for
-  // vectorization (where the arithmetic operations will then be done
-  // simultaneously).
+  // Finally perform the renumbering. We also want to group several cells
+  // together into one batch for SIMD (vectorized) execution (where the
+  // arithmetic operations of several cells will then be done simultaneously).
 #ifdef DEBUG
   {
+    AssertDimension(renumbering.size(),
+                    task_info.n_active_cells + task_info.n_ghost_cells);
     std::vector<unsigned int> sorted_renumbering (renumbering);
     std::sort (sorted_renumbering.begin(), sorted_renumbering.end());
     for (unsigned int i=0; i<sorted_renumbering.size(); ++i)
@@ -836,8 +1103,7 @@ void MatrixFree<dim,Number>::initialize_indices
   }
 #endif
   {
-    std::vector<std::pair<unsigned int,unsigned int> >
-    cell_level_index_old;
+    std::vector<std::pair<unsigned int,unsigned int> > cell_level_index_old;
     cell_level_index.swap (cell_level_index_old);
     cell_level_index.reserve(task_info.cell_partition_data.back()*vectorization_length);
     unsigned int position_cell=0;
@@ -889,11 +1155,306 @@ void MatrixFree<dim,Number>::initialize_indices
                                   constraints[i]->end());
       constraint_pool_row_index.push_back(constraint_pool_data.size());
     }
+
   AssertDimension(constraint_pool_data.size(), length);
   for (unsigned int no=0; no<n_fe; ++no)
     dof_info[no].reorder_cells(task_info, renumbering,
                                constraint_pool_row_index,
-                               irregular_cells, vectorization_length);
+                               irregular_cells);
+
+  // Finally resort the faces and collect several faces for vectorization
+  if ((additional_data.mapping_update_flags_inner_faces |
+       additional_data.mapping_update_flags_boundary_faces) != update_default)
+    {
+      face_setup.generate_faces(dof_handlers.active_dof_handler == DoFHandlers::usual ?
+                                dof_handlers.dof_handler[0]->get_triangulation() :
+                                dof_handlers.hp_dof_handler[0]->get_triangulation(),
+                                cell_level_index, task_info);
+      face_info.faces.clear();
+
+      std::vector<bool> hard_vectorization_boundary(task_info.face_partition_data.size(),
+                                                    false);
+      if (task_info.scheme == internal::MatrixFreeFunctions::TaskInfo::none)
+        hard_vectorization_boundary[task_info.partition_row_index[2]] = true;
+      else
+        for (unsigned int i=0; i<hard_vectorization_boundary.size(); ++i)
+          hard_vectorization_boundary[i] = true;
+
+      internal::MatrixFreeFunctions::
+      collect_faces_vectorization(face_setup.inner_faces,
+                                  hard_vectorization_boundary,
+                                  task_info.face_partition_data,
+                                  face_info.faces);
+      hard_vectorization_boundary.clear();
+      hard_vectorization_boundary.resize(task_info.boundary_partition_data.size(),
+                                         task_info.scheme != internal::MatrixFreeFunctions::TaskInfo::none);
+      internal::MatrixFreeFunctions::
+      collect_faces_vectorization(face_setup.boundary_faces,
+                                  hard_vectorization_boundary,
+                                  task_info.boundary_partition_data,
+                                  face_info.faces);
+      hard_vectorization_boundary.clear();
+      hard_vectorization_boundary.resize(task_info.ghost_face_partition_data.size(),
+                                         false);
+      internal::MatrixFreeFunctions::
+      collect_faces_vectorization(face_setup.inner_ghost_faces,
+                                  hard_vectorization_boundary,
+                                  task_info.ghost_face_partition_data,
+                                  face_info.faces);
+      hard_vectorization_boundary.clear();
+      hard_vectorization_boundary.resize(task_info.refinement_edge_face_partition_data.size(),
+                                         false);
+      internal::MatrixFreeFunctions::
+      collect_faces_vectorization(face_setup.refinement_edge_faces,
+                                  hard_vectorization_boundary,
+                                  task_info.refinement_edge_face_partition_data,
+                                  face_info.faces);
+
+      cell_level_index.resize(cell_level_index.size()
+                              +
+                              VectorizedArray<Number>::n_array_elements *
+                              (task_info.refinement_edge_face_partition_data[1]-
+                               task_info.refinement_edge_face_partition_data[0]));
+
+      for (unsigned int no=0; no<n_fe; ++no)
+        dof_info[no].compute_face_index_compression(face_info.faces);
+
+      // build the inverse map cell_and_face_to_plain_faces from (cell batch,
+      // face number, vectorization lane) back into the faces array
+      face_info.cell_and_face_to_plain_faces.
+      reinit(TableIndices<3>(task_info.cell_partition_data.back(),
+                             GeometryInfo<dim>::faces_per_cell,
+                             VectorizedArray<Number>::n_array_elements),
+             true);
+      face_info.cell_and_face_to_plain_faces.fill(numbers::invalid_unsigned_int);
+      face_info.cell_and_face_boundary_id.
+      reinit(TableIndices<3>(task_info.cell_partition_data.back(),
+                             GeometryInfo<dim>::faces_per_cell,
+                             VectorizedArray<Number>::n_array_elements),
+             true);
+      face_info.cell_and_face_boundary_id.fill(numbers::invalid_boundary_id);
+
+      for (unsigned int f=0; f<task_info.boundary_partition_data.back(); ++f)
+        for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements &&
+             face_info.faces[f].cells_interior[v] != numbers::invalid_unsigned_int; ++v)
+          {
+            TableIndices<3> index(face_info.faces[f].cells_interior[v]/
+                                  VectorizedArray<Number>::n_array_elements,
+                                  face_info.faces[f].interior_face_no,
+                                  face_info.faces[f].cells_interior[v]%
+                                  VectorizedArray<Number>::n_array_elements);
+
+            //Assert(cell_and_face_to_plain_faces(index) == numbers::invalid_unsigned_int,
+            //       ExcInternalError("Should only visit each face once"));
+            face_info.cell_and_face_to_plain_faces(index) =
+              f*VectorizedArray<Number>::n_array_elements + v;
+            if (face_info.faces[f].cells_exterior[v] != numbers::invalid_unsigned_int)
+              {
+                TableIndices<3> index(face_info.faces[f].cells_exterior[v]/
+                                      VectorizedArray<Number>::n_array_elements,
+                                      face_info.faces[f].exterior_face_no,
+                                      face_info.faces[f].cells_exterior[v]%
+                                      VectorizedArray<Number>::n_array_elements);
+                //Assert(cell_and_face_to_plain_faces(index) == numbers::invalid_unsigned_int,
+                //       ExcInternalError("Should only visit each face once"));
+                face_info.cell_and_face_to_plain_faces(index) =
+                  f*VectorizedArray<Number>::n_array_elements + v;
+              }
+            else
+              face_info.cell_and_face_boundary_id(index) =
+                types::boundary_id(face_info.faces[f].exterior_face_no);
+          }
+
+      // compute tighter index sets for various sets of face integrals
+      for (unsigned int no=0; no<n_fe; ++no)
+        {
+          const Utilities::MPI::Partitioner &part = *dof_info[no].vector_partitioner;
+
+          // partitioner 0: no face integrals, simply use the indices present
+          // on the cells
+          unsigned int n_macro_cells_before = *(task_info.cell_partition_data.end()-2);
+          std::vector<types::global_dof_index> ghost_indices;
+          {
+            for (unsigned int cell=0; cell<VectorizedArray<Number>::n_array_elements
+                 *n_macro_cells_before; ++cell)
+              if (cell > 0 && cell_level_index[cell] != cell_level_index[cell-1])
+                {
+                  for (unsigned int i=dof_info[no].row_starts[cell*dof_info[no].start_components.back()].first;
+                       i<dof_info[no].row_starts[(cell+1)*dof_info[no].start_components.back()].first; ++i)
+                    if (dof_info[no].dof_indices[i] > part.local_size())
+                      ghost_indices.push_back(part.local_to_global(dof_info[no].dof_indices[i]));
+                  for (unsigned int i=dof_info[no].row_starts_plain_indices[cell];
+                       i<dof_info[no].row_starts_plain_indices[cell+1]; ++i)
+                    if (dof_info[no].plain_dof_indices[i] > part.local_size())
+                      ghost_indices.push_back(part.local_to_global(dof_info[no].plain_dof_indices[i]));
+                }
+            std::sort(ghost_indices.begin(), ghost_indices.end());
+            ghost_indices.erase(std::unique(ghost_indices.begin(), ghost_indices.end()),
+                                ghost_indices.end());
+            IndexSet compressed_set(part.size());
+            compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
+            compressed_set.subtract_set(dof_info[no].vector_partitioner->locally_owned_range());
+            const bool all_ghosts_equal =
+              Utilities::MPI::min((int)(compressed_set.n_elements() ==
+                                        dof_info[no].vector_partitioner
+                                        ->ghost_indices().n_elements()),
+                                  dof_info[no].vector_partitioner->get_mpi_communicator());
+            if (all_ghosts_equal)
+              dof_info[no].vector_partitioner_face_variants[0] =
+                dof_info[no].vector_partitioner;
+            else
+              {
+                dof_info[no].vector_partitioner_face_variants[0].reset
+                (new Utilities::MPI::Partitioner(part.locally_owned_range(),
+                                                 part.get_mpi_communicator()));
+                const_cast<Utilities::MPI::Partitioner *>
+                (dof_info[no].vector_partitioner_face_variants[0].get())->
+                set_ghost_indices(compressed_set, part.ghost_indices());
+              }
+          }
+
+          // partitioner 1: values on faces
+          {
+            bool all_nodal = true;
+            for (unsigned int c=0; c<dof_info[no].n_base_elements; ++c)
+              if (!shape_info(dof_info[no].global_base_element_offset+c,0,0,0).
+                  nodal_at_cell_boundaries)
+                all_nodal = false;
+            if (all_nodal == false)
+              dof_info[no].vector_partitioner_face_variants[1] =
+                dof_info[no].vector_partitioner;
+            else
+              {
+                for (unsigned int f=0; f<n_inner_face_batches(); ++f)
+                  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements &&
+                       face_info.faces[f].cells_interior[v] != numbers::invalid_unsigned_int; ++v)
+                    {
+                      AssertIndexRange(face_info.faces[f].cells_interior[v],
+                                       n_macro_cells_before*
+                                       VectorizedArray<Number>::n_array_elements);
+                      if (dof_info[no].index_storage_variants[1][f] >=
+                          internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::contiguous
+                          &&
+                          dof_info[no].dof_indices_contiguous[2][face_info.faces[f].cells_exterior[v]] >=
+                          part.local_size())
+                        {
+                          const unsigned int p = face_info.faces[f].cells_exterior[v];
+                          const unsigned int stride = 1;
+                          unsigned int i=0;
+                          for (unsigned int e=0; e<dof_info[no].n_base_elements; ++e)
+                            for (unsigned int c=0; c<dof_info[no].n_components[e]; ++c)
+                              {
+                                const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number>> &shape
+                                    = shape_info(dof_info[no].global_base_element_offset+e,0,0,0);
+                                for (unsigned int j=0; j<shape.dofs_per_component_on_face; ++j)
+                                  ghost_indices.push_back(part.local_to_global
+                                                          (dof_info[no].dof_indices_contiguous[2][p]+
+                                                           i+
+                                                           shape.face_to_cell_index_nodal
+                                                           (face_info.faces[f].exterior_face_no,j)*
+                                                           stride));
+                                i += shape.dofs_per_component_on_cell*stride;
+                              }
+                          AssertDimension(i, dof_info[no].dofs_per_cell[0]*stride);
+                        }
+                    }
+                std::sort(ghost_indices.begin(), ghost_indices.end());
+                ghost_indices.erase(std::unique(ghost_indices.begin(), ghost_indices.end()),
+                                    ghost_indices.end());
+                IndexSet compressed_set(part.size());
+                compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
+                compressed_set.subtract_set(dof_info[no].vector_partitioner->locally_owned_range());
+                const bool all_ghosts_equal =
+                  Utilities::MPI::min((int)(compressed_set.n_elements() ==
+                                            dof_info[no].vector_partitioner->ghost_indices().n_elements()),
+                                      dof_info[no].vector_partitioner->get_mpi_communicator());
+                if (all_ghosts_equal)
+                  dof_info[no].vector_partitioner_face_variants[1] =
+                    dof_info[no].vector_partitioner;
+                else
+                  {
+                    dof_info[no].vector_partitioner_face_variants[1].reset
+                    (new Utilities::MPI::Partitioner(part.locally_owned_range(),
+                                                     part.get_mpi_communicator()));
+                    const_cast<Utilities::MPI::Partitioner *>
+                    (dof_info[no].vector_partitioner_face_variants[1].get())
+                    ->set_ghost_indices(compressed_set, part.ghost_indices());
+                  }
+              }
+          }
+
+          // partitioner 2: values and gradients on faces
+          {
+            bool all_hermite = true;
+            for (unsigned int c=0; c<dof_info[no].n_base_elements; ++c)
+              if (shape_info(dof_info[no].global_base_element_offset+c,0,0,0).element_type
+                  != internal::MatrixFreeFunctions::tensor_symmetric_hermite)
+                all_hermite = false;
+            if (all_hermite == false)
+              dof_info[no].vector_partitioner_face_variants[2] =
+                dof_info[no].vector_partitioner;
+            else
+              {
+                for (unsigned int f=0; f<n_inner_face_batches(); ++f)
+                  for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements &&
+                       face_info.faces[f].cells_interior[v] != numbers::invalid_unsigned_int; ++v)
+                    {
+                      AssertIndexRange(face_info.faces[f].cells_interior[v],
+                                       n_macro_cells_before*VectorizedArray<Number>::n_array_elements);
+                      if (dof_info[no].index_storage_variants[1][f] >=
+                          internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::contiguous
+                          &&
+                          dof_info[no].dof_indices_contiguous[2][face_info.faces[f].cells_exterior[v]] >=
+                          part.local_size())
+                        {
+                          const unsigned int p = face_info.faces[f].cells_exterior[v];
+                          const unsigned int stride = 1;
+                          unsigned int i=0;
+                          for (unsigned int e=0; e<dof_info[no].n_base_elements; ++e)
+                            for (unsigned int c=0; c<dof_info[no].n_components[e]; ++c)
+                              {
+                                const internal::MatrixFreeFunctions::ShapeInfo<VectorizedArray<Number>> &shape
+                                    = shape_info(dof_info[no].global_base_element_offset+e,0,0,0);
+                                for (unsigned int j=0; j<2*shape.dofs_per_component_on_face; ++j)
+                                  ghost_indices.
+                                  push_back(part.local_to_global(dof_info[no].dof_indices_contiguous[2][p]
+                                                                 +i+
+                                                                 shape.face_to_cell_index_hermite
+                                                                 (face_info.faces[f].exterior_face_no,j)*stride));
+                                i += shape.dofs_per_component_on_cell*stride;
+                              }
+                          AssertDimension(i, dof_info[no].dofs_per_cell[0]*stride);
+                        }
+                    }
+                std::sort(ghost_indices.begin(), ghost_indices.end());
+                ghost_indices.erase(std::unique(ghost_indices.begin(), ghost_indices.end()),
+                                    ghost_indices.end());
+                IndexSet compressed_set(part.size());
+                compressed_set.add_indices(ghost_indices.begin(), ghost_indices.end());
+                compressed_set.subtract_set(dof_info[no].vector_partitioner->locally_owned_range());
+                const bool all_ghosts_equal =
+                  Utilities::MPI::min((int)(compressed_set.n_elements() ==
+                                            dof_info[no].vector_partitioner->ghost_indices().n_elements()),
+                                      dof_info[no].vector_partitioner->get_mpi_communicator());
+                if (all_ghosts_equal)
+                  dof_info[no].vector_partitioner_face_variants[2] =
+                    dof_info[no].vector_partitioner;
+                else
+                  {
+                    dof_info[no].vector_partitioner_face_variants[2].reset
+                    (new Utilities::MPI::Partitioner(part.locally_owned_range(),
+                                                     part.get_mpi_communicator()));
+                    const_cast<Utilities::MPI::Partitioner *>(dof_info[no].vector_partitioner_face_variants[2].get())
+                    ->set_ghost_indices(compressed_set, part.ghost_indices());
+                  }
+              }
+          }
+        }
+    }
+
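
All three vector_partitioner_face_variants built above follow the same pattern. The condensed sketch below restates it using only calls that already appear in the patch; the collective agreement via Utilities::MPI::min is omitted for brevity, and make_restricted_partitioner is a made-up helper name:

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/partitioner.h>

    #include <memory>
    #include <vector>

    // Return a partitioner with the same locally owned range as `part` but a
    // ghost set restricted to `needed_ghosts`; if nothing shrinks, keep `part`.
    std::shared_ptr<const dealii::Utilities::MPI::Partitioner>
    make_restricted_partitioner(
      const std::shared_ptr<const dealii::Utilities::MPI::Partitioner> &part,
      const std::vector<dealii::types::global_dof_index> &needed_ghosts)
    {
      dealii::IndexSet compressed_set(part->size());
      compressed_set.add_indices(needed_ghosts.begin(), needed_ghosts.end());
      compressed_set.subtract_set(part->locally_owned_range());
      if (compressed_set.n_elements() == part->ghost_indices().n_elements())
        return part;

      auto tighter = std::make_shared<dealii::Utilities::MPI::Partitioner>(
        part->locally_owned_range(), part->get_mpi_communicator());
      // the second argument names the (larger) ghost set of the vectors this
      // partitioner will be applied to, so local numbering stays consistent
      tighter->set_ghost_indices(compressed_set, part->ghost_indices());
      return tighter;
    }
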
+  for (unsigned int no=0; no<n_fe; ++no)
+    dof_info[no].compute_vector_zero_access_pattern (task_info,
+                                                     face_info.faces);
 
   indices_are_initialized = true;
 }
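
All of this index setup serves the new face loop on the user side. The class below is a minimal sketch of the kind of operator the feature enables (assumptions: the class name, the mass-operator example and the exact loop() and FEEvaluation calls are illustrative guesses based on the feature this commit introduces, not code contained in the patch). A DG mass operator has no face coupling, so the two face callbacks may stay empty while still exercising the three-callback interface:

    #include <deal.II/lac/la_parallel_vector.h>
    #include <deal.II/matrix_free/fe_evaluation.h>
    #include <deal.II/matrix_free/matrix_free.h>

    #include <utility>

    template <int dim, int fe_degree>
    class MassOperatorDG
    {
    public:
      using VectorType = dealii::LinearAlgebra::distributed::Vector<double>;

      MassOperatorDG(const dealii::MatrixFree<dim, double> &data) : data(data) {}

      void vmult(VectorType &dst, const VectorType &src) const
      {
        dst = 0.;
        // cell, inner-face and boundary-face work are handed over as three
        // callbacks; MatrixFree schedules them together with the ghost exchange
        data.loop(&MassOperatorDG::local_apply_cell,
                  &MassOperatorDG::local_apply_face,
                  &MassOperatorDG::local_apply_boundary_face,
                  this, dst, src);
      }

    private:
      void local_apply_cell(
        const dealii::MatrixFree<dim, double> &data,
        VectorType &dst,
        const VectorType &src,
        const std::pair<unsigned int, unsigned int> &cell_range) const
      {
        dealii::FEEvaluation<dim, fe_degree> phi(data);
        for (unsigned int cell = cell_range.first; cell < cell_range.second; ++cell)
          {
            phi.reinit(cell);
            phi.read_dof_values(src);
            phi.evaluate(true, false);
            for (unsigned int q = 0; q < phi.n_q_points; ++q)
              phi.submit_value(phi.get_value(q), q);
            phi.integrate(true, false);
            phi.distribute_local_to_global(dst);
          }
      }

      void local_apply_face(
        const dealii::MatrixFree<dim, double> &,
        VectorType &,
        const VectorType &,
        const std::pair<unsigned int, unsigned int> &) const
      {
        // a numerical flux would be assembled here with FEFaceEvaluation,
        // reinit()-ed on the interior and exterior side of each face batch
      }

      void local_apply_boundary_face(
        const dealii::MatrixFree<dim, double> &,
        VectorType &,
        const VectorType &,
        const std::pair<unsigned int, unsigned int> &) const
      {}

      const dealii::MatrixFree<dim, double> &data;
    };
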
@@ -909,17 +1470,158 @@ void MatrixFree<dim,Number>::clear()
   task_info.clear();
   dof_handlers.dof_handler.clear();
   dof_handlers.hp_dof_handler.clear();
+  face_info.clear();
   indices_are_initialized = false;
   mapping_is_initialized  = false;
 }
 
 
 
+#ifdef DEAL_II_WITH_THREADS
+
+namespace internal
+{
+  namespace
+  {
+    void fill_index_subrange(const unsigned int begin,
+                             const unsigned int end,
+                             const std::vector<std::pair<unsigned int,unsigned int> > &cell_level_index,
+                             tbb::concurrent_unordered_map<std::pair<unsigned int,unsigned int>, unsigned int> &map)
+    {
+      if (cell_level_index.empty())
+        return;
+      unsigned int cell = begin;
+      if (cell == 0)
+        map.insert(std::make_pair(cell_level_index[cell++], 0U));
+      for ( ; cell<end; ++cell)
+        if (cell_level_index[cell] != cell_level_index[cell-1])
+          map.insert(std::make_pair(cell_level_index[cell], cell));
+    }
+
+    template <int dim>
+    void fill_connectivity_subrange(const unsigned int begin,
+                                    const unsigned int end,
+                                    const dealii::Triangulation<dim> &tria,
+                                    const std::vector<std::pair<unsigned int,unsigned int> > &cell_level_index,
+                                    const tbb::concurrent_unordered_map<std::pair<unsigned int,unsigned int>, unsigned int> &map,
+                                    DynamicSparsityPattern &connectivity_direct)
+    {
+      std::vector<types::global_dof_index> new_indices;
+      for (unsigned int cell=begin; cell<end; ++cell)
+        {
+          new_indices.clear();
+          typename dealii::Triangulation<dim>::cell_iterator dcell
+          (&tria, cell_level_index[cell].first, cell_level_index[cell].second);
+          for (unsigned int f=0; f<GeometryInfo<dim>::faces_per_cell; ++f)
+            {
+              // Only inner faces couple different cells
+              if (dcell->at_boundary() == false &&
+                  dcell->neighbor_or_periodic_neighbor(f)->level_subdomain_id() ==
+                  dcell->level_subdomain_id())
+                {
+                  std::pair<unsigned int,unsigned int> level_index
+                  (dcell->neighbor_or_periodic_neighbor(f)->level(),
+                   dcell->neighbor_or_periodic_neighbor(f)->index());
+                  auto it = map.find(level_index);
+                  if (it != map.end())
+                    {
+                      const unsigned int neighbor_cell = it->second;
+                      if (neighbor_cell != cell)
+                        new_indices.push_back(neighbor_cell);
+                    }
+                }
+            }
+          std::sort(new_indices.begin(), new_indices.end());
+          connectivity_direct.add_entries(cell, new_indices.begin(),
+                                          std::unique(new_indices.begin(),
+                                                      new_indices.end()));
+        }
+    }
+
+    void fill_connectivity_indirect_subrange(const unsigned int begin,
+                                             const unsigned int end,
+                                             const DynamicSparsityPattern &connectivity_direct,
+                                             DynamicSparsityPattern &connectivity)
+    {
+      std::vector<types::global_dof_index> new_indices;
+      for (unsigned int block=begin; block<end; ++block)
+        {
+          new_indices.clear();
+          for (DynamicSparsityPattern::iterator
+               it = connectivity_direct.begin(block);
+               it != connectivity_direct.end(block); ++it)
+            {
+              new_indices.push_back(it->column());
+              for (DynamicSparsityPattern::iterator it_neigh =
+                     connectivity_direct.begin(it->column());
+                   it_neigh != connectivity_direct.end(it->column()); ++it_neigh)
+                if (it_neigh->column() != block)
+                  new_indices.push_back(it_neigh->column());
+            }
+          std::sort(new_indices.begin(), new_indices.end());
+          connectivity.add_entries(block, new_indices.begin(),
+                                   std::unique(new_indices.begin(),new_indices.end()));
+        }
+    }
+  }
+}
+
+#endif
+
+
+
+template <int dim, typename Number>
+void MatrixFree<dim,Number>::make_connectivity_graph_faces
+(DynamicSparsityPattern &connectivity)
+{
+  (void)connectivity;
+#ifdef DEAL_II_WITH_THREADS
+  // step 1: build a map between the cell index used in the matrix-free
+  // context and the (level, index) pair used in the triangulation
+  tbb::concurrent_unordered_map<std::pair<unsigned int,unsigned int>, unsigned int> map;
+  parallel::apply_to_subranges(0, cell_level_index.size(),
+                               std::bind(&internal::fill_index_subrange,
+                                         std::placeholders::_1, std::placeholders::_2,
+                                         std::cref(cell_level_index),
+                                         std::ref(map)), 50);
+
+  // step 2: make a list, for each cell, of the other cells that write to it
+  // through the faces associated with it
+  DynamicSparsityPattern connectivity_direct(connectivity.n_rows(),
+                                             connectivity.n_cols());
+  const Triangulation<dim> &tria =
+    dof_handlers.active_dof_handler == DoFHandlers::usual ?
+    dof_handlers.dof_handler[0]->get_triangulation() :
+    dof_handlers.hp_dof_handler[0]->get_triangulation();
+  parallel::apply_to_subranges(0, task_info.n_active_cells,
+                               std::bind(&internal::fill_connectivity_subrange<dim>,
+                                         std::placeholders::_1, std::placeholders::_2,
+                                         std::cref(tria),
+                                         std::cref(cell_level_index),
+                                         std::cref(map),
+                                         std::ref(connectivity_direct)),
+                               20);
+  connectivity_direct.symmetrize();
+
+  // step 3: also include interactions with neighbors one layer away, because
+  // a face may be scheduled together with either of its two adjacent cells
+  parallel::apply_to_subranges(0, task_info.n_active_cells,
+                               std::bind(&internal::fill_connectivity_indirect_subrange,
+                                         std::placeholders::_1, std::placeholders::_2,
+                                         std::cref(connectivity_direct),
+                                         std::ref(connectivity)),
+                               20);
+#endif
+}
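
To make the 'one layer away' step concrete, here is a tiny self-contained illustration (the three-cell chain is a made-up example, independent of the triangulation code above) of what the indirect pass adds on top of the direct face connectivity:

    #include <deal.II/lac/dynamic_sparsity_pattern.h>

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
      // three cells in a row: 0-1 and 1-2 share a face
      dealii::DynamicSparsityPattern direct(3, 3);
      direct.add(0, 1);
      direct.add(1, 0);
      direct.add(1, 2);
      direct.add(2, 1);

      // the indirect graph additionally connects 0 and 2, because the face
      // between cells 1 and 2 may be scheduled together with either of them
      dealii::DynamicSparsityPattern indirect(3, 3);
      std::vector<dealii::types::global_dof_index> new_indices;
      for (unsigned int row = 0; row < 3; ++row)
        {
          new_indices.clear();
          for (auto it = direct.begin(row); it != direct.end(row); ++it)
            {
              new_indices.push_back(it->column());
              for (auto it2 = direct.begin(it->column());
                   it2 != direct.end(it->column()); ++it2)
                if (it2->column() != row)
                  new_indices.push_back(it2->column());
            }
          std::sort(new_indices.begin(), new_indices.end());
          indirect.add_entries(row, new_indices.begin(),
                               std::unique(new_indices.begin(),
                                           new_indices.end()));
        }

      std::cout << "row 0 couples to: ";
      for (auto it = indirect.begin(0); it != indirect.end(0); ++it)
        std::cout << it->column() << ' ';   // prints: 1 2
      std::cout << std::endl;
    }
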
+
+
+
 template <int dim, typename Number>
 std::size_t MatrixFree<dim,Number>::memory_consumption () const
 {
   std::size_t memory = MemoryConsumption::memory_consumption (dof_info);
   memory += MemoryConsumption::memory_consumption (cell_level_index);
+  memory += MemoryConsumption::memory_consumption (face_info);
   memory += MemoryConsumption::memory_consumption (shape_info);
   memory += MemoryConsumption::memory_consumption (constraint_pool_data);
   memory += MemoryConsumption::memory_consumption (constraint_pool_row_index);
@@ -930,15 +1632,22 @@ std::size_t MatrixFree<dim,Number>::memory_consumption () const
 }
 
 
+
 template <int dim, typename Number>
 template <typename StreamType>
 void MatrixFree<dim,Number>::print_memory_consumption (StreamType &out) const
 {
-  out << "  Memory cell FE operator total: --> ";
+  out << "  Memory matrix-free data total: --> ";
   task_info.print_memory_statistics (out, memory_consumption());
   out << "   Memory cell index:                ";
   task_info.print_memory_statistics
   (out, MemoryConsumption::memory_consumption (cell_level_index));
+  if (Utilities::MPI::sum(face_info.faces.size(), task_info.communicator) > 0)
+    {
+      out << "   Memory face indicators:           ";
+      task_info.print_memory_statistics
+      (out, MemoryConsumption::memory_consumption (face_info.faces));
+    }
   for (unsigned int j=0; j<dof_info.size(); ++ j)
     {
       out << "   Memory DoFInfo component "<< j << std::endl;
index a2223d7ce483797863d3c529f02497e425272460..92d4cd388b06f626adc6b633c709453325be2027 100644 (file)
 INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR})
 
 SET(_src
-  matrix_free.cc
   evaluation_selector.cc
+  mapping_info.cc
+  matrix_free.cc
   task_info.cc
   )
 
 SET(_inst
-  matrix_free.inst.in
   evaluation_selector.inst.in
+  mapping_info.inst.in
+  matrix_free.inst.in
   )
 
 FILE(GLOB _header
diff --git a/source/matrix_free/mapping_info.cc b/source/matrix_free/mapping_info.cc
new file mode 100644 (file)
index 0000000..c67d7e9
--- /dev/null
@@ -0,0 +1,29 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/matrix_free/mapping_info.templates.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+
+#include <iostream>
+
+DEAL_II_NAMESPACE_OPEN
+
+#include "mapping_info.inst"
+
+template struct internal::MatrixFreeFunctions::FPArrayComparator<double>;
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/matrix_free/mapping_info.inst.in b/source/matrix_free/mapping_info.inst.in
new file mode 100644 (file)
index 0000000..a464948
--- /dev/null
@@ -0,0 +1,39 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+for (deal_II_dimension : DIMENSIONS)
+{
+    template struct internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,double>;
+    template struct internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,float>;
+
+    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension,deal_II_dimension,double>;
+    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension,deal_II_dimension,float>;
+#if deal_II_dimension > 1
+    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension-1,deal_II_dimension,double>;
+    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension-1,deal_II_dimension,float>;
+#endif
+
+    template void internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,double>::
+    print_memory_consumption<std::ostream> (std::ostream &, const TaskInfo&) const;
+    template void internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,double>::
+    print_memory_consumption<ConditionalOStream> (ConditionalOStream &,const TaskInfo&) const;
+
+    template void internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,float>::
+    print_memory_consumption<std::ostream> (std::ostream &, const TaskInfo&) const;
+    template void internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,float>::
+    print_memory_consumption<ConditionalOStream> (ConditionalOStream &, const TaskInfo&) const;
+
+}
index 882be59172c4ad99e86a968442506d722a33feec..755bbe47ee09d3eeb83173bd9772dcc80df5d884 100644 (file)
@@ -29,14 +29,6 @@ for (deal_II_dimension : DIMENSIONS)
     template void MatrixFree<deal_II_dimension,float>::
     print_memory_consumption<ConditionalOStream> (ConditionalOStream &) const;
 
-    template struct internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,double>;
-    template struct internal::MatrixFreeFunctions::MappingInfo<deal_II_dimension,float>;
-
-    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension,deal_II_dimension,double>;
-    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension,deal_II_dimension,float>;
-    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension-1,deal_II_dimension,double>;
-    template struct internal::MatrixFreeFunctions::MappingInfoStorage<deal_II_dimension-1,deal_II_dimension,float>;
-
 #ifndef DEAL_II_MSVC
     template
     void
index ee3a90ad2fbee6fbcef1de5a71dc4204d5235a7b..6538fda43eb6f6f26f732e82410918ac8d3a4bf4 100644 (file)
@@ -208,6 +208,9 @@ void do_test (const unsigned int parallel_option)
 template <int dim, int fe_degree>
 void test ()
 {
+  // use more threads than usual to stress components a bit more
+  MultithreadInfo::set_thread_limit(7);
+
   // 'misuse' fe_degree for setting the parallel
   // option here
   unsigned int parallel_option = 0;

In the beginning the Universe was created. This has made a lot of people very angry and has been widely regarded as a bad move.

Douglas Adams


Typeset in Trocchi and Trocchi Bold Sans Serif.