#include <deal.II/base/thread_management.h>
#include <deal.II/base/utilities.h>
+#include <deal.II/fe/fe_dgq.h>
#include <deal.II/fe/fe_nothing.h>
#include <deal.II/fe/fe_values.h>
-#include <deal.II/fe/mapping_q1.h>
+#include <deal.II/fe/mapping_q_generic.h>
+#include <deal.II/matrix_free/evaluation_kernels.h>
+#include <deal.II/matrix_free/evaluation_selector.h>
#include <deal.II/matrix_free/mapping_info.h>
-
DEAL_II_NAMESPACE_OPEN
face_data_by_cells.clear();
cell_type.clear();
face_type.clear();
+ mapping = nullptr;
}
update_default);
}
- // Could call these functions in parallel, but not useful because the
- // work inside is nicely split up already
- initialize_cells(tria, cells, active_fe_index, mapping);
- initialize_faces(tria, cells, face_info.faces, mapping);
- initialize_faces_by_cells(tria, cells, mapping);
+      // If we have no hp adaptivity (active_fe_index is empty), the cell
+      // range is not empty, and the mapping is MappingQGeneric or a class
+      // derived from it, we can use the fast path through compute_mapping_q().
+ if (active_fe_index.empty() && !cells.empty() &&
+ dynamic_cast<const MappingQGeneric<dim> *>(&mapping))
+ compute_mapping_q(tria, cells, face_info.faces);
+ else
+ {
+ // Could call these functions in parallel, but not useful because
+ // the work inside is nicely split up already
+ initialize_cells(tria, cells, active_fe_index, mapping);
+ initialize_faces(tria, cells, face_info.faces, mapping);
+ initialize_faces_by_cells(tria, cells, mapping);
+ }
}
this->mapping = &mapping;
- // Could call these functions in parallel, but not useful because the
- // work inside is nicely split up already
- initialize_cells(tria, cells, active_fe_index, mapping);
- initialize_faces(tria, cells, face_info.faces, mapping);
- initialize_faces_by_cells(tria, cells, mapping);
+ if (active_fe_index.empty() && !cells.empty() &&
+ dynamic_cast<const MappingQGeneric<dim> *>(&mapping))
+ compute_mapping_q(tria, cells, face_info.faces);
+ else
+ {
+ // Could call these functions in parallel, but not useful because
+ // the work inside is nicely split up already
+ initialize_cells(tria, cells, active_fe_index, mapping);
+ initialize_faces(tria, cells, face_info.faces, mapping);
+ initialize_faces_by_cells(tria, cells, mapping);
+ }
}
/* ------------------------- initialization of cells ------------------- */
+ // Copy a vectorized array of one type to another type
+ template <typename VectorizedArrayType1, typename VectorizedArrayType2>
+ inline DEAL_II_ALWAYS_INLINE void
+ store_vectorized_array(const VectorizedArrayType1 value,
+ const unsigned int offset,
+ VectorizedArrayType2 & result)
+ {
+ static_assert(VectorizedArrayType2::n_array_elements >=
+ VectorizedArrayType1::n_array_elements,
+ "Cannot convert to vectorized array of wider number type");
+
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0; v < VectorizedArrayType1::n_array_elements; ++v)
+ result[offset + v] = value[v];
+ }
+
+
+
// Namespace with implementation of extraction of values on cell
// range
namespace ExtractCellHelper
}
}
+    // For second derivatives on the real cell, we need the gradient of the
+    // inverse Jacobian J. This involves some calculus and is done
+    // vectorized. If L is the gradient of the Jacobian on the unit cell,
+    // the gradient of the inverse is given by (multidimensional calculus)
+    // -J * (J * L) * J (the third J is because we need to transform the
+    // gradient L from the unit to the real cell, and then apply the
+    // inverse Jacobian). Compare this to the 1D case j(x) = 1/k(phi(x)),
+    // where j = phi' is the inverse of the Jacobian and k is the Jacobian
+    // on the unit cell. Then j' = -phi' k'/k^2 = -j k' j^2.
+ template <int dim, typename Number>
+ Tensor<1, dim *(dim + 1) / 2, Tensor<1, dim, Number>>
+ process_jacobian_gradient(const Tensor<2, dim, Number> &inv_jac,
+ const Tensor<3, dim, Number> &jac_grad)
+ {
+ Number inv_jac_grad[dim][dim][dim];
+
+ // compute: inv_jac_grad = J*grad_unit(J^-1)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ for (unsigned int f = 0; f < dim; ++f)
+ {
+ inv_jac_grad[f][e][d] = (inv_jac[f][0] * jac_grad[d][e][0]);
+ for (unsigned int g = 1; g < dim; ++g)
+ inv_jac_grad[f][e][d] += (inv_jac[f][g] * jac_grad[d][e][g]);
+ }
+
+ // compute: transpose (-jac * jac_grad[d] * jac)
+ Number tmp[dim];
+ Number grad_jac_inv[dim][dim][dim];
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ {
+ for (unsigned int f = 0; f < dim; ++f)
+ {
+ tmp[f] = Number();
+ for (unsigned int g = 0; g < dim; ++g)
+ tmp[f] -= inv_jac_grad[d][f][g] * inv_jac[g][e];
+ }
+
+ // needed for non-diagonal part of Jacobian grad
+ for (unsigned int f = 0; f < dim; ++f)
+ {
+ grad_jac_inv[f][d][e] = inv_jac[f][0] * tmp[0];
+ for (unsigned int g = 1; g < dim; ++g)
+ grad_jac_inv[f][d][e] += inv_jac[f][g] * tmp[g];
+ }
+ }
+
+ Tensor<1, dim *(dim + 1) / 2, Tensor<1, dim, Number>> result;
+
+ // the diagonal part of Jacobian gradient comes first
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ result[d][e] = grad_jac_inv[d][d][e];
+
+ // then the upper-diagonal part
+ for (unsigned int d = 0, count = 0; d < dim; ++d)
+ for (unsigned int e = d + 1; e < dim; ++e, ++count)
+ for (unsigned int f = 0; f < dim; ++f)
+ result[dim + count][f] = grad_jac_inv[d][e][f];
+ return result;
+ }
+
/**
* Helper function called internally during the initialize function.
*/
{
FE_Nothing<dim> dummy_fe;
- Tensor<3, dim, VectorizedArrayType> jac_grad, grad_jac_inv;
- Tensor<1, dim, VectorizedArrayType> tmp;
-
// when we make comparisons about the size of Jacobians we need to
// know the approximate size of typical entries in Jacobians. We need
// to fix the Jacobian size once and for all. We choose the diameter
(update_flags & update_quadrature_points ? update_quadrature_points :
update_default);
- std::vector<std::vector<unsigned int>> n_q_points_1d(fe_values.size()),
- step_size_cartesian(fe_values.size());
- for (unsigned int my_q = 0; my_q < fe_values.size(); ++my_q)
- {
- n_q_points_1d[my_q].resize(
- mapping_info.cell_data[my_q].descriptor.size());
- step_size_cartesian[my_q].resize(n_q_points_1d[my_q].size());
- for (unsigned int hpq = 0; hpq < n_q_points_1d[my_q].size(); ++hpq)
- {
- n_q_points_1d[my_q][hpq] = mapping_info.cell_data[my_q]
- .descriptor[hpq]
- .quadrature_1d.size();
-
- // To walk on the diagonal for lexicographic ordering, we have
- // to jump one index ahead in each direction. For direction 0,
- // this is just the next point, for direction 1, it means adding
- // n_q_points_1d, and so on.
- step_size_cartesian[my_q][hpq] = 0;
- unsigned int factor = 1;
- for (unsigned int d = 0; d < dim; ++d)
- {
- step_size_cartesian[my_q][hpq] += factor;
- factor *= n_q_points_1d[my_q][hpq];
- }
- }
- }
-
const unsigned int end_cell = std::min(mapping_info.cell_type.size(),
std::size_t(cell_range.second));
// loop over given cells
data.first[my_q].jacobians[0].push_back(inv_jac);
if (update_flags & update_jacobian_grads)
- {
- // for second derivatives on the real cell, need
- // also the gradient of the inverse Jacobian J. This
- // involves some calculus and is done
- // vectorized. This is very cheap compared to what
- // fe_values does (in early 2011). If L is the
- // gradient of the jacobian on the unit cell, the
- // gradient of the inverse is given by
- // (multidimensional calculus) - J * (J * L) * J
- // (the third J is because we need to transform the
- // gradient L from the unit to the real cell, and
- // then apply the inverse Jacobian). Compare this
- // with 1D with j(x) = 1/k(phi(x)), where j = phi'
- // is the inverse of the jacobian and k is the
- // derivative of the jacobian on the unit cell. Then
- // j' = phi' k'/k^2 = j k' j^2.
-
- // compute: jac_grad = J*grad_unit(J^-1)
- for (unsigned int d = 0; d < dim; ++d)
- for (unsigned int e = 0; e < dim; ++e)
- for (unsigned int f = 0; f < dim; ++f)
- {
- jac_grad[f][e][d] =
- (inv_jac[f][0] * jacobian_grad[d][e][0]);
- for (unsigned int g = 1; g < dim; ++g)
- jac_grad[f][e][d] +=
- (inv_jac[f][g] * jacobian_grad[d][e][g]);
- }
-
- // compute: transpose (-jac * jac_grad[d] * jac)
- for (unsigned int d = 0; d < dim; ++d)
- for (unsigned int e = 0; e < dim; ++e)
- {
- for (unsigned int f = 0; f < dim; ++f)
- {
- tmp[f] = VectorizedArrayType();
- for (unsigned int g = 0; g < dim; ++g)
- tmp[f] -=
- jac_grad[d][f][g] * inv_jac[g][e];
- }
-
- // needed for non-diagonal part of Jacobian
- // grad
- for (unsigned int f = 0; f < dim; ++f)
- {
- grad_jac_inv[f][d][e] =
- inv_jac[f][0] * tmp[0];
- for (unsigned int g = 1; g < dim; ++g)
- grad_jac_inv[f][d][e] +=
- inv_jac[f][g] * tmp[g];
- }
- }
-
- // the diagonal part of Jacobian gradient comes first
- Tensor<1,
- dim *(dim + 1) / 2,
- Tensor<1, dim, VectorizedArrayType>>
- final_grad;
- for (unsigned int d = 0; d < dim; ++d)
- for (unsigned int e = 0; e < dim; ++e)
- final_grad[d][e] = grad_jac_inv[d][d][e];
-
- // then the upper-diagonal part
- for (unsigned int d = 0, count = 0; d < dim; ++d)
- for (unsigned int e = d + 1; e < dim; ++e, ++count)
- for (unsigned int f = 0; f < dim; ++f)
- final_grad[dim + count][f] =
- grad_jac_inv[d][e][f];
- data.first[my_q].jacobian_gradients[0].push_back(
- final_grad);
- }
+ data.first[my_q].jacobian_gradients[0].push_back(
+ process_jacobian_gradient(inv_jac, jacobian_grad));
}
}
if (update_flags & update_quadrature_points)
{
// eventually we turn to the quadrature points that we can
- // compress in case we have Cartesian cells. we also need to
+ // compress in case we have affine cells. we also need to
// reorder them into arrays of vectorized data types. first
// go through the cells and find out how much memory we need
// to allocate for the quadrature points. We store
- // n_q_points for all cells but Cartesian cells. For
- // Cartesian cells, only need to store the values on a
- // diagonal through the cell (n_q_points_1d). This will give
- // (with some little indexing) the location of all
- // quadrature points.
+                  // n_q_points for general cells and a single value for
+                  // Cartesian and affine cells (namely the image of the
+                  // origin of the reference cell)
const unsigned int old_size =
data.first[my_q].quadrature_points.size();
data.first[my_q].quadrature_point_offsets.push_back(old_size);
- if (mapping_info.get_cell_type(cell) == cartesian)
+ if (mapping_info.get_cell_type(cell) < general)
{
- for (unsigned int q = 0;
- q < n_q_points_1d[my_q][fe_index];
- ++q)
+ Point<dim, VectorizedArrayType> quad_point;
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
{
- Point<dim, VectorizedArrayType> quad_point;
+ typename dealii::Triangulation<dim>::cell_iterator
+ cell_it(
+ &tria,
+ cells[cell *
+ VectorizedArrayType::n_array_elements +
+ v]
+ .first,
+ cells[cell *
+ VectorizedArrayType::n_array_elements +
+ v]
+ .second);
+ const Point<dim> p =
+ mapping.transform_unit_to_real_cell(cell_it,
+ Point<dim>());
for (unsigned int d = 0; d < dim; ++d)
- quad_point[d] =
- cell_data.quadrature_points
- [q * step_size_cartesian[my_q][fe_index]][d];
- data.first[my_q].quadrature_points.push_back(
- quad_point);
+ quad_point[d][v] = p[d];
}
+ data.first[my_q].quadrature_points.push_back(quad_point);
}
else
{
{
const unsigned int cell = lcell + first_cell;
data_cells.data_index_offsets[cell] =
- cell_type[cell] <= static_cast<unsigned int>(affine) ?
- indices_compressed[data_cells_local.data_index_offsets[lcell]] :
+ cell_type[cell] <= affine ?
+ (dim == structdim ? 2 : 1) *
+ indices_compressed[data_cells_local
+ .data_index_offsets[lcell]] :
data_cells_local.data_index_offsets[lcell] + data_shift[0];
if (data_cells_local.quadrature_point_offsets.size() > lcell)
data_cells.quadrature_point_offsets[cell] =
}
}
+
+
+ /**
+     * This evaluates the mapping information on a range of cells, calling
+     * into the tensor product interpolators of the matrix-free framework,
+     * using a polynomial expansion of the cell geometry in terms of
+     * MappingQGeneric.
+ */
+ template <int dim,
+ typename Number,
+ typename VectorizedArrayType,
+ typename VectorizedDouble>
+ void
+ compute_range_mapping_q(
+ const unsigned int begin_cell,
+ const unsigned int end_cell,
+ const std::vector<GeometryType> & cell_type,
+ const std::vector<bool> & process_cell,
+ const UpdateFlags update_flags_cells,
+ const AlignedVector<double> & plain_quadrature_points,
+ const ShapeInfo<VectorizedDouble> &shape_info,
+ MappingInfoStorage<dim, dim, Number, VectorizedArrayType> &my_data)
+ {
+ constexpr unsigned int n_lanes = VectorizedArrayType::n_array_elements;
+ constexpr unsigned int n_lanes_d = VectorizedDouble::n_array_elements;
+
+ const unsigned int n_q_points = my_data.descriptor[0].n_q_points;
+ const unsigned int n_mapping_points =
+ shape_info.dofs_per_component_on_cell;
+ constexpr unsigned int hess_dim = dim * (dim + 1) / 2;
+
+ AlignedVector<VectorizedDouble> cell_points(dim * n_mapping_points);
+ AlignedVector<VectorizedDouble> cell_quads(dim * n_q_points);
+ AlignedVector<VectorizedDouble> cell_grads(dim * dim * n_q_points);
+ AlignedVector<VectorizedDouble> cell_grad_grads(dim * hess_dim *
+ n_q_points);
+ AlignedVector<VectorizedDouble> scratch_data(
+ dim * (2 * n_q_points + 3 * n_mapping_points));
+
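+        // loop over all cell batches; within a batch, process the SIMD
+        // lanes in chunks of the width of VectorizedDouble because the
+        // geometry is evaluated in double precision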
+ for (unsigned int cell = begin_cell; cell < end_cell; ++cell)
+ for (unsigned vv = 0; vv < n_lanes; vv += n_lanes_d)
+ {
+ if (cell_type[cell] > affine || process_cell[cell])
+ {
+ unsigned int
+ start_indices[VectorizedDouble::n_array_elements];
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ start_indices[v] =
+ (cell * n_lanes + vv + v) * n_mapping_points * dim;
+ vectorized_load_and_transpose(n_mapping_points * dim,
+ plain_quadrature_points.data(),
+ start_indices,
+ cell_points.data());
+
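+                // evaluate the values, gradients, and (if requested)
+                // hessians of the cell geometry polynomial at the
+                // quadrature points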
+ SelectEvaluator<dim, -1, 0, dim, VectorizedDouble>::evaluate(
+ shape_info,
+ cell_points.data(),
+ cell_quads.data(),
+ cell_grads.data(),
+ cell_grad_grads.data(),
+ scratch_data.data(),
+ true,
+ true,
+ update_flags_cells & update_jacobian_grads);
+ }
+ if (update_flags_cells & update_quadrature_points)
+ {
+ Point<dim, VectorizedArrayType> *quadrature_points =
+ my_data.quadrature_points.data() +
+ my_data.quadrature_point_offsets[cell];
+ if (cell_type[cell] <= affine)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ quadrature_points[0][d][vv + v] =
+ plain_quadrature_points
+ [(dim * (cell * n_lanes + vv + v) + d) *
+ n_mapping_points];
+ else
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ store_vectorized_array(cell_quads[q + d * n_q_points],
+ vv,
+ quadrature_points[q][d]);
+ }
+
+ const unsigned int n_points =
+ cell_type[cell] <= affine ? 1 : n_q_points;
+ if (process_cell[cell])
+ for (unsigned int q = 0; q < n_points; ++q)
+ {
+ const unsigned int idx =
+ my_data.data_index_offsets[cell] + q;
+ Tensor<2, dim, VectorizedDouble> jac;
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ jac[d][e] = cell_grads[q + (d * dim + e) * n_q_points];
+
+ // eliminate roundoff errors
+ if (cell_type[cell] == cartesian)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ if (d != e)
+ jac[d][e] = 0.;
+
+ const VectorizedDouble jac_det = determinant(jac);
+ const Tensor<2, dim, VectorizedDouble> inv_jac =
+ transpose(invert(jac));
+
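+                  // for affine cells, additionally store the plain Jacobian
+                  // in the second of the two data slots so that quadrature
+                  // points can be reconstructed later on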
+ if (cell_type[cell] <= affine)
+ {
+ store_vectorized_array(jac_det,
+ vv,
+ my_data.JxW_values[idx]);
+
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ store_vectorized_array(
+ jac[d][e],
+ vv,
+ my_data.jacobians[0][idx + 1][d][e]);
+ }
+ else
+ {
+ const double weight =
+ my_data.descriptor[0].quadrature.weight(q);
+ store_vectorized_array(jac_det * weight,
+ vv,
+ my_data.JxW_values[idx]);
+ }
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ store_vectorized_array(inv_jac[d][e],
+ vv,
+ my_data.jacobians[0][idx][d][e]);
+
+ if (update_flags_cells & update_jacobian_grads &&
+ cell_type[cell] > affine)
+ {
+ Tensor<3, dim, VectorizedDouble> jac_grad;
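+                          // reassemble the symmetric second derivatives
+                          // from the compressed storage: diagonal entries
+                          // first, then the upper off-diagonal ones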
+ for (unsigned int d = 0; d < dim; ++d)
+ {
+ for (unsigned int e = 0; e < dim; ++e)
+ jac_grad[d][e][e] =
+ cell_grad_grads[q + (d * hess_dim + e) *
+ n_q_points];
+ for (unsigned int c = dim, e = 0; e < dim; ++e)
+ for (unsigned int f = e + 1; f < dim; ++f, ++c)
+ jac_grad[d][e][f] = jac_grad[d][f][e] =
+ cell_grad_grads[q + (d * hess_dim + c) *
+ n_q_points];
+ const auto inv_jac_grad =
+ process_jacobian_gradient(inv_jac, jac_grad);
+ for (unsigned int d = 0; d < hess_dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ store_vectorized_array(
+ inv_jac_grad[d][e],
+ vv,
+ my_data.jacobian_gradients[0][idx][d][e]);
+ }
+ }
+ }
+ }
+ }
+
} // namespace ExtractCellHelper
cell_data[my_q].data_index_offsets.resize(cell_type.size());
std::vector<std::array<std::size_t, 2>> shift(
data_cells_local.size());
- shift[0][0] = n_constant_jacobians;
+ shift[0][0] = 2 * n_constant_jacobians;
shift[0][1] = 0;
for (unsigned int i = 1; i < data_cells_local.size(); ++i)
{
cell_data[my_q]);
// finally, insert the constant cell data at the beginning (the
- // other tasks can already start copying the non-constant data)
+ // other tasks can already start copying the non-constant
+ // data). Note that we use two slots for the constant data to
+          // accommodate both the inverse transposed Jacobian (that we
+ // need for derivatives) and the Jacobian (that we need for
+ // quadrature points)
if (my_q == 0)
{
for (const auto &it : data_cells_local[0].second.data)
++v)
jac[d][e][v] = it.first[d][e][v];
AssertIndexRange(it.second, n_constant_jacobians);
- const std::size_t index = it.second;
- cell_data[my_q].JxW_values[index] = determinant(jac);
+ const std::size_t index = it.second;
+ cell_data[my_q].JxW_values[2 * index] = determinant(jac);
// invert and transpose jac
- cell_data[my_q].jacobians[0][index] = transpose(invert(jac));
+ cell_data[my_q].jacobians[0][2 * index] =
+ transpose(invert(jac));
+ cell_data[my_q].jacobians[0][2 * index + 1] = jac;
// second derivative of transformation is zero on affine cells
}
}
else
{
- for (unsigned int i = 0; i < n_constant_jacobians; ++i)
+ for (unsigned int i = 0; i < 2 * n_constant_jacobians; ++i)
{
cell_data[my_q].JxW_values[i] = cell_data[0].JxW_values[i];
cell_data[my_q].jacobians[0][i] =
}
}
+
+
+ /**
+     * This evaluates the mapping information on a range of faces, calling
+     * into the tensor product face interpolators of the matrix-free
+     * framework, using a polynomial expansion of the cell geometry in
+     * terms of MappingQGeneric.
+ */
+ template <int dim,
+ typename Number,
+ typename VectorizedArrayType,
+ typename VectorizedDouble>
+ void
+ compute_range_mapping_q(
+ const unsigned int begin_face,
+ const unsigned int end_face,
+ const std::vector<
+ FaceToCellTopology<VectorizedArrayType::n_array_elements>> &faces,
+ const std::vector<GeometryType> & face_type,
+ const std::vector<bool> & process_face,
+ const UpdateFlags update_flags_faces,
+ const AlignedVector<double> & plain_quadrature_points,
+ const ShapeInfo<VectorizedDouble> &shape_info,
+ MappingInfoStorage<dim - 1, dim, Number, VectorizedArrayType> &my_data)
+ {
+ constexpr unsigned int n_lanes = VectorizedArrayType::n_array_elements;
+ constexpr unsigned int n_lanes_d = VectorizedDouble::n_array_elements;
+
+ const unsigned int n_q_points = my_data.descriptor[0].n_q_points;
+ const unsigned int n_mapping_points =
+ shape_info.dofs_per_component_on_cell;
+
+ AlignedVector<VectorizedDouble> cell_points(dim * n_mapping_points);
+ AlignedVector<VectorizedDouble> face_quads(dim * n_q_points);
+ AlignedVector<VectorizedDouble> face_grads(dim * dim * n_q_points);
+ AlignedVector<VectorizedDouble> scratch_data(
+ dim * (2 * n_q_points + 3 * n_mapping_points));
+
+ for (unsigned int face = begin_face; face < end_face; ++face)
+ for (unsigned vv = 0; vv < n_lanes; vv += n_lanes_d)
+ {
+ // load the geometry field for all SIMD lanes
+ unsigned int start_indices[VectorizedDouble::n_array_elements];
+ const unsigned int face_no = faces[face].interior_face_no;
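+          // lanes that do not hold an actual face fall back to the data of
+          // the first cell in the batch to keep the vectorized load valid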
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ if (faces[face].cells_interior[vv + v] !=
+ numbers::invalid_unsigned_int)
+ start_indices[v] =
+ faces[face].cells_interior[vv + v] * n_mapping_points * dim;
+ else
+ start_indices[v] =
+ faces[face].cells_interior[0] * n_mapping_points * dim;
+ vectorized_load_and_transpose(n_mapping_points * dim,
+ plain_quadrature_points.data(),
+ start_indices,
+ cell_points.data());
+
+ // now let the matrix-free evaluators provide us with the
+ // data on faces
+ FEFaceEvaluationSelector<dim,
+ -1,
+ 0,
+ dim,
+ double,
+ VectorizedDouble>::
+ evaluate(shape_info,
+ cell_points.data(),
+ face_quads.data(),
+ face_grads.data(),
+ scratch_data.data(),
+ true,
+ true,
+ face_no,
+ GeometryInfo<dim>::max_children_per_cell,
+ faces[face].face_orientation > 8 ?
+ faces[face].face_orientation - 8 :
+ 0,
+ my_data.descriptor[0].face_orientations);
+
+
+ if (update_flags_faces & update_quadrature_points)
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ for (unsigned int d = 0; d < dim; ++d)
+ store_vectorized_array(
+ face_quads[d * n_q_points + q],
+ vv,
+ my_data.quadrature_points
+ [my_data.quadrature_point_offsets[face] + q][d]);
+
+ if (process_face[face] == false)
+ continue;
+
+ // go through the faces and fill the result
+ const unsigned int offset = my_data.data_index_offsets[face];
+ const unsigned int n_points_compute =
+ face_type[face] <= affine ? 1 : n_q_points;
+ for (unsigned int q = 0; q < n_points_compute; ++q)
+ {
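+              // assemble the Jacobian from the gradients on the face; the
+              // reference-cell derivative directions need to be reordered
+              // according to the face number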
+ Tensor<2, dim, VectorizedDouble> jac;
+ for (unsigned int e = 0; e < dim; ++e)
+ {
+ const unsigned int ee =
+ ExtractFaceHelper::reorder_face_derivative_indices<dim>(
+ face_no, e);
+ for (unsigned int d = 0; d < dim; ++d)
+ jac[d][ee] = face_grads[(d * dim + e) * n_q_points + q];
+ }
+ Tensor<2, dim, VectorizedDouble> inv_jac = invert(jac);
+ for (unsigned int e = 0; e < dim; ++e)
+ {
+ const unsigned int ee =
+ ExtractFaceHelper::reorder_face_derivative_indices<dim>(
+ face_no, e);
+ for (unsigned int d = 0; d < dim; ++d)
+ store_vectorized_array(
+ inv_jac[ee][d],
+ vv,
+ my_data.jacobians[0][offset + q][d][e]);
+ }
+
+ std::array<Tensor<1, dim, VectorizedDouble>, dim - 1>
+ tangential_vectors;
+ for (unsigned int d = 0; d != dim - 1; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ for (unsigned int f = 0; f < dim; ++f)
+ tangential_vectors[d][e] +=
+ jac[e][f] *
+ GeometryInfo<dim>::unit_tangential_vectors[face_no][d]
+ [f];
+
+ Tensor<1, dim, VectorizedDouble> boundary_form;
+ if (dim == 1)
+ boundary_form[0] = face_no == 0 ? -1. : 1.;
+ else if (dim == 2)
+ boundary_form = cross_product_2d(tangential_vectors[0]);
+ else if (dim == 3)
+ boundary_form = cross_product_3d(tangential_vectors[0],
+ tangential_vectors[1]);
+ else
+ Assert(false, ExcNotImplemented());
+
+ const VectorizedDouble JxW =
+ boundary_form.norm() *
+ (face_type[face] <= affine ?
+ 1. :
+ my_data.descriptor[0].quadrature.weight(q));
+
+ store_vectorized_array(JxW,
+ vv,
+ my_data.JxW_values[offset + q]);
+
+ const Tensor<1, dim, VectorizedDouble> normal =
+ boundary_form / boundary_form.norm();
+
+ for (unsigned int d = 0; d < dim; ++d)
+ store_vectorized_array(
+ normal[d], vv, my_data.normal_vectors[offset + q][d]);
+
+ my_data.normals_times_jacobians[0][offset + q] =
+ my_data.normal_vectors[offset + q] *
+ my_data.jacobians[0][offset + q];
+ }
+
+ if (faces[face].cells_exterior[0] !=
+ numbers::invalid_unsigned_int)
+ {
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ if (faces[face].cells_exterior[vv + v] !=
+ numbers::invalid_unsigned_int)
+ start_indices[v] = faces[face].cells_exterior[vv + v] *
+ n_mapping_points * dim;
+ else
+ start_indices[v] =
+ faces[face].cells_exterior[0] * n_mapping_points * dim;
+
+ vectorized_load_and_transpose(n_mapping_points * dim,
+ plain_quadrature_points.data(),
+ start_indices,
+ cell_points.data());
+
+ FEFaceEvaluationSelector<dim,
+ -1,
+ 0,
+ dim,
+                                   double,
+ VectorizedDouble>::
+ evaluate(shape_info,
+ cell_points.data(),
+ face_quads.data(),
+ face_grads.data(),
+ scratch_data.data(),
+ false,
+ true,
+ faces[face].exterior_face_no,
+ faces[face].subface_index,
+ faces[face].face_orientation < 8 ?
+ faces[face].face_orientation :
+ 0,
+ my_data.descriptor[0].face_orientations);
+
+ for (unsigned int q = 0; q < n_points_compute; ++q)
+ {
+ Tensor<2, dim, VectorizedDouble> jac;
+ for (unsigned int e = 0; e < dim; ++e)
+ {
+ const unsigned int ee =
+ ExtractFaceHelper::reorder_face_derivative_indices<
+ dim>(faces[face].exterior_face_no, e);
+ for (unsigned int d = 0; d < dim; ++d)
+ jac[d][ee] =
+ face_grads[(d * dim + e) * n_q_points + q];
+ }
+ Tensor<2, dim, VectorizedDouble> inv_jac = invert(jac);
+ for (unsigned int e = 0; e < dim; ++e)
+ {
+ const unsigned int ee =
+ ExtractFaceHelper::reorder_face_derivative_indices<
+ dim>(faces[face].exterior_face_no, e);
+ for (unsigned int d = 0; d < dim; ++d)
+ store_vectorized_array(
+ inv_jac[ee][d],
+ vv,
+ my_data.jacobians[1][offset + q][d][e]);
+ }
+ my_data.normals_times_jacobians[1][offset + q] =
+ my_data.normal_vectors[offset + q] *
+ my_data.jacobians[1][offset + q];
+ }
+ }
+ }
+ }
+
} // namespace ExtractFaceHelper
if (faces.size() == 0)
return;
- // Create as many chunks of cells as we have threads and spawn the work
+ // Create as many chunks of cells as we have threads and spawn the
+ // work
unsigned int work_per_chunk =
std::max(std::size_t(8),
(faces.size() + MultithreadInfo::n_threads() - 1) /
ExtractFaceHelper::
CompressedFaceData<dim, Number, VectorizedArrayType>>>
data_faces_local;
- // Reserve enough space to avoid re-allocation (which would destroy the
- // references passed to the tasks!)
+ // Reserve enough space to avoid re-allocation (which would destroy
+ // the references passed to the tasks!)
data_faces_local.reserve(MultithreadInfo::n_threads());
{
+ template <int dim, typename Number, typename VectorizedArrayType>
+ void
+ MappingInfo<dim, Number, VectorizedArrayType>::compute_mapping_q(
+ const dealii::Triangulation<dim> & tria,
+ const std::vector<std::pair<unsigned int, unsigned int>> &cell_array,
+ const std::vector<
+ FaceToCellTopology<VectorizedArrayType::n_array_elements>> &faces)
+ {
+    // step 1: extract the locations of the mapping support points of
+    // MappingQGeneric, evaluated as quadrature points of a Gauss-Lobatto
+    // formula
+ const MappingQGeneric<dim> *mapping_q =
+ dynamic_cast<const MappingQGeneric<dim> *>(&*this->mapping);
+ Assert(mapping_q != nullptr, ExcInternalError());
+
+ const unsigned int mapping_degree = mapping_q->get_degree();
+ const unsigned int n_mapping_points =
+ Utilities::pow(mapping_degree + 1, dim);
+ AlignedVector<double> plain_quadrature_points(cell_array.size() *
+ n_mapping_points * dim);
+
+ const double jacobian_size = ExtractCellHelper::get_jacobian_size(tria);
+
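+    // for every cell, store an index into the compressed geometry data as
+    // well as a preliminary classification as Cartesian/affine/general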
+ std::vector<unsigned int> cell_data_index(cell_array.size());
+ std::vector<GeometryType> preliminary_cell_type(cell_array.size());
+ {
+ FE_Nothing<dim> dummy_fe;
+ QGaussLobatto<dim> quadrature(mapping_degree + 1);
+
+ FEValues<dim> fe_values(*mapping_q,
+ dummy_fe,
+ quadrature,
+ update_quadrature_points | update_jacobians);
+
+ // we include a map to store some compressed information about the
+ // Jacobians which we collect by a stencil-like pattern around the
+ // first quadrature point on the cell - we use a relatively coarse
+ // tolerance to account for some inaccuracies in the manifold
+ // evaluation
+ const FPArrayComparator<double> comparator(1e4 * jacobian_size);
+ std::map<std::array<Tensor<2, dim>, dim + 1>,
+ unsigned int,
+ FPArrayComparator<double>>
+ compressed_jacobians(comparator);
+
+ unsigned int n_data_buckets = 0;
+ for (unsigned int cell = 0; cell < cell_array.size(); ++cell)
+ {
+ typename dealii::Triangulation<dim>::cell_iterator cell_it(
+ &tria, cell_array[cell].first, cell_array[cell].second);
+ fe_values.reinit(cell_it);
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int q = 0; q < n_mapping_points; ++q)
+ plain_quadrature_points[(cell * dim + d) * n_mapping_points +
+ q] = fe_values.quadrature_point(q)[d];
+
+          // store the Jacobian at the first point as well as at the points
+          // one index ahead in each coordinate direction, i.e., along a
+          // stencil-like pattern
+ std::array<Tensor<2, dim, double>, dim + 1> jacobians_on_stencil;
+ jacobians_on_stencil[0] =
+ Tensor<2, dim, double>(fe_values.jacobian(0));
+ for (unsigned int d = 0, skip = 1; d < dim;
+ ++d, skip *= (mapping_degree + 1))
+ jacobians_on_stencil[1 + d] =
+ Tensor<2, dim, double>(fe_values.jacobian(skip));
+
+ // check in the map for the index of this cell
+ auto inserted = compressed_jacobians.insert(
+ std::make_pair(jacobians_on_stencil, cell));
+ bool add_this_cell = inserted.second;
+ if (inserted.second == false)
+ {
+              // check whether the cell found in the map really is only a
+              // translation of the current one, i.e., whether the similarity
+              // identified by the map is not accidental
+ double max_distance = 0;
+ const double *ptr_origin =
+ plain_quadrature_points.data() +
+ inserted.first->second * dim * n_mapping_points;
+ const double *ptr_mine = plain_quadrature_points.data() +
+ cell * dim * n_mapping_points;
+ for (unsigned int d = 0; d < dim; ++d)
+ {
+ const double translate_d =
+ ptr_origin[d * n_mapping_points] -
+ ptr_mine[d * n_mapping_points];
+ for (unsigned int q = 1; q < n_mapping_points; ++q)
+ max_distance =
+ std::max(std::abs(ptr_origin[d * n_mapping_points + q] -
+ ptr_mine[d * n_mapping_points + q] -
+ translate_d),
+ max_distance);
+ }
+
+              // the cell is not a translation of the stored one, so we add
+              // it with its own data after all
+ if (max_distance > 1e-10 * jacobian_size)
+ add_this_cell = true;
+ }
+
+ if (add_this_cell == true)
+ {
+ // check whether cell is Cartesian/affine/general
+ GeometryType type = cartesian;
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ if (d != e)
+ if (std::abs(inserted.first->first[0][d][e]) >
+ 1e-12 * jacobian_size)
+ type = affine;
+
+ for (unsigned int q = 1; q < n_mapping_points; ++q)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ if (std::abs(fe_values.jacobian(q)[d][e] -
+ fe_values.jacobian(0)[d][e]) >
+ 1e-12 * jacobian_size)
+ {
+ type = general;
+ goto endloop;
+ }
+ endloop:
+ cell_data_index[cell] = n_data_buckets;
+ preliminary_cell_type[cell] = type;
+ ++n_data_buckets;
+ }
+ else
+ {
+ cell_data_index[cell] = cell_data_index[inserted.first->second];
+ preliminary_cell_type[cell] =
+ preliminary_cell_type[inserted.first->second];
+ }
+ }
+ }
+
+ // step 2: compute the appropriate evaluation matrices for cells and
+ // faces
+
+ // We want to use vectorization for computing the quantities, but must
+ // evaluate the geometry in double precision; thus, for floats we need
+ // to do things in two sweeps and convert the final result.
+ constexpr unsigned int n_lanes = VectorizedArrayType::n_array_elements;
+ using VectorizedDouble =
+ VectorizedArray<double,
+ ((std::is_same<Number, float>::value && n_lanes > 1) ?
+ n_lanes / 2 :
+ n_lanes)>;
+
+ // Create a ShapeInfo object to provide the necessary interpolators to
+ // the various quadrature points. Note that it is initialized with the
+ // finite element fe_geometry using the degree of the mapping, which is
+ // not the same as the degree of the underlying finite element shape
+    // functions or the quadrature points; the shape info merely serves as a
+    // vehicle to provide the interpolation matrices from the cell support
+    // points to the cell and face quadrature points.
+ std::vector<ShapeInfo<VectorizedDouble>> shape_infos(cell_data.size());
+ {
+ FE_DGQ<dim> fe_geometry(mapping_degree);
+ for (unsigned int my_q = 0; my_q < cell_data.size(); ++my_q)
+ shape_infos[my_q].reinit(cell_data[my_q].descriptor[0].quadrature_1d,
+ fe_geometry);
+ }
+
+ // step 3: find compression of cells with vectorization
+ std::map<std::array<unsigned int, n_lanes>, unsigned int> compressed_data;
+
+ cell_type.resize(cell_array.size() / n_lanes);
+ std::vector<bool> process_cell(cell_type.size());
+ std::vector<unsigned int> cell_data_index_vect(cell_type.size());
+
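+    // a batch of cells is identified by the geometry data indices of all
+    // its lanes; batches with the same key can share the computed mapping
+    // data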
+ for (unsigned int cell = 0; cell < cell_array.size(); cell += n_lanes)
+ {
+ std::pair<std::array<unsigned int, n_lanes>, unsigned int>
+ data_indices;
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ data_indices.first[i] = cell_data_index[cell + i];
+ data_indices.second = cell / n_lanes;
+
+ auto inserted = compressed_data.insert(data_indices);
+
+ process_cell[cell / n_lanes] = inserted.second;
+ if (inserted.second == true)
+ cell_data_index_vect[cell / n_lanes] = data_indices.second;
+ else
+ cell_data_index_vect[cell / n_lanes] = inserted.first->second;
+
+ cell_type[cell / n_lanes] =
+ *std::max_element(preliminary_cell_type.data() + cell,
+ preliminary_cell_type.data() + cell + n_lanes);
+ }
+
+ // step 4: compute the data on cells from the cached quadrature
+ // points, filling up all SIMD lanes as appropriate
+ for (unsigned int my_q = 0; my_q < cell_data.size(); ++my_q)
+ {
+ MappingInfoStorage<dim, dim, Number, VectorizedArrayType> &my_data =
+ cell_data[my_q];
+
+ // step 4a: set the index offsets, find out how much to allocate,
+ // and allocate the memory
+ const unsigned int n_q_points = my_data.descriptor[0].n_q_points;
+ unsigned int max_size = 0;
+ my_data.data_index_offsets.resize(cell_type.size());
+ for (unsigned int cell = 0; cell < cell_type.size(); ++cell)
+ {
+ if (process_cell[cell] == false)
+ my_data.data_index_offsets[cell] =
+ my_data.data_index_offsets[cell_data_index_vect[cell]];
+ else
+ my_data.data_index_offsets[cell] = max_size;
+ max_size =
+ std::max(max_size,
+ my_data.data_index_offsets[cell] +
+ (cell_type[cell] <= affine ? 2 : n_q_points));
+ }
+
+ my_data.JxW_values.resize_fast(max_size);
+ my_data.jacobians[0].resize_fast(max_size);
+ if (update_flags_cells & update_jacobian_grads)
+ my_data.jacobian_gradients[0].resize_fast(max_size);
+
+ if (update_flags_cells & update_quadrature_points)
+ {
+ my_data.quadrature_point_offsets.resize(cell_type.size());
+ for (unsigned int cell = 1; cell < cell_type.size(); ++cell)
+ if (cell_type[cell - 1] <= affine)
+ my_data.quadrature_point_offsets[cell] =
+ my_data.quadrature_point_offsets[cell - 1] + 1;
+ else
+ my_data.quadrature_point_offsets[cell] =
+ my_data.quadrature_point_offsets[cell - 1] + n_q_points;
+ my_data.quadrature_points.resize_fast(
+ my_data.quadrature_point_offsets.back() +
+ (cell_type.back() <= affine ? 1 : n_q_points));
+ }
+
+ // step 4b: go through the cells and compute the information using
+ // similar evaluators as for the matrix-free integrals
+ ExtractCellHelper::compute_range_mapping_q<dim,
+ Number,
+ VectorizedArrayType,
+ VectorizedDouble>(
+ 0,
+ cell_type.size(),
+ cell_type,
+ process_cell,
+ update_flags_cells,
+ plain_quadrature_points,
+ shape_infos[my_q],
+ my_data);
+ }
+
+ if (faces.empty())
+ return;
+
+ // step 5: find compression of faces with vectorization
+ std::map<std::array<unsigned int, 2 * n_lanes + 3>, unsigned int>
+ compressed_faces;
+
+ face_type.resize(faces.size());
+ std::vector<bool> process_face(face_type.size());
+ std::vector<unsigned int> face_data_index_vect(face_type.size());
+
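+    // the key of a face consists of the geometry data indices of the
+    // adjacent interior and exterior cells as well as the face numbers and
+    // the subface index; faces with equal keys can share their data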
+ for (unsigned int face = 0; face < faces.size(); ++face)
+ {
+ std::pair<std::array<unsigned int, 2 * n_lanes + 3>, unsigned int>
+ data_indices;
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ if (faces[face].cells_interior[i] != numbers::invalid_unsigned_int)
+ data_indices.first[i] =
+ cell_data_index[faces[face].cells_interior[i]];
+ else
+ data_indices.first[i] = data_indices.first[0];
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ data_indices.first[n_lanes + i] = data_indices.first[i];
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ if (faces[face].cells_exterior[i] != numbers::invalid_unsigned_int)
+ data_indices.first[n_lanes + i] =
+ cell_data_index[faces[face].cells_exterior[i]];
+ data_indices.first[2 * n_lanes] = faces[face].interior_face_no;
+ data_indices.first[2 * n_lanes + 1] = faces[face].exterior_face_no;
+ data_indices.first[2 * n_lanes + 2] = faces[face].subface_index;
+
+ data_indices.second = face;
+
+ auto inserted = compressed_faces.insert(data_indices);
+
+ process_face[face] = inserted.second;
+ if (inserted.second == true)
+ face_data_index_vect[face] = face;
+ else
+ face_data_index_vect[face] = inserted.first->second;
+
+ face_type[face] = cartesian;
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ if (faces[face].cells_interior[i] != numbers::invalid_unsigned_int)
+ face_type[face] =
+ std::max(face_type[face],
+ preliminary_cell_type[faces[face].cells_interior[i]]);
+ for (unsigned int i = 0; i < n_lanes; ++i)
+ if (faces[face].cells_exterior[i] != numbers::invalid_unsigned_int)
+ face_type[face] =
+ std::max(face_type[face],
+ preliminary_cell_type[faces[face].cells_exterior[i]]);
+ }
+
+ // step 6: compute the data on faces from the cached cell quadrature
+ // points, filling up all SIMD lanes as appropriate
+ for (unsigned int my_q = 0; my_q < face_data.size(); ++my_q)
+ {
+ MappingInfoStorage<dim - 1, dim, Number, VectorizedArrayType>
+ &my_data = face_data[my_q];
+
+ // step 6a: set the index offsets, find out how much to allocate,
+ // and allocate the memory
+ const unsigned int n_q_points = my_data.descriptor[0].n_q_points;
+ unsigned int max_size = 0;
+ my_data.data_index_offsets.resize(face_type.size());
+ for (unsigned int face = 0; face < face_type.size(); ++face)
+ {
+ if (process_face[face] == false)
+ my_data.data_index_offsets[face] =
+ my_data.data_index_offsets[face_data_index_vect[face]];
+ else
+ my_data.data_index_offsets[face] = max_size;
+ max_size =
+ std::max(max_size,
+ my_data.data_index_offsets[face] +
+ (face_type[face] <= affine ? 1 : n_q_points));
+ }
+
+ const UpdateFlags update_flags_common =
+ update_flags_boundary_faces | update_flags_inner_faces;
+
+ my_data.JxW_values.resize_fast(max_size);
+ my_data.normal_vectors.resize_fast(max_size);
+ my_data.jacobians[0].resize_fast(max_size);
+ my_data.jacobians[1].resize_fast(max_size);
+ if (update_flags_common & update_jacobian_grads)
+ {
+ my_data.jacobian_gradients[0].resize_fast(max_size);
+ my_data.jacobian_gradients[1].resize_fast(max_size);
+ }
+ my_data.normals_times_jacobians[0].resize_fast(max_size);
+ my_data.normals_times_jacobians[1].resize_fast(max_size);
+
+        if (update_flags_common & update_quadrature_points)
+ {
+ my_data.quadrature_point_offsets.resize(face_type.size());
+ my_data.quadrature_point_offsets[0] = 0;
+ for (unsigned int face = 1; face < faces.size(); ++face)
+ my_data.quadrature_point_offsets[face] =
+ n_q_points + my_data.quadrature_point_offsets[face - 1];
+ my_data.quadrature_points.resize_fast(face_type.size() *
+ n_q_points);
+ }
+
+ // step 6b: go through the faces and compute the information using
+ // similar evaluators as for the matrix-free face integrals
+ ExtractFaceHelper::compute_range_mapping_q<dim,
+ Number,
+ VectorizedArrayType,
+ VectorizedDouble>(
+ 0,
+ face_type.size(),
+ faces,
+ face_type,
+ process_face,
+ update_flags_common,
+ plain_quadrature_points,
+ shape_infos[my_q],
+ my_data);
+ }
+
+    // step 6c: figure out whether the normal vectors are the same at all
+    // quadrature points of a face, which allows us to set the flat_faces
+    // face type
+ unsigned int quad_with_most_points = 0;
+ for (unsigned int my_q = 1; my_q < face_data.size(); ++my_q)
+ if (face_data[my_q].descriptor[0].n_q_points >
+ face_data[quad_with_most_points].descriptor[0].n_q_points)
+ quad_with_most_points = my_q;
+ for (unsigned int face = 0; face < face_type.size(); ++face)
+ if (face_type[face] == general)
+ {
+ const unsigned int n_q_points =
+ face_data[quad_with_most_points].descriptor[0].n_q_points;
+ const Tensor<1, dim, VectorizedArrayType> *normals =
+ face_data[quad_with_most_points].normal_vectors.data() +
+ face_data[quad_with_most_points].data_index_offsets[face];
+ VectorizedArrayType distance = 0.;
+ for (unsigned int q = 1; q < n_q_points; ++q)
+ distance += (normals[q] - normals[0]).norm_square();
+ bool all_small = true;
+ for (unsigned int v = 0; v < n_lanes; ++v)
+ if (distance[v] > 50. * std::numeric_limits<Number>::epsilon() *
+ std::numeric_limits<Number>::epsilon() *
+ n_q_points)
+ all_small = false;
+ if (all_small)
+ face_type[face] = flat_faces;
+ }
+
+    // step 7: compute the face data by cells. This still needs to be
+    // transitioned to extracting the information from the cell quadrature
+    // points, but we first need to figure out the correct indices of
+    // neighbors within the list of arrays
+ initialize_faces_by_cells(tria, cell_array, *this->mapping);
+ }
+
+
+
template <int dim, typename Number, typename VectorizedArrayType>
void
MappingInfo<dim, Number, VectorizedArrayType>::initialize_faces_by_cells(
return false;
}
- } // end of namespace MatrixFreeFunctions
+
+
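+  // Compare two arrays of tensors entry by entry against the stored
+  // tolerance; this defines the ordering used for the map of stencil
+  // Jacobians in compute_mapping_q()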
+ template <typename Number, typename VectorizedArrayType>
+ template <int dim>
+ bool
+ FPArrayComparator<Number, VectorizedArrayType>::
+ operator()(const std::array<Tensor<2, dim, Number>, dim + 1> &t1,
+ const std::array<Tensor<2, dim, Number>, dim + 1> &t2) const
+ {
+ for (unsigned int i = 0; i < t1.size(); ++i)
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int e = 0; e < dim; ++e)
+ if (t1[i][d][e] < t2[i][d][e] - tolerance)
+ return true;
+ else if (t1[i][d][e] > t2[i][d][e] + tolerance)
+ return false;
+ return false;
+ }
+
+ } // namespace MatrixFreeFunctions
} // end of namespace internal
DEAL_II_NAMESPACE_CLOSE