#include <deal.II/base/utilities.h>
#include <deal.II/base/vectorization.h>
+#include <deal.II/matrix_free/dof_info.h>
#include <deal.II/matrix_free/shape_info.h>
#include <deal.II/matrix_free/tensor_product_kernels.h>
}
}
};
+
+
+
+  // internal helper function for reading data; generic version for all types
+ template <typename VectorizedArrayType, typename Number2>
+ void
+ do_vectorized_read(const Number2 *src_ptr, VectorizedArrayType &dst)
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dst[v] = src_ptr[v];
+ }
+
+
+
+ // internal helper function for reading data; specialized version where we
+ // can use a dedicated load function
+ template <typename Number, unsigned int width>
+ void
+ do_vectorized_read(const Number *src_ptr, VectorizedArray<Number, width> &dst)
+ {
+ dst.load(src_ptr);
+ }
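+
+  // Note on the two overloads above: they are distinguished purely by
+  // overload resolution. If the scalar type behind `src_ptr` matches the
+  // Number type of the VectorizedArray, the more specialized second overload
+  // is chosen and a single vectorized load is issued; for mixed-precision
+  // reads (e.g. from a float array into a VectorizedArray<double>), template
+  // deduction only succeeds for the generic variant and the lane-by-lane
+  // loop is used instead. The same pattern is repeated for the gather, add,
+  // and scatter-add helpers below.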
+
+
+
+  // internal helper function for reading data through an index array
+  // (gather); generic version for all types
+ template <typename VectorizedArrayType, typename Number2>
+ void
+ do_vectorized_gather(const Number2 * src_ptr,
+ const unsigned int * indices,
+ VectorizedArrayType &dst)
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dst[v] = src_ptr[indices[v]];
+ }
+
+
+
+ // internal helper function for reading data; specialized version where we
+ // can use a dedicated gather function
+ template <typename Number, unsigned int width>
+ void
+ do_vectorized_gather(const Number * src_ptr,
+ const unsigned int * indices,
+ VectorizedArray<Number, width> &dst)
+ {
+ dst.gather(src_ptr, indices);
+ }
+
+
+
+  // internal helper function for adding data into an array; generic version
+  // for all types
+ template <typename VectorizedArrayType, typename Number2>
+ void
+ do_vectorized_add(const VectorizedArrayType src, Number2 *dst_ptr)
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dst_ptr[v] += src[v];
+ }
+
+
+
+  // internal helper function for adding data; specialized version where we
+  // can use vectorized load and store operations
+ template <typename Number, unsigned int width>
+ void
+ do_vectorized_add(const VectorizedArray<Number, width> src, Number *dst_ptr)
+ {
+ VectorizedArray<Number, width> tmp;
+ tmp.load(dst_ptr);
+ (tmp + src).store(dst_ptr);
+ }
+
+
+
+  // internal helper function for adding data through an index array
+  // (scatter-add); generic version for all types
+ template <typename VectorizedArrayType, typename Number2>
+ void
+ do_vectorized_scatter_add(const VectorizedArrayType src,
+ const unsigned int * indices,
+ Number2 * dst_ptr)
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dst_ptr[indices[v]] += src[v];
+ }
+
+
+
+  // internal helper function for adding data through an index array;
+  // specialized version where we can use dedicated gather and scatter
+  // functions
+ template <typename Number, unsigned int width>
+ void
+ do_vectorized_scatter_add(const VectorizedArray<Number, width> src,
+ const unsigned int * indices,
+ Number * dst_ptr)
+ {
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL < 3
+ for (unsigned int v = 0; v < width; ++v)
+ dst_ptr[indices[v]] += src[v];
+#else
+ VectorizedArray<Number, width> tmp;
+ tmp.gather(dst_ptr, indices);
+ (tmp + src).scatter(indices, dst_ptr);
+#endif
+ }
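+
+  // Note on the preprocessor guard above: in deal.II, a vectorization level
+  // of 3 corresponds to instruction sets that also provide hardware scatter
+  // instructions (AVX-512). Below that level, VectorizedArray::scatter()
+  // would have to resort to a scalar loop anyway, so the plain loop is used
+  // directly and the extra gather is avoided (only use gather in case there
+  // is also scatter).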
+
+
+
+ template <int dim,
+ int fe_degree,
+ int n_q_points_1d,
+ int n_components,
+ typename Number,
+ typename VectorizedArrayType,
+ typename Number2 = Number>
+ struct FEFaceEvaluationSelector
+ {
+ static void
+ evaluate(const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
+ const VectorizedArrayType * values_array,
+ VectorizedArrayType * values_quad,
+ VectorizedArrayType * gradients_quad,
+ VectorizedArrayType * scratch_data,
+ const bool evaluate_values,
+ const bool evaluate_gradients,
+ const unsigned int face_no,
+ const unsigned int subface_index,
+ const unsigned int face_orientation,
+ const Table<2, unsigned int> &orientation_map)
+ {
+ constexpr unsigned int static_dofs_per_face =
+ fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
+ numbers::invalid_unsigned_int;
+ const unsigned int dofs_per_face =
+ fe_degree > -1 ? static_dofs_per_face :
+ Utilities::pow(data.fe_degree + 1, dim - 1);
+
+      // we allocate small amounts of temporary data on the stack to signal
+      // to the compiler that it is only needed for the local computation and
+      // never has to be written back to memory. For large sizes, or when the
+      // number of dofs per face is not a compile-time constant, we instead
+      // use the heap-allocated `scratch_data` array to avoid the risk of a
+      // stack overflow.
+ constexpr unsigned int stack_array_size_threshold = 100;
+
+ VectorizedArrayType
+ temp_data[static_dofs_per_face < stack_array_size_threshold ?
+ n_components * 2 * static_dofs_per_face :
+ 1];
+ VectorizedArrayType *temp1;
+ if (static_dofs_per_face < stack_array_size_threshold)
+ temp1 = &temp_data[0];
+ else
+ temp1 = scratch_data;
+
+ FEFaceNormalEvaluationImpl<dim,
+ fe_degree,
+ n_components,
+ VectorizedArrayType>::
+ template interpolate<true, false>(
+ data, values_array, temp1, evaluate_gradients, face_no);
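+
+      // The face evaluation proceeds in two steps: the interpolation above
+      // condenses the cell dof values onto the face, filling temp1 with up
+      // to 2 * n_components * dofs_per_face entries (function values and, if
+      // requested, normal derivatives), and the call below performs the
+      // remaining (dim-1)-dimensional tensor-product evaluation within the
+      // face.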
+
+ const unsigned int n_q_points_1d_actual =
+ fe_degree > -1 ? n_q_points_1d : 0;
+ if (fe_degree > -1 &&
+ subface_index >= GeometryInfo<dim>::max_children_per_cell &&
+ data.element_type <= MatrixFreeFunctions::tensor_symmetric)
+ FEFaceEvaluationImpl<
+ true,
+ dim,
+ fe_degree,
+ n_q_points_1d_actual,
+ n_components,
+ VectorizedArrayType>::evaluate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data + 2 *
+ n_components *
+ dofs_per_face,
+ evaluate_values,
+ evaluate_gradients,
+ subface_index);
+ else
+ FEFaceEvaluationImpl<
+ false,
+ dim,
+ fe_degree,
+ n_q_points_1d_actual,
+ n_components,
+ VectorizedArrayType>::evaluate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data + 2 *
+ n_components *
+ dofs_per_face,
+ evaluate_values,
+ evaluate_gradients,
+ subface_index);
+
+ if (face_orientation)
+ adjust_for_face_orientation(face_orientation,
+ orientation_map,
+ false,
+ evaluate_values,
+ evaluate_gradients,
+ data.n_q_points_face,
+ scratch_data,
+ values_quad,
+ gradients_quad);
+ }
+
+ static void
+ integrate(const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
+ VectorizedArrayType * values_array,
+ VectorizedArrayType * values_quad,
+ VectorizedArrayType * gradients_quad,
+ VectorizedArrayType * scratch_data,
+ const bool integrate_values,
+ const bool integrate_gradients,
+ const unsigned int face_no,
+ const unsigned int subface_index,
+ const unsigned int face_orientation,
+ const Table<2, unsigned int> &orientation_map)
+ {
+ if (face_orientation)
+ adjust_for_face_orientation(face_orientation,
+ orientation_map,
+ true,
+ integrate_values,
+ integrate_gradients,
+ data.n_q_points_face,
+ scratch_data,
+ values_quad,
+ gradients_quad);
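+
+      // From here on, integrate() mirrors evaluate() in reverse order: the
+      // quadrature-point data is first integrated within the
+      // (dim-1)-dimensional face into temp1, and at the end of the function
+      // the face contributions are distributed back into the cell dof values
+      // by the transposed interpolation.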
+
+ constexpr unsigned int static_dofs_per_face =
+ fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
+ numbers::invalid_unsigned_int;
+ const unsigned int dofs_per_face =
+ fe_degree > -1 ? static_dofs_per_face :
+ Utilities::pow(data.fe_degree + 1, dim - 1);
+
+ constexpr unsigned int stack_array_size_threshold = 100;
+
+ VectorizedArrayType
+ temp_data[static_dofs_per_face < stack_array_size_threshold ?
+ n_components * 2 * static_dofs_per_face :
+ 1];
+ VectorizedArrayType *temp1;
+ if (static_dofs_per_face < stack_array_size_threshold)
+ temp1 = &temp_data[0];
+ else
+ temp1 = scratch_data;
+
+ const unsigned int n_q_points_1d_actual =
+ fe_degree > -1 ? n_q_points_1d : 0;
+ if (fe_degree > -1 &&
+ subface_index >= GeometryInfo<dim - 1>::max_children_per_cell &&
+ data.element_type <= MatrixFreeFunctions::tensor_symmetric)
+ FEFaceEvaluationImpl<
+ true,
+ dim,
+ fe_degree,
+ n_q_points_1d_actual,
+ n_components,
+ VectorizedArrayType>::integrate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data +
+ 2 * n_components *
+ dofs_per_face,
+ integrate_values,
+ integrate_gradients,
+ subface_index);
+ else
+ FEFaceEvaluationImpl<
+ false,
+ dim,
+ fe_degree,
+ n_q_points_1d_actual,
+ n_components,
+ VectorizedArrayType>::integrate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data +
+ 2 * n_components *
+ dofs_per_face,
+ integrate_values,
+ integrate_gradients,
+ subface_index);
+
+ FEFaceNormalEvaluationImpl<dim,
+ fe_degree,
+ n_components,
+ VectorizedArrayType>::
+ template interpolate<false, false>(
+ data, temp1, values_array, integrate_gradients, face_no);
+ }
+
+ static bool
+ gather_evaluate(
+ const Number2 * src_ptr,
+ const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
+ const MatrixFreeFunctions::DoFInfo & dof_info,
+ VectorizedArrayType * values_quad,
+ VectorizedArrayType * gradients_quad,
+ VectorizedArrayType * scratch_data,
+ const bool evaluate_values,
+ const bool evaluate_gradients,
+ const unsigned int active_fe_index,
+ const unsigned int first_selected_component,
+ const unsigned int cell,
+ const unsigned int face_no,
+ const unsigned int subface_index,
+ const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
+ const unsigned int face_orientation,
+ const Table<2, unsigned int> & orientation_map)
+ {
+ const unsigned int side = face_no % 2;
+
+ constexpr unsigned int static_dofs_per_component =
+ fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim) :
+ numbers::invalid_unsigned_int;
+ constexpr unsigned int static_dofs_per_face =
+ fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
+ numbers::invalid_unsigned_int;
+ const unsigned int dofs_per_face =
+ fe_degree > -1 ? static_dofs_per_face :
+ Utilities::pow(data.fe_degree + 1, dim - 1);
+
+ constexpr unsigned int stack_array_size_threshold = 100;
+
+ VectorizedArrayType
+ temp_data[static_dofs_per_face < stack_array_size_threshold ?
+                    n_components * 2 * static_dofs_per_face :
+ 1];
+ VectorizedArrayType *__restrict temp1;
+ if (static_dofs_per_face < stack_array_size_threshold)
+ temp1 = &temp_data[0];
+ else
+ temp1 = scratch_data;
+
+ // case 1: contiguous and interleaved indices
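+      //
+      // In the interleaved_contiguous storage variant, all lanes of the cell
+      // batch share one contiguous index range and, as the indexing below
+      // shows, dof `i` of lane `v` is located at
+      //   dof_index + i * n_array_elements + v,
+      // so every face dof can be fetched with a single vectorized load.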
+ if (((evaluate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous)
+ {
+ AssertDimension(
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
+ VectorizedArrayType::n_array_elements);
+ const unsigned int dof_index =
+ dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements] +
+ dof_info.component_dof_indices_offset[active_fe_index]
+ [first_selected_component] *
+ VectorizedArrayType::n_array_elements;
+
+ if (fe_degree > 1 && evaluate_gradients == true)
+ {
+            // we know that the gradient weights for the Hermite case on the
+            // right face (side==1) are the negatives of those on the left
+            // face (side==0), so we only read out one of them.
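+            //
+            // Concretely, face_to_cell_index_hermite stores two cell dof
+            // indices per face dof: the value on the face is read from the
+            // dof at ind1, and the normal derivative is reconstructed below
+            // as grad_weight * (u[ind1] - u[ind2]) from the two dofs listed
+            // for this face dof.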
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 1 + side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 = index_array[2 * i];
+ const unsigned int ind2 = index_array[2 * i + 1];
+ AssertIndexRange(ind1, data.dofs_per_component_on_cell);
+ AssertIndexRange(ind2, data.dofs_per_component_on_cell);
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ do_vectorized_read(
+ src_ptr + dof_index +
+ (ind1 + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements,
+ temp1[i + 2 * comp * dofs_per_face]);
+ do_vectorized_read(
+ src_ptr + dof_index +
+ (ind2 + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements,
+ temp1[dofs_per_face + i + 2 * comp * dofs_per_face]);
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
+ grad_weight *
+ (temp1[i + 2 * comp * dofs_per_face] -
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face]);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind = index_array[i];
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ do_vectorized_read(
+ src_ptr + dof_index +
+ (ind + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements,
+ temp1[i + 2 * comp * dofs_per_face]);
+ }
+ }
+ }
+
+ // case 2: contiguous and interleaved indices with fixed stride
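+      //
+      // In the interleaved_contiguous_strided variant, each lane has its own
+      // starting index (indices[v]) while the dofs within a lane are
+      // interleaved with the fixed stride n_array_elements; the per-dof
+      // offset index_array[.] * n_array_elements is therefore the same for
+      // all lanes and a gather with the per-lane base indices suffices.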
+ else if (((evaluate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous_strided)
+ {
+ AssertDimension(
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
+ VectorizedArrayType::n_array_elements);
+ const unsigned int *indices =
+ &dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+ if (fe_degree > 1 && evaluate_gradients == true)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 1 + side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ index_array[2 * i] * VectorizedArrayType::n_array_elements;
+ const unsigned int ind2 =
+ index_array[2 * i + 1] *
+ VectorizedArrayType::n_array_elements;
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ do_vectorized_gather(
+ src_ptr + ind1 +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements,
+ indices,
+ temp1[i + 2 * comp * dofs_per_face]);
+ VectorizedArrayType grad;
+ do_vectorized_gather(
+ src_ptr + ind2 +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements,
+ indices,
+ grad);
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
+ grad_weight *
+ (temp1[i + 2 * comp * dofs_per_face] - grad);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind =
+ index_array[i] * VectorizedArrayType::n_array_elements;
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ do_vectorized_gather(
+ src_ptr + ind +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements,
+ indices,
+ temp1[i + 2 * comp * dofs_per_face]);
+ }
+ }
+ }
+
+ // case 3: contiguous and interleaved indices with mixed stride
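+      //
+      // In the interleaved_contiguous_mixed_strides variant, each lane
+      // additionally has its own stride, so the global index of every dof
+      // has to be assembled per lane as indices[v] + local_index * strides[v]
+      // before a generic gather can be used; cell batches with fewer than
+      // n_array_elements filled lanes are handled lane by lane below.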
+ else if (((evaluate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous_mixed_strides)
+ {
+ const unsigned int *strides =
+ &dof_info.dof_indices_interleave_strides
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+ unsigned int indices[VectorizedArrayType::n_array_elements];
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements;
+ ++v)
+ indices[v] =
+ dof_info.dof_indices_contiguous
+ [dof_access_index]
+ [cell * VectorizedArrayType::n_array_elements + v] +
+ dof_info.component_dof_indices_offset[active_fe_index]
+ [first_selected_component] *
+ strides[v];
+ const unsigned int nvec =
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell];
+
+ if (fe_degree > 1 && evaluate_gradients == true)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 1 + side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ if (nvec == VectorizedArrayType::n_array_elements)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ unsigned int ind1[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind1[v] =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i]) *
+ strides[v];
+ unsigned int ind2[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind2[v] =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i + 1]) *
+ strides[v];
+ do_vectorized_gather(src_ptr,
+ ind1,
+ temp1[i + 2 * comp * dofs_per_face]);
+ VectorizedArrayType grad;
+ do_vectorized_gather(src_ptr, ind2, grad);
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
+ grad_weight *
+ (temp1[i + 2 * comp * dofs_per_face] - grad);
+ }
+ else
+ {
+ for (unsigned int i = 0; i < n_components * 2 * dofs_per_face;
+ ++i)
+ temp1[i] = VectorizedArrayType();
+ for (unsigned int v = 0; v < nvec; ++v)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i]) *
+ strides[v];
+ const unsigned int ind2 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i + 1]) *
+ strides[v];
+ temp1[i + 2 * comp * dofs_per_face][v] =
+ src_ptr[ind1];
+ const Number grad = src_ptr[ind2];
+ temp1[i + dofs_per_face +
+ 2 * comp * dofs_per_face][v] =
+ grad_weight[0] *
+ (temp1[i + 2 * comp * dofs_per_face][v] - grad);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ if (nvec == VectorizedArrayType::n_array_elements)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ unsigned int ind[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind[v] =
+ indices[v] +
+ (comp * static_dofs_per_component + index_array[i]) *
+ strides[v];
+ do_vectorized_gather(src_ptr,
+ ind,
+ temp1[i + 2 * comp * dofs_per_face]);
+ }
+ else
+ {
+ for (unsigned int i = 0; i < n_components * dofs_per_face;
+ ++i)
+ temp1[i] = VectorizedArrayType();
+ for (unsigned int v = 0; v < nvec; ++v)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[i]) *
+ strides[v];
+ temp1[i + 2 * comp * dofs_per_face][v] =
+ src_ptr[ind1];
+ }
+ }
+ }
+ }
+
+ // case 4: contiguous indices without interleaving
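+      //
+      // In the plain contiguous variant (no interleaving), lane v starts at
+      // indices[v] and its dofs follow consecutively, so the face dof with
+      // local index `ind` is gathered from indices[v] + ind plus the
+      // component and first-selected-component offsets.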
+ else if (((evaluate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ contiguous &&
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell] ==
+ VectorizedArrayType::n_array_elements)
+ {
+ const unsigned int *indices =
+ &dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+ if (evaluate_gradients == true &&
+ data.element_type ==
+ MatrixFreeFunctions::tensor_symmetric_hermite)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 1 + side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 = index_array[2 * i];
+ const unsigned int ind2 = index_array[2 * i + 1];
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ do_vectorized_gather(
+ src_ptr + ind1 + comp * static_dofs_per_component +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component],
+ indices,
+ temp1[i + 2 * comp * dofs_per_face]);
+ VectorizedArrayType grad;
+ do_vectorized_gather(
+ src_ptr + ind2 + comp * static_dofs_per_component +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component],
+ indices,
+ grad);
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
+ grad_weight *
+ (temp1[i + 2 * comp * dofs_per_face] - grad);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ const unsigned int ind = index_array[i];
+ do_vectorized_gather(
+ src_ptr + ind + comp * static_dofs_per_component +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component],
+ indices,
+ temp1[i + comp * 2 * dofs_per_face]);
+ }
+ }
+ }
+
+ // case 5: default vector access
+ else
+ {
+ return false;
+ }
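+
+      // Returning false in the default case signals the caller that no
+      // specialized vector access was possible; the caller is then expected
+      // to fall back to the generic read_dof_values() + evaluate() sequence
+      // (compare the analogous fallback in integrate_scatter() below).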
+
+ if (fe_degree > -1 &&
+ subface_index >= GeometryInfo<dim>::max_children_per_cell &&
+ data.element_type <= MatrixFreeFunctions::tensor_symmetric)
+ FEFaceEvaluationImpl<
+ true,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ VectorizedArrayType>::evaluate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data + 2 *
+ n_components *
+ dofs_per_face,
+ evaluate_values,
+ evaluate_gradients,
+ subface_index);
+ else
+ FEFaceEvaluationImpl<
+ false,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ VectorizedArrayType>::evaluate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data + 2 *
+ n_components *
+ dofs_per_face,
+ evaluate_values,
+ evaluate_gradients,
+ subface_index);
+
+ if (face_orientation)
+ adjust_for_face_orientation(face_orientation,
+ orientation_map,
+ false,
+ evaluate_values,
+ evaluate_gradients,
+ data.n_q_points_face,
+ scratch_data,
+ values_quad,
+ gradients_quad);
+
+ return true;
+ }
+
+ static bool
+ integrate_scatter(
+ Number2 * dst_ptr,
+ const MatrixFreeFunctions::ShapeInfo<VectorizedArrayType> &data,
+ const MatrixFreeFunctions::DoFInfo & dof_info,
+ VectorizedArrayType * values_array,
+ VectorizedArrayType * values_quad,
+ VectorizedArrayType * gradients_quad,
+ VectorizedArrayType * scratch_data,
+ const bool integrate_values,
+ const bool integrate_gradients,
+ const unsigned int active_fe_index,
+ const unsigned int first_selected_component,
+ const unsigned int cell,
+ const unsigned int face_no,
+ const unsigned int subface_index,
+ const MatrixFreeFunctions::DoFInfo::DoFAccessIndex dof_access_index,
+ const unsigned int face_orientation,
+ const Table<2, unsigned int> & orientation_map)
+ {
+ if (face_orientation)
+ adjust_for_face_orientation(face_orientation,
+ orientation_map,
+ true,
+ integrate_values,
+ integrate_gradients,
+ data.n_q_points_face,
+ scratch_data,
+ values_quad,
+ gradients_quad);
+
+ const unsigned int side = face_no % 2;
+ constexpr unsigned int static_dofs_per_component =
+ fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim) :
+ numbers::invalid_unsigned_int;
+      constexpr unsigned int static_dofs_per_face =
+        fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
+                         numbers::invalid_unsigned_int;
+      const unsigned int dofs_per_face =
+        fe_degree > -1 ? static_dofs_per_face :
+                         Utilities::pow(data.fe_degree + 1, dim - 1);
+
+      constexpr unsigned int stack_array_size_threshold = 100;
+
+      VectorizedArrayType
+        temp_data[static_dofs_per_face < stack_array_size_threshold ?
+                    n_components * 2 * static_dofs_per_face :
+                    1];
+      VectorizedArrayType *__restrict temp1;
+      if (static_dofs_per_face < stack_array_size_threshold)
+ temp1 = &temp_data[0];
+ else
+ temp1 = scratch_data;
+
+ if (fe_degree > -1 &&
+ subface_index >= GeometryInfo<dim>::max_children_per_cell &&
+ data.element_type <= internal::MatrixFreeFunctions::tensor_symmetric)
+ internal::FEFaceEvaluationImpl<
+ true,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ VectorizedArrayType>::integrate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data +
+ 2 * n_components *
+ dofs_per_face,
+ integrate_values,
+ integrate_gradients,
+ subface_index);
+ else
+ internal::FEFaceEvaluationImpl<
+ false,
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ VectorizedArrayType>::integrate_in_face(data,
+ temp1,
+ values_quad,
+ gradients_quad,
+ scratch_data +
+ 2 * n_components *
+ dofs_per_face,
+ integrate_values,
+ integrate_gradients,
+ subface_index);
+
+ // case 1: contiguous and interleaved indices
+ if (((integrate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous)
+ {
+ AssertDimension(
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
+ VectorizedArrayType::n_array_elements);
+ const unsigned int dof_index =
+ dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements] +
+ dof_info.component_dof_indices_offset[active_fe_index]
+ [first_selected_component] *
+ VectorizedArrayType::n_array_elements;
+
+ if (fe_degree > 1 && integrate_gradients == true)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 2 - side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 = index_array[2 * i];
+ const unsigned int ind2 = index_array[2 * i + 1];
+ AssertIndexRange(ind1, data.dofs_per_component_on_cell);
+ AssertIndexRange(ind2, data.dofs_per_component_on_cell);
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ VectorizedArrayType val =
+ temp1[i + 2 * comp * dofs_per_face] -
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ VectorizedArrayType grad =
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ do_vectorized_add(
+ val,
+ dst_ptr + dof_index +
+ (ind1 + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements);
+ do_vectorized_add(
+ grad,
+ dst_ptr + dof_index +
+ (ind2 + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind = index_array[i];
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ do_vectorized_add(
+ temp1[i + 2 * comp * dofs_per_face],
+ dst_ptr + dof_index +
+ (ind + comp * static_dofs_per_component) *
+ VectorizedArrayType::n_array_elements);
+ }
+ }
+ }
+
+ // case 2: contiguous and interleaved indices with fixed stride
+ else if (((integrate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous_strided)
+ {
+ AssertDimension(
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell],
+ VectorizedArrayType::n_array_elements);
+ const unsigned int *indices =
+ &dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+ if (fe_degree > 1 && integrate_gradients == true)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 2 - side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ index_array[2 * i] * VectorizedArrayType::n_array_elements;
+ const unsigned int ind2 =
+ index_array[2 * i + 1] *
+ VectorizedArrayType::n_array_elements;
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ VectorizedArrayType val =
+ temp1[i + 2 * comp * dofs_per_face] -
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ VectorizedArrayType grad =
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ do_vectorized_scatter_add(
+ val,
+ indices,
+ dst_ptr + ind1 +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements);
+ do_vectorized_scatter_add(
+ grad,
+ indices,
+ dst_ptr + ind2 +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind =
+ index_array[i] * VectorizedArrayType::n_array_elements;
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ do_vectorized_scatter_add(
+ temp1[i + 2 * comp * dofs_per_face],
+ indices,
+ dst_ptr + ind +
+ comp * static_dofs_per_component *
+ VectorizedArrayType::n_array_elements +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component] *
+ VectorizedArrayType::n_array_elements);
+ }
+ }
+ }
+
+ // case 3: contiguous and interleaved indices with mixed stride
+ else if (((integrate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ interleaved_contiguous_mixed_strides)
+ {
+ const unsigned int *strides =
+ &dof_info.dof_indices_interleave_strides
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+ unsigned int indices[VectorizedArrayType::n_array_elements];
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements;
+ ++v)
+ indices[v] =
+ dof_info.dof_indices_contiguous
+ [dof_access_index]
+ [cell * VectorizedArrayType::n_array_elements + v] +
+ dof_info.component_dof_indices_offset[active_fe_index]
+ [first_selected_component] *
+ strides[v];
+ const unsigned int nvec =
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell];
+
+ if (fe_degree > 1 && integrate_gradients == true)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 2 - side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ if (nvec == VectorizedArrayType::n_array_elements)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ unsigned int ind1[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind1[v] =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i]) *
+ strides[v];
+ unsigned int ind2[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind2[v] =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i + 1]) *
+ strides[v];
+ VectorizedArrayType val =
+ temp1[i + 2 * comp * dofs_per_face] -
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ VectorizedArrayType grad =
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ do_vectorized_scatter_add(val, ind1, dst_ptr);
+ do_vectorized_scatter_add(grad, ind2, dst_ptr);
+ }
+ else
+ {
+ for (unsigned int v = 0; v < nvec; ++v)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i]) *
+ strides[v];
+ const unsigned int ind2 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[2 * i + 1]) *
+ strides[v];
+ Number val =
+ temp1[i + 2 * comp * dofs_per_face][v] -
+ grad_weight[0] * temp1[i + dofs_per_face +
+ 2 * comp * dofs_per_face][v];
+ Number grad =
+ grad_weight[0] * temp1[i + dofs_per_face +
+ 2 * comp * dofs_per_face][v];
+ dst_ptr[ind1] += val;
+ dst_ptr[ind2] += grad;
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ if (nvec == VectorizedArrayType::n_array_elements)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ unsigned int ind[VectorizedArrayType::n_array_elements];
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int v = 0;
+ v < VectorizedArrayType::n_array_elements;
+ ++v)
+ ind[v] =
+ indices[v] +
+ (comp * static_dofs_per_component + index_array[i]) *
+ strides[v];
+ do_vectorized_scatter_add(
+ temp1[i + 2 * comp * dofs_per_face], ind, dst_ptr);
+ }
+ else
+ {
+ for (unsigned int v = 0; v < nvec; ++v)
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 =
+ indices[v] + (comp * static_dofs_per_component +
+ index_array[i]) *
+ strides[v];
+ dst_ptr[ind1] +=
+ temp1[i + 2 * comp * dofs_per_face][v];
+ }
+ }
+ }
+ }
+
+ // case 4: contiguous indices without interleaving
+ else if (((integrate_gradients == false &&
+ data.nodal_at_cell_boundaries == true) ||
+ (data.element_type ==
+ internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
+ fe_degree > 1)) &&
+ dof_info.index_storage_variants[dof_access_index][cell] ==
+ internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+ contiguous &&
+ dof_info.n_vectorization_lanes_filled[dof_access_index][cell] ==
+ VectorizedArrayType::n_array_elements)
+ {
+ const unsigned int *indices =
+ &dof_info.dof_indices_contiguous
+ [dof_access_index][cell * VectorizedArrayType::n_array_elements];
+
+ if (integrate_gradients == true &&
+ data.element_type ==
+ internal::MatrixFreeFunctions::tensor_symmetric_hermite)
+ {
+              // we know that the gradient weights for the Hermite case on
+              // the right face (side==1) are the negatives of those on the
+              // left face (side==0), so we only read out one of them.
+ const VectorizedArrayType grad_weight =
+ data.shape_data_on_face[0][fe_degree + 2 - side];
+ AssertDimension(data.face_to_cell_index_hermite.size(1),
+ 2 * dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_hermite(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind1 = index_array[2 * i];
+ const unsigned int ind2 = index_array[2 * i + 1];
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ {
+ VectorizedArrayType val =
+ temp1[i + 2 * comp * dofs_per_face] -
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ VectorizedArrayType grad =
+ grad_weight *
+ temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
+ do_vectorized_scatter_add(
+ val,
+ indices,
+ dst_ptr + comp * static_dofs_per_component + ind1 +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component]);
+ do_vectorized_scatter_add(
+ grad,
+ indices,
+ dst_ptr + comp * static_dofs_per_component + ind2 +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component]);
+ }
+ }
+ }
+ else
+ {
+ AssertDimension(data.face_to_cell_index_nodal.size(1),
+ dofs_per_face);
+ const unsigned int *index_array =
+ &data.face_to_cell_index_nodal(face_no, 0);
+ for (unsigned int i = 0; i < dofs_per_face; ++i)
+ {
+ const unsigned int ind = index_array[i];
+ for (unsigned int comp = 0; comp < n_components; ++comp)
+ do_vectorized_scatter_add(
+ temp1[i + 2 * comp * dofs_per_face],
+ indices,
+ dst_ptr + comp * static_dofs_per_component + ind +
+ dof_info.component_dof_indices_offset
+ [active_fe_index][first_selected_component]);
+ }
+ }
+ }
+
+ // case 5: default vector access, must be handled separately, just do
+ // the face-normal interpolation
+ else
+ {
+ FEFaceNormalEvaluationImpl<dim,
+ fe_degree,
+ n_components,
+ VectorizedArrayType>::
+ template interpolate<false, false>(
+ data, temp1, values_array, integrate_gradients, face_no);
+ return false;
+ }
+
+ return true;
+ }
+
+ static void
+ adjust_for_face_orientation(const unsigned int face_orientation,
+ const Table<2, unsigned int> &orientation_map,
+ const bool integrate,
+ const bool values,
+ const bool gradients,
+ const unsigned int n_q_points,
+ VectorizedArrayType * tmp_values,
+ VectorizedArrayType * values_quad,
+ VectorizedArrayType * gradients_quad)
+ {
+ Assert(face_orientation, ExcInternalError());
+ const unsigned int *orientation = &orientation_map[face_orientation][0];
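+
+      // The permutation is applied in opposite directions for the two uses:
+      // when integrating, data is pulled from the permuted positions
+      // (tmp[q] = data[orientation[q]]), whereas when evaluating, it is
+      // pushed to them (tmp[orientation[q]] = data[q]), i.e. the inverse
+      // permutation is applied.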
+ for (unsigned int c = 0; c < n_components; ++c)
+ {
+ if (values == true)
+ {
+ if (integrate)
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ tmp_values[q] = values_quad[c * n_q_points + orientation[q]];
+ else
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ tmp_values[orientation[q]] = values_quad[c * n_q_points + q];
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ values_quad[c * n_q_points + q] = tmp_values[q];
+ }
+ if (gradients == true)
+ for (unsigned int d = 0; d < dim; ++d)
+ {
+ if (integrate)
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ tmp_values[q] = gradients_quad[(c * dim + d) * n_q_points +
+ orientation[q]];
+ else
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ tmp_values[orientation[q]] =
+ gradients_quad[(c * dim + d) * n_q_points + q];
+ for (unsigned int q = 0; q < n_q_points; ++q)
+ gradients_quad[(c * dim + d) * n_q_points + q] =
+ tmp_values[q];
+ }
+ }
+ }
+ };
+
+
+
+ template <int dim, int fe_degree, int n_components, typename Number>
+ struct CellwiseInverseMassMatrixImpl
+ {
+ static void
+ apply(const AlignedVector<Number> &inverse_shape,
+ const AlignedVector<Number> &inverse_coefficients,
+ const unsigned int n_desired_components,
+ const Number * in_array,
+ Number * out_array)
+ {
+ constexpr unsigned int dofs_per_component =
+ Utilities::pow(fe_degree + 1, dim);
+ Assert(inverse_coefficients.size() > 0 &&
+ inverse_coefficients.size() % dofs_per_component == 0,
+             ExcMessage(
+               "Expected the size of the diagonal to be a multiple of "
+               "the number of scalar DoFs per cell"));
+ if (inverse_coefficients.size() != dofs_per_component)
+ AssertDimension(n_desired_components * dofs_per_component,
+ inverse_coefficients.size());
+
+      Assert(dim >= 1 && dim <= 3, ExcNotImplemented());
+
+ internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
+ dim,
+ fe_degree + 1,
+ fe_degree + 1,
+ Number>
+ evaluator(AlignedVector<Number>(),
+ AlignedVector<Number>(),
+ inverse_shape);
+
+ const unsigned int shift_coefficient =
+ inverse_coefficients.size() > dofs_per_component ? dofs_per_component :
+ 0;
+ const Number *inv_coefficient = inverse_coefficients.data();
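+
+      // The loop below applies the inverse mass matrix in its tensor-product
+      // factorization, roughly M^{-1} = S^{-1} D^{-1} S^{-T}: S is the
+      // transform from basis coefficients to quadrature points whose 1D
+      // inverse is stored in `inverse_shape`, and D is the diagonal held in
+      // `inverse_coefficients`. The backward sweeps (second template
+      // argument false) apply S^{-T}, the pointwise scaling applies D^{-1},
+      // and the forward sweeps (second template argument true) apply S^{-1}.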
+ for (unsigned int d = 0; d < n_desired_components; ++d)
+ {
+ const Number *in = in_array + d * dofs_per_component;
+ Number * out = out_array + d * dofs_per_component;
+ // Need to select 'apply' method with hessian slot because values
+ // assume symmetries that do not exist in the inverse shapes
+ evaluator.template hessians<0, false, false>(in, out);
+ if (dim > 1)
+ {
+ evaluator.template hessians<1, false, false>(out, out);
+
+ if (dim == 3)
+ {
+ evaluator.template hessians<2, false, false>(out, out);
+ for (unsigned int q = 0; q < dofs_per_component; ++q)
+ out[q] *= inv_coefficient[q];
+ evaluator.template hessians<2, true, false>(out, out);
+ }
+ else if (dim == 2)
+ for (unsigned int q = 0; q < dofs_per_component; ++q)
+ out[q] *= inv_coefficient[q];
+
+ evaluator.template hessians<1, true, false>(out, out);
+ }
+ else
+ {
+ for (unsigned int q = 0; q < dofs_per_component; ++q)
+ out[q] *= inv_coefficient[q];
+ }
+ evaluator.template hessians<0, true, false>(out, out);
+
+ inv_coefficient += shift_coefficient;
+ }
+ }
+
+ static void
+ transform_from_q_points_to_basis(const AlignedVector<Number> &inverse_shape,
+ const unsigned int n_desired_components,
+ const Number * in_array,
+ Number * out_array)
+ {
+ constexpr unsigned int dofs_per_cell = Utilities::pow(fe_degree + 1, dim);
+ internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
+ dim,
+ fe_degree + 1,
+ fe_degree + 1,
+ Number>
+ evaluator(AlignedVector<Number>(),
+ AlignedVector<Number>(),
+ inverse_shape);
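+
+      // Only the forward sweeps with the inverse 1D shape values are applied
+      // here, i.e. data given at the quadrature (collocation) points is
+      // transformed back into coefficients of the underlying polynomial
+      // basis, one tensor direction at a time.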
+
+ for (unsigned int d = 0; d < n_desired_components; ++d)
+ {
+ const Number *in = in_array + d * dofs_per_cell;
+ Number * out = out_array + d * dofs_per_cell;
+
+ if (dim == 3)
+ {
+ evaluator.template hessians<2, true, false>(in, out);
+ evaluator.template hessians<1, true, false>(out, out);
+ evaluator.template hessians<0, true, false>(out, out);
+ }
+ if (dim == 2)
+ {
+ evaluator.template hessians<1, true, false>(in, out);
+ evaluator.template hessians<0, true, false>(out, out);
+ }
+ if (dim == 1)
+ evaluator.template hessians<0, true, false>(in, out);
+ }
+ }
+ };
+
} // end of namespace internal
#include <deal.II/matrix_free/shape_info.h>
#include <deal.II/matrix_free/tensor_product_kernels.h>
#include <deal.II/matrix_free/type_traits.h>
+#include <deal.II/matrix_free/vector_access_internal.h>
DEAL_II_NAMESPACE_OPEN
* points is inaccurate and this value must be used instead.
*/
const unsigned int n_q_points;
-
-protected:
- /**
- * For faces not oriented in the standard way, this method applies
- * re-indexing on quadrature points. Called at the end of evaluate() and at
- * the beginning of integrate().
- */
- void
- adjust_for_face_orientation(const bool integrate,
- const bool values,
- const bool gradients);
};
namespace internal
{
- // below we use type-traits from matrix-free/type_traits.h
-
- // access to generic const vectors that have operator ().
- // FIXME: this is wrong for Trilinos/Petsc MPI vectors
- // where we should first do Partitioner::local_to_global()
- template <typename VectorType,
- typename std::enable_if<!has_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline typename VectorType::value_type
- vector_access(const VectorType &vec, const unsigned int entry)
- {
- return vec(entry);
- }
-
-
-
- // access to generic non-const vectors that have operator ().
- // FIXME: this is wrong for Trilinos/Petsc MPI vectors
- // where we should first do Partitioner::local_to_global()
- template <typename VectorType,
- typename std::enable_if<!has_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline typename VectorType::value_type &
- vector_access(VectorType &vec, const unsigned int entry)
- {
- return vec(entry);
- }
-
-
-
- // access to distributed MPI vectors that have a local_element(uint)
- // method to access data in local index space, which is what we use in
- // DoFInfo and hence in read_dof_values etc.
- template <typename VectorType,
- typename std::enable_if<has_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline typename VectorType::value_type &
- vector_access(VectorType &vec, const unsigned int entry)
- {
- return vec.local_element(entry);
- }
-
-
-
- // same for const access
- template <typename VectorType,
- typename std::enable_if<has_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline typename VectorType::value_type
- vector_access(const VectorType &vec, const unsigned int entry)
- {
- return vec.local_element(entry);
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<has_add_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_add(VectorType & vec,
- const unsigned int entry,
- const typename VectorType::value_type &val)
- {
- vec.add_local_element(entry, val);
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<!has_add_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_add(VectorType & vec,
- const unsigned int entry,
- const typename VectorType::value_type &val)
- {
- vector_access(vec, entry) += val;
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<has_add_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_add_global(VectorType & vec,
- const types::global_dof_index entry,
- const typename VectorType::value_type &val)
- {
- vec.add(entry, val);
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<!has_add_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_add_global(VectorType & vec,
- const types::global_dof_index entry,
- const typename VectorType::value_type &val)
- {
- vec(entry) += val;
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<has_set_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_set(VectorType & vec,
- const unsigned int entry,
- const typename VectorType::value_type &val)
- {
- vec.set_local_element(entry, val);
- }
-
-
-
- template <typename VectorType,
- typename std::enable_if<!has_set_local_element<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- vector_access_set(VectorType & vec,
- const unsigned int entry,
- const typename VectorType::value_type &val)
- {
- vector_access(vec, entry) = val;
- }
-
-
-
- // this is to make sure that the parallel partitioning in VectorType
- // is really the same as stored in MatrixFree.
- // version below is when has_partitioners_are_compatible == false
- // FIXME: this is incorrect for PETSc/Trilinos MPI vectors
- template <
- typename VectorType,
- typename std::enable_if<!has_partitioners_are_compatible<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- check_vector_compatibility(
- const VectorType & vec,
- const internal::MatrixFreeFunctions::DoFInfo &dof_info)
- {
- (void)vec;
- (void)dof_info;
-
- AssertDimension(vec.size(), dof_info.vector_partitioner->size());
- }
-
-
-
- // same as above for has_partitioners_are_compatible == true
- template <
- typename VectorType,
- typename std::enable_if<has_partitioners_are_compatible<VectorType>::value,
- VectorType>::type * = nullptr>
- inline void
- check_vector_compatibility(
- const VectorType & vec,
- const internal::MatrixFreeFunctions::DoFInfo &dof_info)
- {
- (void)vec;
- (void)dof_info;
- Assert(vec.partitioners_are_compatible(*dof_info.vector_partitioner),
- ExcMessage(
- "The parallel layout of the given vector is not "
- "compatible with the parallel partitioning in MatrixFree. "
- "Use MatrixFree::initialize_dof_vector to get a "
- "compatible vector."));
- }
-
-
-
- // Below, three classes (VectorReader, VectorSetter,
- // VectorDistributorLocalToGlobal) implement the same interface and can be
- // used to to read from vector, set elements of a vector and add to elements
- // of the vector.
-
- // 1. A class to read data from vector
- template <typename Number, typename VectorizedArrayType>
- struct VectorReader
- {
- template <typename VectorType>
- void
- process_dof(const unsigned int index,
- const VectorType & vec,
- Number & res) const
- {
- res = vector_access(vec, index);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- const Number *vec_ptr = vec.begin() + dof_index;
- for (unsigned int i = 0; i < dofs_per_cell;
- ++i, vec_ptr += VectorizedArrayType::n_array_elements)
- dof_values[i].load(vec_ptr);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- const VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- dof_values[i][v] = vector_access(
- vec, dof_index + v + i * VectorizedArrayType::n_array_elements);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- dealii::vectorized_load_and_transpose(dofs_per_cell,
- vec.begin(),
- dof_indices,
- dof_values);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- const VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int d = 0; d < dofs_per_cell; ++d)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- dof_values[d][v] = vector_access(vec, dof_indices[v] + d);
- }
-
-
-
- // variant where VectorType::value_type is the same as Number -> can call
- // gather
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, true>) const
- {
- res.gather(vec.begin() + constant_offset, indices);
- }
-
-
-
- // variant where VectorType::value_type is not the same as Number -> must
- // manually load the data
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- const VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- res[v] = vector_access(vec, indices[v] + constant_offset);
- }
-
-
-
- template <typename VectorType>
- void
- process_dof_global(const types::global_dof_index index,
- const VectorType & vec,
- Number & res) const
- {
- res = vec(index);
- }
-
-
-
- void
- pre_constraints(const Number &, Number &res) const
- {
- res = Number();
- }
-
-
-
- template <typename VectorType>
- void
- process_constraint(const unsigned int index,
- const Number weight,
- const VectorType & vec,
- Number & res) const
- {
- res += weight * vector_access(vec, index);
- }
-
-
-
- void
- post_constraints(const Number &sum, Number &write_pos) const
- {
- write_pos = sum;
- }
-
-
-
- void
- process_empty(VectorizedArrayType &res) const
- {
- res = VectorizedArrayType();
- }
- };
-
-
-
- // 2. A class to add values to the vector during
- // FEEvaluation::distribute_local_to_global() call
- template <typename Number, typename VectorizedArrayType>
- struct VectorDistributorLocalToGlobal
- {
- template <typename VectorType>
- void
- process_dof(const unsigned int index, VectorType &vec, Number &res) const
- {
- vector_access_add(vec, index, res);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- Number *vec_ptr = vec.begin() + dof_index;
- for (unsigned int i = 0; i < dofs_per_cell;
- ++i, vec_ptr += VectorizedArrayType::n_array_elements)
- {
- VectorizedArrayType tmp;
- tmp.load(vec_ptr);
- tmp += dof_values[i];
- tmp.store(vec_ptr);
- }
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access_add(vec,
- dof_index + v +
- i * VectorizedArrayType::n_array_elements,
- dof_values[i][v]);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- vectorized_transpose_and_store(
- true, dofs_per_cell, dof_values, dof_indices, vec.begin());
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int d = 0; d < dofs_per_cell; ++d)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access_add(vec, dof_indices[v] + d, dof_values[d][v]);
- }
-
-
-
- // variant where VectorType::value_type is the same as Number -> can call
- // scatter
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, true>) const
- {
-# if DEAL_II_COMPILER_VECTORIZATION_LEVEL < 3
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access(vec, indices[v] + constant_offset) += res[v];
-# else
- // only use gather in case there is also scatter.
- VectorizedArrayType tmp;
- tmp.gather(vec.begin() + constant_offset, indices);
- tmp += res;
- tmp.scatter(indices, vec.begin() + constant_offset);
-# endif
- }
-
-
-
- // variant where VectorType::value_type is not the same as Number -> must
- // manually append all data
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access_add(vec, indices[v] + constant_offset, res[v]);
- }
-
-
-
- template <typename VectorType>
- void
- process_dof_global(const types::global_dof_index index,
- VectorType & vec,
- Number & res) const
- {
- vector_access_add_global(vec, index, res);
- }
-
-
-
- void
- pre_constraints(const Number &input, Number &res) const
- {
- res = input;
- }
-
-
-
- template <typename VectorType>
- void
- process_constraint(const unsigned int index,
- const Number weight,
- VectorType & vec,
- Number & res) const
- {
- vector_access_add(vec, index, weight * res);
- }
-
-
-
- void
- post_constraints(const Number &, Number &) const
- {}
-
-
-
- void
- process_empty(VectorizedArrayType &) const
- {}
- };
-
-
-
- // 3. A class to set elements of the vector
- template <typename Number, typename VectorizedArrayType>
- struct VectorSetter
- {
- template <typename VectorType>
- void
- process_dof(const unsigned int index, VectorType &vec, Number &res) const
- {
- vector_access(vec, index) = res;
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- Number *vec_ptr = vec.begin() + dof_index;
- for (unsigned int i = 0; i < dofs_per_cell;
- ++i, vec_ptr += VectorizedArrayType::n_array_elements)
- dof_values[i].store(vec_ptr);
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized(const unsigned int dofs_per_cell,
- const unsigned int dof_index,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access(vec,
- dof_index + v +
- i * VectorizedArrayType::n_array_elements) =
- dof_values[i][v];
- }
-
-
-
- template <typename VectorType>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, true>) const
- {
- vectorized_transpose_and_store(
- false, dofs_per_cell, dof_values, dof_indices, vec.begin());
- }
-
-
-
- template <typename VectorType, bool booltype>
- void
- process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
- const unsigned int * dof_indices,
- VectorType & vec,
- VectorizedArrayType *dof_values,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int i = 0; i < dofs_per_cell; ++i)
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access(vec, dof_indices[v] + i) = dof_values[i][v];
- }
-
-
-
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, true>) const
- {
- res.scatter(indices, vec.begin() + constant_offset);
- }
-
-
-
- template <typename VectorType>
- void
- process_dof_gather(const unsigned int * indices,
- VectorType & vec,
- const unsigned int constant_offset,
- VectorizedArrayType &res,
- std::integral_constant<bool, false>) const
- {
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- vector_access(vec, indices[v] + constant_offset) = res[v];
- }
-
-
-
- template <typename VectorType>
- void
- process_dof_global(const types::global_dof_index index,
- VectorType & vec,
- Number & res) const
- {
- vec(index) = res;
- }
-
-
-
- void
- pre_constraints(const Number &, Number &) const
- {}
-
-
-
- template <typename VectorType>
- void
- process_constraint(const unsigned int,
- const Number,
- VectorType &,
- Number &) const
- {}
-
-
-
- void
- post_constraints(const Number &, Number &) const
- {}
-
-
-
- void
- process_empty(VectorizedArrayType &) const
- {}
- };
-
-
-
// selects between block vectors and non-block vectors, which provides a
// unified interface for extracting blocks from block vectors and simply
// passing through usual (non-block) vectors
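+
+// As an illustration of that idea (a hypothetical sketch with the made-up
+// name BlockAccessSketch, not the selector actually used here), such a
+// helper can be written as a small traits class that forwards block(i) for
+// block vectors and returns the vector itself otherwise:
+//
+// @code
+//   template <typename VectorType, bool is_block_vector>
+//   struct BlockAccessSketch
+//   {
+//     static VectorType &
+//     get_component(VectorType &vec, const unsigned int)
+//     {
+//       return vec;
+//     }
+//   };
+//
+//   template <typename VectorType>
+//   struct BlockAccessSketch<VectorType, true>
+//   {
+//     static typename VectorType::BlockType &
+//     get_component(VectorType &vec, const unsigned int component)
+//     {
+//       return vec.block(component);
+//     }
+//   };
+// @endcode
+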
if (!(evaluate_values + evaluate_gradients))
return;
- constexpr unsigned int static_dofs_per_face =
- fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
- numbers::invalid_unsigned_int;
- const unsigned int dofs_per_face =
- fe_degree > -1 ? static_dofs_per_face :
- Utilities::pow(this->data->fe_degree + 1, dim - 1);
-
- // we allocate small amounts of data on the stack to signal the compiler
- // that this temporary data is only needed for the calculations but the
- // final results can be discarded and need not be written back to
- // memory. For large sizes or when the dofs per face is not a compile-time
- // constant, however, we want to go to the heap in the `scratch_data`
- // variable to not risk a stack overflow.
- constexpr unsigned int stack_array_size_threshold = 100;
-
- VectorizedArrayType
- temp_data[static_dofs_per_face < stack_array_size_threshold ?
- n_components * 2 * static_dofs_per_face :
- 1];
- VectorizedArrayType *temp1;
- if (static_dofs_per_face < stack_array_size_threshold)
- temp1 = &temp_data[0];
- else
- temp1 = this->scratch_data;
-
- internal::FEFaceNormalEvaluationImpl<
+ internal::FEFaceEvaluationSelector<
dim,
fe_degree,
+ n_q_points_1d,
n_components,
- VectorizedArrayType>::template interpolate<true, false>(*this->data,
- values_array,
- temp1,
- evaluate_gradients,
- this->face_no);
-
- const unsigned int n_q_points_1d_actual = fe_degree > -1 ? n_q_points_1d : 0;
- if (fe_degree > -1 &&
- this->subface_index >= GeometryInfo<dim>::max_children_per_cell &&
- this->data->element_type <=
- internal::MatrixFreeFunctions::tensor_symmetric)
- internal::FEFaceEvaluationImpl<
- true,
- dim,
- fe_degree,
- n_q_points_1d_actual,
- n_components,
- VectorizedArrayType>::evaluate_in_face(*this->data,
- temp1,
- this->begin_values(),
- this->begin_gradients(),
- this->scratch_data +
- 2 * n_components * dofs_per_face,
- evaluate_values,
- evaluate_gradients,
- this->subface_index);
- else
- internal::FEFaceEvaluationImpl<
- false,
- dim,
- fe_degree,
- n_q_points_1d_actual,
- n_components,
- VectorizedArrayType>::evaluate_in_face(*this->data,
- temp1,
- this->begin_values(),
- this->begin_gradients(),
- this->scratch_data +
- 2 * n_components * dofs_per_face,
- evaluate_values,
- evaluate_gradients,
- this->subface_index);
-
- if (this->face_orientation)
- adjust_for_face_orientation(false, evaluate_values, evaluate_gradients);
+ Number,
+ VectorizedArrayType>::evaluate(*this->data,
+ values_array,
+ this->begin_values(),
+ this->begin_gradients(),
+ this->scratch_data,
+ evaluate_values,
+ evaluate_gradients,
+ this->face_no,
+ this->subface_index,
+ this->face_orientation,
+ this->mapping_data
+ ->descriptor[this->active_fe_index]
+ .face_orientations);
# ifdef DEBUG
if (evaluate_values == true)
if (!(integrate_values + integrate_gradients))
return;
- if (this->face_orientation)
- adjust_for_face_orientation(true, integrate_values, integrate_gradients);
-
- constexpr unsigned int static_dofs_per_face =
- fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
- numbers::invalid_unsigned_int;
- const unsigned int dofs_per_face =
- fe_degree > -1 ? static_dofs_per_face :
- Utilities::pow(this->data->fe_degree + 1, dim - 1);
-
- constexpr unsigned int stack_array_size_threshold = 100;
-
- VectorizedArrayType
- temp_data[static_dofs_per_face < stack_array_size_threshold ?
- n_components * 2 * static_dofs_per_face :
- 1];
- VectorizedArrayType *temp1;
- if (static_dofs_per_face < stack_array_size_threshold)
- temp1 = &temp_data[0];
- else
- temp1 = this->scratch_data;
-
- const unsigned int n_q_points_1d_actual = fe_degree > -1 ? n_q_points_1d : 0;
- if (fe_degree > -1 &&
- this->subface_index >= GeometryInfo<dim - 1>::max_children_per_cell &&
- this->data->element_type <=
- internal::MatrixFreeFunctions::tensor_symmetric)
- internal::FEFaceEvaluationImpl<
- true,
- dim,
- fe_degree,
- n_q_points_1d_actual,
- n_components,
- VectorizedArrayType>::integrate_in_face(*this->data,
- temp1,
- this->begin_values(),
- this->begin_gradients(),
- this->scratch_data +
- 2 * n_components *
- dofs_per_face,
- integrate_values,
- integrate_gradients,
- this->subface_index);
- else
- internal::FEFaceEvaluationImpl<
- false,
- dim,
- fe_degree,
- n_q_points_1d_actual,
- n_components,
- VectorizedArrayType>::integrate_in_face(*this->data,
- temp1,
- this->begin_values(),
- this->begin_gradients(),
- this->scratch_data +
- 2 * n_components *
- dofs_per_face,
- integrate_values,
- integrate_gradients,
- this->subface_index);
-
- internal::FEFaceNormalEvaluationImpl<dim,
- fe_degree,
- n_components,
- VectorizedArrayType>::
- template interpolate<false, false>(
- *this->data, temp1, values_array, integrate_gradients, this->face_no);
+ internal::FEFaceEvaluationSelector<
+ dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ Number,
+ VectorizedArrayType>::integrate(*this->data,
+ values_array,
+ this->begin_values(),
+ this->begin_gradients(),
+ this->scratch_data,
+ integrate_values,
+ integrate_gradients,
+ this->face_no,
+ this->subface_index,
+ this->face_orientation,
+ this->mapping_data
+ ->descriptor[this->active_fe_index]
+ .face_orientations);
}
const bool evaluate_values,
const bool evaluate_gradients)
{
- const unsigned int side = this->face_no % 2;
-
- constexpr unsigned int static_dofs_per_face =
- fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
- numbers::invalid_unsigned_int;
- const unsigned int dofs_per_face =
- fe_degree > -1 ? static_dofs_per_face :
- Utilities::pow(this->data->fe_degree + 1, dim - 1);
-
- constexpr unsigned int stack_array_size_threshold = 100;
-
- VectorizedArrayType
- temp_data[static_dofs_per_face < stack_array_size_threshold ?
- n_components_ * 2 * dofs_per_face :
- 1];
- VectorizedArrayType *__restrict temp1;
- if (static_dofs_per_face < stack_array_size_threshold)
- temp1 = &temp_data[0];
- else
- temp1 = this->scratch_data;
-
- internal::VectorReader<Number, VectorizedArrayType> reader;
- std::integral_constant<bool,
- internal::is_vectorizable<VectorType, Number>::value>
- vector_selector;
-
- // case 1: contiguous and interleaved indices
- if (((evaluate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous)
- {
- AssertDimension(
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell],
- VectorizedArrayType::n_array_elements);
- const unsigned int dof_index =
- this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements] +
- this->dof_info
- ->component_dof_indices_offset[this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements;
-
- if (fe_degree > 1 && evaluate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 1 + side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 = index_array[2 * i];
- const unsigned int ind2 = index_array[2 * i + 1];
- AssertIndexRange(ind1, dofs_per_cell);
- AssertIndexRange(ind2, dofs_per_cell);
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- reader.process_dofs_vectorized(
- 1,
- dof_index + (ind1 + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- input_vector,
- temp1 + i + 2 * comp * dofs_per_face,
- vector_selector);
- reader.process_dofs_vectorized(
- 1,
- dof_index + (ind2 + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- input_vector,
- temp1 + dofs_per_face + i + 2 * comp * dofs_per_face,
- vector_selector);
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
- grad_weight *
- (temp1[i + 2 * comp * dofs_per_face] -
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face]);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind = index_array[i];
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- reader.process_dofs_vectorized(
- 1,
- dof_index + (ind + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- input_vector,
- temp1 + i + 2 * comp * dofs_per_face,
- vector_selector);
- }
- }
- }
-
- // case 2: contiguous and interleaved indices with fixed stride
- else if (((evaluate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous_strided)
- {
- AssertDimension(
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell],
- VectorizedArrayType::n_array_elements);
- const unsigned int *indices =
- &this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements];
- if (fe_degree > 1 && evaluate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 1 + side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
-
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- index_array[2 * i] * VectorizedArrayType::n_array_elements;
- const unsigned int ind2 =
- index_array[2 * i + 1] * VectorizedArrayType::n_array_elements;
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- reader.process_dof_gather(
- indices,
- input_vector,
- ind1 +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- VectorizedArrayType grad;
- reader.process_dof_gather(
- indices,
- input_vector,
- ind2 +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- grad,
- vector_selector);
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
- grad_weight * (temp1[i + 2 * comp * dofs_per_face] - grad);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind =
- index_array[i] * VectorizedArrayType::n_array_elements;
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- reader.process_dof_gather(
- indices,
- input_vector,
- ind +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- }
- }
- }
-
- // case 3: contiguous and interleaved indices with mixed stride
- else if (((evaluate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous_mixed_strides)
- {
- const unsigned int *strides =
- &this->dof_info->dof_indices_interleave_strides
- [this->dof_access_index]
- [this->cell * VectorizedArrayType::n_array_elements];
- unsigned int indices[VectorizedArrayType::n_array_elements];
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- indices[v] =
- this->dof_info->dof_indices_contiguous
- [this->dof_access_index]
- [this->cell * VectorizedArrayType::n_array_elements + v] +
- this->dof_info
- ->component_dof_indices_offset[this->active_fe_index]
- [this->first_selected_component] *
- strides[v];
- const unsigned int nvec =
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell];
-
- if (fe_degree > 1 && evaluate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 1 + side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
-
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- if (nvec == VectorizedArrayType::n_array_elements)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- unsigned int ind1[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind1[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i]) *
- strides[v];
- unsigned int ind2[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind2[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i + 1]) *
- strides[v];
- reader.process_dof_gather(ind1,
- input_vector,
- 0,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- VectorizedArrayType grad;
- reader.process_dof_gather(
- ind2, input_vector, 0, grad, vector_selector);
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
- grad_weight * (temp1[i + 2 * comp * dofs_per_face] - grad);
- }
- else
- {
- for (unsigned int i = 0; i < n_components_ * 2 * dofs_per_face;
- ++i)
- temp1[i] = VectorizedArrayType();
- for (unsigned int v = 0; v < nvec; ++v)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i]) *
- strides[v];
- const unsigned int ind2 =
- indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i + 1]) *
- strides[v];
- reader.process_dof(
- ind1,
- const_cast<VectorType &>(input_vector),
- temp1[i + 2 * comp * dofs_per_face][v]);
- Number grad;
- reader.process_dof(ind2,
- const_cast<VectorType &>(input_vector),
- grad);
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face][v] =
- grad_weight[0] *
- (temp1[i + 2 * comp * dofs_per_face][v] - grad);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- if (nvec == VectorizedArrayType::n_array_elements)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- unsigned int ind[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[i]) *
- strides[v];
- reader.process_dof_gather(ind,
- input_vector,
- 0,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- }
- else
- {
- for (unsigned int i = 0; i < n_components_ * dofs_per_face; ++i)
- temp1[i] = VectorizedArrayType();
- for (unsigned int v = 0; v < nvec; ++v)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- indices[v] +
- (comp * static_dofs_per_component + index_array[i]) *
- strides[v];
- reader.process_dof(
- ind1,
- const_cast<VectorType &>(input_vector),
- temp1[i + 2 * comp * dofs_per_face][v]);
- }
- }
- }
- }
-
- // case 4: contiguous indices without interleaving
- else if (((evaluate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- contiguous &&
- this->dof_info->n_vectorization_lanes_filled[this->dof_access_index]
- [this->cell] ==
- VectorizedArrayType::n_array_elements)
- {
- const unsigned int *indices =
- &this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements];
- if (evaluate_gradients == true &&
- this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 1 + side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
-
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 = index_array[2 * i];
- const unsigned int ind2 = index_array[2 * i + 1];
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- reader.process_dof_gather(
- indices,
- input_vector,
- ind1 + comp * static_dofs_per_component +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- VectorizedArrayType grad;
- reader.process_dof_gather(
- indices,
- input_vector,
- ind2 + comp * static_dofs_per_component +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- grad,
- vector_selector);
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face] =
- grad_weight * (temp1[i + 2 * comp * dofs_per_face] - grad);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- const unsigned int ind = index_array[i];
- reader.process_dof_gather(
- indices,
- input_vector,
- ind + comp * static_dofs_per_component +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- temp1[i + comp * 2 * dofs_per_face],
- vector_selector);
- }
- }
- }
-
- // case 5: default vector access
- else
+ static_assert(internal::has_begin<VectorType>::value &&
+ (std::is_same<decltype(std::declval<VectorType>().begin()),
+ double *>::value ||
+ std::is_same<decltype(std::declval<VectorType>().begin()),
+ float *>::value),
+ "This function requires a vector type with begin() function "
+ "evaluating to a pointer to basic number (float,double). "
+ "Use read_dof_values() followed by evaluate() instead.");
+
+ if (!internal::FEFaceEvaluationSelector<dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ Number,
+ VectorizedArrayType>::
+ gather_evaluate(input_vector.begin(),
+ *this->data,
+ *this->dof_info,
+ this->begin_values(),
+ this->begin_gradients(),
+ this->scratch_data,
+ evaluate_values,
+ evaluate_gradients,
+ this->active_fe_index,
+ this->first_selected_component,
+ this->cell,
+ this->face_no,
+ this->subface_index,
+ this->dof_access_index,
+ this->face_orientation,
+ this->mapping_data->descriptor[this->active_fe_index]
+ .face_orientations))
{
this->read_dof_values(input_vector);
- internal::FEFaceNormalEvaluationImpl<dim,
- fe_degree,
- n_components_,
- VectorizedArrayType>::
- template interpolate<true, false>(*this->data,
- this->values_dofs[0],
- temp1,
- evaluate_gradients,
- this->face_no);
+ this->evaluate(evaluate_values, evaluate_gradients);
}
- if (fe_degree > -1 &&
- this->subface_index >= GeometryInfo<dim>::max_children_per_cell &&
- this->data->element_type <=
- internal::MatrixFreeFunctions::tensor_symmetric)
- internal::FEFaceEvaluationImpl<
- true,
- dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- VectorizedArrayType>::evaluate_in_face(*this->data,
- temp1,
- this->values_quad[0],
- this->gradients_quad[0][0],
- this->scratch_data +
- 2 * n_components_ *
- dofs_per_face,
- evaluate_values,
- evaluate_gradients,
- this->subface_index);
- else
- internal::FEFaceEvaluationImpl<
- false,
- dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- VectorizedArrayType>::evaluate_in_face(*this->data,
- temp1,
- this->values_quad[0],
- this->gradients_quad[0][0],
- this->scratch_data +
- 2 * n_components_ *
- dofs_per_face,
- evaluate_values,
- evaluate_gradients,
- this->subface_index);
-
- if (this->face_orientation)
- adjust_for_face_orientation(false, evaluate_values, evaluate_gradients);
-
# ifdef DEBUG
if (evaluate_values == true)
this->values_quad_initialized = true;
const bool integrate_gradients,
VectorType &destination)
{
- const unsigned int side = this->face_no % 2;
- const unsigned int dofs_per_face =
- fe_degree > -1 ? Utilities::pow(fe_degree + 1, dim - 1) :
- Utilities::pow(this->data->fe_degree + 1, dim - 1);
-
- constexpr unsigned int stack_array_size_threshold = 100;
-
- VectorizedArrayType temp_data[dofs_per_face < stack_array_size_threshold ?
- n_components_ * 2 * dofs_per_face :
- 1];
- VectorizedArrayType *__restrict temp1;
- if (dofs_per_face < stack_array_size_threshold)
- temp1 = &temp_data[0];
- else
- temp1 = this->scratch_data;
-
- if (this->face_orientation)
- adjust_for_face_orientation(true, integrate_values, integrate_gradients);
- if (fe_degree > -1 &&
- this->subface_index >= GeometryInfo<dim>::max_children_per_cell &&
- this->data->element_type <=
- internal::MatrixFreeFunctions::tensor_symmetric)
- internal::FEFaceEvaluationImpl<
- true,
- dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- VectorizedArrayType>::integrate_in_face(*this->data,
- temp1,
- this->values_quad[0],
- this->gradients_quad[0][0],
- this->scratch_data +
- 2 * n_components_ *
- dofs_per_face,
- integrate_values,
- integrate_gradients,
- this->subface_index);
- else
- internal::FEFaceEvaluationImpl<
- false,
- dim,
- fe_degree,
- n_q_points_1d,
- n_components_,
- VectorizedArrayType>::integrate_in_face(*this->data,
- temp1,
- this->values_quad[0],
- this->gradients_quad[0][0],
- this->scratch_data +
- 2 * n_components_ *
- dofs_per_face,
- integrate_values,
- integrate_gradients,
- this->subface_index);
-
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
-
- internal::VectorDistributorLocalToGlobal<Number, VectorizedArrayType> writer;
- std::integral_constant<bool,
- internal::is_vectorizable<VectorType, Number>::value>
- vector_selector;
-
- // case 1: contiguous and interleaved indices
- if (((integrate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous)
- {
- AssertDimension(
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell],
- VectorizedArrayType::n_array_elements);
- const unsigned int dof_index =
- this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements] +
- this->dof_info
- ->component_dof_indices_offset[this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements;
-
- if (fe_degree > 1 && integrate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 2 - side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 = index_array[2 * i];
- const unsigned int ind2 = index_array[2 * i + 1];
- AssertIndexRange(ind1, dofs_per_cell);
- AssertIndexRange(ind2, dofs_per_cell);
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- VectorizedArrayType val =
- temp1[i + 2 * comp * dofs_per_face] -
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- VectorizedArrayType grad =
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- writer.process_dofs_vectorized(
- 1,
- dof_index + (ind1 + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- destination,
- &val,
- vector_selector);
- writer.process_dofs_vectorized(
- 1,
- dof_index + (ind2 + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- destination,
- &grad,
- vector_selector);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind = index_array[i];
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- writer.process_dofs_vectorized(
- 1,
- dof_index + (ind + comp * static_dofs_per_component) *
- VectorizedArrayType::n_array_elements,
- destination,
- temp1 + i + 2 * comp * dofs_per_face,
- vector_selector);
- }
- }
- }
-
- // case 2: contiguous and interleaved indices with fixed stride
- else if (((integrate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous_strided)
- {
- AssertDimension(
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell],
- VectorizedArrayType::n_array_elements);
- const unsigned int *indices =
- &this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements];
- if (fe_degree > 1 && integrate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 2 - side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
-
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- index_array[2 * i] * VectorizedArrayType::n_array_elements;
- const unsigned int ind2 =
- index_array[2 * i + 1] * VectorizedArrayType::n_array_elements;
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- VectorizedArrayType val =
- temp1[i + 2 * comp * dofs_per_face] -
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- VectorizedArrayType grad =
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- writer.process_dof_gather(
- indices,
- destination,
- ind1 +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- val,
- vector_selector);
- writer.process_dof_gather(
- indices,
- destination,
- ind2 +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- grad,
- vector_selector);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind =
- index_array[i] * VectorizedArrayType::n_array_elements;
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- writer.process_dof_gather(
- indices,
- destination,
- ind +
- comp * static_dofs_per_component *
- VectorizedArrayType::n_array_elements +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index]
- [this->first_selected_component] *
- VectorizedArrayType::n_array_elements,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- }
- }
- }
-
- // case 3: contiguous and interleaved indices with mixed stride
- else if (((integrate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- interleaved_contiguous_mixed_strides)
- {
- const unsigned int *strides =
- &this->dof_info->dof_indices_interleave_strides
- [this->dof_access_index]
- [this->cell * VectorizedArrayType::n_array_elements];
- unsigned int indices[VectorizedArrayType::n_array_elements];
- for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
- indices[v] =
- this->dof_info->dof_indices_contiguous
- [this->dof_access_index]
- [this->cell * VectorizedArrayType::n_array_elements + v] +
- this->dof_info
- ->component_dof_indices_offset[this->active_fe_index]
- [this->first_selected_component] *
- strides[v];
- const unsigned int nvec =
- this->dof_info
- ->n_vectorization_lanes_filled[this->dof_access_index][this->cell];
-
- if (fe_degree > 1 && integrate_gradients == true)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 2 - side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
-
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- if (nvec == VectorizedArrayType::n_array_elements)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- unsigned int ind1[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind1[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i]) *
- strides[v];
- unsigned int ind2[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind2[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i + 1]) *
- strides[v];
- VectorizedArrayType val =
- temp1[i + 2 * comp * dofs_per_face] -
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- VectorizedArrayType grad =
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- writer.process_dof_gather(
- ind1, destination, 0, val, vector_selector);
- writer.process_dof_gather(
- ind2, destination, 0, grad, vector_selector);
- }
- else
- {
- for (unsigned int v = 0; v < nvec; ++v)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i]) *
- strides[v];
- const unsigned int ind2 =
- indices[v] + (comp * static_dofs_per_component +
- index_array[2 * i + 1]) *
- strides[v];
- Number val =
- temp1[i + 2 * comp * dofs_per_face][v] -
- grad_weight[0] * temp1[i + dofs_per_face +
- 2 * comp * dofs_per_face][v];
- Number grad =
- grad_weight[0] *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face][v];
- writer.process_dof(ind1, destination, val);
- writer.process_dof(ind2, destination, grad);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- if (nvec == VectorizedArrayType::n_array_elements)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- unsigned int ind[VectorizedArrayType::n_array_elements];
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int v = 0;
- v < VectorizedArrayType::n_array_elements;
- ++v)
- ind[v] = indices[v] + (comp * static_dofs_per_component +
- index_array[i]) *
- strides[v];
- writer.process_dof_gather(ind,
- destination,
- 0,
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- }
- else
- {
- for (unsigned int v = 0; v < nvec; ++v)
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 =
- indices[v] +
- (comp * static_dofs_per_component + index_array[i]) *
- strides[v];
- writer.process_dof(
- ind1,
- destination,
- temp1[i + 2 * comp * dofs_per_face][v]);
- }
- }
- }
- }
-
- // case 4: contiguous indices without interleaving
- else if (((integrate_gradients == false &&
- this->data->nodal_at_cell_boundaries == true) ||
- (this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite &&
- fe_degree > 1)) &&
- this->dof_info
- ->index_storage_variants[this->dof_access_index][this->cell] ==
- internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
- contiguous &&
- this->dof_info->n_vectorization_lanes_filled[this->dof_access_index]
- [this->cell] ==
- VectorizedArrayType::n_array_elements)
- {
- const unsigned int *indices =
- &this->dof_info
- ->dof_indices_contiguous[this->dof_access_index]
- [this->cell *
- VectorizedArrayType::n_array_elements];
-
- if (integrate_gradients == true &&
- this->data->element_type ==
- internal::MatrixFreeFunctions::tensor_symmetric_hermite)
- {
- // we know that the gradient weights for the Hermite case on the
- // right (side==1) are the negative from the value at the left
- // (side==0), so we only read out one of them.
- const VectorizedArrayType grad_weight =
- this->data->shape_data_on_face[0][fe_degree + 2 - side];
- AssertDimension(this->data->face_to_cell_index_hermite.size(1),
- 2 * dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_hermite(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind1 = index_array[2 * i];
- const unsigned int ind2 = index_array[2 * i + 1];
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- {
- VectorizedArrayType val =
- temp1[i + 2 * comp * dofs_per_face] -
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- VectorizedArrayType grad =
- grad_weight *
- temp1[i + dofs_per_face + 2 * comp * dofs_per_face];
- writer.process_dof_gather(
- indices,
- destination,
- comp * static_dofs_per_component + ind1 +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- val,
- vector_selector);
- writer.process_dof_gather(
- indices,
- destination,
- comp * static_dofs_per_component + ind2 +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- grad,
- vector_selector);
- }
- }
- }
- else
- {
- AssertDimension(this->data->face_to_cell_index_nodal.size(1),
- dofs_per_face);
- const unsigned int *index_array =
- &this->data->face_to_cell_index_nodal(this->face_no, 0);
- for (unsigned int i = 0; i < dofs_per_face; ++i)
- {
- const unsigned int ind = index_array[i];
- for (unsigned int comp = 0; comp < n_components_; ++comp)
- writer.process_dof_gather(
- indices,
- destination,
- comp * static_dofs_per_component + ind +
- this->dof_info->component_dof_indices_offset
- [this->active_fe_index][this->first_selected_component],
- temp1[i + 2 * comp * dofs_per_face],
- vector_selector);
- }
- }
- }
-
- // case 5: default vector access
- else
+ static_assert(internal::has_begin<VectorType>::value &&
+ (std::is_same<decltype(std::declval<VectorType>().begin()),
+ double *>::value ||
+ std::is_same<decltype(std::declval<VectorType>().begin()),
+ float *>::value),
+ "This function requires a vector type with begin() function "
+ "evaluating to a pointer to basic number (float,double). "
+ "Use integrate() followed by distribute_local_to_global() "
+ "instead.");
+
+ if (!internal::FEFaceEvaluationSelector<dim,
+ fe_degree,
+ n_q_points_1d,
+ n_components,
+ Number,
+ VectorizedArrayType>::
+ integrate_scatter(destination.begin(),
+ *this->data,
+ *this->dof_info,
+ this->begin_dof_values(),
+ this->begin_values(),
+ this->begin_gradients(),
+ this->scratch_data,
+ integrate_values,
+ integrate_gradients,
+ this->active_fe_index,
+ this->first_selected_component,
+ this->cell,
+ this->face_no,
+ this->subface_index,
+ this->dof_access_index,
+ this->face_orientation,
+ this->mapping_data->descriptor[this->active_fe_index]
+ .face_orientations))
{
- internal::FEFaceNormalEvaluationImpl<dim,
- fe_degree,
- n_components_,
- VectorizedArrayType>::
- template interpolate<false, false>(*this->data,
- temp1,
- this->values_dofs[0],
- integrate_gradients,
- this->face_no);
+      // if we arrive here, writing directly into the destination vector did
+      // not succeed because some of the assumptions of integrate_scatter()
+      // were not fulfilled (e.g., an element type or degree that does not
+      // support direct writing), so we fall back to the generic
+      // distribute_local_to_global() path
this->distribute_local_to_global(destination);
}
}
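+
+// As a usage illustration (a sketch only; `data`, `src`, `dst`, and `range`
+// are placeholders for a MatrixFree object, source/destination vectors, and
+// the face range handed to a face worker), the combined access/evaluation
+// entry points are typically called as follows:
+//
+// @code
+//   FEFaceEvaluation<dim, fe_degree, fe_degree + 1, 1, double> phi(data, true);
+//   for (unsigned int face = range.first; face < range.second; ++face)
+//     {
+//       phi.reinit(face);
+//       // read from src and evaluate values and gradients in one sweep;
+//       // falls back to read_dof_values() + evaluate() when the fast path
+//       // does not apply
+//       phi.gather_evaluate(src, true, true);
+//       for (unsigned int q = 0; q < phi.n_q_points; ++q)
+//         phi.submit_value(phi.get_value(q), q);
+//       // integrate values and write into dst, again with automatic fallback
+//       phi.integrate_scatter(true, false, dst);
+//     }
+// @endcode
+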
-template <int dim,
- int fe_degree,
- int n_q_points_1d,
- int n_components,
- typename Number,
- typename VectorizedArrayType>
-inline void
-FEFaceEvaluation<
- dim,
- fe_degree,
- n_q_points_1d,
- n_components,
- Number,
- VectorizedArrayType>::adjust_for_face_orientation(const bool integrate,
- const bool values,
- const bool gradients)
-{
- VectorizedArrayType *tmp_values = this->scratch_data;
- const unsigned int * orientations =
- &this->mapping_data->descriptor[this->active_fe_index]
- .face_orientations[this->face_orientation][0];
- for (unsigned int c = 0; c < n_components; ++c)
- {
- if (values == true)
- {
- if (integrate)
- for (unsigned int q = 0; q < n_q_points; ++q)
- tmp_values[q] = this->values_quad[c][orientations[q]];
- else
- for (unsigned int q = 0; q < n_q_points; ++q)
- tmp_values[orientations[q]] = this->values_quad[c][q];
- for (unsigned int q = 0; q < n_q_points; ++q)
- this->values_quad[c][q] = tmp_values[q];
- }
- if (gradients == true)
- for (unsigned int d = 0; d < dim; ++d)
- {
- if (integrate)
- for (unsigned int q = 0; q < n_q_points; ++q)
- tmp_values[q] = this->gradients_quad[c][d][orientations[q]];
- else
- for (unsigned int q = 0; q < n_q_points; ++q)
- tmp_values[orientations[q]] = this->gradients_quad[c][d][q];
- for (unsigned int q = 0; q < n_q_points; ++q)
- this->gradients_quad[c][d][q] = tmp_values[q];
- }
- }
-}
-
-
-
template <int dim,
int fe_degree,
int n_q_points_1d,
const VectorizedArrayType * in_array,
VectorizedArrayType * out_array) const
{
- constexpr unsigned int dofs_per_component =
- Utilities::pow(fe_degree + 1, dim);
- Assert(inverse_coefficients.size() > 0 &&
- inverse_coefficients.size() % dofs_per_component == 0,
- ExcMessage(
- "Expected diagonal to be a multiple of scalar dof per cells"));
- if (inverse_coefficients.size() != dofs_per_component)
- AssertDimension(n_actual_components * dofs_per_component,
- inverse_coefficients.size());
-
- Assert(dim >= 1 || dim <= 3, ExcNotImplemented());
-
- internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
- dim,
- fe_degree + 1,
- fe_degree + 1,
- VectorizedArrayType>
- evaluator(inverse_shape, inverse_shape, inverse_shape);
-
- const unsigned int shift_coefficient =
- inverse_coefficients.size() > dofs_per_component ? dofs_per_component : 0;
- const VectorizedArrayType *inv_coefficient = inverse_coefficients.data();
- VectorizedArrayType temp_data_field[dofs_per_component];
- for (unsigned int d = 0; d < n_actual_components; ++d)
- {
- const VectorizedArrayType *in = in_array + d * dofs_per_component;
- VectorizedArrayType * out = out_array + d * dofs_per_component;
- // Need to select 'apply' method with hessian slot because values
- // assume symmetries that do not exist in the inverse shapes
- evaluator.template hessians<0, false, false>(in, temp_data_field);
- if (dim > 1)
- {
- evaluator.template hessians<1, false, false>(temp_data_field, out);
-
- if (dim == 3)
- {
- evaluator.template hessians<2, false, false>(out,
- temp_data_field);
- for (unsigned int q = 0; q < dofs_per_component; ++q)
- temp_data_field[q] *= inv_coefficient[q];
- evaluator.template hessians<2, true, false>(temp_data_field,
- out);
- }
- else if (dim == 2)
- for (unsigned int q = 0; q < dofs_per_component; ++q)
- out[q] *= inv_coefficient[q];
-
- evaluator.template hessians<1, true, false>(out, temp_data_field);
- }
- else
- {
- for (unsigned int q = 0; q < dofs_per_component; ++q)
- temp_data_field[q] *= inv_coefficient[q];
- }
- evaluator.template hessians<0, true, false>(temp_data_field, out);
-
- inv_coefficient += shift_coefficient;
- }
+ internal::CellwiseInverseMassMatrixImpl<
+ dim,
+ fe_degree,
+ n_components,
+ VectorizedArrayType>::apply(inverse_shape,
+ inverse_coefficients,
+ n_actual_components,
+ in_array,
+ out_array);
}
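+
+// A minimal usage sketch (the names `data`, `src`, `dst`, and `range` are
+// placeholders): applying the cell-wise inverse mass matrix with the local
+// JxW-based coefficients inside a cell loop.
+//
+// @code
+//   FEEvaluation<dim, fe_degree> fe_eval(data);
+//   MatrixFreeOperators::CellwiseInverseMassMatrix<dim, fe_degree>
+//     inverse_mass(fe_eval);
+//   AlignedVector<VectorizedArray<double>> coefficients(fe_eval.dofs_per_cell);
+//   for (unsigned int cell = range.first; cell < range.second; ++cell)
+//     {
+//       fe_eval.reinit(cell);
+//       fe_eval.read_dof_values(src);
+//       inverse_mass.fill_inverse_JxW_values(coefficients);
+//       // apply the inverse mass matrix in place on the local dof values
+//       inverse_mass.apply(coefficients,
+//                          1,
+//                          fe_eval.begin_dof_values(),
+//                          fe_eval.begin_dof_values());
+//       fe_eval.set_dof_values(dst);
+//     }
+// @endcode
+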
const VectorizedArrayType *in_array,
VectorizedArrayType * out_array) const
{
- constexpr unsigned int dofs_per_cell = Utilities::pow(fe_degree + 1, dim);
- internal::EvaluatorTensorProduct<internal::evaluate_evenodd,
- dim,
- fe_degree + 1,
- fe_degree + 1,
- VectorizedArrayType>
- evaluator(AlignedVector<VectorizedArrayType>(),
- AlignedVector<VectorizedArrayType>(),
- inverse_shape);
-
- for (unsigned int d = 0; d < n_actual_components; ++d)
- {
- const VectorizedArrayType *in = in_array + d * dofs_per_cell;
- VectorizedArrayType * out = out_array + d * dofs_per_cell;
-
- if (dim == 3)
- {
- evaluator.template hessians<2, true, false>(in, out);
- evaluator.template hessians<1, true, false>(out, out);
- evaluator.template hessians<0, true, false>(out, out);
- }
- if (dim == 2)
- {
- evaluator.template hessians<1, true, false>(in, out);
- evaluator.template hessians<0, true, false>(out, out);
- }
- if (dim == 1)
- evaluator.template hessians<0, true, false>(in, out);
- }
+ internal::CellwiseInverseMassMatrixImpl<dim,
+ fe_degree,
+ n_components,
+ VectorizedArrayType>::
+ transform_from_q_points_to_basis(inverse_shape,
+ n_actual_components,
+ in_array,
+ out_array);
}
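+
+// Sketch of the second entry point (assuming n_q_points_1d == fe_degree + 1
+// so that the number of quadrature points matches the number of basis
+// functions; `fe_eval`, `inverse_mass`, and `rhs_value` are placeholders):
+// evaluating a right-hand side at the quadrature points and transforming the
+// result back to the polynomial basis, i.e., a cell-local projection without
+// assembling a mass matrix.
+//
+// @code
+//   for (unsigned int q = 0; q < fe_eval.n_q_points; ++q)
+//     fe_eval.begin_values()[q] = rhs_value(fe_eval.quadrature_point(q));
+//   inverse_mass.transform_from_q_points_to_basis(1,
+//                                                 fe_eval.begin_values(),
+//                                                 fe_eval.begin_dof_values());
+// @endcode
+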
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#ifndef dealii_matrix_free_vector_access_internal_h
+#define dealii_matrix_free_vector_access_internal_h
+
+#include <deal.II/base/config.h>
+
+#include <deal.II/base/vectorization.h>
+
+#include <deal.II/matrix_free/dof_info.h>
+#include <deal.II/matrix_free/type_traits.h>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace internal
+{
+ // below we use type-traits from matrix-free/type_traits.h
+
+ // access to generic const vectors that have operator ().
+ // FIXME: this is wrong for Trilinos/Petsc MPI vectors
+ // where we should first do Partitioner::local_to_global()
+ template <typename VectorType,
+ typename std::enable_if<!has_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline typename VectorType::value_type
+ vector_access(const VectorType &vec, const unsigned int entry)
+ {
+ return vec(entry);
+ }
+
+
+
+ // access to generic non-const vectors that have operator ().
+ // FIXME: this is wrong for Trilinos/Petsc MPI vectors
+ // where we should first do Partitioner::local_to_global()
+ template <typename VectorType,
+ typename std::enable_if<!has_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline typename VectorType::value_type &
+ vector_access(VectorType &vec, const unsigned int entry)
+ {
+ return vec(entry);
+ }
+
+
+
+ // access to distributed MPI vectors that have a local_element(uint)
+ // method to access data in local index space, which is what we use in
+ // DoFInfo and hence in read_dof_values etc.
+ template <typename VectorType,
+ typename std::enable_if<has_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline typename VectorType::value_type &
+ vector_access(VectorType &vec, const unsigned int entry)
+ {
+ return vec.local_element(entry);
+ }
+
+
+
+ // same for const access
+ template <typename VectorType,
+ typename std::enable_if<has_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline typename VectorType::value_type
+ vector_access(const VectorType &vec, const unsigned int entry)
+ {
+ return vec.local_element(entry);
+ }
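+
+
+
+  // To illustrate the dispatch above (an illustrative sketch, assuming the
+  // deal.II vector classes; not a call site used in this file): a
+  // LinearAlgebra::distributed::Vector provides local_element() and is
+  // therefore accessed in local index space, whereas a plain Vector<double>
+  // without that method goes through operator().
+  //
+  // @code
+  //   LinearAlgebra::distributed::Vector<double> parallel_vec;
+  //   Vector<double>                             serial_vec(10);
+  //   // ... initialize both vectors ...
+  //   double a = internal::vector_access(parallel_vec, 3); // local_element(3)
+  //   double b = internal::vector_access(serial_vec, 3);   // serial_vec(3)
+  // @endcode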
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<has_add_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_add(VectorType & vec,
+ const unsigned int entry,
+ const typename VectorType::value_type &val)
+ {
+ vec.add_local_element(entry, val);
+ }
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<!has_add_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_add(VectorType & vec,
+ const unsigned int entry,
+ const typename VectorType::value_type &val)
+ {
+ vector_access(vec, entry) += val;
+ }
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<has_add_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_add_global(VectorType & vec,
+ const types::global_dof_index entry,
+ const typename VectorType::value_type &val)
+ {
+ vec.add(entry, val);
+ }
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<!has_add_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_add_global(VectorType & vec,
+ const types::global_dof_index entry,
+ const typename VectorType::value_type &val)
+ {
+ vec(entry) += val;
+ }
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<has_set_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_set(VectorType & vec,
+ const unsigned int entry,
+ const typename VectorType::value_type &val)
+ {
+ vec.set_local_element(entry, val);
+ }
+
+
+
+ template <typename VectorType,
+ typename std::enable_if<!has_set_local_element<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ vector_access_set(VectorType & vec,
+ const unsigned int entry,
+ const typename VectorType::value_type &val)
+ {
+ vector_access(vec, entry) = val;
+ }
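+
+
+
+  // A short sketch of how the set/add helpers above behave (assuming `vec`
+  // is a LinearAlgebra::distributed::Vector<double> with at least one
+  // locally owned element and indices given in local index space):
+  // vector_access_set() overwrites an entry while vector_access_add()
+  // accumulates into it, each picking the fastest interface the vector type
+  // offers.
+  //
+  // @code
+  //   internal::vector_access_set(vec, 0, 1.0); // vec.local_element(0) = 1.0
+  //   internal::vector_access_add(vec, 0, 2.0); // vec.local_element(0) += 2.0
+  //   // vec.local_element(0) is now 3.0
+  // @endcode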
+
+
+
+  // these functions make sure that the parallel partitioning of VectorType
+  // is really the same as the one stored in MatrixFree.
+  // the version below applies when has_partitioners_are_compatible == false
+ // FIXME: this is incorrect for PETSc/Trilinos MPI vectors
+ template <
+ typename VectorType,
+ typename std::enable_if<!has_partitioners_are_compatible<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ check_vector_compatibility(
+ const VectorType & vec,
+ const internal::MatrixFreeFunctions::DoFInfo &dof_info)
+ {
+ (void)vec;
+ (void)dof_info;
+
+ AssertDimension(vec.size(), dof_info.vector_partitioner->size());
+ }
+
+
+
+ // same as above for has_partitioners_are_compatible == true
+ template <
+ typename VectorType,
+ typename std::enable_if<has_partitioners_are_compatible<VectorType>::value,
+ VectorType>::type * = nullptr>
+ inline void
+ check_vector_compatibility(
+ const VectorType & vec,
+ const internal::MatrixFreeFunctions::DoFInfo &dof_info)
+ {
+ (void)vec;
+ (void)dof_info;
+ Assert(vec.partitioners_are_compatible(*dof_info.vector_partitioner),
+ ExcMessage(
+ "The parallel layout of the given vector is not "
+ "compatible with the parallel partitioning in MatrixFree. "
+ "Use MatrixFree::initialize_dof_vector to get a "
+ "compatible vector."));
+ }
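+
+
+
+  // Usage sketch (the names `matrix_free` and `dof_info` are placeholders
+  // for a MatrixFree object and the DoFInfo it stores): this check is what
+  // ultimately fires when a vector has not been set up through
+  // MatrixFree::initialize_dof_vector().
+  //
+  // @code
+  //   LinearAlgebra::distributed::Vector<double> vec;
+  //   matrix_free.initialize_dof_vector(vec); // layout now matches MatrixFree
+  //   internal::check_vector_compatibility(vec, dof_info); // passes
+  // @endcode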
+
+
+
+  // Below, three classes (VectorReader, VectorSetter,
+  // VectorDistributorLocalToGlobal) implement the same interface and can be
+  // used to read from a vector, set elements of a vector, and add to elements
+  // of a vector.
+
+ // 1. A class to read data from vector
+ template <typename Number, typename VectorizedArrayType>
+ struct VectorReader
+ {
+ template <typename VectorType>
+ void
+ process_dof(const unsigned int index,
+ const VectorType & vec,
+ Number & res) const
+ {
+ res = vector_access(vec, index);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ const Number *vec_ptr = vec.begin() + dof_index;
+ for (unsigned int i = 0; i < dofs_per_cell;
+ ++i, vec_ptr += VectorizedArrayType::n_array_elements)
+ dof_values[i].load(vec_ptr);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ const VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dof_values[i][v] = vector_access(
+ vec, dof_index + v + i * VectorizedArrayType::n_array_elements);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ dealii::vectorized_load_and_transpose(dofs_per_cell,
+ vec.begin(),
+ dof_indices,
+ dof_values);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ const VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int d = 0; d < dofs_per_cell; ++d)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ dof_values[d][v] = vector_access(vec, dof_indices[v] + d);
+ }
+
+
+
+ // variant where VectorType::value_type is the same as Number -> can call
+ // gather
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, true>) const
+ {
+ res.gather(vec.begin() + constant_offset, indices);
+ }
+
+
+
+ // variant where VectorType::value_type is not the same as Number -> must
+ // manually load the data
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ const VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ res[v] = vector_access(vec, indices[v] + constant_offset);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dof_global(const types::global_dof_index index,
+ const VectorType & vec,
+ Number & res) const
+ {
+ res = vec(index);
+ }
+
+
+
+ void
+ pre_constraints(const Number &, Number &res) const
+ {
+ res = Number();
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_constraint(const unsigned int index,
+ const Number weight,
+ const VectorType & vec,
+ Number & res) const
+ {
+ res += weight * vector_access(vec, index);
+ }
+
+
+
+ void
+ post_constraints(const Number &sum, Number &write_pos) const
+ {
+ write_pos = sum;
+ }
+
+
+
+ void
+ process_empty(VectorizedArrayType &res) const
+ {
+ res = VectorizedArrayType();
+ }
+ };
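+
+ // For a constrained degree of freedom, the three constraint hooks of
+ // VectorReader combine to a weighted sum over the constraint entries.
+ // Schematically (the loop below only sketches the caller, which is not
+ // part of this file):
+ //
+ //   Number res;
+ //   reader.pre_constraints(Number(), res);                    // res = 0
+ //   for (auto &entry : constraint_entries)                    // hypothetical
+ //     reader.process_constraint(entry.index, entry.weight, vec, res);
+ //   reader.post_constraints(res, local_dof_value);            // store result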
+
+
+
+ // 2. A class to add values to the vector during a
+ // FEEvaluation::distribute_local_to_global() call
+ template <typename Number, typename VectorizedArrayType>
+ struct VectorDistributorLocalToGlobal
+ {
+ template <typename VectorType>
+ void
+ process_dof(const unsigned int index, VectorType &vec, Number &res) const
+ {
+ vector_access_add(vec, index, res);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ Number *vec_ptr = vec.begin() + dof_index;
+ for (unsigned int i = 0; i < dofs_per_cell;
+ ++i, vec_ptr += VectorizedArrayType::n_array_elements)
+ {
+ VectorizedArrayType tmp;
+ tmp.load(vec_ptr);
+ tmp += dof_values[i];
+ tmp.store(vec_ptr);
+ }
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access_add(vec,
+ dof_index + v +
+ i * VectorizedArrayType::n_array_elements,
+ dof_values[i][v]);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ vectorized_transpose_and_store(
+ true, dofs_per_cell, dof_values, dof_indices, vec.begin());
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int d = 0; d < dofs_per_cell; ++d)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access_add(vec, dof_indices[v] + d, dof_values[d][v]);
+ }
+
+
+
+ // variant where VectorType::value_type is the same as Number -> can call
+ // scatter
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, true>) const
+ {
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL < 3
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access(vec, indices[v] + constant_offset) += res[v];
+#else
+ // only use the vectorized gather here because a matching scatter
+ // instruction is also available at this vectorization level (AVX-512)
+ VectorizedArrayType tmp;
+ tmp.gather(vec.begin() + constant_offset, indices);
+ tmp += res;
+ tmp.scatter(indices, vec.begin() + constant_offset);
+#endif
+ }
+
+
+
+ // variant where VectorType::value_type is not the same as Number -> must
+ // manually accumulate the data entry by entry
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access_add(vec, indices[v] + constant_offset, res[v]);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dof_global(const types::global_dof_index index,
+ VectorType & vec,
+ Number & res) const
+ {
+ vector_access_add_global(vec, index, res);
+ }
+
+
+
+ void
+ pre_constraints(const Number &input, Number &res) const
+ {
+ res = input;
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_constraint(const unsigned int index,
+ const Number weight,
+ VectorType & vec,
+ Number & res) const
+ {
+ vector_access_add(vec, index, weight * res);
+ }
+
+
+
+ void
+ post_constraints(const Number &, Number &) const
+ {}
+
+
+
+ void
+ process_empty(VectorizedArrayType &) const
+ {}
+ };
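+
+ // VectorDistributorLocalToGlobal mirrors VectorReader with the data flow
+ // reversed: where the reader performs res = vec[index], the distributor
+ // performs vec[index] += res. For a single entry this amounts to
+ //
+ //   VectorDistributorLocalToGlobal<Number, VectorizedArrayType> adder;
+ //   adder.process_dof(index, vec, local_dof_value); // vec[index] += value
+ //
+ // which is what FEEvaluation::distribute_local_to_global() relies on
+ // (the call site itself is not shown in this file).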
+
+
+
+ // 3. A class to set elements of the vector
+ template <typename Number, typename VectorizedArrayType>
+ struct VectorSetter
+ {
+ template <typename VectorType>
+ void
+ process_dof(const unsigned int index, VectorType &vec, Number &res) const
+ {
+ vector_access(vec, index) = res;
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ Number *vec_ptr = vec.begin() + dof_index;
+ for (unsigned int i = 0; i < dofs_per_cell;
+ ++i, vec_ptr += VectorizedArrayType::n_array_elements)
+ dof_values[i].store(vec_ptr);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized(const unsigned int dofs_per_cell,
+ const unsigned int dof_index,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access(vec,
+ dof_index + v +
+ i * VectorizedArrayType::n_array_elements) =
+ dof_values[i][v];
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, true>) const
+ {
+ vectorized_transpose_and_store(
+ false, dofs_per_cell, dof_values, dof_indices, vec.begin());
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dofs_vectorized_transpose(const unsigned int dofs_per_cell,
+ const unsigned int * dof_indices,
+ VectorType & vec,
+ VectorizedArrayType *dof_values,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int i = 0; i < dofs_per_cell; ++i)
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access(vec, dof_indices[v] + i) = dof_values[i][v];
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, true>) const
+ {
+ res.scatter(indices, vec.begin() + constant_offset);
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dof_gather(const unsigned int * indices,
+ VectorType & vec,
+ const unsigned int constant_offset,
+ VectorizedArrayType &res,
+ std::integral_constant<bool, false>) const
+ {
+ for (unsigned int v = 0; v < VectorizedArrayType::n_array_elements; ++v)
+ vector_access(vec, indices[v] + constant_offset) = res[v];
+ }
+
+
+
+ template <typename VectorType>
+ void
+ process_dof_global(const types::global_dof_index index,
+ VectorType & vec,
+ Number & res) const
+ {
+ vec(index) = res;
+ }
+
+
+
+ void
+ pre_constraints(const Number &, Number &) const
+ {}
+
+
+
+ template <typename VectorType>
+ void
+ process_constraint(const unsigned int,
+ const Number,
+ VectorType &,
+ Number &) const
+ {}
+
+
+
+ void
+ post_constraints(const Number &, Number &) const
+ {}
+
+
+
+ void
+ process_empty(VectorizedArrayType &) const
+ {}
+ };
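+
+ // Summary of the per-entry operation implemented by the three classes
+ // (schematically, with v denoting the global vector):
+ //
+ //   VectorReader:                     res       = v[index];
+ //   VectorDistributorLocalToGlobal:   v[index] += res;
+ //   VectorSetter:                     v[index]  = res;
+ //
+ // Note that the setter's constraint hooks are intentionally empty, i.e.
+ // nothing is written for constrained entries.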
+} // namespace internal
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif