https://gitweb.dealii.org/ - dealii.git/commitdiff
Add new vectorized_load_and_transpose and vectorized_transpose_and_store (9556/head)
author    Peter Munch <peterrmuench@gmail.com>
          Thu, 20 Feb 2020 22:28:53 +0000 (23:28 +0100)
committer Peter Munch <peterrmuench@gmail.com>
          Tue, 3 Mar 2020 10:14:00 +0000 (11:14 +0100)
include/deal.II/base/vectorization.h
tests/base/vectorization_15.cc [new file with mode: 0644]
tests/base/vectorization_15.output [new file with mode: 0644]

index 24739e5a37c6bb4c77c73ea55c807d19bd95219c..0ffc4c538a8824546562f68f891da1493a643374 100644 (file)
@@ -22,6 +22,7 @@
 #include <deal.II/base/exceptions.h>
 #include <deal.II/base/template_constraints.h>
 
+#include <array>
 #include <cmath>
 
 // Note:
@@ -676,6 +677,31 @@ inline DEAL_II_ALWAYS_INLINE VectorizedArrayType
 
 
 
+/**
+ * Load @p n_array_elements data items from memory into the VectorizedArray
+ * @p out, starting at the given addresses and with the given offset, each
+ * pointer in @p ptrs providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code:
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ *   out.data[v] = ptrs[v][offset];
+ * @endcode
+ */
+template <typename Number, int width, std::size_t width_>
+inline DEAL_II_ALWAYS_INLINE void
+gather(VectorizedArray<Number, width> &    out,
+       const std::array<Number *, width_> &ptrs,
+       const unsigned int                  offset)
+{
+  static_assert(width == width_, "Length of input vector does not match!");
+
+  for (unsigned int v = 0; v < width_; v++)
+    out.data[v] = ptrs[v][offset];
+}
+
+
+
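
A minimal usage sketch for the new gather() overload (not part of the patch): it assumes a deal.II build with this change applied and uses the default VectorizedArray<double> width of the build, so the std::array size matches the number of lanes; the buffer sizes and values are invented for illustration.

#include <deal.II/base/vectorization.h>

#include <array>
#include <vector>

int main()
{
  using VA = dealii::VectorizedArray<double>;
  constexpr unsigned int n_lanes = VA::n_array_elements;

  // One independently allocated buffer per SIMD lane (hypothetical data).
  std::vector<std::vector<double>> buffers(n_lanes, std::vector<double>(16));
  for (unsigned int v = 0; v < n_lanes; ++v)
    for (unsigned int i = 0; i < 16; ++i)
      buffers[v][i] = 100. * v + i;

  std::array<double *, n_lanes> ptrs;
  for (unsigned int v = 0; v < n_lanes; ++v)
    ptrs[v] = buffers[v].data();

  // Read entry 3 of every buffer into one lane each: x[v] == buffers[v][3].
  VA x;
  dealii::gather(x, ptrs, 3);
}

The point of this overload is that the pointers may come from unrelated allocations, which the existing offset-based gather (one base pointer plus per-lane offsets) cannot express.
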
 /**
  * This method loads VectorizedArray::n_array_elements data streams from the
  * given array @p in. The offsets to the input array are given by the array @p
@@ -716,6 +742,33 @@ vectorized_load_and_transpose(const unsigned int              n_entries,
 }
 
 
+/**
+ * The same as above, with the difference that an array of pointers is
+ * passed in as the input argument @p in.
+ *
+ * In analogy to the function above, one can consider that
+ * `in+offset[v]` is precomputed and passed as an input argument.
+ *
+ * However, this function can also be used if some function returns an array
+ * of pointers and no assumption can be made that they belong to the same
+ * array, i.e., they may originate from different memory allocations.
+ */
+template <typename Number, int width, std::size_t width_>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int                  n_entries,
+                              const std::array<Number *, width_> &in,
+                              VectorizedArray<Number, width> *    out)
+{
+  static_assert(width == width_, "Length of input vector does not match!");
+
+  for (unsigned int i = 0; i < n_entries; ++i)
+    for (unsigned int v = 0;
+         v < VectorizedArray<Number, width>::n_array_elements;
+         ++v)
+      out[i][v] = in[v][i];
+}
+
+
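
A usage sketch (not part of the patch) for the pointer-array variant of vectorized_load_and_transpose; it again assumes the default VectorizedArray<double> width of the build and fills the rows with made-up values.

#include <deal.II/base/vectorization.h>

#include <array>
#include <vector>

int main()
{
  using VA = dealii::VectorizedArray<double>;
  constexpr unsigned int n_lanes   = VA::n_array_elements;
  constexpr unsigned int n_entries = 10;

  // One independently allocated row per SIMD lane.
  std::vector<std::vector<double>> rows(n_lanes,
                                        std::vector<double>(n_entries));
  for (unsigned int v = 0; v < n_lanes; ++v)
    for (unsigned int i = 0; i < n_entries; ++i)
      rows[v][i] = 1000. * v + i;

  std::array<double *, n_lanes> in;
  for (unsigned int v = 0; v < n_lanes; ++v)
    in[v] = rows[v].data();

  // After the call, out[i][v] == rows[v][i]: the row-wise data is
  // transposed into entry-wise VectorizedArrays.
  VA out[n_entries];
  dealii::vectorized_load_and_transpose(n_entries, in, out);
}
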
 
 /**
  * This method stores the vectorized arrays in transposed form into the given
@@ -778,6 +831,41 @@ vectorized_transpose_and_store(const bool                            add_into,
 }
 
 
+/**
+ * The same as above, with the difference that an array of pointers is
+ * passed in as the output argument @p out.
+ *
+ * In analogy to the function above, one can consider that
+ * `out+offset[v]` is precomputed and passed as an output argument.
+ *
+ * However, this function can also be used if some function returns an array
+ * of pointers and no assumption can be made that they belong to the same
+ * array, i.e., they may originate from different memory allocations.
+ */
+template <typename Number, int width, std::size_t width_>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                            add_into,
+                               const unsigned int                    n_entries,
+                               const VectorizedArray<Number, width> *in,
+                               std::array<Number *, width_> &        out)
+{
+  static_assert(width == width_, "Length of input vector does not match!");
+
+  if (add_into)
+    for (unsigned int i = 0; i < n_entries; ++i)
+      for (unsigned int v = 0;
+           v < VectorizedArray<Number, width>::n_array_elements;
+           ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 0; i < n_entries; ++i)
+      for (unsigned int v = 0;
+           v < VectorizedArray<Number, width>::n_array_elements;
+           ++v)
+        out[v][i] = in[i][v];
+}
+
+
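
And a matching sketch (not part of the patch) for the pointer-array variant of vectorized_transpose_and_store, showing both the overwrite mode (add_into == false) and the accumulate mode (add_into == true); the data is illustrative only.

#include <deal.II/base/vectorization.h>

#include <array>
#include <vector>

int main()
{
  using VA = dealii::VectorizedArray<double>;
  constexpr unsigned int n_lanes   = VA::n_array_elements;
  constexpr unsigned int n_entries = 10;

  // Some vectorized data to write out: in[i][v] = i + 0.5 * v.
  VA in[n_entries];
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < n_lanes; ++v)
      in[i][v] = i + 0.5 * v;

  // One independently allocated destination row per lane.
  std::vector<std::vector<double>> rows(n_lanes,
                                        std::vector<double>(n_entries, 0.));
  std::array<double *, n_lanes> out;
  for (unsigned int v = 0; v < n_lanes; ++v)
    out[v] = rows[v].data();

  // add_into == false overwrites: rows[v][i] = in[i][v].
  dealii::vectorized_transpose_and_store(false, n_entries, in, out);
  // add_into == true accumulates:  rows[v][i] += in[i][v].
  dealii::vectorized_transpose_and_store(true, n_entries, in, out);
}
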
 //@}
 
 #ifndef DOXYGEN
@@ -1139,6 +1227,45 @@ vectorized_load_and_transpose(const unsigned int          n_entries,
 
 
 
+/**
+ * Specialization for double and AVX-512.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int             n_entries,
+                              const std::array<double *, 8> &in,
+                              VectorizedArray<double, 8> *   out)
+{
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m512d t0, t1, t2, t3 = {};
+
+      t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
+      t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
+      t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
+      t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
+      t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
+      t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
+      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
+      t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
+
+      __m512d v0          = _mm512_shuffle_f64x2(t0, t2, 0x88);
+      __m512d v1          = _mm512_shuffle_f64x2(t0, t2, 0xdd);
+      __m512d v2          = _mm512_shuffle_f64x2(t1, t3, 0x88);
+      __m512d v3          = _mm512_shuffle_f64x2(t1, t3, 0xdd);
+      out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
+      out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
+      out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
+      out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
+    }
+
+  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+    gather(out[i], in, i);
+}
+
+
+
 /**
  * Specialization for double and AVX-512.
  */
@@ -1222,6 +1349,84 @@ vectorized_transpose_and_store(const bool                        add_into,
 
 
 
+/**
+ * Specialization for double and AVX-512.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                        add_into,
+                               const unsigned int                n_entries,
+                               const VectorizedArray<double, 8> *in,
+                               std::array<double *, 8> &         out)
+{
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 4;
+  __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
+  __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m512d t0   = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
+      __m512d t1   = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
+      __m512d t2   = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+      __m512d t3   = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+      __m512d v0   = _mm512_permutex2var_pd(t0, mask1, t2);
+      __m512d v1   = _mm512_permutex2var_pd(t0, mask2, t2);
+      __m512d v2   = _mm512_permutex2var_pd(t1, mask1, t3);
+      __m512d v3   = _mm512_permutex2var_pd(t1, mask2, t3);
+      __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
+      __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
+      __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
+      __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
+      __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
+      __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
+      __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
+      __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
+
+      if (add_into)
+        {
+          res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
+          _mm256_storeu_pd(out[0] + 4 * i, res0);
+          res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
+          _mm256_storeu_pd(out[1] + 4 * i, res1);
+          res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
+          _mm256_storeu_pd(out[2] + 4 * i, res2);
+          res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
+          _mm256_storeu_pd(out[3] + 4 * i, res3);
+          res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
+          _mm256_storeu_pd(out[4] + 4 * i, res4);
+          res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
+          _mm256_storeu_pd(out[5] + 4 * i, res5);
+          res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
+          _mm256_storeu_pd(out[6] + 4 * i, res6);
+          res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
+          _mm256_storeu_pd(out[7] + 4 * i, res7);
+        }
+      else
+        {
+          _mm256_storeu_pd(out[0] + 4 * i, res0);
+          _mm256_storeu_pd(out[1] + 4 * i, res1);
+          _mm256_storeu_pd(out[2] + 4 * i, res2);
+          _mm256_storeu_pd(out[3] + 4 * i, res3);
+          _mm256_storeu_pd(out[4] + 4 * i, res4);
+          _mm256_storeu_pd(out[5] + 4 * i, res5);
+          _mm256_storeu_pd(out[6] + 4 * i, res6);
+          _mm256_storeu_pd(out[7] + 4 * i, res7);
+        }
+    }
+
+  if (add_into)
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 8; ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 8; ++v)
+        out[v][i] = in[i][v];
+}
+
+
+
 /**
  * Specialization for float and AVX512.
  */
@@ -1548,7 +1753,7 @@ vectorized_load_and_transpose(const unsigned int          n_entries,
   // To avoid warnings about uninitialized variables, need to initialize one
   // variable to a pre-existing value in out, which will never get used in
   // the end. Keep the initialization outside the loop because of a bug in
-  // gcc-9 which generates a "vmovapd" instruction instead of "vmovupd" in
+  // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
   // case t3 is initialized to zero (inside/outside of loop), see
   // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
   __m512 t0, t1, t2, t3;
@@ -1586,8 +1791,59 @@ vectorized_load_and_transpose(const unsigned int          n_entries,
 
   // remainder loop of work that does not divide by 4
   for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
-    for (unsigned int v = 0; v < 8; ++v)
-      out[i].gather(in + i, offsets);
+    out[i].gather(in + i, offsets);
+}
+
+
+
+/**
+ * Specialization for float and AVX-512.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int             n_entries,
+                              const std::array<float *, 16> &in,
+                              VectorizedArray<float, 16> *   out)
+{
+  // see the comments in the vectorized_load_and_transpose above
+
+  const unsigned int n_chunks = n_entries / 4;
+
+  __m512 t0, t1, t2, t3;
+  if (n_chunks > 0)
+    t3 = out[0].data;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
+      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
+      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
+      t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
+      t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
+      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
+      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
+      t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
+      t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
+      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
+      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
+      t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
+      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
+      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
+      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
+      t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
+
+      __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
+      __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
+      __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
+      __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
+
+      out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
+      out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
+      out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
+      out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
+    }
+
+  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+    gather(out[i], in, i);
 }
 
 
@@ -1704,6 +1960,117 @@ vectorized_transpose_and_store(const bool                        add_into,
         out[offsets[v] + i] = in[i][v];
 }
 
+
+
+/**
+ * Specialization for float and AVX-512.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                        add_into,
+                               const unsigned int                n_entries,
+                               const VectorizedArray<float, 16> *in,
+                               std::array<float *, 16> &         out)
+{
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
+      __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
+      __m512 t2 =
+        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
+      __m512 t3 =
+        _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
+      __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
+      __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
+      __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
+      __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
+
+      __m128 res0  = _mm512_extractf32x4_ps(u0, 0);
+      __m128 res4  = _mm512_extractf32x4_ps(u0, 1);
+      __m128 res8  = _mm512_extractf32x4_ps(u0, 2);
+      __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
+      __m128 res1  = _mm512_extractf32x4_ps(u1, 0);
+      __m128 res5  = _mm512_extractf32x4_ps(u1, 1);
+      __m128 res9  = _mm512_extractf32x4_ps(u1, 2);
+      __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
+      __m128 res2  = _mm512_extractf32x4_ps(u2, 0);
+      __m128 res6  = _mm512_extractf32x4_ps(u2, 1);
+      __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
+      __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
+      __m128 res3  = _mm512_extractf32x4_ps(u3, 0);
+      __m128 res7  = _mm512_extractf32x4_ps(u3, 1);
+      __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
+      __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
+
+      if (add_into)
+        {
+          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
+          _mm_storeu_ps(out[0] + 4 * i, res0);
+          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
+          _mm_storeu_ps(out[1] + 4 * i, res1);
+          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
+          _mm_storeu_ps(out[2] + 4 * i, res2);
+          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
+          _mm_storeu_ps(out[3] + 4 * i, res3);
+          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
+          _mm_storeu_ps(out[4] + 4 * i, res4);
+          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
+          _mm_storeu_ps(out[5] + 4 * i, res5);
+          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
+          _mm_storeu_ps(out[6] + 4 * i, res6);
+          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
+          _mm_storeu_ps(out[7] + 4 * i, res7);
+          res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
+          _mm_storeu_ps(out[8] + 4 * i, res8);
+          res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
+          _mm_storeu_ps(out[9] + 4 * i, res9);
+          res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
+          _mm_storeu_ps(out[10] + 4 * i, res10);
+          res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
+          _mm_storeu_ps(out[11] + 4 * i, res11);
+          res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
+          _mm_storeu_ps(out[12] + 4 * i, res12);
+          res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
+          _mm_storeu_ps(out[13] + 4 * i, res13);
+          res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
+          _mm_storeu_ps(out[14] + 4 * i, res14);
+          res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
+          _mm_storeu_ps(out[15] + 4 * i, res15);
+        }
+      else
+        {
+          _mm_storeu_ps(out[0] + 4 * i, res0);
+          _mm_storeu_ps(out[1] + 4 * i, res1);
+          _mm_storeu_ps(out[2] + 4 * i, res2);
+          _mm_storeu_ps(out[3] + 4 * i, res3);
+          _mm_storeu_ps(out[4] + 4 * i, res4);
+          _mm_storeu_ps(out[5] + 4 * i, res5);
+          _mm_storeu_ps(out[6] + 4 * i, res6);
+          _mm_storeu_ps(out[7] + 4 * i, res7);
+          _mm_storeu_ps(out[8] + 4 * i, res8);
+          _mm_storeu_ps(out[9] + 4 * i, res9);
+          _mm_storeu_ps(out[10] + 4 * i, res10);
+          _mm_storeu_ps(out[11] + 4 * i, res11);
+          _mm_storeu_ps(out[12] + 4 * i, res12);
+          _mm_storeu_ps(out[13] + 4 * i, res13);
+          _mm_storeu_ps(out[14] + 4 * i, res14);
+          _mm_storeu_ps(out[15] + 4 * i, res15);
+        }
+    }
+
+  if (add_into)
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 16; ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 16; ++v)
+        out[v][i] = in[i][v];
+}
+
 #  endif
 
 #  if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
@@ -2045,6 +2412,45 @@ vectorized_load_and_transpose(const unsigned int          n_entries,
 
 
 
+/**
+ * Specialization for double and AVX.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int             n_entries,
+                              const std::array<double *, 4> &in,
+                              VectorizedArray<double, 4> *   out)
+{
+  // see the comments in the vectorized_load_and_transpose above
+
+  const unsigned int n_chunks = n_entries / 4;
+  const double *     in0      = in[0];
+  const double *     in1      = in[1];
+  const double *     in2      = in[2];
+  const double *     in3      = in[3];
+
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m256d u0          = _mm256_loadu_pd(in0 + 4 * i);
+      __m256d u1          = _mm256_loadu_pd(in1 + 4 * i);
+      __m256d u2          = _mm256_loadu_pd(in2 + 4 * i);
+      __m256d u3          = _mm256_loadu_pd(in3 + 4 * i);
+      __m256d t0          = _mm256_permute2f128_pd(u0, u2, 0x20);
+      __m256d t1          = _mm256_permute2f128_pd(u1, u3, 0x20);
+      __m256d t2          = _mm256_permute2f128_pd(u0, u2, 0x31);
+      __m256d t3          = _mm256_permute2f128_pd(u1, u3, 0x31);
+      out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1);
+      out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1);
+      out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3);
+      out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3);
+    }
+
+  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+    gather(out[i], in, i);
+}
+
+
+
 /**
  * Specialization for double and AVX.
  */
@@ -2113,13 +2519,81 @@ vectorized_transpose_and_store(const bool                        add_into,
 
 
 /**
- * Specialization for float and AVX.
+ * Specialization for double and AVX.
  */
 template <>
-class VectorizedArray<float, 8>
-  : public VectorizedArrayBase<VectorizedArray<float, 8>>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                        add_into,
+                               const unsigned int                n_entries,
+                               const VectorizedArray<double, 4> *in,
+                               std::array<double *, 4> &         out)
 {
-public:
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 4;
+  double *           out0     = out[0];
+  double *           out1     = out[1];
+  double *           out2     = out[2];
+  double *           out3     = out[3];
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m256d u0   = in[4 * i + 0].data;
+      __m256d u1   = in[4 * i + 1].data;
+      __m256d u2   = in[4 * i + 2].data;
+      __m256d u3   = in[4 * i + 3].data;
+      __m256d t0   = _mm256_permute2f128_pd(u0, u2, 0x20);
+      __m256d t1   = _mm256_permute2f128_pd(u1, u3, 0x20);
+      __m256d t2   = _mm256_permute2f128_pd(u0, u2, 0x31);
+      __m256d t3   = _mm256_permute2f128_pd(u1, u3, 0x31);
+      __m256d res0 = _mm256_unpacklo_pd(t0, t1);
+      __m256d res1 = _mm256_unpackhi_pd(t0, t1);
+      __m256d res2 = _mm256_unpacklo_pd(t2, t3);
+      __m256d res3 = _mm256_unpackhi_pd(t2, t3);
+
+      // Cannot use the same store instructions in both paths of the 'if'
+      // because the compiler cannot know that there is no aliasing between
+      // pointers
+      if (add_into)
+        {
+          res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
+          _mm256_storeu_pd(out0 + 4 * i, res0);
+          res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
+          _mm256_storeu_pd(out1 + 4 * i, res1);
+          res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
+          _mm256_storeu_pd(out2 + 4 * i, res2);
+          res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
+          _mm256_storeu_pd(out3 + 4 * i, res3);
+        }
+      else
+        {
+          _mm256_storeu_pd(out0 + 4 * i, res0);
+          _mm256_storeu_pd(out1 + 4 * i, res1);
+          _mm256_storeu_pd(out2 + 4 * i, res2);
+          _mm256_storeu_pd(out3 + 4 * i, res3);
+        }
+    }
+
+  // remainder loop of work that does not divide by 4
+  if (add_into)
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 4; ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 4; ++v)
+        out[v][i] = in[i][v];
+}
+
+
+
+/**
+ * Specialization for float and AVX.
+ */
+template <>
+class VectorizedArray<float, 8>
+  : public VectorizedArrayBase<VectorizedArray<float, 8>>
+{
+public:
   /**
    * This gives the type of the array elements.
    */
@@ -2452,6 +2926,46 @@ vectorized_load_and_transpose(const unsigned int         n_entries,
 
 
 
+/**
+ * Specialization for float and AVX.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int            n_entries,
+                              const std::array<float *, 8> &in,
+                              VectorizedArray<float, 8> *   out)
+{
+  // see the comments in the vectorized_load_and_transpose above
+
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m256 t0, t1, t2, t3 = {};
+      t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
+      t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
+      t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
+      t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
+      t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
+      t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
+      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
+      t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
+
+      __m256 v0           = _mm256_shuffle_ps(t0, t1, 0x44);
+      __m256 v1           = _mm256_shuffle_ps(t0, t1, 0xee);
+      __m256 v2           = _mm256_shuffle_ps(t2, t3, 0x44);
+      __m256 v3           = _mm256_shuffle_ps(t2, t3, 0xee);
+      out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88);
+      out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd);
+      out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88);
+      out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd);
+    }
+
+  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+    gather(out[i], in, i);
+}
+
+
+
 /**
  * Specialization for float and AVX.
  */
@@ -2533,6 +3047,86 @@ vectorized_transpose_and_store(const bool                       add_into,
         out[offsets[v] + i] = in[i][v];
 }
 
+
+
+/**
+ * Specialization for float and AVX.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                       add_into,
+                               const unsigned int               n_entries,
+                               const VectorizedArray<float, 8> *in,
+                               std::array<float *, 8> &         out)
+{
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m256 u0   = in[4 * i + 0].data;
+      __m256 u1   = in[4 * i + 1].data;
+      __m256 u2   = in[4 * i + 2].data;
+      __m256 u3   = in[4 * i + 3].data;
+      __m256 t0   = _mm256_shuffle_ps(u0, u1, 0x44);
+      __m256 t1   = _mm256_shuffle_ps(u0, u1, 0xee);
+      __m256 t2   = _mm256_shuffle_ps(u2, u3, 0x44);
+      __m256 t3   = _mm256_shuffle_ps(u2, u3, 0xee);
+      u0          = _mm256_shuffle_ps(t0, t2, 0x88);
+      u1          = _mm256_shuffle_ps(t0, t2, 0xdd);
+      u2          = _mm256_shuffle_ps(t1, t3, 0x88);
+      u3          = _mm256_shuffle_ps(t1, t3, 0xdd);
+      __m128 res0 = _mm256_extractf128_ps(u0, 0);
+      __m128 res4 = _mm256_extractf128_ps(u0, 1);
+      __m128 res1 = _mm256_extractf128_ps(u1, 0);
+      __m128 res5 = _mm256_extractf128_ps(u1, 1);
+      __m128 res2 = _mm256_extractf128_ps(u2, 0);
+      __m128 res6 = _mm256_extractf128_ps(u2, 1);
+      __m128 res3 = _mm256_extractf128_ps(u3, 0);
+      __m128 res7 = _mm256_extractf128_ps(u3, 1);
+
+      if (add_into)
+        {
+          res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
+          _mm_storeu_ps(out[0] + 4 * i, res0);
+          res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
+          _mm_storeu_ps(out[1] + 4 * i, res1);
+          res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
+          _mm_storeu_ps(out[2] + 4 * i, res2);
+          res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
+          _mm_storeu_ps(out[3] + 4 * i, res3);
+          res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
+          _mm_storeu_ps(out[4] + 4 * i, res4);
+          res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
+          _mm_storeu_ps(out[5] + 4 * i, res5);
+          res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
+          _mm_storeu_ps(out[6] + 4 * i, res6);
+          res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
+          _mm_storeu_ps(out[7] + 4 * i, res7);
+        }
+      else
+        {
+          _mm_storeu_ps(out[0] + 4 * i, res0);
+          _mm_storeu_ps(out[1] + 4 * i, res1);
+          _mm_storeu_ps(out[2] + 4 * i, res2);
+          _mm_storeu_ps(out[3] + 4 * i, res3);
+          _mm_storeu_ps(out[4] + 4 * i, res4);
+          _mm_storeu_ps(out[5] + 4 * i, res5);
+          _mm_storeu_ps(out[6] + 4 * i, res6);
+          _mm_storeu_ps(out[7] + 4 * i, res7);
+        }
+    }
+
+  if (add_into)
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 8; ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 8; ++v)
+        out[v][i] = in[i][v];
+}
+
 #  endif
 
 #  if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
@@ -2848,6 +3442,33 @@ vectorized_load_and_transpose(const unsigned int          n_entries,
 
 
 
+/**
+ * Specialization for double and SSE2.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int             n_entries,
+                              const std::array<double *, 2> &in,
+                              VectorizedArray<double, 2> *   out)
+{
+  // see the comments in the vectorized_load_and_transpose above
+
+  const unsigned int n_chunks = n_entries / 2;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m128d u0          = _mm_loadu_pd(in[0] + 2 * i);
+      __m128d u1          = _mm_loadu_pd(in[1] + 2 * i);
+      out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
+      out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
+    }
+
+  for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+    for (unsigned int v = 0; v < 2; ++v)
+      out[i][v] = in[v][i];
+}
+
+
+
 /**
  * Specialization for double and SSE2.
  */
@@ -2900,6 +3521,57 @@ vectorized_transpose_and_store(const bool                        add_into,
 
 
 
+/**
+ * Specialization for double and SSE2.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                        add_into,
+                               const unsigned int                n_entries,
+                               const VectorizedArray<double, 2> *in,
+                               std::array<double *, 2> &         out)
+{
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 2;
+  if (add_into)
+    {
+      for (unsigned int i = 0; i < n_chunks; ++i)
+        {
+          __m128d u0   = in[2 * i + 0].data;
+          __m128d u1   = in[2 * i + 1].data;
+          __m128d res0 = _mm_unpacklo_pd(u0, u1);
+          __m128d res1 = _mm_unpackhi_pd(u0, u1);
+          _mm_storeu_pd(out[0] + 2 * i,
+                        _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
+          _mm_storeu_pd(out[1] + 2 * i,
+                        _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
+        }
+
+      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+        for (unsigned int v = 0; v < 2; ++v)
+          out[v][i] += in[i][v];
+    }
+  else
+    {
+      for (unsigned int i = 0; i < n_chunks; ++i)
+        {
+          __m128d u0   = in[2 * i + 0].data;
+          __m128d u1   = in[2 * i + 1].data;
+          __m128d res0 = _mm_unpacklo_pd(u0, u1);
+          __m128d res1 = _mm_unpackhi_pd(u0, u1);
+          _mm_storeu_pd(out[0] + 2 * i, res0);
+          _mm_storeu_pd(out[1] + 2 * i, res1);
+        }
+
+      for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+        for (unsigned int v = 0; v < 2; ++v)
+          out[v][i] = in[i][v];
+    }
+}
+
+
+
 /**
  * Specialization for float and SSE2.
  */
@@ -3219,6 +3891,41 @@ vectorized_load_and_transpose(const unsigned int         n_entries,
 
 
 
+/**
+ * Specialization for float and SSE2.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_load_and_transpose(const unsigned int            n_entries,
+                              const std::array<float *, 4> &in,
+                              VectorizedArray<float, 4> *   out)
+{
+  // see the comments in the vectorized_load_and_transpose above
+
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m128 u0           = _mm_loadu_ps(in[0] + 4 * i);
+      __m128 u1           = _mm_loadu_ps(in[1] + 4 * i);
+      __m128 u2           = _mm_loadu_ps(in[2] + 4 * i);
+      __m128 u3           = _mm_loadu_ps(in[3] + 4 * i);
+      __m128 v0           = _mm_shuffle_ps(u0, u1, 0x44);
+      __m128 v1           = _mm_shuffle_ps(u0, u1, 0xee);
+      __m128 v2           = _mm_shuffle_ps(u2, u3, 0x44);
+      __m128 v3           = _mm_shuffle_ps(u2, u3, 0xee);
+      out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
+      out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
+      out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
+      out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
+    }
+
+  for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+    for (unsigned int v = 0; v < 4; ++v)
+      out[i][v] = in[v][i];
+}
+
+
+
 /**
  * Specialization for float and SSE2.
  */
@@ -3282,6 +3989,66 @@ vectorized_transpose_and_store(const bool                       add_into,
 
 
 
+/**
+ * Specialization for float and SSE2.
+ */
+template <>
+inline DEAL_II_ALWAYS_INLINE void
+vectorized_transpose_and_store(const bool                       add_into,
+                               const unsigned int               n_entries,
+                               const VectorizedArray<float, 4> *in,
+                               std::array<float *, 4> &         out)
+{
+  // see the comments in the vectorized_transpose_and_store above
+
+  const unsigned int n_chunks = n_entries / 4;
+  for (unsigned int i = 0; i < n_chunks; ++i)
+    {
+      __m128 u0 = in[4 * i + 0].data;
+      __m128 u1 = in[4 * i + 1].data;
+      __m128 u2 = in[4 * i + 2].data;
+      __m128 u3 = in[4 * i + 3].data;
+      __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
+      __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
+      __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
+      __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
+      u0        = _mm_shuffle_ps(t0, t2, 0x88);
+      u1        = _mm_shuffle_ps(t0, t2, 0xdd);
+      u2        = _mm_shuffle_ps(t1, t3, 0x88);
+      u3        = _mm_shuffle_ps(t1, t3, 0xdd);
+
+      if (add_into)
+        {
+          u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
+          _mm_storeu_ps(out[0] + 4 * i, u0);
+          u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
+          _mm_storeu_ps(out[1] + 4 * i, u1);
+          u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
+          _mm_storeu_ps(out[2] + 4 * i, u2);
+          u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
+          _mm_storeu_ps(out[3] + 4 * i, u3);
+        }
+      else
+        {
+          _mm_storeu_ps(out[0] + 4 * i, u0);
+          _mm_storeu_ps(out[1] + 4 * i, u1);
+          _mm_storeu_ps(out[2] + 4 * i, u2);
+          _mm_storeu_ps(out[3] + 4 * i, u3);
+        }
+    }
+
+  if (add_into)
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 4; ++v)
+        out[v][i] += in[i][v];
+  else
+    for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+      for (unsigned int v = 0; v < 4; ++v)
+        out[v][i] = in[i][v];
+}
+
+
+
 #  endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__)
 
 #  if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \
diff --git a/tests/base/vectorization_15.cc b/tests/base/vectorization_15.cc
new file mode 100644 (file)
index 0000000..38a5b23
--- /dev/null
@@ -0,0 +1,194 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test transpose operations of vectorized array using the array+offset method
+// for the set of all supported vectorization widths (otherwise the same as
+// vectorization_10)
+
+#include <deal.II/base/vectorization.h>
+
+#include <limits>
+
+#include "../tests.h"
+
+
+template <typename Number, int n_numbers, int width>
+void
+do_test()
+{
+  // since the number of array elements is system dependent, it is not a good
+  // idea to print them to an output file. Instead, check the values manually
+  const unsigned int n_vectors =
+    VectorizedArray<Number, width>::n_array_elements;
+  VectorizedArray<Number, width> arr[n_numbers];
+  Number                         other[n_vectors * n_numbers];
+  unsigned int                   offsets[n_vectors];
+  for (unsigned int v = 0; v < n_vectors; ++v)
+    offsets[v] = v * n_numbers;
+
+  std::array<Number *, width> other_and_offset;
+  for (unsigned int v = 0; v < width; v++)
+    other_and_offset[v] = other + offsets[v];
+
+  for (unsigned int i = 0; i < n_vectors; ++i)
+    for (unsigned int j = 0; j < n_numbers; ++j)
+      other[i * n_numbers + j] = i * n_numbers + j;
+
+  vectorized_load_and_transpose<Number, width>(n_numbers,
+                                               other_and_offset,
+                                               arr);
+  unsigned int n_errors = 0;
+  for (unsigned int j = 0; j < n_numbers; ++j)
+    for (unsigned int i = 0; i < n_vectors; ++i)
+      if (arr[j][i] != i * n_numbers + j)
+        ++n_errors;
+  if (n_errors > 0)
+    {
+      deallog << "load_and_transpose at          n=" << n_numbers
+              << " width=" << width << ": #errors: " << n_errors << std::endl;
+
+      for (unsigned int i = 0; i < n_numbers; ++i)
+        {
+          for (unsigned int j = 0; j < n_vectors; ++j)
+            deallog << arr[i][j] << " ";
+          deallog << std::endl;
+        }
+    }
+
+  vectorized_transpose_and_store<Number, width>(true,
+                                                n_numbers,
+                                                arr,
+                                                other_and_offset);
+  n_errors = 0;
+  for (unsigned int i = 0; i < n_vectors; ++i)
+    for (unsigned int j = 0; j < n_numbers; ++j)
+      if (other[i * n_numbers + j] != 2. * (i * n_numbers + j))
+        ++n_errors;
+  if (n_errors > 0)
+    {
+      deallog << "transpose_and_store (  add) at n=" << n_numbers
+              << " width=" << width << ": #errors: " << n_errors << std::endl;
+
+      for (unsigned int i = 0; i < n_vectors; ++i)
+        {
+          for (unsigned int j = 0; j < n_numbers; ++j)
+            deallog << other[i * n_numbers + j] << " ";
+          deallog << std::endl;
+        }
+    }
+
+  vectorized_transpose_and_store<Number, width>(false,
+                                                n_numbers,
+                                                arr,
+                                                other_and_offset);
+  n_errors = 0;
+  for (unsigned int i = 0; i < n_vectors; ++i)
+    for (unsigned int j = 0; j < n_numbers; ++j)
+      if (other[i * n_numbers + j] != (i * n_numbers + j))
+        ++n_errors;
+  if (n_errors > 0)
+    {
+      deallog << "transpose_and_store (noadd) at n=" << n_numbers
+              << " width=" << width << ": #errors: " << n_errors << std::endl;
+
+      for (unsigned int i = 0; i < n_vectors; ++i)
+        {
+          for (unsigned int j = 0; j < n_numbers; ++j)
+            deallog << other[i * n_numbers + j] << " ";
+          deallog << std::endl;
+        }
+    }
+}
+
+
+template <typename Number, int n_numbers>
+struct Tester
+{
+  static void
+  test()
+  {
+    do_test<Number, n_numbers, 1>();
+  }
+};
+
+template <int n_numbers>
+struct Tester<double, n_numbers>
+{
+  static void
+  test()
+  {
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
+    do_test<double, n_numbers, 8>();
+#endif
+
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
+    do_test<double, n_numbers, 4>();
+#endif
+
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
+    do_test<double, n_numbers, 2>();
+#endif
+
+    do_test<double, n_numbers, 1>();
+  }
+};
+
+template <int n_numbers>
+struct Tester<float, n_numbers>
+{
+  static void
+  test()
+  {
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__)
+    do_test<float, n_numbers, 16>();
+#endif
+
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
+    do_test<float, n_numbers, 8>();
+#endif
+
+#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__)
+    do_test<float, n_numbers, 4>();
+#endif
+
+    do_test<float, n_numbers, 1>();
+  }
+};
+
+
+int
+main()
+{
+  initlog();
+
+  deallog.push("double");
+  Tester<double, 1>::test();
+  Tester<double, 9>::test();
+  Tester<double, 32>::test();
+  deallog.pop();
+  deallog.push("float");
+  Tester<float, 1>::test();
+  Tester<float, 17>::test();
+  Tester<float, 32>::test();
+  deallog.pop();
+
+  // test long double: in that case, the default
+  // path of VectorizedArray is taken no matter
+  // what was done for double or float
+  deallog.push("long double");
+  Tester<long double, 4>::test();
+  deallog.pop();
+}
diff --git a/tests/base/vectorization_15.output b/tests/base/vectorization_15.output
new file mode 100644 (file)
index 0000000..8b13789
--- /dev/null
@@ -0,0 +1 @@
+
