From b5c8a08ec2ca64afbdbbe998bce026dbebbe6ef3 Mon Sep 17 00:00:00 2001 From: Peter Munch Date: Thu, 20 Feb 2020 23:28:53 +0100 Subject: [PATCH] Add new vectorized_load_and_transpose and vectorized_transpose_and_store --- include/deal.II/base/vectorization.h | 781 ++++++++++++++++++++++++++- tests/base/vectorization_15.cc | 194 +++++++ tests/base/vectorization_15.output | 1 + 3 files changed, 969 insertions(+), 7 deletions(-) create mode 100644 tests/base/vectorization_15.cc create mode 100644 tests/base/vectorization_15.output diff --git a/include/deal.II/base/vectorization.h b/include/deal.II/base/vectorization.h index 24739e5a37..0ffc4c538a 100644 --- a/include/deal.II/base/vectorization.h +++ b/include/deal.II/base/vectorization.h @@ -22,6 +22,7 @@ #include #include +#include #include // Note: @@ -676,6 +677,31 @@ inline DEAL_II_ALWAYS_INLINE VectorizedArrayType +/** + * Load @p n_array_elements from memory into the the VectorizedArray @p out, + * starting at the given addresses and with given offset, each entry from the + * offset providing one element of the vectorized array. + * + * This operation corresponds to the following code: + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * out.data[v] = ptrs[v][offset]; + * @endcode + */ +template +inline DEAL_II_ALWAYS_INLINE void +gather(VectorizedArray & out, + const std::array &ptrs, + const unsigned int offset) +{ + static_assert(width == width_, "Length of input vector do not match!"); + + for (unsigned int v = 0; v < width_; v++) + out.data[v] = ptrs[v][offset]; +} + + + /** * This method loads VectorizedArray::n_array_elements data streams from the * given array @p in. The offsets to the input array are given by the array @p @@ -716,6 +742,33 @@ vectorized_load_and_transpose(const unsigned int n_entries, } +/** + * The same as above with the difference that an array of pointers are + * passed in as input argument @p in. + * + * In analogy to the function above, one can consider that + * `in+offset[v]` is precomputed and passed as input argument. + * + * However, this function can also be used if some function returns an array + * of pointers and no assumption can be made that they belong to the same array, + * i.e., they can have their origin in different memory allocations. + */ +template +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + static_assert(width == width_, "Length of input vector do not match!"); + + for (unsigned int i = 0; i < n_entries; ++i) + for (unsigned int v = 0; + v < VectorizedArray::n_array_elements; + ++v) + out[i][v] = in[v][i]; +} + + /** * This method stores the vectorized arrays in transposed form into the given @@ -778,6 +831,41 @@ vectorized_transpose_and_store(const bool add_into, } +/** + * The same as above with the difference that an array of pointers are + * passed in as input argument @p out. + * + * In analogy to the function above, one can consider that + * `out+offset[v]` is precomputed and passed as input argument. + * + * However, this function can also be used if some function returns an array + * of pointers and no assumption can be made that they belong to the same array, + * i.e., they can have their origin in different memory allocations. 
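+ *
+ * The operation corresponds to the following scalar code, a sketch of the
+ * generic fall-back path below (the architecture-specific specializations
+ * further down produce the same result with vectorized shuffles and stores):
+ * @code
+ * if (add_into)
+ *   for (unsigned int i = 0; i < n_entries; ++i)
+ *     for (unsigned int v = 0; v < VectorizedArray<Number, width>::n_array_elements; ++v)
+ *       out[v][i] += in[i][v];
+ * else
+ *   for (unsigned int i = 0; i < n_entries; ++i)
+ *     for (unsigned int v = 0; v < VectorizedArray<Number, width>::n_array_elements; ++v)
+ *       out[v][i] = in[i][v];
+ * @endcode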
+ */ +template +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + static_assert(width == width_, "Length of input vector do not match!"); + + if (add_into) + for (unsigned int i = 0; i < n_entries; ++i) + for (unsigned int v = 0; + v < VectorizedArray::n_array_elements; + ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 0; i < n_entries; ++i) + for (unsigned int v = 0; + v < VectorizedArray::n_array_elements; + ++v) + out[v][i] = in[i][v]; +} + + //@} #ifndef DOXYGEN @@ -1139,6 +1227,45 @@ vectorized_load_and_transpose(const unsigned int n_entries, +/** + * Specialization for double and AVX-512. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m512d t0, t1, t2, t3 = {}; + + t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0); + t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1); + t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0); + t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1); + t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0); + t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1); + + __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88); + __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd); + __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88); + __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd); + out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2); + out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2); + out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3); + out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3); + } + + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + gather(out[i], in, i); +} + + + /** * Specialization for double and AVX-512. */ @@ -1222,6 +1349,84 @@ vectorized_transpose_and_store(const bool add_into, +/** + * Specialization for double and AVX-512. 
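+ *
+ * Entries are processed in chunks of four: 512-bit unpack and permute
+ * instructions transpose the lanes, the results are split into 256-bit
+ * halves that are stored (or accumulated) to the eight output pointers, and
+ * any remainder that does not divide by four is written element by element.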
+ */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 4; + __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0); + __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2); + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2); + __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2); + __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3); + __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3); + __m256d res0 = _mm512_extractf64x4_pd(v0, 0); + __m256d res4 = _mm512_extractf64x4_pd(v0, 1); + __m256d res1 = _mm512_extractf64x4_pd(v2, 0); + __m256d res5 = _mm512_extractf64x4_pd(v2, 1); + __m256d res2 = _mm512_extractf64x4_pd(v1, 0); + __m256d res6 = _mm512_extractf64x4_pd(v1, 1); + __m256d res3 = _mm512_extractf64x4_pd(v3, 0); + __m256d res7 = _mm512_extractf64x4_pd(v3, 1); + + if (add_into) + { + res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0); + _mm256_storeu_pd(out[0] + 4 * i, res0); + res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1); + _mm256_storeu_pd(out[1] + 4 * i, res1); + res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2); + _mm256_storeu_pd(out[2] + 4 * i, res2); + res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3); + _mm256_storeu_pd(out[3] + 4 * i, res3); + res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4); + _mm256_storeu_pd(out[4] + 4 * i, res4); + res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5); + _mm256_storeu_pd(out[5] + 4 * i, res5); + res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6); + _mm256_storeu_pd(out[6] + 4 * i, res6); + res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7); + _mm256_storeu_pd(out[7] + 4 * i, res7); + } + else + { + _mm256_storeu_pd(out[0] + 4 * i, res0); + _mm256_storeu_pd(out[1] + 4 * i, res1); + _mm256_storeu_pd(out[2] + 4 * i, res2); + _mm256_storeu_pd(out[3] + 4 * i, res3); + _mm256_storeu_pd(out[4] + 4 * i, res4); + _mm256_storeu_pd(out[5] + 4 * i, res5); + _mm256_storeu_pd(out[6] + 4 * i, res6); + _mm256_storeu_pd(out[7] + 4 * i, res7); + } + } + + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] = in[i][v]; +} + + + /** * Specialization for float and AVX512. */ @@ -1548,7 +1753,7 @@ vectorized_load_and_transpose(const unsigned int n_entries, // To avoid warnings about uninitialized variables, need to initialize one // variable to a pre-exisiting value in out, which will never get used in // the end. 
Keep the initialization outside the loop because of a bug in - // gcc-9 which generates a "vmovapd" instruction instead of "vmovupd" in + // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in // case t3 is initialized to zero (inside/outside of loop), see // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991 __m512 t0, t1, t2, t3; @@ -1586,8 +1791,59 @@ vectorized_load_and_transpose(const unsigned int n_entries, // remainder loop of work that does not divide by 4 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 8; ++v) - out[i].gather(in + i, offsets); + out[i].gather(in + i, offsets); +} + + + +/** + * Specialization for float and AVX-512. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 4; + + __m512 t0, t1, t2, t3; + if (n_chunks > 0) + t3 = out[0].data; + for (unsigned int i = 0; i < n_chunks; ++i) + { + t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3); + t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3); + t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3); + + __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44); + __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee); + __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44); + __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee); + + out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd); + } + + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + gather(out[i], in, i); } @@ -1704,6 +1960,117 @@ vectorized_transpose_and_store(const bool add_into, out[offsets[v] + i] = in[i][v]; } + + +/** + * Specialization for float and AVX-512. 
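+ *
+ * Entries are processed in chunks of four: 512-bit shuffles transpose the
+ * lanes, the results are split into sixteen 128-bit parts that are stored
+ * (or accumulated) to the sixteen output pointers, and any remainder is
+ * written element by element.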
+ */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44); + __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee); + __m512 t2 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44); + __m512 t3 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee); + __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88); + __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd); + __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88); + __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd); + + __m128 res0 = _mm512_extractf32x4_ps(u0, 0); + __m128 res4 = _mm512_extractf32x4_ps(u0, 1); + __m128 res8 = _mm512_extractf32x4_ps(u0, 2); + __m128 res12 = _mm512_extractf32x4_ps(u0, 3); + __m128 res1 = _mm512_extractf32x4_ps(u1, 0); + __m128 res5 = _mm512_extractf32x4_ps(u1, 1); + __m128 res9 = _mm512_extractf32x4_ps(u1, 2); + __m128 res13 = _mm512_extractf32x4_ps(u1, 3); + __m128 res2 = _mm512_extractf32x4_ps(u2, 0); + __m128 res6 = _mm512_extractf32x4_ps(u2, 1); + __m128 res10 = _mm512_extractf32x4_ps(u2, 2); + __m128 res14 = _mm512_extractf32x4_ps(u2, 3); + __m128 res3 = _mm512_extractf32x4_ps(u3, 0); + __m128 res7 = _mm512_extractf32x4_ps(u3, 1); + __m128 res11 = _mm512_extractf32x4_ps(u3, 2); + __m128 res15 = _mm512_extractf32x4_ps(u3, 3); + + if (add_into) + { + res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0); + _mm_storeu_ps(out[0] + 4 * i, res0); + res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1); + _mm_storeu_ps(out[1] + 4 * i, res1); + res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2); + _mm_storeu_ps(out[2] + 4 * i, res2); + res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3); + _mm_storeu_ps(out[3] + 4 * i, res3); + res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4); + _mm_storeu_ps(out[4] + 4 * i, res4); + res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5); + _mm_storeu_ps(out[5] + 4 * i, res5); + res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6); + _mm_storeu_ps(out[6] + 4 * i, res6); + res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7); + _mm_storeu_ps(out[7] + 4 * i, res7); + res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8); + _mm_storeu_ps(out[8] + 4 * i, res8); + res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9); + _mm_storeu_ps(out[9] + 4 * i, res9); + res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10); + _mm_storeu_ps(out[10] + 4 * i, res10); + res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11); + _mm_storeu_ps(out[11] + 4 * i, res11); + res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12); + _mm_storeu_ps(out[12] + 4 * i, res12); + res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13); + _mm_storeu_ps(out[13] + 4 * i, res13); + res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14); + _mm_storeu_ps(out[14] + 4 * i, res14); + res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15); + _mm_storeu_ps(out[15] + 4 * i, res15); + } + else + { + _mm_storeu_ps(out[0] + 4 * i, res0); + _mm_storeu_ps(out[1] + 4 * i, res1); + _mm_storeu_ps(out[2] + 4 * i, res2); + _mm_storeu_ps(out[3] + 4 * i, res3); + _mm_storeu_ps(out[4] + 4 * i, res4); + _mm_storeu_ps(out[5] + 4 * i, res5); + _mm_storeu_ps(out[6] + 4 * i, res6); + _mm_storeu_ps(out[7] + 4 * i, res7); + _mm_storeu_ps(out[8] + 
4 * i, res8); + _mm_storeu_ps(out[9] + 4 * i, res9); + _mm_storeu_ps(out[10] + 4 * i, res10); + _mm_storeu_ps(out[11] + 4 * i, res11); + _mm_storeu_ps(out[12] + 4 * i, res12); + _mm_storeu_ps(out[13] + 4 * i, res13); + _mm_storeu_ps(out[14] + 4 * i, res14); + _mm_storeu_ps(out[15] + 4 * i, res15); + } + } + + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 16; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 16; ++v) + out[v][i] = in[i][v]; +} + # endif # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__) @@ -2045,6 +2412,45 @@ vectorized_load_and_transpose(const unsigned int n_entries, +/** + * Specialization for double and AVX. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 4; + const double * in0 = in[0]; + const double * in1 = in[1]; + const double * in2 = in[2]; + const double * in3 = in[3]; + + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m256d u0 = _mm256_loadu_pd(in0 + 4 * i); + __m256d u1 = _mm256_loadu_pd(in1 + 4 * i); + __m256d u2 = _mm256_loadu_pd(in2 + 4 * i); + __m256d u3 = _mm256_loadu_pd(in3 + 4 * i); + __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20); + __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20); + __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31); + __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31); + out[4 * i + 0].data = _mm256_unpacklo_pd(t0, t1); + out[4 * i + 1].data = _mm256_unpackhi_pd(t0, t1); + out[4 * i + 2].data = _mm256_unpacklo_pd(t2, t3); + out[4 * i + 3].data = _mm256_unpackhi_pd(t2, t3); + } + + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + gather(out[i], in, i); +} + + + /** * Specialization for double and AVX. */ @@ -2113,13 +2519,81 @@ vectorized_transpose_and_store(const bool add_into, /** - * Specialization for float and AVX. + * Specialization for double and AVX. 
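+ *
+ * Entries are processed in chunks of four: 256-bit permute and unpack
+ * instructions transpose the lanes before the results are stored (or
+ * accumulated) to the four output pointers, and any remainder is written
+ * element by element.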
*/ template <> -class VectorizedArray - : public VectorizedArrayBase> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) { -public: + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 4; + double * out0 = out[0]; + double * out1 = out[1]; + double * out2 = out[2]; + double * out3 = out[3]; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m256d u0 = in[4 * i + 0].data; + __m256d u1 = in[4 * i + 1].data; + __m256d u2 = in[4 * i + 2].data; + __m256d u3 = in[4 * i + 3].data; + __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20); + __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20); + __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31); + __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31); + __m256d res0 = _mm256_unpacklo_pd(t0, t1); + __m256d res1 = _mm256_unpackhi_pd(t0, t1); + __m256d res2 = _mm256_unpacklo_pd(t2, t3); + __m256d res3 = _mm256_unpackhi_pd(t2, t3); + + // Cannot use the same store instructions in both paths of the 'if' + // because the compiler cannot know that there is no aliasing between + // pointers + if (add_into) + { + res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0); + _mm256_storeu_pd(out0 + 4 * i, res0); + res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1); + _mm256_storeu_pd(out1 + 4 * i, res1); + res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2); + _mm256_storeu_pd(out2 + 4 * i, res2); + res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3); + _mm256_storeu_pd(out3 + 4 * i, res3); + } + else + { + _mm256_storeu_pd(out0 + 4 * i, res0); + _mm256_storeu_pd(out1 + 4 * i, res1); + _mm256_storeu_pd(out2 + 4 * i, res2); + _mm256_storeu_pd(out3 + 4 * i, res3); + } + } + + // remainder loop of work that does not divide by 4 + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 4; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 4; ++v) + out[v][i] = in[i][v]; +} + + + +/** + * Specialization for float and AVX. + */ +template <> +class VectorizedArray + : public VectorizedArrayBase> +{ +public: /** * This gives the type of the array elements. */ @@ -2452,6 +2926,46 @@ vectorized_load_and_transpose(const unsigned int n_entries, +/** + * Specialization for float and AVX. 
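+ *
+ * Entries are read in chunks of four: 128-bit loads from the eight input
+ * pointers are combined into 256-bit registers and transposed with shuffles,
+ * and any remainder is collected with gather().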
+ */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m256 t0, t1, t2, t3 = {}; + t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[0] + 4 * i), 0); + t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in[4] + 4 * i), 1); + t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[1] + 4 * i), 0); + t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in[5] + 4 * i), 1); + t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[2] + 4 * i), 0); + t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in[6] + 4 * i), 1); + t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[3] + 4 * i), 0); + t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in[7] + 4 * i), 1); + + __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44); + __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee); + __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44); + __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee); + out[4 * i + 0].data = _mm256_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm256_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm256_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm256_shuffle_ps(v1, v3, 0xdd); + } + + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + gather(out[i], in, i); +} + + + /** * Specialization for float and AVX. */ @@ -2533,6 +3047,86 @@ vectorized_transpose_and_store(const bool add_into, out[offsets[v] + i] = in[i][v]; } + + +/** + * Specialization for float and AVX. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m256 u0 = in[4 * i + 0].data; + __m256 u1 = in[4 * i + 1].data; + __m256 u2 = in[4 * i + 2].data; + __m256 u3 = in[4 * i + 3].data; + __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44); + __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee); + __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44); + __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee); + u0 = _mm256_shuffle_ps(t0, t2, 0x88); + u1 = _mm256_shuffle_ps(t0, t2, 0xdd); + u2 = _mm256_shuffle_ps(t1, t3, 0x88); + u3 = _mm256_shuffle_ps(t1, t3, 0xdd); + __m128 res0 = _mm256_extractf128_ps(u0, 0); + __m128 res4 = _mm256_extractf128_ps(u0, 1); + __m128 res1 = _mm256_extractf128_ps(u1, 0); + __m128 res5 = _mm256_extractf128_ps(u1, 1); + __m128 res2 = _mm256_extractf128_ps(u2, 0); + __m128 res6 = _mm256_extractf128_ps(u2, 1); + __m128 res3 = _mm256_extractf128_ps(u3, 0); + __m128 res7 = _mm256_extractf128_ps(u3, 1); + + if (add_into) + { + res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0); + _mm_storeu_ps(out[0] + 4 * i, res0); + res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1); + _mm_storeu_ps(out[1] + 4 * i, res1); + res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2); + _mm_storeu_ps(out[2] + 4 * i, res2); + res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3); + _mm_storeu_ps(out[3] + 4 * i, res3); + res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4); + _mm_storeu_ps(out[4] + 4 * i, res4); + res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5); + _mm_storeu_ps(out[5] + 4 * i, res5); + res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6); + _mm_storeu_ps(out[6] + 4 * i, res6); + res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7); + 
_mm_storeu_ps(out[7] + 4 * i, res7); + } + else + { + _mm_storeu_ps(out[0] + 4 * i, res0); + _mm_storeu_ps(out[1] + 4 * i, res1); + _mm_storeu_ps(out[2] + 4 * i, res2); + _mm_storeu_ps(out[3] + 4 * i, res3); + _mm_storeu_ps(out[4] + 4 * i, res4); + _mm_storeu_ps(out[5] + 4 * i, res5); + _mm_storeu_ps(out[6] + 4 * i, res6); + _mm_storeu_ps(out[7] + 4 * i, res7); + } + } + + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] = in[i][v]; +} + # endif # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__) @@ -2848,6 +3442,33 @@ vectorized_load_and_transpose(const unsigned int n_entries, +/** + * Specialization for double and SSE2. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 2; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m128d u0 = _mm_loadu_pd(in[0] + 2 * i); + __m128d u1 = _mm_loadu_pd(in[1] + 2 * i); + out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1); + out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1); + } + + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[i][v] = in[v][i]; +} + + + /** * Specialization for double and SSE2. */ @@ -2900,6 +3521,57 @@ vectorized_transpose_and_store(const bool add_into, +/** + * Specialization for double and SSE2. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 2; + if (add_into) + { + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out[0] + 2 * i, + _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0)); + _mm_storeu_pd(out[1] + 2 * i, + _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1)); + } + + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[v][i] += in[i][v]; + } + else + { + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out[0] + 2 * i, res0); + _mm_storeu_pd(out[1] + 2 * i, res1); + } + + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[v][i] = in[i][v]; + } +} + + + /** * Specialization for float and SSE2. */ @@ -3219,6 +3891,41 @@ vectorized_load_and_transpose(const unsigned int n_entries, +/** + * Specialization for float and SSE2. 
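+ *
+ * Entries are read in chunks of four: 128-bit loads from the four input
+ * pointers are transposed with shuffles, and any remainder is read element
+ * by element.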
+ */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) +{ + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m128 u0 = _mm_loadu_ps(in[0] + 4 * i); + __m128 u1 = _mm_loadu_ps(in[1] + 4 * i); + __m128 u2 = _mm_loadu_ps(in[2] + 4 * i); + __m128 u3 = _mm_loadu_ps(in[3] + 4 * i); + __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); + out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd); + } + + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 4; ++v) + out[i][v] = in[v][i]; +} + + + /** * Specialization for float and SSE2. */ @@ -3282,6 +3989,66 @@ vectorized_transpose_and_store(const bool add_into, +/** + * Specialization for float and SSE2. + */ +template <> +inline DEAL_II_ALWAYS_INLINE void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) +{ + // see the comments in the vectorized_transpose_and_store above + + const unsigned int n_chunks = n_entries / 4; + for (unsigned int i = 0; i < n_chunks; ++i) + { + __m128 u0 = in[4 * i + 0].data; + __m128 u1 = in[4 * i + 1].data; + __m128 u2 = in[4 * i + 2].data; + __m128 u3 = in[4 * i + 3].data; + __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee); + u0 = _mm_shuffle_ps(t0, t2, 0x88); + u1 = _mm_shuffle_ps(t0, t2, 0xdd); + u2 = _mm_shuffle_ps(t1, t3, 0x88); + u3 = _mm_shuffle_ps(t1, t3, 0xdd); + + if (add_into) + { + u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0); + _mm_storeu_ps(out[0] + 4 * i, u0); + u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1); + _mm_storeu_ps(out[1] + 4 * i, u1); + u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2); + _mm_storeu_ps(out[2] + 4 * i, u2); + u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3); + _mm_storeu_ps(out[3] + 4 * i, u3); + } + else + { + _mm_storeu_ps(out[0] + 4 * i, u0); + _mm_storeu_ps(out[1] + 4 * i, u1); + _mm_storeu_ps(out[2] + 4 * i, u2); + _mm_storeu_ps(out[3] + 4 * i, u3); + } + } + + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 4; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 4; ++v) + out[v][i] = in[i][v]; +} + + + # endif // if DEAL_II_COMPILER_VECTORIZATION_LEVEL > 0 && defined(__SSE2__) # if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__ALTIVEC__) && \ diff --git a/tests/base/vectorization_15.cc b/tests/base/vectorization_15.cc new file mode 100644 index 0000000000..38a5b23a6a --- /dev/null +++ b/tests/base/vectorization_15.cc @@ -0,0 +1,194 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2015 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. 
+// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + +// test transpose operations of vectorized array using the array+offset method +// for the set of all supported vectorization widths (otherwise the same as +// vectorization_10) + +#include + +#include + +#include "../tests.h" + + +template +void +do_test() +{ + // since the number of array elements is system dependent, it is not a good + // idea to print them to an output file. Instead, check the values manually + const unsigned int n_vectors = + VectorizedArray::n_array_elements; + VectorizedArray arr[n_numbers]; + Number other[n_vectors * n_numbers]; + unsigned int offsets[n_vectors]; + for (unsigned int v = 0; v < n_vectors; ++v) + offsets[v] = v * n_numbers; + + std::array other_and_offset; + for (unsigned int v = 0; v < width; v++) + other_and_offset[v] = other + offsets[v]; + + for (unsigned int i = 0; i < n_vectors; ++i) + for (unsigned int j = 0; j < n_numbers; ++j) + other[i * n_numbers + j] = i * n_numbers + j; + + vectorized_load_and_transpose(n_numbers, + other_and_offset, + arr); + unsigned int n_errors = 0; + for (unsigned int j = 0; j < n_numbers; ++j) + for (unsigned int i = 0; i < n_vectors; ++i) + if (arr[j][i] != i * n_numbers + j) + ++n_errors; + if (n_errors > 0) + { + deallog << "load_and_transpose at n=" << n_numbers + << " width=" << width << ": #errors: " << n_errors << std::endl; + + for (unsigned int i = 0; i < n_numbers; ++i) + { + for (unsigned int j = 0; j < n_vectors; ++j) + deallog << arr[i][j] << " "; + deallog << std::endl; + } + } + + vectorized_transpose_and_store(true, + n_numbers, + arr, + other_and_offset); + n_errors = 0; + for (unsigned int i = 0; i < n_vectors; ++i) + for (unsigned int j = 0; j < n_numbers; ++j) + if (other[i * n_numbers + j] != 2. 
* (i * n_numbers + j)) + ++n_errors; + if (n_errors > 0) + { + deallog << "transpose_and_store ( add) at n=" << n_numbers + << " width=" << width << ": #errors: " << n_errors << std::endl; + + for (unsigned int i = 0; i < n_vectors; ++i) + { + for (unsigned int j = 0; j < n_numbers; ++j) + deallog << other[i * n_numbers + j] << " "; + deallog << std::endl; + } + } + + vectorized_transpose_and_store(false, + n_numbers, + arr, + other_and_offset); + n_errors = 0; + for (unsigned int i = 0; i < n_vectors; ++i) + for (unsigned int j = 0; j < n_numbers; ++j) + if (other[i * n_numbers + j] != (i * n_numbers + j)) + ++n_errors; + if (n_errors > 0) + { + deallog << "transpose_and_store (noadd) at n=" << n_numbers + << " width=" << width << ": #errors: " << n_errors << std::endl; + + for (unsigned int i = 0; i < n_vectors; ++i) + { + for (unsigned int j = 0; j < n_numbers; ++j) + deallog << other[i * n_numbers + j] << " "; + deallog << std::endl; + } + } +} + + +template +struct Tester +{ + static void + test() + { + do_test(); + } +}; + +template +struct Tester +{ + static void + test() + { +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__) + do_test(); +#endif + +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__) + do_test(); +#endif + +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__) + do_test(); +#endif + + do_test(); + } +}; + +template +struct Tester +{ + static void + test() + { +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 3 && defined(__AVX512F__) + do_test(); +#endif + +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__) + do_test(); +#endif + +#if DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 1 && defined(__SSE2__) + do_test(); +#endif + + do_test(); + } +}; + + +int +main() +{ + initlog(); + + deallog.push("double"); + Tester::test(); + Tester::test(); + Tester::test(); + deallog.pop(); + deallog.push("float"); + Tester::test(); + Tester::test(); + Tester::test(); + deallog.pop(); + + // test long double: in that case, the default + // path of VectorizedArray is taken no matter + // what was done for double or float + deallog.push("long double"); + Tester::test(); + deallog.pop(); +} diff --git a/tests/base/vectorization_15.output b/tests/base/vectorization_15.output new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/tests/base/vectorization_15.output @@ -0,0 +1 @@ + -- 2.39.5