From: Maximilian Bergbauer Date: Wed, 24 May 2023 20:38:14 +0000 (+0200) Subject: Rearrange specializations and implement sum inline X-Git-Tag: v9.5.0-rc1~196^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ba78f77f58ac656521e50a24ac3205b1314d28b3;p=dealii.git Rearrange specializations and implement sum inline --- diff --git a/include/deal.II/base/vectorization.h b/include/deal.II/base/vectorization.h index 2100c84335..69c9d5c773 100644 --- a/include/deal.II/base/vectorization.h +++ b/include/deal.II/base/vectorization.h @@ -966,17 +966,14 @@ vectorized_transpose_and_store(const bool add_into, #ifndef DOXYGEN -// for safety, also check that __AVX512F__ is defined in case the user manually -// set some conflicting compile flags which prevent compilation - -# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__) +# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__) /** - * Specialization of VectorizedArray class for double and AVX-512. + * Specialization for double and SSE2. */ template <> -class VectorizedArray - : public VectorizedArrayBase, 8> +class VectorizedArray + : public VectorizedArrayBase, 2> { public: /** @@ -1003,7 +1000,7 @@ public: */ template VectorizedArray(const std::initializer_list &list) - : VectorizedArrayBase, 8>(list) + : VectorizedArrayBase, 2>(list) {} /** @@ -1013,11 +1010,10 @@ public: VectorizedArray & operator=(const double x) & { - data = _mm512_set1_pd(x); + data = _mm_set1_pd(x); return *this; } - /** * Assign a scalar to the current object. This overload is used for * rvalue references; because it does not make sense to assign @@ -1033,7 +1029,7 @@ public: double & operator[](const unsigned int comp) { - AssertIndexRange(comp, 8); + AssertIndexRange(comp, 2); return *(reinterpret_cast(&data) + comp); } @@ -1044,7 +1040,7 @@ public: const double & operator[](const unsigned int comp) const { - AssertIndexRange(comp, 8); + AssertIndexRange(comp, 2); return *(reinterpret_cast(&data) + comp); } @@ -1055,15 +1051,10 @@ public: VectorizedArray & operator+=(const VectorizedArray &vec) { - // if the compiler supports vector arithmetic, we can simply use += - // operator on the given data type. this allows the compiler to combine - // additions with multiplication (fused multiply-add) if those - // instructions are available. Otherwise, we need to use the built-in - // intrinsic command for __m512d # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data += vec.data; # else - data = _mm512_add_pd(data, vec.data); + data = _mm_add_pd(data, vec.data); # endif return *this; } @@ -1078,10 +1069,11 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data -= vec.data; # else - data = _mm512_sub_pd(data, vec.data); + data = _mm_sub_pd(data, vec.data); # endif return *this; } + /** * Multiplication. */ @@ -1092,7 +1084,7 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data *= vec.data; # else - data = _mm512_mul_pd(data, vec.data); + data = _mm_mul_pd(data, vec.data); # endif return *this; } @@ -1107,61 +1099,65 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data /= vec.data; # else - data = _mm512_div_pd(data, vec.data); + data = _mm_div_pd(data, vec.data); # endif return *this; } /** - * Load size() data items from memory into the calling class, starting at - * the given address. The memory need not be aligned by 64 bytes, as opposed + * Load @p size() from memory into the calling class, starting at + * the given address. 
The memory need not be aligned by 16 bytes, as opposed * to casting a double address to VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void load(const double *ptr) { - data = _mm512_loadu_pd(ptr); + data = _mm_loadu_pd(ptr); } DEAL_II_ALWAYS_INLINE void load(const float *ptr) { - data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr)); + DEAL_II_OPENMP_SIMD_PRAGMA + for (unsigned int i = 0; i < 2; ++i) + data[i] = ptr[i]; } /** * Write the content of the calling class into memory in form of @p * size() to the given address. The memory need not be aligned by - * 64 bytes, as opposed to casting a double address to + * 16 bytes, as opposed to casting a double address to * VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void store(double *ptr) const { - _mm512_storeu_pd(ptr, data); + _mm_storeu_pd(ptr, data); } DEAL_II_ALWAYS_INLINE void store(float *ptr) const { - _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data)); + DEAL_II_OPENMP_SIMD_PRAGMA + for (unsigned int i = 0; i < 2; ++i) + ptr[i] = data[i]; } /** * @copydoc VectorizedArray::streaming_store() - * @note Memory must be aligned by 64 bytes. + * @note Memory must be aligned by 16 bytes. */ DEAL_II_ALWAYS_INLINE void streaming_store(double *ptr) const { - Assert(reinterpret_cast(ptr) % 64 == 0, + Assert(reinterpret_cast(ptr) % 16 == 0, ExcMessage("Memory not aligned")); - _mm512_stream_pd(ptr, data); + _mm_stream_pd(ptr, data); } /** @@ -1180,20 +1176,8 @@ public: void gather(const double *base_ptr, const unsigned int *offsets) { - // unfortunately, there does not appear to be a 256 bit integer load, so - // do it by some reinterpret casts here. this is allowed because the Intel - // API allows aliasing between different vector types. - const __m256 index_val = - _mm256_loadu_ps(reinterpret_cast(offsets)); - const __m256i index = *reinterpret_cast(&index_val); - - // work around a warning with gcc-12 about an uninitialized initial state - // for gather by starting with a zero guess, even though all lanes will be - // overwritten - __m512d zero = {}; - __mmask8 mask = 0xFF; - - data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8); + for (unsigned int i = 0; i < 2; ++i) + *(reinterpret_cast(&data) + i) = base_ptr[offsets[i]]; } /** @@ -1212,19 +1196,8 @@ public: void scatter(const unsigned int *offsets, double *base_ptr) const { - for (unsigned int i = 0; i < 8; ++i) - for (unsigned int j = i + 1; j < 8; ++j) - Assert(offsets[i] != offsets[j], - ExcMessage("Result of scatter undefined if two offset elements" - " point to the same position")); - - // unfortunately, there does not appear to be a 256 bit integer load, so - // do it by some reinterpret casts here. this is allowed because the Intel - // API allows aliasing between different vector types. - const __m256 index_val = - _mm256_loadu_ps(reinterpret_cast(offsets)); - const __m256i index = *reinterpret_cast(&index_val); - _mm512_i32scatter_pd(base_ptr, index, data, 8); + for (unsigned int i = 0; i < 2; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data) + i); } /** @@ -1232,36 +1205,21 @@ public: * this->data[i]$. */ double - sum(); + sum() + { + __m128d t1 = _mm_unpackhi_pd(data, data); + __m128d t2 = _mm_add_pd(data, t1); + return _mm_cvtsd_f64(t2); + } /** * Actual data field. To be consistent with the standard layout type and to * enable interaction with external SIMD functionality, this member is * declared public. */ - __m512d data; + __m128d data; private: - /** - * Extract lower half of data field. 
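
// Illustration of the two-lane horizontal add used by the inline sum()
// introduced above for VectorizedArray<double, 2> (requires SSE2; the
// helper name sum2 is only for this sketch).
#include <emmintrin.h>
#include <cstdio>

static double sum2(const __m128d v)
{
  const __m128d hi = _mm_unpackhi_pd(v, v); // both lanes hold v[1]
  const __m128d s  = _mm_add_pd(v, hi);     // lane 0 now holds v[0] + v[1]
  return _mm_cvtsd_f64(s);                  // extract lane 0
}

int main()
{
  const __m128d v = _mm_set_pd(3.0, 4.0);   // lanes {4.0, 3.0}
  std::printf("%f\n", sum2(v));             // prints 7.000000
  return 0;
}
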
- */ - DEAL_II_ALWAYS_INLINE - __m256d - get_lower() const - { - return _mm512_castpd512_pd256(data); - } - - /** - * Extract upper half of data field. - */ - DEAL_II_ALWAYS_INLINE - __m256d - get_upper() const - { - return _mm512_extractf64x4_pd(data, 1); - } - /** * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. @@ -1271,7 +1229,7 @@ private: get_sqrt() const { VectorizedArray res; - res.data = _mm512_sqrt_pd(data); + res.data = _mm_sqrt_pd(data); return res; } @@ -1283,16 +1241,13 @@ private: VectorizedArray get_abs() const { - // to compute the absolute value, perform bitwise andnot with -0. This - // will leave all value and exponent bits unchanged but force the sign - // value to +. Since there is no andnot for AVX512, we interpret the data - // as 64 bit integers and do the andnot on those types (note that andnot - // is a bitwise operation so the data type does not matter) - __m512d mask = _mm512_set1_pd(-0.); + // to compute the absolute value, perform + // bitwise andnot with -0. This will leave all + // value and exponent bits unchanged but force + // the sign value to +. + __m128d mask = _mm_set1_pd(-0.); VectorizedArray res; - res.data = reinterpret_cast<__m512d>( - _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask), - reinterpret_cast<__m512i>(data))); + res.data = _mm_andnot_pd(mask, data); return res; } @@ -1305,7 +1260,7 @@ private: get_max(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm512_max_pd(data, other.data); + res.data = _mm_max_pd(data, other.data); return res; } @@ -1318,7 +1273,7 @@ private: get_min(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm512_min_pd(data, other.data); + res.data = _mm_min_pd(data, other.data); return res; } @@ -1342,256 +1297,168 @@ private: /** - * Specialization for double and AVX-512. + * Specialization for double and SSE2. */ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_load_and_transpose(const unsigned int n_entries, const double * in, const unsigned int * offsets, - VectorizedArray *out) + VectorizedArray *out) { - // do not do full transpose because the code is long and will most - // likely not pay off because many processors have two load units - // (for the top 8 instructions) but only 1 permute unit (for the 8 - // shuffle/unpack instructions). 
rather start the transposition on the - // vectorized array of half the size with 256 bits - const unsigned int n_chunks = n_entries / 4; + const unsigned int n_chunks = n_entries / 2; for (unsigned int i = 0; i < n_chunks; ++i) { - __m512d t0, t1, t2, t3 = {}; - - t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0); - t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1); - t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0); - t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1); - t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0); - t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1); - t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0); - t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1); - - __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88); - __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd); - __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88); - __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd); - out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2); - out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2); - out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3); - out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3); + __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]); + __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]); + out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1); + out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1); } - // remainder loop of work that does not divide by 4 - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - out[i].gather(in + i, offsets); + + // remainder loop of work that does not divide by 2 + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[i][v] = in[offsets[v] + i]; } /** - * Specialization for double and AVX-512. + * Specialization for double and SSE2. 
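
// Illustration of the 2x2 double transpose performed in the loop above:
// _mm_unpacklo_pd pairs the low lanes of two rows, _mm_unpackhi_pd the
// high lanes (requires SSE2; the array names are only for this sketch).
#include <emmintrin.h>
#include <cstdio>

int main()
{
  const double  row0[2] = {1.0, 2.0};
  const double  row1[2] = {3.0, 4.0};
  const __m128d u0      = _mm_loadu_pd(row0);
  const __m128d u1      = _mm_loadu_pd(row1);
  double        col0[2], col1[2];
  _mm_storeu_pd(col0, _mm_unpacklo_pd(u0, u1)); // {1.0, 3.0}
  _mm_storeu_pd(col1, _mm_unpackhi_pd(u0, u1)); // {2.0, 4.0}
  std::printf("%g %g | %g %g\n", col0[0], col0[1], col1[0], col1[1]);
  return 0;
}
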
*/ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_load_and_transpose(const unsigned int n_entries, - const std::array &in, - VectorizedArray * out) + const std::array &in, + VectorizedArray * out) { - const unsigned int n_chunks = n_entries / 4; + // see the comments in the vectorized_load_and_transpose above + + const unsigned int n_chunks = n_entries / 2; for (unsigned int i = 0; i < n_chunks; ++i) { - __m512d t0, t1, t2, t3 = {}; - - t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0); - t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1); - t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0); - t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1); - t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0); - t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1); - t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0); - t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1); - - __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88); - __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd); - __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88); - __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd); - out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2); - out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2); - out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3); - out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3); + __m128d u0 = _mm_loadu_pd(in[0] + 2 * i); + __m128d u1 = _mm_loadu_pd(in[1] + 2 * i); + out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1); + out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1); } - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - gather(out[i], in, i); + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[i][v] = in[v][i]; } /** - * Specialization for double and AVX-512. + * Specialization for double and SSE2. 
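
// Scalar reference of what the intrinsics below compute, mirroring the
// remainder loops of the SSE2 specialization: lane v of in[i] goes to
// out[offsets[v] + i], accumulated when add_into is set (the function
// name transpose_and_store_reference is only for this sketch).
void transpose_and_store_reference(const bool          add_into,
                                   const unsigned int  n_entries,
                                   const double (*in)[2], // in[i][v]
                                   const unsigned int *offsets,
                                   double *            out)
{
  for (unsigned int i = 0; i < n_entries; ++i)
    for (unsigned int v = 0; v < 2; ++v)
      if (add_into)
        out[offsets[v] + i] += in[i][v];
      else
        out[offsets[v] + i] = in[i][v];
}
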
*/ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, - const VectorizedArray *in, + const VectorizedArray *in, const unsigned int * offsets, double * out) { - // as for the load, we split the store operations into 256 bit units to - // better balance between code size, shuffle instructions, and stores - const unsigned int n_chunks = n_entries / 4; - __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0); - __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2); - for (unsigned int i = 0; i < n_chunks; ++i) + const unsigned int n_chunks = n_entries / 2; + if (add_into) { - __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data); - __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data); - __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); - __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); - __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2); - __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2); - __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3); - __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3); - __m256d res0 = _mm512_extractf64x4_pd(v0, 0); - __m256d res4 = _mm512_extractf64x4_pd(v0, 1); - __m256d res1 = _mm512_extractf64x4_pd(v2, 0); - __m256d res5 = _mm512_extractf64x4_pd(v2, 1); - __m256d res2 = _mm512_extractf64x4_pd(v1, 0); - __m256d res6 = _mm512_extractf64x4_pd(v1, 1); - __m256d res3 = _mm512_extractf64x4_pd(v3, 0); - __m256d res7 = _mm512_extractf64x4_pd(v3, 1); - - // Cannot use the same store instructions in both paths of the 'if' - // because the compiler cannot know that there is no aliasing - // between pointers - if (add_into) + for (unsigned int i = 0; i < n_chunks; ++i) { - res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0); - _mm256_storeu_pd(out + 4 * i + offsets[0], res0); - res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1); - _mm256_storeu_pd(out + 4 * i + offsets[1], res1); - res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2); - _mm256_storeu_pd(out + 4 * i + offsets[2], res2); - res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3); - _mm256_storeu_pd(out + 4 * i + offsets[3], res3); - res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4); - _mm256_storeu_pd(out + 4 * i + offsets[4], res4); - res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5); - _mm256_storeu_pd(out + 4 * i + offsets[5], res5); - res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6); - _mm256_storeu_pd(out + 4 * i + offsets[6], res6); - res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7); - _mm256_storeu_pd(out + 4 * i + offsets[7], res7); + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out + 2 * i + offsets[0], + _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]), + res0)); + _mm_storeu_pd(out + 2 * i + offsets[1], + _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]), + res1)); } - else + // remainder loop of work that does not divide by 2 + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[offsets[v] + i] += in[i][v]; + } + else + { + for (unsigned int i = 0; i < n_chunks; ++i) { - _mm256_storeu_pd(out + 4 * i + offsets[0], res0); - _mm256_storeu_pd(out + 4 * i + offsets[1], res1); - _mm256_storeu_pd(out 
+ 4 * i + offsets[2], res2); - _mm256_storeu_pd(out + 4 * i + offsets[3], res3); - _mm256_storeu_pd(out + 4 * i + offsets[4], res4); - _mm256_storeu_pd(out + 4 * i + offsets[5], res5); - _mm256_storeu_pd(out + 4 * i + offsets[6], res6); - _mm256_storeu_pd(out + 4 * i + offsets[7], res7); + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out + 2 * i + offsets[0], res0); + _mm_storeu_pd(out + 2 * i + offsets[1], res1); } + // remainder loop of work that does not divide by 2 + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[offsets[v] + i] = in[i][v]; } - - // remainder loop of work that does not divide by 4 - if (add_into) - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 8; ++v) - out[offsets[v] + i] += in[i][v]; - else - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 8; ++v) - out[offsets[v] + i] = in[i][v]; } /** - * Specialization for double and AVX-512. + * Specialization for double and SSE2. */ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, - const VectorizedArray *in, - std::array & out) + const VectorizedArray *in, + std::array & out) { // see the comments in the vectorized_transpose_and_store above - const unsigned int n_chunks = n_entries / 4; - __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0); - __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2); - for (unsigned int i = 0; i < n_chunks; ++i) + const unsigned int n_chunks = n_entries / 2; + if (add_into) { - __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data); - __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data); - __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); - __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); - __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2); - __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2); - __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3); - __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3); - __m256d res0 = _mm512_extractf64x4_pd(v0, 0); - __m256d res4 = _mm512_extractf64x4_pd(v0, 1); - __m256d res1 = _mm512_extractf64x4_pd(v2, 0); - __m256d res5 = _mm512_extractf64x4_pd(v2, 1); - __m256d res2 = _mm512_extractf64x4_pd(v1, 0); - __m256d res6 = _mm512_extractf64x4_pd(v1, 1); - __m256d res3 = _mm512_extractf64x4_pd(v3, 0); - __m256d res7 = _mm512_extractf64x4_pd(v3, 1); - - if (add_into) + for (unsigned int i = 0; i < n_chunks; ++i) { - res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0); - _mm256_storeu_pd(out[0] + 4 * i, res0); - res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1); - _mm256_storeu_pd(out[1] + 4 * i, res1); - res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2); - _mm256_storeu_pd(out[2] + 4 * i, res2); - res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3); - _mm256_storeu_pd(out[3] + 4 * i, res3); - res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4); - _mm256_storeu_pd(out[4] + 4 * i, res4); - res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5); - _mm256_storeu_pd(out[5] + 4 * i, res5); - res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6); - _mm256_storeu_pd(out[6] + 4 * i, res6); - res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7); - _mm256_storeu_pd(out[7] + 4 * 
i, res7); + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out[0] + 2 * i, + _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0)); + _mm_storeu_pd(out[1] + 2 * i, + _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1)); } - else + + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[v][i] += in[i][v]; + } + else + { + for (unsigned int i = 0; i < n_chunks; ++i) { - _mm256_storeu_pd(out[0] + 4 * i, res0); - _mm256_storeu_pd(out[1] + 4 * i, res1); - _mm256_storeu_pd(out[2] + 4 * i, res2); - _mm256_storeu_pd(out[3] + 4 * i, res3); - _mm256_storeu_pd(out[4] + 4 * i, res4); - _mm256_storeu_pd(out[5] + 4 * i, res5); - _mm256_storeu_pd(out[6] + 4 * i, res6); - _mm256_storeu_pd(out[7] + 4 * i, res7); + __m128d u0 = in[2 * i + 0].data; + __m128d u1 = in[2 * i + 1].data; + __m128d res0 = _mm_unpacklo_pd(u0, u1); + __m128d res1 = _mm_unpackhi_pd(u0, u1); + _mm_storeu_pd(out[0] + 2 * i, res0); + _mm_storeu_pd(out[1] + 2 * i, res1); } - } - if (add_into) - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 8; ++v) - out[v][i] += in[i][v]; - else - for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 8; ++v) - out[v][i] = in[i][v]; + for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 2; ++v) + out[v][i] = in[i][v]; + } } /** - * Specialization for float and AVX512. + * Specialization for float and SSE2. */ template <> -class VectorizedArray - : public VectorizedArrayBase, 16> +class VectorizedArray + : public VectorizedArrayBase, 4> { public: /** @@ -1599,6 +1466,10 @@ public: */ using value_type = float; + /** + * This function can be used to set all data fields to a given scalar. + */ + /** * Default empty constructor, leaving the data in an uninitialized state * similar to float/double. @@ -1618,17 +1489,14 @@ public: */ template VectorizedArray(const std::initializer_list &list) - : VectorizedArrayBase, 16>(list) + : VectorizedArrayBase, 4>(list) {} - /** - * This function can be used to set all data fields to a given scalar. - */ DEAL_II_ALWAYS_INLINE VectorizedArray & operator=(const float x) & { - data = _mm512_set1_ps(x); + data = _mm_set1_ps(x); return *this; } @@ -1647,7 +1515,7 @@ public: float & operator[](const unsigned int comp) { - AssertIndexRange(comp, 16); + AssertIndexRange(comp, 4); return *(reinterpret_cast(&data) + comp); } @@ -1658,7 +1526,7 @@ public: const float & operator[](const unsigned int comp) const { - AssertIndexRange(comp, 16); + AssertIndexRange(comp, 4); return *(reinterpret_cast(&data) + comp); } @@ -1669,15 +1537,10 @@ public: VectorizedArray & operator+=(const VectorizedArray &vec) { - // if the compiler supports vector arithmetic, we can simply use += - // operator on the given data type. this allows the compiler to combine - // additions with multiplication (fused multiply-add) if those - // instructions are available. Otherwise, we need to use the built-in - // intrinsic command for __m512d # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data += vec.data; # else - data = _mm512_add_ps(data, vec.data); + data = _mm_add_ps(data, vec.data); # endif return *this; } @@ -1692,10 +1555,11 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data -= vec.data; # else - data = _mm512_sub_ps(data, vec.data); + data = _mm_sub_ps(data, vec.data); # endif return *this; } + /** * Multiplication. 
*/ @@ -1706,7 +1570,7 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data *= vec.data; # else - data = _mm512_mul_ps(data, vec.data); + data = _mm_mul_ps(data, vec.data); # endif return *this; } @@ -1721,47 +1585,47 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data /= vec.data; # else - data = _mm512_div_ps(data, vec.data); + data = _mm_div_ps(data, vec.data); # endif return *this; } /** * Load @p size() from memory into the calling class, starting at - * the given address. The memory need not be aligned by 64 bytes, as opposed + * the given address. The memory need not be aligned by 16 bytes, as opposed * to casting a float address to VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void load(const float *ptr) { - data = _mm512_loadu_ps(ptr); + data = _mm_loadu_ps(ptr); } /** * Write the content of the calling class into memory in form of @p * size() to the given address. The memory need not be aligned by - * 64 bytes, as opposed to casting a float address to + * 16 bytes, as opposed to casting a float address to * VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void store(float *ptr) const { - _mm512_storeu_ps(ptr, data); + _mm_storeu_ps(ptr, data); } /** * @copydoc VectorizedArray::streaming_store() - * @note Memory must be aligned by 64 bytes. + * @note Memory must be aligned by 16 bytes. */ DEAL_II_ALWAYS_INLINE void streaming_store(float *ptr) const { - Assert(reinterpret_cast(ptr) % 64 == 0, + Assert(reinterpret_cast(ptr) % 16 == 0, ExcMessage("Memory not aligned")); - _mm512_stream_ps(ptr, data); + _mm_stream_ps(ptr, data); } /** @@ -1780,20 +1644,8 @@ public: void gather(const float *base_ptr, const unsigned int *offsets) { - // unfortunately, there does not appear to be a 512 bit integer load, so - // do it by some reinterpret casts here. this is allowed because the Intel - // API allows aliasing between different vector types. - const __m512 index_val = - _mm512_loadu_ps(reinterpret_cast(offsets)); - const __m512i index = *reinterpret_cast(&index_val); - - // work around a warning with gcc-12 about an uninitialized initial state - // for gather by starting with a zero guess, even though all lanes will be - // overwritten - __m512 zero = {}; - __mmask16 mask = 0xFFFF; - - data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4); + for (unsigned int i = 0; i < 4; ++i) + *(reinterpret_cast(&data) + i) = base_ptr[offsets[i]]; } /** @@ -1812,19 +1664,8 @@ public: void scatter(const unsigned int *offsets, float *base_ptr) const { - for (unsigned int i = 0; i < 16; ++i) - for (unsigned int j = i + 1; j < 16; ++j) - Assert(offsets[i] != offsets[j], - ExcMessage("Result of scatter undefined if two offset elements" - " point to the same position")); - - // unfortunately, there does not appear to be a 512 bit integer load, so - // do it by some reinterpret casts here. this is allowed because the Intel - // API allows aliasing between different vector types. - const __m512 index_val = - _mm512_loadu_ps(reinterpret_cast(offsets)); - const __m512i index = *reinterpret_cast(&index_val); - _mm512_i32scatter_ps(base_ptr, index, data, 4); + for (unsigned int i = 0; i < 4; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data) + i); } /** @@ -1832,36 +1673,23 @@ public: * this->data[i]$. */ float - sum(); + sum() + { + __m128 t1 = _mm_movehl_ps(data, data); + __m128 t2 = _mm_add_ps(data, t1); + __m128 t3 = _mm_shuffle_ps(t2, t2, 1); + __m128 t4 = _mm_add_ss(t2, t3); + return _mm_cvtss_f32(t4); + } /** * Actual data field. 
To be consistent with the standard layout type and to * enable interaction with external SIMD functionality, this member is * declared public. */ - __m512 data; + __m128 data; private: - /** - * Extract lower half of data field. - */ - DEAL_II_ALWAYS_INLINE - __m256 - get_lower() const - { - return _mm512_castps512_ps256(data); - } - - /** - * Extract upper half of data field. - */ - DEAL_II_ALWAYS_INLINE - __m256 - get_upper() const - { - return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1)); - } - /** * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. @@ -1871,7 +1699,7 @@ private: get_sqrt() const { VectorizedArray res; - res.data = _mm512_sqrt_ps(data); + res.data = _mm_sqrt_ps(data); return res; } @@ -1885,14 +1713,10 @@ private: { // to compute the absolute value, perform bitwise andnot with -0. This // will leave all value and exponent bits unchanged but force the sign - // value to +. Since there is no andnot for AVX512, we interpret the data - // as 32 bit integers and do the andnot on those types (note that andnot - // is a bitwise operation so the data type does not matter) - __m512 mask = _mm512_set1_ps(-0.f); + // value to +. + __m128 mask = _mm_set1_ps(-0.f); VectorizedArray res; - res.data = reinterpret_cast<__m512>( - _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask), - reinterpret_cast<__m512i>(data))); + res.data = _mm_andnot_ps(mask, data); return res; } @@ -1905,7 +1729,7 @@ private: get_max(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm512_max_ps(data, other.data); + res.data = _mm_max_ps(data, other.data); return res; } @@ -1918,7 +1742,7 @@ private: get_min(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm512_min_ps(data, other.data); + res.data = _mm_min_ps(data, other.data); return res; } @@ -1942,343 +1766,199 @@ private: /** - * Specialization for float and AVX-512. + * Specialization for float and SSE2. */ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_load_and_transpose(const unsigned int n_entries, - const float * in, - const unsigned int * offsets, - VectorizedArray *out) +vectorized_load_and_transpose(const unsigned int n_entries, + const float * in, + const unsigned int * offsets, + VectorizedArray *out) { - // Similar to the double case, we perform the work on smaller entities. In - // this case, we start from 128 bit arrays and insert them into a full 512 - // bit index. This reduces the code size and register pressure because we do - // shuffles on 4 numbers rather than 16. const unsigned int n_chunks = n_entries / 4; - - // To avoid warnings about uninitialized variables, need to initialize one - // variable to a pre-exisiting value in out, which will never get used in - // the end. 
Keep the initialization outside the loop because of a bug in - // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in - // case t3 is initialized to zero (inside/outside of loop), see - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991 - __m512 t0, t1, t2, t3; - if (n_chunks > 0) - t3 = out[0].data; for (unsigned int i = 0; i < n_chunks; ++i) { - t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3); - t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3); - t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3); - - __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44); - __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee); - __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44); - __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee); - - out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88); - out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd); - out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88); - out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd); + __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]); + __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]); + __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]); + __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]); + __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); + out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd); } // remainder loop of work that does not divide by 4 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - out[i].gather(in + i, offsets); + for (unsigned int v = 0; v < 4; ++v) + out[i][v] = in[offsets[v] + i]; } /** - * Specialization for float and AVX-512. + * Specialization for float and SSE2. 
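
// The shuffle constants 0x44/0xee/0x88/0xdd used in the loop above
// implement a 4x4 float transpose, essentially the pattern behind the
// _MM_TRANSPOSE4_PS macro. A standalone sketch (requires SSE; the
// variable names are only for this sketch):
#include <xmmintrin.h>
#include <cstdio>

int main()
{
  float m[4][4];
  for (int r = 0; r < 4; ++r)
    for (int c = 0; c < 4; ++c)
      m[r][c] = static_cast<float>(10 * r + c);

  __m128 u0 = _mm_loadu_ps(m[0]);
  __m128 u1 = _mm_loadu_ps(m[1]);
  __m128 u2 = _mm_loadu_ps(m[2]);
  __m128 u3 = _mm_loadu_ps(m[3]);

  // 0x44/0xee interleave the 64-bit halves of two rows
  const __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
  const __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
  const __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
  const __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);

  // 0x88/0xdd pick the even/odd 32-bit lanes, yielding the columns
  u0 = _mm_shuffle_ps(v0, v2, 0x88); // column 0
  u1 = _mm_shuffle_ps(v0, v2, 0xdd); // column 1
  u2 = _mm_shuffle_ps(v1, v3, 0x88); // column 2
  u3 = _mm_shuffle_ps(v1, v3, 0xdd); // column 3

  float t[4];
  _mm_storeu_ps(t, u1);
  std::printf("%g %g %g %g\n", t[0], t[1], t[2], t[3]); // 1 11 21 31
  return 0;
}
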
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_load_and_transpose(const unsigned int n_entries, - const std::array &in, - VectorizedArray * out) +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) { // see the comments in the vectorized_load_and_transpose above const unsigned int n_chunks = n_entries / 4; - - __m512 t0, t1, t2, t3; - if (n_chunks > 0) - t3 = out[0].data; for (unsigned int i = 0; i < n_chunks; ++i) { - t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2); - t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3); - t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2); - t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3); - t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2); - t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2); - t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3); - - __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44); - __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee); - __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44); - __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee); - - out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88); - out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd); - out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88); - out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd); + __m128 u0 = _mm_loadu_ps(in[0] + 4 * i); + __m128 u1 = _mm_loadu_ps(in[1] + 4 * i); + __m128 u2 = _mm_loadu_ps(in[2] + 4 * i); + __m128 u3 = _mm_loadu_ps(in[3] + 4 * i); + __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); + out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd); } for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - gather(out[i], in, i); + for (unsigned int v = 0; v < 4; ++v) + out[i][v] = in[v][i]; } /** - * Specialization for float and AVX-512. + * Specialization for float and SSE2. 
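
// Illustration of the four-lane reduction used by the inline sum() of the
// float specialization above: fold the upper half onto the lower half,
// then fold the remaining pair (requires SSE; the helper name sum4 is
// only for this sketch).
#include <xmmintrin.h>
#include <cstdio>

static float sum4(const __m128 v)
{
  const __m128 hi   = _mm_movehl_ps(v, v);           // lanes {v2, v3, v2, v3}
  const __m128 pair = _mm_add_ps(v, hi);             // lane 0 = v0+v2, lane 1 = v1+v3
  const __m128 odd  = _mm_shuffle_ps(pair, pair, 1); // lane 0 = v1+v3
  return _mm_cvtss_f32(_mm_add_ss(pair, odd));       // (v0+v2) + (v1+v3)
}

int main()
{
  std::printf("%f\n", sum4(_mm_set_ps(4.f, 3.f, 2.f, 1.f))); // prints 10.000000
  return 0;
}
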
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_transpose_and_store(const bool add_into, - const unsigned int n_entries, - const VectorizedArray *in, - const unsigned int * offsets, - float * out) +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + const unsigned int * offsets, + float * out) { const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44); - __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee); - __m512 t2 = - _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44); - __m512 t3 = - _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee); - __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88); - __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd); - __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88); - __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd); - - __m128 res0 = _mm512_extractf32x4_ps(u0, 0); - __m128 res4 = _mm512_extractf32x4_ps(u0, 1); - __m128 res8 = _mm512_extractf32x4_ps(u0, 2); - __m128 res12 = _mm512_extractf32x4_ps(u0, 3); - __m128 res1 = _mm512_extractf32x4_ps(u1, 0); - __m128 res5 = _mm512_extractf32x4_ps(u1, 1); - __m128 res9 = _mm512_extractf32x4_ps(u1, 2); - __m128 res13 = _mm512_extractf32x4_ps(u1, 3); - __m128 res2 = _mm512_extractf32x4_ps(u2, 0); - __m128 res6 = _mm512_extractf32x4_ps(u2, 1); - __m128 res10 = _mm512_extractf32x4_ps(u2, 2); - __m128 res14 = _mm512_extractf32x4_ps(u2, 3); - __m128 res3 = _mm512_extractf32x4_ps(u3, 0); - __m128 res7 = _mm512_extractf32x4_ps(u3, 1); - __m128 res11 = _mm512_extractf32x4_ps(u3, 2); - __m128 res15 = _mm512_extractf32x4_ps(u3, 3); + __m128 u0 = in[4 * i + 0].data; + __m128 u1 = in[4 * i + 1].data; + __m128 u2 = in[4 * i + 2].data; + __m128 u3 = in[4 * i + 3].data; + __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee); + u0 = _mm_shuffle_ps(t0, t2, 0x88); + u1 = _mm_shuffle_ps(t0, t2, 0xdd); + u2 = _mm_shuffle_ps(t1, t3, 0x88); + u3 = _mm_shuffle_ps(t1, t3, 0xdd); // Cannot use the same store instructions in both paths of the 'if' // because the compiler cannot know that there is no aliasing between // pointers if (add_into) { - res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0); - _mm_storeu_ps(out + 4 * i + offsets[0], res0); - res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1); - _mm_storeu_ps(out + 4 * i + offsets[1], res1); - res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2); - _mm_storeu_ps(out + 4 * i + offsets[2], res2); - res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3); - _mm_storeu_ps(out + 4 * i + offsets[3], res3); - res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4); - _mm_storeu_ps(out + 4 * i + offsets[4], res4); - res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5); - _mm_storeu_ps(out + 4 * i + offsets[5], res5); - res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6); - _mm_storeu_ps(out + 4 * i + offsets[6], res6); - res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7); - _mm_storeu_ps(out + 4 * i + offsets[7], res7); - res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8); - _mm_storeu_ps(out + 4 * i + offsets[8], res8); - res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9); - _mm_storeu_ps(out + 4 * i + offsets[9], res9); - res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * 
i + offsets[10]), res10); - _mm_storeu_ps(out + 4 * i + offsets[10], res10); - res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11); - _mm_storeu_ps(out + 4 * i + offsets[11], res11); - res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12); - _mm_storeu_ps(out + 4 * i + offsets[12], res12); - res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13); - _mm_storeu_ps(out + 4 * i + offsets[13], res13); - res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14); - _mm_storeu_ps(out + 4 * i + offsets[14], res14); - res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15); - _mm_storeu_ps(out + 4 * i + offsets[15], res15); + u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0); + _mm_storeu_ps(out + 4 * i + offsets[0], u0); + u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1); + _mm_storeu_ps(out + 4 * i + offsets[1], u1); + u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2); + _mm_storeu_ps(out + 4 * i + offsets[2], u2); + u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3); + _mm_storeu_ps(out + 4 * i + offsets[3], u3); } else { - _mm_storeu_ps(out + 4 * i + offsets[0], res0); - _mm_storeu_ps(out + 4 * i + offsets[1], res1); - _mm_storeu_ps(out + 4 * i + offsets[2], res2); - _mm_storeu_ps(out + 4 * i + offsets[3], res3); - _mm_storeu_ps(out + 4 * i + offsets[4], res4); - _mm_storeu_ps(out + 4 * i + offsets[5], res5); - _mm_storeu_ps(out + 4 * i + offsets[6], res6); - _mm_storeu_ps(out + 4 * i + offsets[7], res7); - _mm_storeu_ps(out + 4 * i + offsets[8], res8); - _mm_storeu_ps(out + 4 * i + offsets[9], res9); - _mm_storeu_ps(out + 4 * i + offsets[10], res10); - _mm_storeu_ps(out + 4 * i + offsets[11], res11); - _mm_storeu_ps(out + 4 * i + offsets[12], res12); - _mm_storeu_ps(out + 4 * i + offsets[13], res13); - _mm_storeu_ps(out + 4 * i + offsets[14], res14); - _mm_storeu_ps(out + 4 * i + offsets[15], res15); + _mm_storeu_ps(out + 4 * i + offsets[0], u0); + _mm_storeu_ps(out + 4 * i + offsets[1], u1); + _mm_storeu_ps(out + 4 * i + offsets[2], u2); + _mm_storeu_ps(out + 4 * i + offsets[3], u3); } } // remainder loop of work that does not divide by 4 if (add_into) for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 16; ++v) + for (unsigned int v = 0; v < 4; ++v) out[offsets[v] + i] += in[i][v]; else for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 16; ++v) + for (unsigned int v = 0; v < 4; ++v) out[offsets[v] + i] = in[i][v]; } /** - * Specialization for float and AVX-512. + * Specialization for float and SSE2. 
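
// Usage sketch for the transpose helpers specialized here: gather four
// contiguous float streams into four-lane vectors and write them back
// (assumes a deal.II build with SSE2 enabled; the function name
// round_trip and the offset layout are only for this sketch).
#include <deal.II/base/vectorization.h>
#include <vector>

void round_trip(const float *src, float *dst, const unsigned int n)
{
  using dealii::VectorizedArray;
  const unsigned int offsets[4] = {0, n, 2 * n, 3 * n}; // lane v reads src[v * n + i]
  std::vector<VectorizedArray<float, 4>> tmp(n);
  dealii::vectorized_load_and_transpose(n, src, offsets, tmp.data());
  dealii::vectorized_transpose_and_store(false, n, tmp.data(), offsets, dst);
}
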
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_transpose_and_store(const bool add_into, - const unsigned int n_entries, - const VectorizedArray *in, - std::array & out) +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) { // see the comments in the vectorized_transpose_and_store above const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44); - __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee); - __m512 t2 = - _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44); - __m512 t3 = - _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee); - __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88); - __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd); - __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88); - __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd); - - __m128 res0 = _mm512_extractf32x4_ps(u0, 0); - __m128 res4 = _mm512_extractf32x4_ps(u0, 1); - __m128 res8 = _mm512_extractf32x4_ps(u0, 2); - __m128 res12 = _mm512_extractf32x4_ps(u0, 3); - __m128 res1 = _mm512_extractf32x4_ps(u1, 0); - __m128 res5 = _mm512_extractf32x4_ps(u1, 1); - __m128 res9 = _mm512_extractf32x4_ps(u1, 2); - __m128 res13 = _mm512_extractf32x4_ps(u1, 3); - __m128 res2 = _mm512_extractf32x4_ps(u2, 0); - __m128 res6 = _mm512_extractf32x4_ps(u2, 1); - __m128 res10 = _mm512_extractf32x4_ps(u2, 2); - __m128 res14 = _mm512_extractf32x4_ps(u2, 3); - __m128 res3 = _mm512_extractf32x4_ps(u3, 0); - __m128 res7 = _mm512_extractf32x4_ps(u3, 1); - __m128 res11 = _mm512_extractf32x4_ps(u3, 2); - __m128 res15 = _mm512_extractf32x4_ps(u3, 3); + __m128 u0 = in[4 * i + 0].data; + __m128 u1 = in[4 * i + 1].data; + __m128 u2 = in[4 * i + 2].data; + __m128 u3 = in[4 * i + 3].data; + __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44); + __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee); + __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44); + __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee); + u0 = _mm_shuffle_ps(t0, t2, 0x88); + u1 = _mm_shuffle_ps(t0, t2, 0xdd); + u2 = _mm_shuffle_ps(t1, t3, 0x88); + u3 = _mm_shuffle_ps(t1, t3, 0xdd); if (add_into) { - res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0); - _mm_storeu_ps(out[0] + 4 * i, res0); - res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1); - _mm_storeu_ps(out[1] + 4 * i, res1); - res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2); - _mm_storeu_ps(out[2] + 4 * i, res2); - res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3); - _mm_storeu_ps(out[3] + 4 * i, res3); - res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4); - _mm_storeu_ps(out[4] + 4 * i, res4); - res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5); - _mm_storeu_ps(out[5] + 4 * i, res5); - res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6); - _mm_storeu_ps(out[6] + 4 * i, res6); - res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7); - _mm_storeu_ps(out[7] + 4 * i, res7); - res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8); - _mm_storeu_ps(out[8] + 4 * i, res8); - res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9); - _mm_storeu_ps(out[9] + 4 * i, res9); - res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10); - _mm_storeu_ps(out[10] + 4 * i, res10); - res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11); - _mm_storeu_ps(out[11] + 4 * i, res11); - res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12); - _mm_storeu_ps(out[12] + 4 * i, res12); - res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13); - 
_mm_storeu_ps(out[13] + 4 * i, res13); - res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14); - _mm_storeu_ps(out[14] + 4 * i, res14); - res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15); - _mm_storeu_ps(out[15] + 4 * i, res15); + u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0); + _mm_storeu_ps(out[0] + 4 * i, u0); + u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1); + _mm_storeu_ps(out[1] + 4 * i, u1); + u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2); + _mm_storeu_ps(out[2] + 4 * i, u2); + u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3); + _mm_storeu_ps(out[3] + 4 * i, u3); } else { - _mm_storeu_ps(out[0] + 4 * i, res0); - _mm_storeu_ps(out[1] + 4 * i, res1); - _mm_storeu_ps(out[2] + 4 * i, res2); - _mm_storeu_ps(out[3] + 4 * i, res3); - _mm_storeu_ps(out[4] + 4 * i, res4); - _mm_storeu_ps(out[5] + 4 * i, res5); - _mm_storeu_ps(out[6] + 4 * i, res6); - _mm_storeu_ps(out[7] + 4 * i, res7); - _mm_storeu_ps(out[8] + 4 * i, res8); - _mm_storeu_ps(out[9] + 4 * i, res9); - _mm_storeu_ps(out[10] + 4 * i, res10); - _mm_storeu_ps(out[11] + 4 * i, res11); - _mm_storeu_ps(out[12] + 4 * i, res12); - _mm_storeu_ps(out[13] + 4 * i, res13); - _mm_storeu_ps(out[14] + 4 * i, res14); - _mm_storeu_ps(out[15] + 4 * i, res15); + _mm_storeu_ps(out[0] + 4 * i, u0); + _mm_storeu_ps(out[1] + 4 * i, u1); + _mm_storeu_ps(out[2] + 4 * i, u2); + _mm_storeu_ps(out[3] + 4 * i, u3); } } if (add_into) for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 16; ++v) + for (unsigned int v = 0; v < 4; ++v) out[v][i] += in[i][v]; else for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 16; ++v) + for (unsigned int v = 0; v < 4; ++v) out[v][i] = in[i][v]; } -# endif + + +# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__) # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__) @@ -2537,7 +2217,12 @@ public: * this->data[i]$. */ double - sum(); + sum() + { + VectorizedArray t1; + t1.data = _mm_add_pd(this->get_lower(), this->get_upper()); + return t1.sum(); + } /** * Actual data field. To be consistent with the standard layout type and to @@ -3096,7 +2781,12 @@ public: * this->data[i]$. */ float - sum(); + sum() + { + VectorizedArray t1; + t1.data = _mm_add_ps(this->get_lower(), this->get_upper()); + return t1.sum(); + } /** * Actual data field. To be consistent with the standard layout type and to @@ -3446,14 +3136,17 @@ vectorized_transpose_and_store(const bool add_into, # endif -# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__) +// for safety, also check that __AVX512F__ is defined in case the user manually +// set some conflicting compile flags which prevent compilation + +# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__) /** - * Specialization for double and SSE2. + * Specialization of VectorizedArray class for double and AVX-512. */ template <> -class VectorizedArray - : public VectorizedArrayBase, 2> +class VectorizedArray + : public VectorizedArrayBase, 8> { public: /** @@ -3480,7 +3173,7 @@ public: */ template VectorizedArray(const std::initializer_list &list) - : VectorizedArrayBase, 2>(list) + : VectorizedArrayBase, 8>(list) {} /** @@ -3490,10 +3183,11 @@ public: VectorizedArray & operator=(const double x) & { - data = _mm_set1_pd(x); + data = _mm512_set1_pd(x); return *this; } + /** * Assign a scalar to the current object. 
This overload is used for * rvalue references; because it does not make sense to assign @@ -3509,7 +3203,7 @@ public: double & operator[](const unsigned int comp) { - AssertIndexRange(comp, 2); + AssertIndexRange(comp, 8); return *(reinterpret_cast(&data) + comp); } @@ -3520,7 +3214,7 @@ public: const double & operator[](const unsigned int comp) const { - AssertIndexRange(comp, 2); + AssertIndexRange(comp, 8); return *(reinterpret_cast(&data) + comp); } @@ -3531,10 +3225,15 @@ public: VectorizedArray & operator+=(const VectorizedArray &vec) { + // if the compiler supports vector arithmetic, we can simply use += + // operator on the given data type. this allows the compiler to combine + // additions with multiplication (fused multiply-add) if those + // instructions are available. Otherwise, we need to use the built-in + // intrinsic command for __m512d # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data += vec.data; # else - data = _mm_add_pd(data, vec.data); + data = _mm512_add_pd(data, vec.data); # endif return *this; } @@ -3549,11 +3248,10 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data -= vec.data; # else - data = _mm_sub_pd(data, vec.data); + data = _mm512_sub_pd(data, vec.data); # endif return *this; } - /** * Multiplication. */ @@ -3564,7 +3262,7 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data *= vec.data; # else - data = _mm_mul_pd(data, vec.data); + data = _mm512_mul_pd(data, vec.data); # endif return *this; } @@ -3579,65 +3277,61 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data /= vec.data; # else - data = _mm_div_pd(data, vec.data); + data = _mm512_div_pd(data, vec.data); # endif return *this; } /** - * Load @p size() from memory into the calling class, starting at - * the given address. The memory need not be aligned by 16 bytes, as opposed + * Load size() data items from memory into the calling class, starting at + * the given address. The memory need not be aligned by 64 bytes, as opposed * to casting a double address to VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void load(const double *ptr) { - data = _mm_loadu_pd(ptr); + data = _mm512_loadu_pd(ptr); } DEAL_II_ALWAYS_INLINE void load(const float *ptr) { - DEAL_II_OPENMP_SIMD_PRAGMA - for (unsigned int i = 0; i < 2; ++i) - data[i] = ptr[i]; + data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr)); } /** * Write the content of the calling class into memory in form of @p * size() to the given address. The memory need not be aligned by - * 16 bytes, as opposed to casting a double address to + * 64 bytes, as opposed to casting a double address to * VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void store(double *ptr) const { - _mm_storeu_pd(ptr, data); + _mm512_storeu_pd(ptr, data); } DEAL_II_ALWAYS_INLINE void store(float *ptr) const { - DEAL_II_OPENMP_SIMD_PRAGMA - for (unsigned int i = 0; i < 2; ++i) - ptr[i] = data[i]; + _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data)); } /** * @copydoc VectorizedArray::streaming_store() - * @note Memory must be aligned by 16 bytes. + * @note Memory must be aligned by 64 bytes. 
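
// Streaming stores require the alignment stated above. One plain C++17
// way to obtain a suitably aligned buffer (requires an AVX-512 capable
// compiler and CPU; deal.II itself typically uses AlignedVector for this
// purpose, and the buffer size here is only for the sketch):
#include <immintrin.h>
#include <cstdlib>

int main()
{
  const std::size_t n_doubles = 64; // 512 bytes, a multiple of the 64-byte alignment
  double *buf = static_cast<double *>(
    std::aligned_alloc(64, n_doubles * sizeof(double)));
  if (buf != nullptr)
    {
      const __m512d ones = _mm512_set1_pd(1.0);
      for (std::size_t i = 0; i < n_doubles; i += 8)
        _mm512_stream_pd(buf + i, ones); // non-temporal store, bypasses the cache
      _mm_sfence();                      // order the streaming stores before reuse
      std::free(buf);
    }
  return 0;
}
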
*/ DEAL_II_ALWAYS_INLINE void streaming_store(double *ptr) const { - Assert(reinterpret_cast(ptr) % 16 == 0, + Assert(reinterpret_cast(ptr) % 64 == 0, ExcMessage("Memory not aligned")); - _mm_stream_pd(ptr, data); + _mm512_stream_pd(ptr, data); } /** @@ -3656,8 +3350,20 @@ public: void gather(const double *base_ptr, const unsigned int *offsets) { - for (unsigned int i = 0; i < 2; ++i) - *(reinterpret_cast(&data) + i) = base_ptr[offsets[i]]; + // unfortunately, there does not appear to be a 256 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m256 index_val = + _mm256_loadu_ps(reinterpret_cast(offsets)); + const __m256i index = *reinterpret_cast(&index_val); + + // work around a warning with gcc-12 about an uninitialized initial state + // for gather by starting with a zero guess, even though all lanes will be + // overwritten + __m512d zero = {}; + __mmask8 mask = 0xFF; + + data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8); } /** @@ -3676,8 +3382,19 @@ public: void scatter(const unsigned int *offsets, double *base_ptr) const { - for (unsigned int i = 0; i < 2; ++i) - base_ptr[offsets[i]] = *(reinterpret_cast(&data) + i); + for (unsigned int i = 0; i < 8; ++i) + for (unsigned int j = i + 1; j < 8; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // unfortunately, there does not appear to be a 256 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m256 index_val = + _mm256_loadu_ps(reinterpret_cast(offsets)); + const __m256i index = *reinterpret_cast(&index_val); + _mm512_i32scatter_pd(base_ptr, index, data, 8); } /** @@ -3685,16 +3402,41 @@ public: * this->data[i]$. */ double - sum(); + sum() + { + VectorizedArray t1; + t1.data = _mm256_add_pd(this->get_lower(), this->get_upper()); + return t1.sum(); + } /** * Actual data field. To be consistent with the standard layout type and to * enable interaction with external SIMD functionality, this member is * declared public. */ - __m128d data; + __m512d data; private: + /** + * Extract lower half of data field. + */ + DEAL_II_ALWAYS_INLINE + __m256d + get_lower() const + { + return _mm512_castpd512_pd256(data); + } + + /** + * Extract upper half of data field. + */ + DEAL_II_ALWAYS_INLINE + __m256d + get_upper() const + { + return _mm512_extractf64x4_pd(data, 1); + } + /** * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. @@ -3704,7 +3446,7 @@ private: get_sqrt() const { VectorizedArray res; - res.data = _mm_sqrt_pd(data); + res.data = _mm512_sqrt_pd(data); return res; } @@ -3716,13 +3458,16 @@ private: VectorizedArray get_abs() const { - // to compute the absolute value, perform - // bitwise andnot with -0. This will leave all - // value and exponent bits unchanged but force - // the sign value to +. - __m128d mask = _mm_set1_pd(-0.); + // to compute the absolute value, perform bitwise andnot with -0. This + // will leave all value and exponent bits unchanged but force the sign + // value to +. 
Since there is no andnot for AVX512, we interpret the data + // as 64 bit integers and do the andnot on those types (note that andnot + // is a bitwise operation so the data type does not matter) + __m512d mask = _mm512_set1_pd(-0.); VectorizedArray res; - res.data = _mm_andnot_pd(mask, data); + res.data = reinterpret_cast<__m512d>( + _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask), + reinterpret_cast<__m512i>(data))); return res; } @@ -3735,7 +3480,7 @@ private: get_max(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm_max_pd(data, other.data); + res.data = _mm512_max_pd(data, other.data); return res; } @@ -3748,7 +3493,7 @@ private: get_min(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm_min_pd(data, other.data); + res.data = _mm512_min_pd(data, other.data); return res; } @@ -3772,168 +3517,256 @@ private: /** - * Specialization for double and SSE2. + * Specialization for double and AVX-512. */ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_load_and_transpose(const unsigned int n_entries, const double * in, const unsigned int * offsets, - VectorizedArray *out) + VectorizedArray *out) { - const unsigned int n_chunks = n_entries / 2; + // do not do full transpose because the code is long and will most + // likely not pay off because many processors have two load units + // (for the top 8 instructions) but only 1 permute unit (for the 8 + // shuffle/unpack instructions). rather start the transposition on the + // vectorized array of half the size with 256 bits + const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]); - __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]); - out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1); - out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1); - } + __m512d t0, t1, t2, t3 = {}; - // remainder loop of work that does not divide by 2 - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[i][v] = in[offsets[v] + i]; + t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0); + t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1); + t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0); + t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1); + t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0); + t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1); + + __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88); + __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd); + __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88); + __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd); + out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2); + out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2); + out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3); + out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3); + } + // remainder loop of work that does not divide by 4 + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + out[i].gather(in + i, offsets); } /** - * Specialization for double and SSE2. + * Specialization for double and AVX-512. 
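
// Illustration of the abs-via-andnot trick used by the get_abs()
// implementations above: -0.0 has only the sign bit set, so andnot clears
// the sign and leaves exponent and mantissa untouched (SSE2 shown for
// brevity; variable names are only for this sketch).
#include <emmintrin.h>
#include <cstdio>

int main()
{
  const __m128d x    = _mm_set_pd(-2.5, 3.25); // lanes {3.25, -2.5}
  const __m128d mask = _mm_set1_pd(-0.0);      // sign bit only
  const __m128d ax   = _mm_andnot_pd(mask, x); // |x| per lane
  double out[2];
  _mm_storeu_pd(out, ax);
  std::printf("%g %g\n", out[0], out[1]); // prints 3.25 2.5
  return 0;
}
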
*/ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_load_and_transpose(const unsigned int n_entries, - const std::array &in, - VectorizedArray * out) + const std::array &in, + VectorizedArray * out) { - // see the comments in the vectorized_load_and_transpose above - - const unsigned int n_chunks = n_entries / 2; + const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128d u0 = _mm_loadu_pd(in[0] + 2 * i); - __m128d u1 = _mm_loadu_pd(in[1] + 2 * i); - out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1); - out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1); + __m512d t0, t1, t2, t3 = {}; + + t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0); + t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1); + t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0); + t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1); + t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0); + t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0); + t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1); + + __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88); + __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd); + __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88); + __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd); + out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2); + out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2); + out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3); + out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3); } - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[i][v] = in[v][i]; + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + gather(out[i], in, i); } /** - * Specialization for double and SSE2. + * Specialization for double and AVX-512. 
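 *
 * In scalar form, the operation implemented below is equivalent to the
 * following sketch (it is what the remainder loop at the end does for the
 * entries that do not fill a complete chunk):
 * @code
 *   for (unsigned int i = 0; i < n_entries; ++i)
 *     for (unsigned int v = 0; v < 8; ++v)
 *       if (add_into)
 *         out[offsets[v] + i] += in[i][v];
 *       else
 *         out[offsets[v] + i] = in[i][v];
 * @endcode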
*/ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, - const VectorizedArray *in, + const VectorizedArray *in, const unsigned int * offsets, double * out) { - const unsigned int n_chunks = n_entries / 2; - if (add_into) + // as for the load, we split the store operations into 256 bit units to + // better balance between code size, shuffle instructions, and stores + const unsigned int n_chunks = n_entries / 4; + __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0); + __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2); + for (unsigned int i = 0; i < n_chunks; ++i) { - for (unsigned int i = 0; i < n_chunks; ++i) + __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2); + __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2); + __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3); + __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3); + __m256d res0 = _mm512_extractf64x4_pd(v0, 0); + __m256d res4 = _mm512_extractf64x4_pd(v0, 1); + __m256d res1 = _mm512_extractf64x4_pd(v2, 0); + __m256d res5 = _mm512_extractf64x4_pd(v2, 1); + __m256d res2 = _mm512_extractf64x4_pd(v1, 0); + __m256d res6 = _mm512_extractf64x4_pd(v1, 1); + __m256d res3 = _mm512_extractf64x4_pd(v3, 0); + __m256d res7 = _mm512_extractf64x4_pd(v3, 1); + + // Cannot use the same store instructions in both paths of the 'if' + // because the compiler cannot know that there is no aliasing + // between pointers + if (add_into) { - __m128d u0 = in[2 * i + 0].data; - __m128d u1 = in[2 * i + 1].data; - __m128d res0 = _mm_unpacklo_pd(u0, u1); - __m128d res1 = _mm_unpackhi_pd(u0, u1); - _mm_storeu_pd(out + 2 * i + offsets[0], - _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]), - res0)); - _mm_storeu_pd(out + 2 * i + offsets[1], - _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]), - res1)); + res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0); + _mm256_storeu_pd(out + 4 * i + offsets[0], res0); + res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1); + _mm256_storeu_pd(out + 4 * i + offsets[1], res1); + res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2); + _mm256_storeu_pd(out + 4 * i + offsets[2], res2); + res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3); + _mm256_storeu_pd(out + 4 * i + offsets[3], res3); + res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4); + _mm256_storeu_pd(out + 4 * i + offsets[4], res4); + res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5); + _mm256_storeu_pd(out + 4 * i + offsets[5], res5); + res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6); + _mm256_storeu_pd(out + 4 * i + offsets[6], res6); + res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7); + _mm256_storeu_pd(out + 4 * i + offsets[7], res7); } - // remainder loop of work that does not divide by 2 - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[offsets[v] + i] += in[i][v]; - } - else - { - for (unsigned int i = 0; i < n_chunks; ++i) + else { - __m128d u0 = in[2 * i + 0].data; - __m128d u1 = in[2 * i + 1].data; - __m128d res0 = _mm_unpacklo_pd(u0, u1); - __m128d res1 
= _mm_unpackhi_pd(u0, u1); - _mm_storeu_pd(out + 2 * i + offsets[0], res0); - _mm_storeu_pd(out + 2 * i + offsets[1], res1); + _mm256_storeu_pd(out + 4 * i + offsets[0], res0); + _mm256_storeu_pd(out + 4 * i + offsets[1], res1); + _mm256_storeu_pd(out + 4 * i + offsets[2], res2); + _mm256_storeu_pd(out + 4 * i + offsets[3], res3); + _mm256_storeu_pd(out + 4 * i + offsets[4], res4); + _mm256_storeu_pd(out + 4 * i + offsets[5], res5); + _mm256_storeu_pd(out + 4 * i + offsets[6], res6); + _mm256_storeu_pd(out + 4 * i + offsets[7], res7); } - // remainder loop of work that does not divide by 2 - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[offsets[v] + i] = in[i][v]; } + + // remainder loop of work that does not divide by 4 + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[offsets[v] + i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[offsets[v] + i] = in[i][v]; } /** - * Specialization for double and SSE2. + * Specialization for double and AVX-512. */ template <> inline DEAL_II_ALWAYS_INLINE void vectorized_transpose_and_store(const bool add_into, const unsigned int n_entries, - const VectorizedArray *in, - std::array & out) + const VectorizedArray *in, + std::array & out) { // see the comments in the vectorized_transpose_and_store above - const unsigned int n_chunks = n_entries / 2; - if (add_into) + const unsigned int n_chunks = n_entries / 4; + __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0); + __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2); + for (unsigned int i = 0; i < n_chunks; ++i) { - for (unsigned int i = 0; i < n_chunks; ++i) + __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data); + __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data); + __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2); + __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2); + __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3); + __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3); + __m256d res0 = _mm512_extractf64x4_pd(v0, 0); + __m256d res4 = _mm512_extractf64x4_pd(v0, 1); + __m256d res1 = _mm512_extractf64x4_pd(v2, 0); + __m256d res5 = _mm512_extractf64x4_pd(v2, 1); + __m256d res2 = _mm512_extractf64x4_pd(v1, 0); + __m256d res6 = _mm512_extractf64x4_pd(v1, 1); + __m256d res3 = _mm512_extractf64x4_pd(v3, 0); + __m256d res7 = _mm512_extractf64x4_pd(v3, 1); + + if (add_into) { - __m128d u0 = in[2 * i + 0].data; - __m128d u1 = in[2 * i + 1].data; - __m128d res0 = _mm_unpacklo_pd(u0, u1); - __m128d res1 = _mm_unpackhi_pd(u0, u1); - _mm_storeu_pd(out[0] + 2 * i, - _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0)); - _mm_storeu_pd(out[1] + 2 * i, - _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1)); + res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0); + _mm256_storeu_pd(out[0] + 4 * i, res0); + res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1); + _mm256_storeu_pd(out[1] + 4 * i, res1); + res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2); + _mm256_storeu_pd(out[2] + 4 * i, res2); + res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3); + _mm256_storeu_pd(out[3] + 4 * i, res3); + res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4); + _mm256_storeu_pd(out[4] + 
4 * i, res4); + res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5); + _mm256_storeu_pd(out[5] + 4 * i, res5); + res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6); + _mm256_storeu_pd(out[6] + 4 * i, res6); + res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7); + _mm256_storeu_pd(out[7] + 4 * i, res7); } - - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[v][i] += in[i][v]; - } - else - { - for (unsigned int i = 0; i < n_chunks; ++i) + else { - __m128d u0 = in[2 * i + 0].data; - __m128d u1 = in[2 * i + 1].data; - __m128d res0 = _mm_unpacklo_pd(u0, u1); - __m128d res1 = _mm_unpackhi_pd(u0, u1); - _mm_storeu_pd(out[0] + 2 * i, res0); - _mm_storeu_pd(out[1] + 2 * i, res1); + _mm256_storeu_pd(out[0] + 4 * i, res0); + _mm256_storeu_pd(out[1] + 4 * i, res1); + _mm256_storeu_pd(out[2] + 4 * i, res2); + _mm256_storeu_pd(out[3] + 4 * i, res3); + _mm256_storeu_pd(out[4] + 4 * i, res4); + _mm256_storeu_pd(out[5] + 4 * i, res5); + _mm256_storeu_pd(out[6] + 4 * i, res6); + _mm256_storeu_pd(out[7] + 4 * i, res7); } - - for (unsigned int i = 2 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 2; ++v) - out[v][i] = in[i][v]; } + + if (add_into) + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] += in[i][v]; + else + for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) + for (unsigned int v = 0; v < 8; ++v) + out[v][i] = in[i][v]; } /** - * Specialization for float and SSE2. + * Specialization for float and AVX512. */ template <> -class VectorizedArray - : public VectorizedArrayBase, 4> +class VectorizedArray + : public VectorizedArrayBase, 16> { public: /** @@ -3941,10 +3774,6 @@ public: */ using value_type = float; - /** - * This function can be used to set all data fields to a given scalar. - */ - /** * Default empty constructor, leaving the data in an uninitialized state * similar to float/double. @@ -3964,14 +3793,17 @@ public: */ template VectorizedArray(const std::initializer_list &list) - : VectorizedArrayBase, 4>(list) + : VectorizedArrayBase, 16>(list) {} + /** + * This function can be used to set all data fields to a given scalar. + */ DEAL_II_ALWAYS_INLINE VectorizedArray & operator=(const float x) & { - data = _mm_set1_ps(x); + data = _mm512_set1_ps(x); return *this; } @@ -3990,7 +3822,7 @@ public: float & operator[](const unsigned int comp) { - AssertIndexRange(comp, 4); + AssertIndexRange(comp, 16); return *(reinterpret_cast(&data) + comp); } @@ -4001,7 +3833,7 @@ public: const float & operator[](const unsigned int comp) const { - AssertIndexRange(comp, 4); + AssertIndexRange(comp, 16); return *(reinterpret_cast(&data) + comp); } @@ -4012,10 +3844,15 @@ public: VectorizedArray & operator+=(const VectorizedArray &vec) { + // if the compiler supports vector arithmetic, we can simply use += + // operator on the given data type. this allows the compiler to combine + // additions with multiplication (fused multiply-add) if those + // instructions are available. Otherwise, we need to use the built-in + // intrinsic command for __m512d # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data += vec.data; # else - data = _mm_add_ps(data, vec.data); + data = _mm512_add_ps(data, vec.data); # endif return *this; } @@ -4030,11 +3867,10 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data -= vec.data; # else - data = _mm_sub_ps(data, vec.data); + data = _mm512_sub_ps(data, vec.data); # endif return *this; } - /** * Multiplication. 
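 *
 * (When DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS is defined, expressions such
 * as a * b + c on the underlying data allow the compiler to contract the two
 * operations into a fused multiply-add; with explicit intrinsics one would
 * have to request this by hand, e.g. in a sketch like
 * @code
 *   __m512 r = _mm512_fmadd_ps(a.data, b.data, c.data); // r = a*b + c
 * @endcode
 * whereas separate _mm512_mul_ps/_mm512_add_ps calls are typically not
 * contracted.)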
*/ @@ -4045,7 +3881,7 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data *= vec.data; # else - data = _mm_mul_ps(data, vec.data); + data = _mm512_mul_ps(data, vec.data); # endif return *this; } @@ -4060,47 +3896,47 @@ public: # ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS data /= vec.data; # else - data = _mm_div_ps(data, vec.data); + data = _mm512_div_ps(data, vec.data); # endif return *this; } /** * Load @p size() from memory into the calling class, starting at - * the given address. The memory need not be aligned by 16 bytes, as opposed + * the given address. The memory need not be aligned by 64 bytes, as opposed * to casting a float address to VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void load(const float *ptr) { - data = _mm_loadu_ps(ptr); + data = _mm512_loadu_ps(ptr); } /** * Write the content of the calling class into memory in form of @p * size() to the given address. The memory need not be aligned by - * 16 bytes, as opposed to casting a float address to + * 64 bytes, as opposed to casting a float address to * VectorizedArray*. */ DEAL_II_ALWAYS_INLINE void store(float *ptr) const { - _mm_storeu_ps(ptr, data); + _mm512_storeu_ps(ptr, data); } /** * @copydoc VectorizedArray::streaming_store() - * @note Memory must be aligned by 16 bytes. + * @note Memory must be aligned by 64 bytes. */ DEAL_II_ALWAYS_INLINE void streaming_store(float *ptr) const { - Assert(reinterpret_cast(ptr) % 16 == 0, + Assert(reinterpret_cast(ptr) % 64 == 0, ExcMessage("Memory not aligned")); - _mm_stream_ps(ptr, data); + _mm512_stream_ps(ptr, data); } /** @@ -4119,8 +3955,20 @@ public: void gather(const float *base_ptr, const unsigned int *offsets) { - for (unsigned int i = 0; i < 4; ++i) - *(reinterpret_cast(&data) + i) = base_ptr[offsets[i]]; + // unfortunately, there does not appear to be a 512 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m512 index_val = + _mm512_loadu_ps(reinterpret_cast(offsets)); + const __m512i index = *reinterpret_cast(&index_val); + + // work around a warning with gcc-12 about an uninitialized initial state + // for gather by starting with a zero guess, even though all lanes will be + // overwritten + __m512 zero = {}; + __mmask16 mask = 0xFFFF; + + data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4); } /** @@ -4139,8 +3987,19 @@ public: void scatter(const unsigned int *offsets, float *base_ptr) const { - for (unsigned int i = 0; i < 4; ++i) - base_ptr[offsets[i]] = *(reinterpret_cast(&data) + i); + for (unsigned int i = 0; i < 16; ++i) + for (unsigned int j = i + 1; j < 16; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // unfortunately, there does not appear to be a 512 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m512 index_val = + _mm512_loadu_ps(reinterpret_cast(offsets)); + const __m512i index = *reinterpret_cast(&index_val); + _mm512_i32scatter_ps(base_ptr, index, data, 4); } /** @@ -4148,16 +4007,41 @@ public: * this->data[i]$. */ float - sum(); + sum() + { + VectorizedArray t1; + t1.data = _mm256_add_ps(this->get_lower(), this->get_upper()); + return t1.sum(); + } /** * Actual data field. 
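 * External SIMD code can operate on this member directly; a minimal sketch
 * (assuming an AVX-512 build with 16 float lanes):
 * @code
 *   VectorizedArray<float> v;
 *   v = 2.f;                                          // broadcast to all lanes
 *   v.data = _mm512_fmadd_ps(v.data, v.data, v.data); // v <- v*v + v
 * @endcode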
To be consistent with the standard layout type and to * enable interaction with external SIMD functionality, this member is * declared public. */ - __m128 data; + __m512 data; private: + /** + * Extract lower half of data field. + */ + DEAL_II_ALWAYS_INLINE + __m256 + get_lower() const + { + return _mm512_castps512_ps256(data); + } + + /** + * Extract upper half of data field. + */ + DEAL_II_ALWAYS_INLINE + __m256 + get_upper() const + { + return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1)); + } + /** * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. @@ -4167,7 +4051,7 @@ private: get_sqrt() const { VectorizedArray res; - res.data = _mm_sqrt_ps(data); + res.data = _mm512_sqrt_ps(data); return res; } @@ -4181,10 +4065,14 @@ private: { // to compute the absolute value, perform bitwise andnot with -0. This // will leave all value and exponent bits unchanged but force the sign - // value to +. - __m128 mask = _mm_set1_ps(-0.f); + // value to +. Since there is no andnot for AVX512, we interpret the data + // as 32 bit integers and do the andnot on those types (note that andnot + // is a bitwise operation so the data type does not matter) + __m512 mask = _mm512_set1_ps(-0.f); VectorizedArray res; - res.data = _mm_andnot_ps(mask, data); + res.data = reinterpret_cast<__m512>( + _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask), + reinterpret_cast<__m512i>(data))); return res; } @@ -4197,7 +4085,7 @@ private: get_max(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm_max_ps(data, other.data); + res.data = _mm512_max_ps(data, other.data); return res; } @@ -4210,7 +4098,7 @@ private: get_min(const VectorizedArray &other) const { VectorizedArray res; - res.data = _mm_min_ps(data, other.data); + res.data = _mm512_min_ps(data, other.data); return res; } @@ -4234,199 +4122,343 @@ private: /** - * Specialization for float and SSE2. + * Specialization for float and AVX-512. */ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_load_and_transpose(const unsigned int n_entries, - const float * in, - const unsigned int * offsets, - VectorizedArray *out) +vectorized_load_and_transpose(const unsigned int n_entries, + const float * in, + const unsigned int * offsets, + VectorizedArray *out) { + // Similar to the double case, we perform the work on smaller entities. In + // this case, we start from 128 bit arrays and insert them into a full 512 + // bit index. This reduces the code size and register pressure because we do + // shuffles on 4 numbers rather than 16. const unsigned int n_chunks = n_entries / 4; + + // To avoid warnings about uninitialized variables, need to initialize one + // variable to a pre-exisiting value in out, which will never get used in + // the end. 
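  // (For illustration only: the zero-initialized alternative avoided here
  // would read
  //   __m512 t0, t1, t2, t3 = {};
  // while the code below leaves the variables uninitialized and seeds t3
  // from out[0].data once before the loop.)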
Keep the initialization outside the loop because of a bug in + // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in + // case t3 is initialized to zero (inside/outside of loop), see + // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991 + __m512 t0, t1, t2, t3; + if (n_chunks > 0) + t3 = out[0].data; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]); - __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]); - __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]); - __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]); - __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); - __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); - __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); - __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); - out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88); - out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd); - out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88); - out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd); + t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3); + t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3); + t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3); + + __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44); + __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee); + __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44); + __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee); + + out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd); } // remainder loop of work that does not divide by 4 for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) - out[i][v] = in[offsets[v] + i]; + out[i].gather(in + i, offsets); } /** - * Specialization for float and SSE2. + * Specialization for float and AVX-512. 
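 *
 * The shuffle immediates used in these float specializations implement a
 * 4 x 4 transpose within each 128-bit block. For a single block the pattern
 * reads (a sketch with hypothetical inputs u0..u3):
 * @code
 *   // u0 = {a0,a1,a2,a3}, u1 = {b0,...}, u2 = {c0,...}, u3 = {d0,...}
 *   __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); // {a0,a1,b0,b1}
 *   __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); // {a2,a3,b2,b3}
 *   __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); // {c0,c1,d0,d1}
 *   __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); // {c2,c3,d2,d3}
 *   __m128 r0 = _mm_shuffle_ps(v0, v2, 0x88); // {a0,b0,c0,d0}
 *   __m128 r1 = _mm_shuffle_ps(v0, v2, 0xdd); // {a1,b1,c1,d1}
 *   __m128 r2 = _mm_shuffle_ps(v1, v3, 0x88); // {a2,b2,c2,d2}
 *   __m128 r3 = _mm_shuffle_ps(v1, v3, 0xdd); // {a3,b3,c3,d3}
 * @endcode
 * _mm512_shuffle_ps applies the same per-128-bit-lane selection to all four
 * lanes at once.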
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_load_and_transpose(const unsigned int n_entries, - const std::array &in, - VectorizedArray * out) +vectorized_load_and_transpose(const unsigned int n_entries, + const std::array &in, + VectorizedArray * out) { // see the comments in the vectorized_load_and_transpose above const unsigned int n_chunks = n_entries / 4; + + __m512 t0, t1, t2, t3; + if (n_chunks > 0) + t3 = out[0].data; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128 u0 = _mm_loadu_ps(in[0] + 4 * i); - __m128 u1 = _mm_loadu_ps(in[1] + 4 * i); - __m128 u2 = _mm_loadu_ps(in[2] + 4 * i); - __m128 u3 = _mm_loadu_ps(in[3] + 4 * i); - __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44); - __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee); - __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44); - __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee); - out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88); - out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd); - out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88); - out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd); + t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2); + t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3); + t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2); + t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3); + t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2); + t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2); + t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3); + + __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44); + __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee); + __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44); + __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee); + + out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88); + out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd); + out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88); + out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd); } for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) - out[i][v] = in[v][i]; + gather(out[i], in, i); } /** - * Specialization for float and SSE2. + * Specialization for float and AVX-512. 
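 *
 * A round-trip usage sketch (hypothetical data, assuming an AVX-512 build
 * with 16 float lanes and <vector> included): loading with
 * vectorized_load_and_transpose and storing back with add_into = true
 * doubles every entry addressed by the offsets:
 * @code
 *   std::vector<float> data(16 * 4, 1.f);
 *   unsigned int       offsets[16];
 *   for (unsigned int v = 0; v < 16; ++v)
 *     offsets[v] = 4 * v;
 *   VectorizedArray<float> tmp[4];
 *   vectorized_load_and_transpose(4, data.data(), offsets, tmp);
 *   vectorized_transpose_and_store(true, 4, tmp, offsets, data.data());
 *   // every entry of 'data' is now 2.f
 * @endcode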
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_transpose_and_store(const bool add_into, - const unsigned int n_entries, - const VectorizedArray *in, - const unsigned int * offsets, - float * out) +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + const unsigned int * offsets, + float * out) { const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128 u0 = in[4 * i + 0].data; - __m128 u1 = in[4 * i + 1].data; - __m128 u2 = in[4 * i + 2].data; - __m128 u3 = in[4 * i + 3].data; - __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44); - __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee); - __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44); - __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee); - u0 = _mm_shuffle_ps(t0, t2, 0x88); - u1 = _mm_shuffle_ps(t0, t2, 0xdd); - u2 = _mm_shuffle_ps(t1, t3, 0x88); - u3 = _mm_shuffle_ps(t1, t3, 0xdd); + __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44); + __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee); + __m512 t2 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44); + __m512 t3 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee); + __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88); + __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd); + __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88); + __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd); + + __m128 res0 = _mm512_extractf32x4_ps(u0, 0); + __m128 res4 = _mm512_extractf32x4_ps(u0, 1); + __m128 res8 = _mm512_extractf32x4_ps(u0, 2); + __m128 res12 = _mm512_extractf32x4_ps(u0, 3); + __m128 res1 = _mm512_extractf32x4_ps(u1, 0); + __m128 res5 = _mm512_extractf32x4_ps(u1, 1); + __m128 res9 = _mm512_extractf32x4_ps(u1, 2); + __m128 res13 = _mm512_extractf32x4_ps(u1, 3); + __m128 res2 = _mm512_extractf32x4_ps(u2, 0); + __m128 res6 = _mm512_extractf32x4_ps(u2, 1); + __m128 res10 = _mm512_extractf32x4_ps(u2, 2); + __m128 res14 = _mm512_extractf32x4_ps(u2, 3); + __m128 res3 = _mm512_extractf32x4_ps(u3, 0); + __m128 res7 = _mm512_extractf32x4_ps(u3, 1); + __m128 res11 = _mm512_extractf32x4_ps(u3, 2); + __m128 res15 = _mm512_extractf32x4_ps(u3, 3); // Cannot use the same store instructions in both paths of the 'if' // because the compiler cannot know that there is no aliasing between // pointers if (add_into) { - u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0); - _mm_storeu_ps(out + 4 * i + offsets[0], u0); - u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1); - _mm_storeu_ps(out + 4 * i + offsets[1], u1); - u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2); - _mm_storeu_ps(out + 4 * i + offsets[2], u2); - u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3); - _mm_storeu_ps(out + 4 * i + offsets[3], u3); + res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0); + _mm_storeu_ps(out + 4 * i + offsets[0], res0); + res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1); + _mm_storeu_ps(out + 4 * i + offsets[1], res1); + res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2); + _mm_storeu_ps(out + 4 * i + offsets[2], res2); + res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3); + _mm_storeu_ps(out + 4 * i + offsets[3], res3); + res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4); + _mm_storeu_ps(out + 4 * i + offsets[4], res4); + res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5); + _mm_storeu_ps(out + 4 * i + offsets[5], res5); + res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6); + 
_mm_storeu_ps(out + 4 * i + offsets[6], res6); + res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7); + _mm_storeu_ps(out + 4 * i + offsets[7], res7); + res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8); + _mm_storeu_ps(out + 4 * i + offsets[8], res8); + res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9); + _mm_storeu_ps(out + 4 * i + offsets[9], res9); + res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10); + _mm_storeu_ps(out + 4 * i + offsets[10], res10); + res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11); + _mm_storeu_ps(out + 4 * i + offsets[11], res11); + res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12); + _mm_storeu_ps(out + 4 * i + offsets[12], res12); + res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13); + _mm_storeu_ps(out + 4 * i + offsets[13], res13); + res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14); + _mm_storeu_ps(out + 4 * i + offsets[14], res14); + res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15); + _mm_storeu_ps(out + 4 * i + offsets[15], res15); } else { - _mm_storeu_ps(out + 4 * i + offsets[0], u0); - _mm_storeu_ps(out + 4 * i + offsets[1], u1); - _mm_storeu_ps(out + 4 * i + offsets[2], u2); - _mm_storeu_ps(out + 4 * i + offsets[3], u3); + _mm_storeu_ps(out + 4 * i + offsets[0], res0); + _mm_storeu_ps(out + 4 * i + offsets[1], res1); + _mm_storeu_ps(out + 4 * i + offsets[2], res2); + _mm_storeu_ps(out + 4 * i + offsets[3], res3); + _mm_storeu_ps(out + 4 * i + offsets[4], res4); + _mm_storeu_ps(out + 4 * i + offsets[5], res5); + _mm_storeu_ps(out + 4 * i + offsets[6], res6); + _mm_storeu_ps(out + 4 * i + offsets[7], res7); + _mm_storeu_ps(out + 4 * i + offsets[8], res8); + _mm_storeu_ps(out + 4 * i + offsets[9], res9); + _mm_storeu_ps(out + 4 * i + offsets[10], res10); + _mm_storeu_ps(out + 4 * i + offsets[11], res11); + _mm_storeu_ps(out + 4 * i + offsets[12], res12); + _mm_storeu_ps(out + 4 * i + offsets[13], res13); + _mm_storeu_ps(out + 4 * i + offsets[14], res14); + _mm_storeu_ps(out + 4 * i + offsets[15], res15); } } // remainder loop of work that does not divide by 4 if (add_into) for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) + for (unsigned int v = 0; v < 16; ++v) out[offsets[v] + i] += in[i][v]; else for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) + for (unsigned int v = 0; v < 16; ++v) out[offsets[v] + i] = in[i][v]; } /** - * Specialization for float and SSE2. + * Specialization for float and AVX-512. 
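 *
 * This variant writes into 16 separate arrays instead of one offset-strided
 * array; in scalar form it is equivalent to the sketch
 * @code
 *   for (unsigned int i = 0; i < n_entries; ++i)
 *     for (unsigned int v = 0; v < 16; ++v)
 *       if (add_into)
 *         out[v][i] += in[i][v];
 *       else
 *         out[v][i] = in[i][v];
 * @endcode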
*/ template <> inline DEAL_II_ALWAYS_INLINE void -vectorized_transpose_and_store(const bool add_into, - const unsigned int n_entries, - const VectorizedArray *in, - std::array & out) +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + std::array & out) { // see the comments in the vectorized_transpose_and_store above const unsigned int n_chunks = n_entries / 4; for (unsigned int i = 0; i < n_chunks; ++i) { - __m128 u0 = in[4 * i + 0].data; - __m128 u1 = in[4 * i + 1].data; - __m128 u2 = in[4 * i + 2].data; - __m128 u3 = in[4 * i + 3].data; - __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44); - __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee); - __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44); - __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee); - u0 = _mm_shuffle_ps(t0, t2, 0x88); - u1 = _mm_shuffle_ps(t0, t2, 0xdd); - u2 = _mm_shuffle_ps(t1, t3, 0x88); - u3 = _mm_shuffle_ps(t1, t3, 0xdd); + __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44); + __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee); + __m512 t2 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44); + __m512 t3 = + _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee); + __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88); + __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd); + __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88); + __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd); + + __m128 res0 = _mm512_extractf32x4_ps(u0, 0); + __m128 res4 = _mm512_extractf32x4_ps(u0, 1); + __m128 res8 = _mm512_extractf32x4_ps(u0, 2); + __m128 res12 = _mm512_extractf32x4_ps(u0, 3); + __m128 res1 = _mm512_extractf32x4_ps(u1, 0); + __m128 res5 = _mm512_extractf32x4_ps(u1, 1); + __m128 res9 = _mm512_extractf32x4_ps(u1, 2); + __m128 res13 = _mm512_extractf32x4_ps(u1, 3); + __m128 res2 = _mm512_extractf32x4_ps(u2, 0); + __m128 res6 = _mm512_extractf32x4_ps(u2, 1); + __m128 res10 = _mm512_extractf32x4_ps(u2, 2); + __m128 res14 = _mm512_extractf32x4_ps(u2, 3); + __m128 res3 = _mm512_extractf32x4_ps(u3, 0); + __m128 res7 = _mm512_extractf32x4_ps(u3, 1); + __m128 res11 = _mm512_extractf32x4_ps(u3, 2); + __m128 res15 = _mm512_extractf32x4_ps(u3, 3); if (add_into) { - u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0); - _mm_storeu_ps(out[0] + 4 * i, u0); - u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1); - _mm_storeu_ps(out[1] + 4 * i, u1); - u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2); - _mm_storeu_ps(out[2] + 4 * i, u2); - u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3); - _mm_storeu_ps(out[3] + 4 * i, u3); + res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0); + _mm_storeu_ps(out[0] + 4 * i, res0); + res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1); + _mm_storeu_ps(out[1] + 4 * i, res1); + res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2); + _mm_storeu_ps(out[2] + 4 * i, res2); + res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3); + _mm_storeu_ps(out[3] + 4 * i, res3); + res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4); + _mm_storeu_ps(out[4] + 4 * i, res4); + res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5); + _mm_storeu_ps(out[5] + 4 * i, res5); + res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6); + _mm_storeu_ps(out[6] + 4 * i, res6); + res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7); + _mm_storeu_ps(out[7] + 4 * i, res7); + res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8); + _mm_storeu_ps(out[8] + 4 * i, res8); + res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9); + _mm_storeu_ps(out[9] + 4 * i, res9); + res10 = 
_mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10); + _mm_storeu_ps(out[10] + 4 * i, res10); + res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11); + _mm_storeu_ps(out[11] + 4 * i, res11); + res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12); + _mm_storeu_ps(out[12] + 4 * i, res12); + res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13); + _mm_storeu_ps(out[13] + 4 * i, res13); + res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14); + _mm_storeu_ps(out[14] + 4 * i, res14); + res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15); + _mm_storeu_ps(out[15] + 4 * i, res15); } else { - _mm_storeu_ps(out[0] + 4 * i, u0); - _mm_storeu_ps(out[1] + 4 * i, u1); - _mm_storeu_ps(out[2] + 4 * i, u2); - _mm_storeu_ps(out[3] + 4 * i, u3); + _mm_storeu_ps(out[0] + 4 * i, res0); + _mm_storeu_ps(out[1] + 4 * i, res1); + _mm_storeu_ps(out[2] + 4 * i, res2); + _mm_storeu_ps(out[3] + 4 * i, res3); + _mm_storeu_ps(out[4] + 4 * i, res4); + _mm_storeu_ps(out[5] + 4 * i, res5); + _mm_storeu_ps(out[6] + 4 * i, res6); + _mm_storeu_ps(out[7] + 4 * i, res7); + _mm_storeu_ps(out[8] + 4 * i, res8); + _mm_storeu_ps(out[9] + 4 * i, res9); + _mm_storeu_ps(out[10] + 4 * i, res10); + _mm_storeu_ps(out[11] + 4 * i, res11); + _mm_storeu_ps(out[12] + 4 * i, res12); + _mm_storeu_ps(out[13] + 4 * i, res13); + _mm_storeu_ps(out[14] + 4 * i, res14); + _mm_storeu_ps(out[15] + 4 * i, res15); } } if (add_into) for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) + for (unsigned int v = 0; v < 16; ++v) out[v][i] += in[i][v]; else for (unsigned int i = 4 * n_chunks; i < n_entries; ++i) - for (unsigned int v = 0; v < 4; ++v) + for (unsigned int v = 0; v < 16; ++v) out[v][i] = in[i][v]; } - - -# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__) +# endif # if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \ defined(__VSX__) @@ -4945,76 +4977,6 @@ private: #endif // DOXYGEN -/** - * sum() functions. - */ - -#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__) -inline double -VectorizedArray::sum() -{ - __m128d t1 = _mm_unpackhi_pd(data, data); - __m128d t2 = _mm_add_pd(data, t1); - return _mm_cvtsd_f64(t2); -} - - - -inline float -VectorizedArray::sum() -{ - __m128 t1 = _mm_movehl_ps(data, data); - __m128 t2 = _mm_add_ps(data, t1); - __m128 t3 = _mm_shuffle_ps(t2, t2, 1); - __m128 t4 = _mm_add_ss(t2, t3); - return _mm_cvtss_f32(t4); -} -#endif - - - -#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__) -inline double -VectorizedArray::sum() -{ - VectorizedArray t1; - t1.data = _mm_add_pd(this->get_lower(), this->get_upper()); - return t1.sum(); -} - - - -inline float -VectorizedArray::sum() -{ - VectorizedArray t1; - t1.data = _mm_add_ps(this->get_lower(), this->get_upper()); - return t1.sum(); -} -#endif - - - -#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__) -inline double -VectorizedArray::sum() -{ - VectorizedArray t1; - t1.data = _mm256_add_pd(this->get_lower(), this->get_upper()); - return t1.sum(); -} - - - -inline float -VectorizedArray::sum() -{ - VectorizedArray t1; - t1.data = _mm256_add_ps(this->get_lower(), this->get_upper()); - return t1.sum(); -} -#endif - /**