From: Martin Kronbichler
Date: Wed, 11 Jan 2017 10:46:30 +0000 (+0100)
Subject: Gather and scatter operations in vectorized array
X-Git-Tag: v8.5.0-rc1~247^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F3766%2Fhead;p=dealii.git

Gather and scatter operations in vectorized array
---

diff --git a/doc/news/changes/minor/20170111MartinKronbichler b/doc/news/changes/minor/20170111MartinKronbichler
new file mode 100644
index 0000000000..cfc786cd17
--- /dev/null
+++ b/doc/news/changes/minor/20170111MartinKronbichler
@@ -0,0 +1,7 @@
+New: The class VectorizedArray has gained two new access functions, gather and
+scatter, that map to more efficient hardware versions on new architectures
+(AVX2 or AVX-512). Furthermore, support for AVX-512 has been improved by
+providing optimized vectorized_load_and_transpose() and
+vectorized_transpose_and_store().
+
+(Martin Kronbichler, 2017/01/11) diff --git a/include/deal.II/base/vectorization.h b/include/deal.II/base/vectorization.h index c651ca79ea..3e09d1d417 100644 --- a/include/deal.II/base/vectorization.h +++ b/include/deal.II/base/vectorization.h @@ -54,13 +54,13 @@ DEAL_II_NAMESPACE_CLOSE namespace std { - template ::dealii::VectorizedArray + template DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray sqrt(const ::dealii::VectorizedArray &); - template ::dealii::VectorizedArray + template DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray abs(const ::dealii::VectorizedArray &); - template ::dealii::VectorizedArray + template DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray max(const ::dealii::VectorizedArray &, const ::dealii::VectorizedArray &); - template ::dealii::VectorizedArray + template DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray min (const ::dealii::VectorizedArray &, const ::dealii::VectorizedArray &); } @@ -143,6 +143,7 @@ public: /** * This function assigns a scalar to this class. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator = (const Number scalar) { @@ -153,6 +154,7 @@ public: /** * Access operator (only valid with component 0) */ + DEAL_II_ALWAYS_INLINE Number & operator [] (const unsigned int comp) { @@ -164,6 +166,7 @@ public: /** * Constant access operator (only valid with component 0) */ + DEAL_II_ALWAYS_INLINE const Number & operator [] (const unsigned int comp) const { @@ -175,6 +178,7 @@ public: /** * Addition */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator += (const VectorizedArray &vec) { @@ -185,6 +189,7 @@ public: /** * Subtraction */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator -= (const VectorizedArray &vec) { @@ -195,6 +200,7 @@ public: /** * Multiplication */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator *= (const VectorizedArray &vec) { @@ -205,6 +211,7 @@ public: /** * Division */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator /= (const VectorizedArray &vec) { @@ -218,6 +225,7 @@ public: * in the vectorized array, as opposed to casting a double address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void load (const Number *ptr) { data = *ptr; @@ -229,11 +237,50 @@ public: * the amount of bytes in the vectorized array, as opposed to casting a * double address to VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (Number *ptr) const { *ptr = data; } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const Number *base_ptr, + const unsigned int *offsets) + { + data = base_ptr[offsets[0]]; + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. 
+ * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + Number *base_ptr) const + { + base_ptr[offsets[0]] = data; + } + /** * Actual data field. Since this class represents a POD data type, it is * declared public. @@ -245,6 +292,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -257,6 +305,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -269,6 +318,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -281,6 +331,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -311,7 +362,7 @@ private: * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray make_vectorized_array (const Number &u) { @@ -441,6 +492,7 @@ public: /** * This function can be used to set all data fields to a given scalar. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator = (const double x) { @@ -451,6 +503,7 @@ public: /** * Access operator. */ + DEAL_II_ALWAYS_INLINE double & operator [] (const unsigned int comp) { @@ -461,6 +514,7 @@ public: /** * Constant access operator. */ + DEAL_II_ALWAYS_INLINE const double & operator [] (const unsigned int comp) const { @@ -471,6 +525,7 @@ public: /** * Addition. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator += (const VectorizedArray &vec) { @@ -490,6 +545,7 @@ public: /** * Subtraction. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator -= (const VectorizedArray &vec) { @@ -503,6 +559,7 @@ public: /** * Multiplication. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator *= (const VectorizedArray &vec) { @@ -517,6 +574,7 @@ public: /** * Division. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator /= (const VectorizedArray &vec) { @@ -533,6 +591,7 @@ public: * the given address. The memory need not be aligned by 64 bytes, as opposed * to casting a double address to VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void load (const double *ptr) { data = _mm512_loadu_pd (ptr); @@ -544,11 +603,66 @@ public: * 64 bytes, as opposed to casting a double address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (double *ptr) const { _mm512_storeu_pd (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. 
+ * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const double *base_ptr, + const unsigned int *offsets) + { + // unfortunately, there does not appear to be a 256 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m256 index_val = _mm256_loadu_ps((const float *)offsets); + const __m256i index = *((__m256i *)(&index_val)); + data = _mm512_i32gather_pd(index, base_ptr, 8); + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + double *base_ptr) const + { + for (unsigned int i=0; i<8; ++i) + for (unsigned int j=i+1; j<8; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // unfortunately, there does not appear to be a 256 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m256 index_val = _mm256_loadu_ps((const float *)offsets); + const __m256i index = *((__m256i *)(&index_val)); + _mm512_i32scatter_pd(base_ptr, index, data, 8); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -560,6 +674,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -572,6 +687,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -590,6 +706,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -602,6 +719,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -626,7 +744,121 @@ private: /** - * Specialization for float and AVX. + * Specialization for double and AVX-512. 
+ */ +template <> +inline +void +vectorized_load_and_transpose(const unsigned int n_entries, + const double *in, + const unsigned int *offsets, + VectorizedArray *out) +{ + const unsigned int n_chunks = n_entries/4; + for (unsigned int outer=0; outer<8; outer += 4) + { + const double *in0 = in + offsets[0+outer]; + const double *in1 = in + offsets[1+outer]; + const double *in2 = in + offsets[2+outer]; + const double *in3 = in + offsets[3+outer]; + + for (unsigned int i=0; i +inline +void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + const unsigned int *offsets, + double *out) +{ + const unsigned int n_chunks = n_entries/4; + // do not do full transpose because the code is too long and will most + // likely not pay off. rather do the transposition on the vectorized array + // on size smaller, mm256d + for (unsigned int outer=0; outer<8; outer += 4) + { + double *out0 = out + offsets[0+outer]; + double *out1 = out + offsets[1+outer]; + double *out2 = out + offsets[2+outer]; + double *out3 = out + offsets[3+outer]; + for (unsigned int i=0; i class VectorizedArray @@ -640,6 +872,7 @@ public: /** * This function can be used to set all data fields to a given scalar. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator = (const float x) { @@ -650,6 +883,7 @@ public: /** * Access operator. */ + DEAL_II_ALWAYS_INLINE float & operator [] (const unsigned int comp) { @@ -660,6 +894,7 @@ public: /** * Constant access operator. */ + DEAL_II_ALWAYS_INLINE const float & operator [] (const unsigned int comp) const { @@ -670,6 +905,7 @@ public: /** * Addition. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator += (const VectorizedArray &vec) { @@ -689,6 +925,7 @@ public: /** * Subtraction. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator -= (const VectorizedArray &vec) { @@ -702,6 +939,7 @@ public: /** * Multiplication. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator *= (const VectorizedArray &vec) { @@ -716,6 +954,7 @@ public: /** * Division. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator /= (const VectorizedArray &vec) { @@ -732,6 +971,7 @@ public: * the given address. The memory need not be aligned by 64 bytes, as opposed * to casting a float address to VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void load (const float *ptr) { data = _mm512_loadu_ps (ptr); @@ -743,11 +983,66 @@ public: * 64 bytes, as opposed to casting a float address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (float *ptr) const { _mm512_storeu_ps (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const float *base_ptr, + const unsigned int *offsets) + { + // unfortunately, there does not appear to be a 512 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. 
+ const __m512 index_val = _mm512_loadu_ps((const float *)offsets); + const __m512i index = *((__m512i *)(&index_val)); + data = _mm512_i32gather_ps(index, base_ptr, 4); + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + float *base_ptr) const + { + for (unsigned int i=0; i<16; ++i) + for (unsigned int j=i+1; j<16; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // unfortunately, there does not appear to be a 512 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m512 index_val = _mm512_loadu_ps((const float *)offsets); + const __m512i index = *((__m512i *)(&index_val)); + _mm512_i32scatter_ps(base_ptr, index, data, 4); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -760,6 +1055,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -772,6 +1068,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -790,6 +1087,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -802,6 +1100,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -825,6 +1124,143 @@ private: +/** + * Specialization for float and AVX-512. + */ +template <> +inline +void +vectorized_load_and_transpose(const unsigned int n_entries, + const float *in, + const unsigned int *offsets, + VectorizedArray *out) +{ + const unsigned int n_chunks = n_entries/4; + for (unsigned int outer = 0; outer<16; outer += 8) + { + for (unsigned int i=0; i +inline +void +vectorized_transpose_and_store(const bool add_into, + const unsigned int n_entries, + const VectorizedArray *in, + const unsigned int *offsets, + float *out) +{ + const unsigned int n_chunks = n_entries/4; + for (unsigned int outer = 0; outer<16; outer += 8) + { + for (unsigned int i=0; i= 2 && defined(__AVX__) /** @@ -842,6 +1278,7 @@ public: /** * This function can be used to set all data fields to a given scalar. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator = (const double x) { @@ -852,6 +1289,7 @@ public: /** * Access operator. */ + DEAL_II_ALWAYS_INLINE double & operator [] (const unsigned int comp) { @@ -862,6 +1300,7 @@ public: /** * Constant access operator. */ + DEAL_II_ALWAYS_INLINE const double & operator [] (const unsigned int comp) const { @@ -872,6 +1311,7 @@ public: /** * Addition. 
*/ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator += (const VectorizedArray &vec) { @@ -891,6 +1331,7 @@ public: /** * Subtraction. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator -= (const VectorizedArray &vec) { @@ -904,6 +1345,7 @@ public: /** * Multiplication. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator *= (const VectorizedArray &vec) { @@ -918,6 +1360,7 @@ public: /** * Division. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator /= (const VectorizedArray &vec) { @@ -934,6 +1377,7 @@ public: * the given address. The memory need not be aligned by 32 bytes, as opposed * to casting a double address to VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void load (const double *ptr) { data = _mm256_loadu_pd (ptr); @@ -945,11 +1389,68 @@ public: * 32 bytes, as opposed to casting a double address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (double *ptr) const { _mm256_storeu_pd (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const double *base_ptr, + const unsigned int *offsets) + { +#ifdef __AVX2__ + // unfortunately, there does not appear to be a 128 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m128 index_val = _mm_loadu_ps((const float *)offsets); + const __m128i index = *((__m128i *)(&index_val)); + data = _mm256_i32gather_pd(base_ptr, index, 8); +#else + for (unsigned int i=0; i<4; ++i) + *(reinterpret_cast(&data)+i) = base_ptr[offsets[i]]; +#endif + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + double *base_ptr) const + { + for (unsigned int i=0; i<4; ++i) + for (unsigned int j=i+1; j<4; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // no scatter operation in AVX/AVX2 + for (unsigned int i=0; i<4; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data)+i); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -961,6 +1462,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -973,6 +1475,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -989,6 +1492,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. 
*/ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -1001,6 +1505,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -1035,13 +1540,18 @@ vectorized_load_and_transpose(const unsigned int n_entries, const unsigned int *offsets, VectorizedArray *out) { - const unsigned int n_chunks = n_entries/4, remainder = n_entries%4; + const unsigned int n_chunks = n_entries/4; + const double *in0 = in + offsets[0]; + const double *in1 = in + offsets[1]; + const double *in2 = in + offsets[2]; + const double *in3 = in + offsets[3]; + for (unsigned int i=0; i 0 && n_chunks > 0) - { - // simple re-load all data in the last slot - const unsigned int final_pos = n_chunks*4-4+remainder; - Assert(final_pos+4 == n_entries, ExcInternalError()); - __m256d u0 = _mm256_loadu_pd(in+final_pos+offsets[0]); - __m256d u1 = _mm256_loadu_pd(in+final_pos+offsets[1]); - __m256d u2 = _mm256_loadu_pd(in+final_pos+offsets[2]); - __m256d u3 = _mm256_loadu_pd(in+final_pos+offsets[3]); - __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20); - __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20); - __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31); - __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31); - out[final_pos+0].data = _mm256_unpacklo_pd (t0, t1); - out[final_pos+1].data = _mm256_unpackhi_pd (t0, t1); - out[final_pos+2].data = _mm256_unpacklo_pd (t2, t3); - out[final_pos+3].data = _mm256_unpackhi_pd (t2, t3); - } - else if (remainder > 0) - for (unsigned int i=0; i*. */ + DEAL_II_ALWAYS_INLINE void load (const float *ptr) { data = _mm256_loadu_ps (ptr); @@ -1258,11 +1760,68 @@ public: * 32 bytes, as opposed to casting a float address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (float *ptr) const { _mm256_storeu_ps (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const float *base_ptr, + const unsigned int *offsets) + { +#ifdef __AVX2__ + // unfortunately, there does not appear to be a 256 bit integer load, so + // do it by some reinterpret casts here. this is allowed because the Intel + // API allows aliasing between different vector types. + const __m256 index_val = _mm256_loadu_ps((const float *)offsets); + const __m256i index = *((__m256i *)(&index_val)); + data = _mm256_i32gather_ps(base_ptr, index, 4); +#else + for (unsigned int i=0; i<8; ++i) + *(reinterpret_cast(&data)+i) = base_ptr[offsets[i]]; +#endif + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. 
+ * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + float *base_ptr) const + { + for (unsigned int i=0; i<8; ++i) + for (unsigned int j=i+1; j<8; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + // no scatter operation in AVX/AVX2 + for (unsigned int i=0; i<8; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data)+i); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -1275,6 +1834,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -1287,6 +1847,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -1303,6 +1864,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -1315,6 +1877,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -1339,7 +1902,7 @@ private: /** - * Specialization for double and AVX. + * Specialization for float and AVX. */ template <> inline @@ -1349,7 +1912,7 @@ vectorized_load_and_transpose(const unsigned int n_entries, const unsigned int *offsets, VectorizedArray *out) { - const unsigned int n_chunks = n_entries/4, remainder = n_entries%4; + const unsigned int n_chunks = n_entries/4; for (unsigned int i=0; i 0 && n_chunks > 0) - { - // simple re-load all data in the last slot - const unsigned int final_pos = n_chunks*4-4+remainder; - Assert(final_pos+4 == n_entries, ExcInternalError()); - __m128 u0 = _mm_loadu_ps(in+final_pos+offsets[0]); - __m128 u1 = _mm_loadu_ps(in+final_pos+offsets[1]); - __m128 u2 = _mm_loadu_ps(in+final_pos+offsets[2]); - __m128 u3 = _mm_loadu_ps(in+final_pos+offsets[3]); - __m128 u4 = _mm_loadu_ps(in+final_pos+offsets[4]); - __m128 u5 = _mm_loadu_ps(in+final_pos+offsets[5]); - __m128 u6 = _mm_loadu_ps(in+final_pos+offsets[6]); - __m128 u7 = _mm_loadu_ps(in+final_pos+offsets[7]); - __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F); - t0 = _mm256_insertf128_ps (t3, u0, 0); - t0 = _mm256_insertf128_ps (t0, u4, 1); - t1 = _mm256_insertf128_ps (t3, u1, 0); - t1 = _mm256_insertf128_ps (t1, u5, 1); - t2 = _mm256_insertf128_ps (t3, u2, 0); - t2 = _mm256_insertf128_ps (t2, u6, 1); - t3 = _mm256_insertf128_ps (t3, u3, 0); - t3 = _mm256_insertf128_ps (t3, u7, 1); - __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44); - __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee); - __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44); - __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee); - out[final_pos+0].data = _mm256_shuffle_ps (v0, v2, 0x88); - out[final_pos+1].data = _mm256_shuffle_ps (v0, v2, 0xdd); - out[final_pos+2].data = _mm256_shuffle_ps (v1, v3, 0x88); - out[final_pos+3].data = _mm256_shuffle_ps (v1, v3, 0xdd); - } - else if (remainder > 0) - for (unsigned int i=0; i inline @@ -1522,6 +2053,7 @@ 
public: /** * This function can be used to set all data fields to a given scalar. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator = (const double x) { @@ -1532,6 +2064,7 @@ public: /** * Access operator. */ + DEAL_II_ALWAYS_INLINE double & operator [] (const unsigned int comp) { @@ -1542,6 +2075,7 @@ public: /** * Constant access operator. */ + DEAL_II_ALWAYS_INLINE const double & operator [] (const unsigned int comp) const { @@ -1552,6 +2086,7 @@ public: /** * Addition. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator += (const VectorizedArray &vec) { @@ -1566,6 +2101,7 @@ public: /** * Subtraction. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator -= (const VectorizedArray &vec) { @@ -1576,9 +2112,11 @@ public: #endif return *this; } + /** * Multiplication. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator *= (const VectorizedArray &vec) { @@ -1593,6 +2131,7 @@ public: /** * Division. */ + DEAL_II_ALWAYS_INLINE VectorizedArray & operator /= (const VectorizedArray &vec) { @@ -1609,6 +2148,7 @@ public: * the given address. The memory need not be aligned by 16 bytes, as opposed * to casting a double address to VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void load (const double *ptr) { data = _mm_loadu_pd (ptr); @@ -1620,11 +2160,56 @@ public: * 16 bytes, as opposed to casting a double address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (double *ptr) const { _mm_storeu_pd (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const double *base_ptr, + const unsigned int *offsets) + { + for (unsigned int i=0; i<2; ++i) + *(reinterpret_cast(&data)+i) = base_ptr[offsets[i]]; + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + double *base_ptr) const + { + Assert(offsets[0] != offsets[1], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + for (unsigned int i=0; i<2; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data)+i); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -1636,6 +2221,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -1648,6 +2234,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -1665,6 +2252,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. 
*/ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -1677,6 +2265,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -1710,7 +2299,7 @@ void vectorized_load_and_transpose(const unsigned int n_entries, const unsigned int *offsets, VectorizedArray *out) { - const unsigned int n_chunks = n_entries/2, remainder = n_entries%2; + const unsigned int n_chunks = n_entries/2; for (unsigned int i=0; i 0) - for (unsigned int i=0; i inline @@ -1750,8 +2338,7 @@ vectorized_transpose_and_store(const bool add_into, _mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0)); _mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1)); } - const unsigned int shift = n_chunks * 2; - for (unsigned int i=shift; i*. */ + DEAL_II_ALWAYS_INLINE void load (const float *ptr) { data = _mm_loadu_ps (ptr); @@ -1890,11 +2484,58 @@ public: * 16 bytes, as opposed to casting a float address to * VectorizedArray*. */ + DEAL_II_ALWAYS_INLINE void store (float *ptr) const { _mm_storeu_ps (ptr, data); } + /** + * Load @p n_array_elements from memory into the calling class, starting at + * the given address and with given offsets, each entry from the offset + * providing one element of the vectorized array. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * this->operator[](v) = base_ptr[offsets[v]]; + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void gather (const float *base_ptr, + const unsigned int *offsets) + { + for (unsigned int i=0; i<4; ++i) + *(reinterpret_cast(&data)+i) = base_ptr[offsets[i]]; + } + + /** + * Write the content of the calling class into memory in form of @p + * n_array_elements to the given address and the given offsets, filling the + * elements of the vectorized array into each offset. + * + * This operation corresponds to the following code (but uses a more + * efficient implementation in case the hardware allows for that): + * @code + * for (unsigned int v=0; v::n_array_elements; ++v) + * base_ptr[offsets[v]] = this->operator[](v); + * @endcode + */ + DEAL_II_ALWAYS_INLINE + void scatter (const unsigned int *offsets, + float *base_ptr) const + { + for (unsigned int i=0; i<4; ++i) + for (unsigned int j=i+1; j<4; ++j) + Assert(offsets[i] != offsets[j], + ExcMessage("Result of scatter undefined if two offset elements" + " point to the same position")); + + for (unsigned int i=0; i<4; ++i) + base_ptr[offsets[i]] = *(reinterpret_cast(&data)+i); + } + /** * Actual data field. Since this class represents a POD data type, it * remains public. @@ -1906,6 +2547,7 @@ private: * Return the square root of this field. Not for use in user code. Use * sqrt(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_sqrt () const { @@ -1918,6 +2560,7 @@ private: * Return the absolute value of this field. Not for use in user code. Use * abs(x) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_abs () const { @@ -1934,6 +2577,7 @@ private: * Return the component-wise maximum of this field and another one. Not for * use in user code. Use max(x,y) instead. 
*/ + DEAL_II_ALWAYS_INLINE VectorizedArray get_max (const VectorizedArray &other) const { @@ -1946,6 +2590,7 @@ private: * Return the component-wise minimum of this field and another one. Not for * use in user code. Use min(x,y) instead. */ + DEAL_II_ALWAYS_INLINE VectorizedArray get_min (const VectorizedArray &other) const { @@ -1979,7 +2624,7 @@ void vectorized_load_and_transpose(const unsigned int n_entries, const unsigned int *offsets, VectorizedArray *out) { - const unsigned int n_chunks = n_entries/4, remainder = n_entries%4; + const unsigned int n_chunks = n_entries/4; for (unsigned int i=0; i 0 && n_chunks > 0) - { - // simple re-load all data in the last slot - const unsigned int final_pos = n_chunks*4-4+remainder; - Assert(final_pos+4 == n_entries, ExcInternalError()); - __m128 u0 = _mm_loadu_ps(in+final_pos+offsets[0]); - __m128 u1 = _mm_loadu_ps(in+final_pos+offsets[1]); - __m128 u2 = _mm_loadu_ps(in+final_pos+offsets[2]); - __m128 u3 = _mm_loadu_ps(in+final_pos+offsets[3]); - __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44); - __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee); - __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44); - __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee); - out[final_pos+0].data = _mm_shuffle_ps (v0, v2, 0x88); - out[final_pos+1].data = _mm_shuffle_ps (v0, v2, 0xdd); - out[final_pos+2].data = _mm_shuffle_ps (v1, v3, 0x88); - out[final_pos+3].data = _mm_shuffle_ps (v1, v3, 0xdd); - } - else if (remainder > 0) - for (unsigned int i=0; i 0) + for (unsigned int i=4*n_chunks; i inline @@ -2093,7 +2720,7 @@ vectorized_transpose_and_store(const bool add_into, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const VectorizedArray &u, const VectorizedArray &v) @@ -2108,7 +2735,7 @@ operator + (const VectorizedArray &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - (const VectorizedArray &u, const VectorizedArray &v) @@ -2123,7 +2750,7 @@ operator - (const VectorizedArray &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator * (const VectorizedArray &u, const VectorizedArray &v) @@ -2138,7 +2765,7 @@ operator * (const VectorizedArray &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator / (const VectorizedArray &u, const VectorizedArray &v) @@ -2154,7 +2781,7 @@ operator / (const VectorizedArray &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const Number &u, const VectorizedArray &v) @@ -2172,7 +2799,7 @@ operator + (const Number &u, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const double &u, const VectorizedArray &v) @@ -2189,7 +2816,7 @@ operator + (const double &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const VectorizedArray &v, const Number &u) @@ -2205,7 +2832,7 @@ operator + (const VectorizedArray &v, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const VectorizedArray &v, const double &u) @@ -2220,7 +2847,7 @@ operator + (const VectorizedArray &v, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - (const Number &u, const VectorizedArray &v) @@ -2238,7 +2865,7 @@ operator - (const Number &u, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - 
(const double &u, const VectorizedArray &v) @@ -2255,7 +2882,7 @@ operator - (const double &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - (const VectorizedArray &v, const Number &u) @@ -2273,7 +2900,7 @@ operator - (const VectorizedArray &v, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - (const VectorizedArray &v, const double &u) @@ -2290,7 +2917,7 @@ operator - (const VectorizedArray &v, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator * (const Number &u, const VectorizedArray &v) @@ -2308,7 +2935,7 @@ operator * (const Number &u, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator * (const double &u, const VectorizedArray &v) @@ -2325,7 +2952,7 @@ operator * (const double &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator * (const VectorizedArray &v, const Number &u) @@ -2341,7 +2968,7 @@ operator * (const VectorizedArray &v, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator * (const VectorizedArray &v, const double &u) @@ -2356,7 +2983,7 @@ operator * (const VectorizedArray &v, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator / (const Number &u, const VectorizedArray &v) @@ -2374,7 +3001,7 @@ operator / (const Number &u, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator / (const double &u, const VectorizedArray &v) @@ -2391,7 +3018,7 @@ operator / (const double &u, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator / (const VectorizedArray &v, const Number &u) @@ -2409,7 +3036,7 @@ operator / (const VectorizedArray &v, * * @relates VectorizedArray */ -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator / (const VectorizedArray &v, const double &u) @@ -2425,7 +3052,7 @@ operator / (const VectorizedArray &v, * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator + (const VectorizedArray &u) { @@ -2438,7 +3065,7 @@ operator + (const VectorizedArray &u) * @relates VectorizedArray */ template -inline +inline DEAL_II_ALWAYS_INLINE VectorizedArray operator - (const VectorizedArray &u) { diff --git a/tests/base/vectorization_05.cc b/tests/base/vectorization_05.cc index 6654fd2cec..c711972991 100644 --- a/tests/base/vectorization_05.cc +++ b/tests/base/vectorization_05.cc @@ -105,7 +105,7 @@ int main() deallog.pop(); deallog.push("float"); test (); - test (); + test (); test (); deallog.pop(); diff --git a/tests/base/vectorization_05.output b/tests/base/vectorization_05.output index a345a3ff17..e3084abc99 100644 --- a/tests/base/vectorization_05.output +++ b/tests/base/vectorization_05.output @@ -11,9 +11,9 @@ DEAL:double::transpose_and_store (noadd) at n=32: #errors: 0 DEAL:float::load_and_transpose at n=1: #errors: 0 DEAL:float::transpose_and_store ( add) at n=1: #errors: 0 DEAL:float::transpose_and_store (noadd) at n=1: #errors: 0 -DEAL:float::load_and_transpose at n=9: #errors: 0 -DEAL:float::transpose_and_store ( add) at n=9: #errors: 0 -DEAL:float::transpose_and_store (noadd) at n=9: #errors: 0 +DEAL:float::load_and_transpose at n=17: #errors: 0 +DEAL:float::transpose_and_store ( add) at n=17: #errors: 0 +DEAL:float::transpose_and_store (noadd) at n=17: #errors: 0 
DEAL:float::load_and_transpose at n=32: #errors: 0 DEAL:float::transpose_and_store ( add) at n=32: #errors: 0 DEAL:float::transpose_and_store (noadd) at n=32: #errors: 0 diff --git a/tests/base/vectorization_06.cc b/tests/base/vectorization_06.cc new file mode 100644 index 0000000000..dec85a9477 --- /dev/null +++ b/tests/base/vectorization_06.cc @@ -0,0 +1,143 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +// test gather and scatter operations of vectorized array + +#include "../tests.h" +#include +#include + +#include +#include + + +template +void test () +{ + // since the number of array elements is system dependent, it is not a good + // idea to print them to an output file. Instead, check the values manually + std::vector vec(200); + for (unsigned int i=0; i::n_array_elements; + unsigned int indices[n_vectors]; + for (unsigned int i=0; i arr; + arr = Number(-1000); + + arr.gather(&vec[0], indices); + unsigned int n_errors = 0; + for (unsigned int i=0; i 0) + { + for (unsigned int j=0; j 0) + { + for (unsigned int j=0; j 0) + { + for (unsigned int j=0; j 0) + { + for (unsigned int j=0; j 0) + { + for (unsigned int i=0; i (); + deallog.pop(); + deallog.push("float"); + test (); + deallog.pop(); + + // test long double: in that case, the default + // path of VectorizedArray is taken no matter + // what was done for double or float + deallog.push("long double"); + test (); + deallog.pop(); +} diff --git a/tests/base/vectorization_06.output b/tests/base/vectorization_06.output new file mode 100644 index 0000000000..61b74fa817 --- /dev/null +++ b/tests/base/vectorization_06.output @@ -0,0 +1,16 @@ + +DEAL:double::gather contiguous #errors: 0 +DEAL:double::gather contiguous #errors: 0 +DEAL:double::gather non-contiguous #errors: 0 +DEAL:double::gather non-contiguous #errors: 0 +DEAL:double::scatter non-contiguous #errors: 0 +DEAL:float::gather contiguous #errors: 0 +DEAL:float::gather contiguous #errors: 0 +DEAL:float::gather non-contiguous #errors: 0 +DEAL:float::gather non-contiguous #errors: 0 +DEAL:float::scatter non-contiguous #errors: 0 +DEAL:long double::gather contiguous #errors: 0 +DEAL:long double::gather contiguous #errors: 0 +DEAL:long double::gather non-contiguous #errors: 0 +DEAL:long double::gather non-contiguous #errors: 0 +DEAL:long double::scatter non-contiguous #errors: 0
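
To illustrate the gather/scatter interface introduced above, here is a minimal, hypothetical usage sketch. It is not part of the commit: the stride of 3, the variable names, and the main() driver are invented for illustration. The member signatures (gather(base_ptr, offsets) and scatter(offsets, base_ptr)) are the ones added in this patch; on hardware without gather/scatter instructions the calls fall back to the scalar loops shown in the documentation.

// Sketch only: strided gather into the lanes of a VectorizedArray and
// scatter back. Stride and names are illustrative, not from the patch.
#include <deal.II/base/vectorization.h>

#include <vector>

int main()
{
  const unsigned int n_lanes =
    dealii::VectorizedArray<double>::n_array_elements;

  // One "cell" of 3 doubles per lane, cells stored contiguously.
  std::vector<double> storage(3 * n_lanes, 1.);
  std::vector<unsigned int> offsets(n_lanes);
  for (unsigned int v = 0; v < n_lanes; ++v)
    offsets[v] = 3 * v; // first double of each cell

  // a[v] = storage[offsets[v]] for all lanes, using a hardware gather
  // where available (AVX2/AVX-512), otherwise a plain loop.
  dealii::VectorizedArray<double> a;
  a.gather(&storage[0], &offsets[0]);

  // Lane-wise work, then write back: storage[offsets[v]] = a[v].
  // The offsets must be pairwise distinct for scatter to be well defined.
  a = a * 2.;
  a.scatter(&offsets[0], &storage[0]);

  return 0;
}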
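A similar sketch, again hypothetical and not part of the commit, shows the two transpose helpers whose AVX-512 specializations this patch adds. The signatures match those in the diff (vectorized_load_and_transpose(n_entries, in, offsets, out) and vectorized_transpose_and_store(add_into, n_entries, in, offsets, out)); the value of dofs_per_cell and the data layout are made up for the example.

// Sketch only: array-of-struct to struct-of-array transposition and back.
#include <deal.II/base/vectorization.h>

#include <vector>

int main()
{
  const unsigned int n_lanes =
    dealii::VectorizedArray<double>::n_array_elements;
  const unsigned int dofs_per_cell = 8;

  std::vector<double> dof_values(dofs_per_cell * n_lanes, 1.);
  std::vector<unsigned int> offsets(n_lanes);
  for (unsigned int v = 0; v < n_lanes; ++v)
    offsets[v] = v * dofs_per_cell; // start of each lane's cell data

  // values[i][v] = dof_values[offsets[v] + i]; a stack array keeps the
  // VectorizedArray entries properly aligned.
  dealii::VectorizedArray<double> values[dofs_per_cell];
  dealii::vectorized_load_and_transpose(dofs_per_cell, &dof_values[0],
                                        &offsets[0], values);

  for (unsigned int i = 0; i < dofs_per_cell; ++i)
    values[i] = values[i] * 2.;

  // Write back; the first argument chooses between overwriting (false)
  // and accumulating into the existing entries (true).
  dealii::vectorized_transpose_and_store(false, dofs_per_cell, values,
                                         &offsets[0], &dof_values[0]);

  return 0;
}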