namespace std
{
- template <typename Number> ::dealii::VectorizedArray<Number>
+ template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
sqrt(const ::dealii::VectorizedArray<Number> &);
- template <typename Number> ::dealii::VectorizedArray<Number>
+ template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
abs(const ::dealii::VectorizedArray<Number> &);
- template <typename Number> ::dealii::VectorizedArray<Number>
+ template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
max(const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
- template <typename Number> ::dealii::VectorizedArray<Number>
+ template <typename Number> DEAL_II_ALWAYS_INLINE ::dealii::VectorizedArray<Number>
min (const ::dealii::VectorizedArray<Number> &, const ::dealii::VectorizedArray<Number> &);
}
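These declarations make the standard names sqrt, abs, max and min usable lane-wise on vectorized data. A minimal usage sketch (assuming a using-directive for namespace dealii; the variable names are illustrative only):

  VectorizedArray<double> x = make_vectorized_array(2.0);
  VectorizedArray<double> r = std::sqrt(x);   // lane-wise square root
  VectorizedArray<double> m = std::max(x, r); // lane-wise maximum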
/**
* This function assigns a scalar to this class.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const Number scalar)
{
/**
* Access operator (only valid with component 0)
*/
+ DEAL_II_ALWAYS_INLINE
Number &
operator [] (const unsigned int comp)
{
/**
* Constant access operator (only valid with component 0)
*/
+ DEAL_II_ALWAYS_INLINE
const Number &
operator [] (const unsigned int comp) const
{
/**
* Addition
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray<Number> &vec)
{
/**
* Subtraction
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray<Number> &vec)
{
/**
* Multiplication
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray<Number> &vec)
{
/**
* Division
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray<Number> &vec)
{
* in the vectorized array, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const Number *ptr)
{
data = *ptr;
* the amount of bytes in the vectorized array, as opposed to casting a
* double address to VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (Number *ptr) const
{
*ptr = data;
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const Number *base_ptr,
+ const unsigned int *offsets)
+ {
+ data = base_ptr[offsets[0]];
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ Number *base_ptr) const
+ {
+ base_ptr[offsets[0]] = data;
+ }
+
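  // A minimal usage sketch of the gather/scatter interface defined above; the
  // names src, dst and offsets below are illustrative, not part of the patch:
  //
  //   VectorizedArray<double> v;
  //   unsigned int offsets[VectorizedArray<double>::n_array_elements];
  //   for (unsigned int i=0; i<VectorizedArray<double>::n_array_elements; ++i)
  //     offsets[i] = 3*i;          // one strided entry per lane
  //   v.gather(src, offsets);      // v[i] == src[offsets[i]]
  //   v.scatter(offsets, dst);     // dst[offsets[i]] = v[i]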
/**
* Actual data field. Since this class represents a POD data type, it is
* declared public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
make_vectorized_array (const Number &u)
{
/**
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const double x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
double &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const double &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 64 bytes, as opposed
* to casting a double address to VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const double *ptr)
{
data = _mm512_loadu_pd (ptr);
* 64 bytes, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (double *ptr) const
{
_mm512_storeu_pd (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const double *base_ptr,
+ const unsigned int *offsets)
+ {
+ // unfortunately, there does not appear to be a 256 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
+ const __m256i index = *((__m256i *)(&index_val));
+ data = _mm512_i32gather_pd(index, base_ptr, 8);
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ double *base_ptr) const
+ {
+ for (unsigned int i=0; i<8; ++i)
+ for (unsigned int j=i+1; j<8; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ // unfortunately, there does not appear to be a 256 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
+ const __m256i index = *((__m256i *)(&index_val));
+ _mm512_i32scatter_pd(base_ptr, index, data, 8);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
/**
- * Specialization for float and AVX.
+ * Specialization for double and AVX-512.
+ */
+template <>
+inline
+void
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const double *in,
+ const unsigned int *offsets,
+ VectorizedArray<double> *out)
+{
+ const unsigned int n_chunks = n_entries/4;
+ for (unsigned int outer=0; outer<8; outer += 4)
+ {
+ const double *in0 = in + offsets[0+outer];
+ const double *in1 = in + offsets[1+outer];
+ const double *in2 = in + offsets[2+outer];
+ const double *in3 = in + offsets[3+outer];
+
+ for (unsigned int i=0; i<n_chunks; ++i)
+ {
+ __m256d u0 = _mm256_loadu_pd(in0+4*i);
+ __m256d u1 = _mm256_loadu_pd(in1+4*i);
+ __m256d u2 = _mm256_loadu_pd(in2+4*i);
+ __m256d u3 = _mm256_loadu_pd(in3+4*i);
+ __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
+ __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
+ __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
+ __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
+ *(__m256d *)((double *)(&out[4*i+0].data)+outer) = _mm256_unpacklo_pd (t0, t1);
+ *(__m256d *)((double *)(&out[4*i+1].data)+outer) = _mm256_unpackhi_pd (t0, t1);
+ *(__m256d *)((double *)(&out[4*i+2].data)+outer) = _mm256_unpacklo_pd (t2, t3);
+ *(__m256d *)((double *)(&out[4*i+3].data)+outer) = _mm256_unpackhi_pd (t2, t3);
+ }
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<4; ++v)
+ out[i][outer+v] = in[offsets[v+outer]+i];
+ }
+}
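For reference, the permute/unpack sequence in the inner loop above is a standard 4x4 transpose of double-precision rows; tracing one iteration with rows a, b, c, d (a sketch of the intrinsic semantics, not code from the patch):

  // u0 = [a0 a1 a2 a3]  u1 = [b0 b1 b2 b3]  u2 = [c0 c1 c2 c3]  u3 = [d0 d1 d2 d3]
  // t0 = permute2f128(u0,u2,0x20) = [a0 a1 | c0 c1]   (low 128-bit halves)
  // t1 = permute2f128(u1,u3,0x20) = [b0 b1 | d0 d1]
  // t2 = permute2f128(u0,u2,0x31) = [a2 a3 | c2 c3]   (high 128-bit halves)
  // t3 = permute2f128(u1,u3,0x31) = [b2 b3 | d2 d3]
  // unpacklo(t0,t1) = [a0 b0 c0 d0]   unpackhi(t0,t1) = [a1 b1 c1 d1]
  // unpacklo(t2,t3) = [a2 b2 c2 d2]   unpackhi(t2,t3) = [a3 b3 c3 d3]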
+
+
+
+/**
+ * Specialization for double and AVX-512.
+ */
+template <>
+inline
+void
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<double> *in,
+ const unsigned int *offsets,
+ double *out)
+{
+ const unsigned int n_chunks = n_entries/4;
+  // do not do the full transpose because the code would be too long and will
+  // most likely not pay off. rather do the transposition on the half-width
+  // vectorized arrays (__m256d)
+ for (unsigned int outer=0; outer<8; outer += 4)
+ {
+ double *out0 = out + offsets[0+outer];
+ double *out1 = out + offsets[1+outer];
+ double *out2 = out + offsets[2+outer];
+ double *out3 = out + offsets[3+outer];
+ for (unsigned int i=0; i<n_chunks; ++i)
+ {
+ __m256d u0 = *(const __m256d *)((const double *)(&in[4*i+0].data)+outer);
+ __m256d u1 = *(const __m256d *)((const double *)(&in[4*i+1].data)+outer);
+ __m256d u2 = *(const __m256d *)((const double *)(&in[4*i+2].data)+outer);
+ __m256d u3 = *(const __m256d *)((const double *)(&in[4*i+3].data)+outer);
+ __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
+ __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
+ __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
+ __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
+ __m256d res0 = _mm256_unpacklo_pd (t0, t1);
+ __m256d res1 = _mm256_unpackhi_pd (t0, t1);
+ __m256d res2 = _mm256_unpacklo_pd (t2, t3);
+ __m256d res3 = _mm256_unpackhi_pd (t2, t3);
+
+ // Cannot use the same store instructions in both paths of the 'if'
+ // because the compiler cannot know that there is no aliasing between
+ // pointers
+ if (add_into)
+ {
+ res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
+ _mm256_storeu_pd(out0+4*i, res0);
+ res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
+ _mm256_storeu_pd(out1+4*i, res1);
+ res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
+ _mm256_storeu_pd(out2+4*i, res2);
+ res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
+ _mm256_storeu_pd(out3+4*i, res3);
+ }
+ else
+ {
+ _mm256_storeu_pd(out0+4*i, res0);
+ _mm256_storeu_pd(out1+4*i, res1);
+ _mm256_storeu_pd(out2+4*i, res2);
+ _mm256_storeu_pd(out3+4*i, res3);
+ }
+ }
+ if (add_into)
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<4; ++v)
+ out[offsets[v+outer]+i] += in[i][v+outer];
+ else
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<4; ++v)
+ out[offsets[v+outer]+i] = in[i][v+outer];
+ }
+}
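Both branches above compute the same result as the following scalar loop; the tail handling after the vectorized chunks is exactly this loop restricted to the remaining n_entries-4*n_chunks entries:

  for (unsigned int i=0; i<n_entries; ++i)
    for (unsigned int v=0; v<VectorizedArray<double>::n_array_elements; ++v)
      if (add_into)
        out[offsets[v]+i] += in[i][v];
      else
        out[offsets[v]+i] = in[i][v];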
+
+
+
+/**
+ * Specialization for float and AVX-512.
*/
template<>
class VectorizedArray<float>
/**
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const float x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
float &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const float &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 64 bytes, as opposed
* to casting a float address to VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const float *ptr)
{
data = _mm512_loadu_ps (ptr);
* 64 bytes, as opposed to casting a float address to
* VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (float *ptr) const
{
_mm512_storeu_ps (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const float *base_ptr,
+ const unsigned int *offsets)
+ {
+ // unfortunately, there does not appear to be a 512 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
+ const __m512i index = *((__m512i *)(&index_val));
+ data = _mm512_i32gather_ps(index, base_ptr, 4);
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ float *base_ptr) const
+ {
+ for (unsigned int i=0; i<16; ++i)
+ for (unsigned int j=i+1; j<16; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ // unfortunately, there does not appear to be a 512 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
+ const __m512i index = *((__m512i *)(&index_val));
+ _mm512_i32scatter_ps(base_ptr, index, data, 4);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
+/**
+ * Specialization for float and AVX-512.
+ */
+template <>
+inline
+void
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const float *in,
+ const unsigned int *offsets,
+ VectorizedArray<float> *out)
+{
+ const unsigned int n_chunks = n_entries/4;
+ for (unsigned int outer = 0; outer<16; outer += 8)
+ {
+ for (unsigned int i=0; i<n_chunks; ++i)
+ {
+ __m128 u0 = _mm_loadu_ps(in+4*i+offsets[0+outer]);
+ __m128 u1 = _mm_loadu_ps(in+4*i+offsets[1+outer]);
+ __m128 u2 = _mm_loadu_ps(in+4*i+offsets[2+outer]);
+ __m128 u3 = _mm_loadu_ps(in+4*i+offsets[3+outer]);
+ __m128 u4 = _mm_loadu_ps(in+4*i+offsets[4+outer]);
+ __m128 u5 = _mm_loadu_ps(in+4*i+offsets[5+outer]);
+ __m128 u6 = _mm_loadu_ps(in+4*i+offsets[6+outer]);
+ __m128 u7 = _mm_loadu_ps(in+4*i+offsets[7+outer]);
+ // To avoid warnings about uninitialized variables, need to initialize
+ // one variable with zero before using it.
+ __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
+ t0 = _mm256_insertf128_ps (t3, u0, 0);
+ t0 = _mm256_insertf128_ps (t0, u4, 1);
+ t1 = _mm256_insertf128_ps (t3, u1, 0);
+ t1 = _mm256_insertf128_ps (t1, u5, 1);
+ t2 = _mm256_insertf128_ps (t3, u2, 0);
+ t2 = _mm256_insertf128_ps (t2, u6, 1);
+ t3 = _mm256_insertf128_ps (t3, u3, 0);
+ t3 = _mm256_insertf128_ps (t3, u7, 1);
+ __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
+ __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
+ __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
+ __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
+ *(__m256 *)((float *)(&out[4*i+0].data)+outer) = _mm256_shuffle_ps (v0, v2, 0x88);
+ *(__m256 *)((float *)(&out[4*i+1].data)+outer) = _mm256_shuffle_ps (v0, v2, 0xdd);
+ *(__m256 *)((float *)(&out[4*i+2].data)+outer) = _mm256_shuffle_ps (v1, v3, 0x88);
+ *(__m256 *)((float *)(&out[4*i+3].data)+outer) = _mm256_shuffle_ps (v1, v3, 0xdd);
+ }
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<8; ++v)
+ out[i][v+outer] = in[offsets[v+outer]+i];
+ }
+}
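For reference, the shuffle sequence in the inner loop above is the usual 4x4 single-precision transpose applied independently to each 128-bit lane; for one lane holding rows a, b, c, d (a sketch of the intrinsic semantics, not code from the patch):

  // t0 = [a0 a1 a2 a3]  t1 = [b0 b1 b2 b3]  t2 = [c0 c1 c2 c3]  t3 = [d0 d1 d2 d3]
  // v0 = shuffle(t0,t1,0x44) = [a0 a1 b0 b1]   v1 = shuffle(t0,t1,0xee) = [a2 a3 b2 b3]
  // v2 = shuffle(t2,t3,0x44) = [c0 c1 d0 d1]   v3 = shuffle(t2,t3,0xee) = [c2 c3 d2 d3]
  // shuffle(v0,v2,0x88) = [a0 b0 c0 d0]        shuffle(v0,v2,0xdd) = [a1 b1 c1 d1]
  // shuffle(v1,v3,0x88) = [a2 b2 c2 d2]        shuffle(v1,v3,0xdd) = [a3 b3 c3 d3]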
+
+
+
+/**
+ * Specialization for float and AVX-512.
+ */
+template <>
+inline
+void
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<float> *in,
+ const unsigned int *offsets,
+ float *out)
+{
+ const unsigned int n_chunks = n_entries/4;
+ for (unsigned int outer = 0; outer<16; outer += 8)
+ {
+ for (unsigned int i=0; i<n_chunks; ++i)
+ {
+ __m256 u0 = *(const __m256 *)((const float *)(&in[4*i+0].data)+outer);
+ __m256 u1 = *(const __m256 *)((const float *)(&in[4*i+1].data)+outer);
+ __m256 u2 = *(const __m256 *)((const float *)(&in[4*i+2].data)+outer);
+ __m256 u3 = *(const __m256 *)((const float *)(&in[4*i+3].data)+outer);
+ __m256 t0 = _mm256_shuffle_ps (u0, u1, 0x44);
+ __m256 t1 = _mm256_shuffle_ps (u0, u1, 0xee);
+ __m256 t2 = _mm256_shuffle_ps (u2, u3, 0x44);
+ __m256 t3 = _mm256_shuffle_ps (u2, u3, 0xee);
+ u0 = _mm256_shuffle_ps (t0, t2, 0x88);
+ u1 = _mm256_shuffle_ps (t0, t2, 0xdd);
+ u2 = _mm256_shuffle_ps (t1, t3, 0x88);
+ u3 = _mm256_shuffle_ps (t1, t3, 0xdd);
+ __m128 res0 = _mm256_extractf128_ps (u0, 0);
+ __m128 res4 = _mm256_extractf128_ps (u0, 1);
+ __m128 res1 = _mm256_extractf128_ps (u1, 0);
+ __m128 res5 = _mm256_extractf128_ps (u1, 1);
+ __m128 res2 = _mm256_extractf128_ps (u2, 0);
+ __m128 res6 = _mm256_extractf128_ps (u2, 1);
+ __m128 res3 = _mm256_extractf128_ps (u3, 0);
+ __m128 res7 = _mm256_extractf128_ps (u3, 1);
+
+ // Cannot use the same store instructions in both paths of the 'if'
+ // because the compiler cannot know that there is no aliasing between
+ // pointers
+ if (add_into)
+ {
+ res0 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[0+outer]), res0);
+ _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
+ res1 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[1+outer]), res1);
+ _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
+ res2 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[2+outer]), res2);
+ _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
+ res3 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[3+outer]), res3);
+ _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
+ res4 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[4+outer]), res4);
+ _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
+ res5 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[5+outer]), res5);
+ _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
+ res6 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[6+outer]), res6);
+ _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
+ res7 = _mm_add_ps(_mm_loadu_ps(out+4*i+offsets[7+outer]), res7);
+ _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
+ }
+ else
+ {
+ _mm_storeu_ps(out+4*i+offsets[0+outer], res0);
+ _mm_storeu_ps(out+4*i+offsets[1+outer], res1);
+ _mm_storeu_ps(out+4*i+offsets[2+outer], res2);
+ _mm_storeu_ps(out+4*i+offsets[3+outer], res3);
+ _mm_storeu_ps(out+4*i+offsets[4+outer], res4);
+ _mm_storeu_ps(out+4*i+offsets[5+outer], res5);
+ _mm_storeu_ps(out+4*i+offsets[6+outer], res6);
+ _mm_storeu_ps(out+4*i+offsets[7+outer], res7);
+ }
+ }
+ if (add_into)
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<8; ++v)
+ out[offsets[v+outer]+i] += in[i][v+outer];
+ else
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<8; ++v)
+ out[offsets[v+outer]+i] = in[i][v+outer];
+ }
+}
+
+
+
#elif DEAL_II_COMPILER_VECTORIZATION_LEVEL >= 2 && defined(__AVX__)
/**
/**
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const double x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
double &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const double &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 32 bytes, as opposed
* to casting a double address to VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const double *ptr)
{
data = _mm256_loadu_pd (ptr);
* 32 bytes, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (double *ptr) const
{
_mm256_storeu_pd (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const double *base_ptr,
+ const unsigned int *offsets)
+ {
+#ifdef __AVX2__
+ // unfortunately, there does not appear to be a 128 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m128 index_val = _mm_loadu_ps((const float *)offsets);
+ const __m128i index = *((__m128i *)(&index_val));
+ data = _mm256_i32gather_pd(base_ptr, index, 8);
+#else
+ for (unsigned int i=0; i<4; ++i)
+ *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
+#endif
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ double *base_ptr) const
+ {
+ for (unsigned int i=0; i<4; ++i)
+ for (unsigned int j=i+1; j<4; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ // no scatter operation in AVX/AVX2
+ for (unsigned int i=0; i<4; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
const unsigned int *offsets,
VectorizedArray<double> *out)
{
- const unsigned int n_chunks = n_entries/4, remainder = n_entries%4;
+ const unsigned int n_chunks = n_entries/4;
+ const double *in0 = in + offsets[0];
+ const double *in1 = in + offsets[1];
+ const double *in2 = in + offsets[2];
+ const double *in3 = in + offsets[3];
+
for (unsigned int i=0; i<n_chunks; ++i)
{
- __m256d u0 = _mm256_loadu_pd(in+4*i+offsets[0]);
- __m256d u1 = _mm256_loadu_pd(in+4*i+offsets[1]);
- __m256d u2 = _mm256_loadu_pd(in+4*i+offsets[2]);
- __m256d u3 = _mm256_loadu_pd(in+4*i+offsets[3]);
+ __m256d u0 = _mm256_loadu_pd(in0+4*i);
+ __m256d u1 = _mm256_loadu_pd(in1+4*i);
+ __m256d u2 = _mm256_loadu_pd(in2+4*i);
+ __m256d u3 = _mm256_loadu_pd(in3+4*i);
__m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
__m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
__m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
out[4*i+2].data = _mm256_unpacklo_pd (t2, t3);
out[4*i+3].data = _mm256_unpackhi_pd (t2, t3);
}
- if (remainder > 0 && n_chunks > 0)
- {
- // simple re-load all data in the last slot
- const unsigned int final_pos = n_chunks*4-4+remainder;
- Assert(final_pos+4 == n_entries, ExcInternalError());
- __m256d u0 = _mm256_loadu_pd(in+final_pos+offsets[0]);
- __m256d u1 = _mm256_loadu_pd(in+final_pos+offsets[1]);
- __m256d u2 = _mm256_loadu_pd(in+final_pos+offsets[2]);
- __m256d u3 = _mm256_loadu_pd(in+final_pos+offsets[3]);
- __m256d t0 = _mm256_permute2f128_pd (u0, u2, 0x20);
- __m256d t1 = _mm256_permute2f128_pd (u1, u3, 0x20);
- __m256d t2 = _mm256_permute2f128_pd (u0, u2, 0x31);
- __m256d t3 = _mm256_permute2f128_pd (u1, u3, 0x31);
- out[final_pos+0].data = _mm256_unpacklo_pd (t0, t1);
- out[final_pos+1].data = _mm256_unpackhi_pd (t0, t1);
- out[final_pos+2].data = _mm256_unpacklo_pd (t2, t3);
- out[final_pos+3].data = _mm256_unpackhi_pd (t2, t3);
- }
- else if (remainder > 0)
- for (unsigned int i=0; i<n_entries; ++i)
- for (unsigned int v=0; v<4; ++v)
- out[i][v] = in[offsets[v]+i];
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<4; ++v)
+ out[i][v] = in[offsets[v]+i];
}
double *out)
{
const unsigned int n_chunks = n_entries/4;
+ double *out0 = out + offsets[0];
+ double *out1 = out + offsets[1];
+ double *out2 = out + offsets[2];
+ double *out3 = out + offsets[3];
for (unsigned int i=0; i<n_chunks; ++i)
{
__m256d u0 = in[4*i+0].data;
// pointers
if (add_into)
{
- res0 = _mm256_add_pd(_mm256_loadu_pd(out+4*i+offsets[0]), res0);
- _mm256_storeu_pd(out+4*i+offsets[0], res0);
- res1 = _mm256_add_pd(_mm256_loadu_pd(out+4*i+offsets[1]), res1);
- _mm256_storeu_pd(out+4*i+offsets[1], res1);
- res2 = _mm256_add_pd(_mm256_loadu_pd(out+4*i+offsets[2]), res2);
- _mm256_storeu_pd(out+4*i+offsets[2], res2);
- res3 = _mm256_add_pd(_mm256_loadu_pd(out+4*i+offsets[3]), res3);
- _mm256_storeu_pd(out+4*i+offsets[3], res3);
+ res0 = _mm256_add_pd(_mm256_loadu_pd(out0+4*i), res0);
+ _mm256_storeu_pd(out0+4*i, res0);
+ res1 = _mm256_add_pd(_mm256_loadu_pd(out1+4*i), res1);
+ _mm256_storeu_pd(out1+4*i, res1);
+ res2 = _mm256_add_pd(_mm256_loadu_pd(out2+4*i), res2);
+ _mm256_storeu_pd(out2+4*i, res2);
+ res3 = _mm256_add_pd(_mm256_loadu_pd(out3+4*i), res3);
+ _mm256_storeu_pd(out3+4*i, res3);
}
else
{
- _mm256_storeu_pd(out+4*i+offsets[0], res0);
- _mm256_storeu_pd(out+4*i+offsets[1], res1);
- _mm256_storeu_pd(out+4*i+offsets[2], res2);
- _mm256_storeu_pd(out+4*i+offsets[3], res3);
+ _mm256_storeu_pd(out0+4*i, res0);
+ _mm256_storeu_pd(out1+4*i, res1);
+ _mm256_storeu_pd(out2+4*i, res2);
+ _mm256_storeu_pd(out3+4*i, res3);
}
}
- const unsigned int shift = n_chunks * 4;
if (add_into)
- for (unsigned int i=shift; i<n_entries; ++i)
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
for (unsigned int v=0; v<4; ++v)
out[offsets[v]+i] += in[i][v];
else
- for (unsigned int i=shift; i<n_entries; ++i)
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
for (unsigned int v=0; v<4; ++v)
out[offsets[v]+i] = in[i][v];
}
/**
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const float x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
float &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const float &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 32 bytes, as opposed
* to casting a float address to VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const float *ptr)
{
data = _mm256_loadu_ps (ptr);
* 32 bytes, as opposed to casting a float address to
* VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (float *ptr) const
{
_mm256_storeu_ps (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const float *base_ptr,
+ const unsigned int *offsets)
+ {
+#ifdef __AVX2__
+ // unfortunately, there does not appear to be a 256 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
+ const __m256i index = *((__m256i *)(&index_val));
+ data = _mm256_i32gather_ps(base_ptr, index, 4);
+#else
+ for (unsigned int i=0; i<8; ++i)
+ *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
+#endif
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ float *base_ptr) const
+ {
+ for (unsigned int i=0; i<8; ++i)
+ for (unsigned int j=i+1; j<8; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ // no scatter operation in AVX/AVX2
+ for (unsigned int i=0; i<8; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
/**
- * Specialization for double and AVX.
+ * Specialization for float and AVX.
*/
template <>
inline
const unsigned int *offsets,
VectorizedArray<float> *out)
{
- const unsigned int n_chunks = n_entries/4, remainder = n_entries%4;
+ const unsigned int n_chunks = n_entries/4;
for (unsigned int i=0; i<n_chunks; ++i)
{
__m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
out[4*i+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
out[4*i+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
}
- if (remainder > 0 && n_chunks > 0)
- {
- // simple re-load all data in the last slot
- const unsigned int final_pos = n_chunks*4-4+remainder;
- Assert(final_pos+4 == n_entries, ExcInternalError());
- __m128 u0 = _mm_loadu_ps(in+final_pos+offsets[0]);
- __m128 u1 = _mm_loadu_ps(in+final_pos+offsets[1]);
- __m128 u2 = _mm_loadu_ps(in+final_pos+offsets[2]);
- __m128 u3 = _mm_loadu_ps(in+final_pos+offsets[3]);
- __m128 u4 = _mm_loadu_ps(in+final_pos+offsets[4]);
- __m128 u5 = _mm_loadu_ps(in+final_pos+offsets[5]);
- __m128 u6 = _mm_loadu_ps(in+final_pos+offsets[6]);
- __m128 u7 = _mm_loadu_ps(in+final_pos+offsets[7]);
- __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
- t0 = _mm256_insertf128_ps (t3, u0, 0);
- t0 = _mm256_insertf128_ps (t0, u4, 1);
- t1 = _mm256_insertf128_ps (t3, u1, 0);
- t1 = _mm256_insertf128_ps (t1, u5, 1);
- t2 = _mm256_insertf128_ps (t3, u2, 0);
- t2 = _mm256_insertf128_ps (t2, u6, 1);
- t3 = _mm256_insertf128_ps (t3, u3, 0);
- t3 = _mm256_insertf128_ps (t3, u7, 1);
- __m256 v0 = _mm256_shuffle_ps (t0, t1, 0x44);
- __m256 v1 = _mm256_shuffle_ps (t0, t1, 0xee);
- __m256 v2 = _mm256_shuffle_ps (t2, t3, 0x44);
- __m256 v3 = _mm256_shuffle_ps (t2, t3, 0xee);
- out[final_pos+0].data = _mm256_shuffle_ps (v0, v2, 0x88);
- out[final_pos+1].data = _mm256_shuffle_ps (v0, v2, 0xdd);
- out[final_pos+2].data = _mm256_shuffle_ps (v1, v3, 0x88);
- out[final_pos+3].data = _mm256_shuffle_ps (v1, v3, 0xdd);
- }
- else if (remainder > 0)
- for (unsigned int i=0; i<n_entries; ++i)
- for (unsigned int v=0; v<8; ++v)
- out[i][v] = in[offsets[v]+i];
+ for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<8; ++v)
+ out[i][v] = in[offsets[v]+i];
}
/**
- * Specialization for double and AVX.
+ * Specialization for float and AVX.
*/
template <>
inline
/**
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const double x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
double &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const double &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
#endif
return *this;
}
+
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 16 bytes, as opposed
* to casting a double address to VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const double *ptr)
{
data = _mm_loadu_pd (ptr);
* 16 bytes, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (double *ptr) const
{
_mm_storeu_pd (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const double *base_ptr,
+ const unsigned int *offsets)
+ {
+ for (unsigned int i=0; i<2; ++i)
+ *(reinterpret_cast<double *>(&data)+i) = base_ptr[offsets[i]];
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ double *base_ptr) const
+ {
+ Assert(offsets[0] != offsets[1],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ for (unsigned int i=0; i<2; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data)+i);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
const unsigned int *offsets,
VectorizedArray<double> *out)
{
- const unsigned int n_chunks = n_entries/2, remainder = n_entries%2;
+ const unsigned int n_chunks = n_entries/2;
for (unsigned int i=0; i<n_chunks; ++i)
{
__m128d u0 = _mm_loadu_pd(in+2*i+offsets[0]);
out[2*i+0].data = _mm_unpacklo_pd (u0, u1);
out[2*i+1].data = _mm_unpackhi_pd (u0, u1);
}
- if (remainder > 0)
- for (unsigned int i=0; i<n_entries; ++i)
- for (unsigned int v=0; v<2; ++v)
- out[i][v] = in[offsets[v]+i];
+ for (unsigned int i=2*n_chunks; i<n_entries; ++i)
+ for (unsigned int v=0; v<2; ++v)
+ out[i][v] = in[offsets[v]+i];
}
/**
- * Specialization for double and AVX.
+ * Specialization for double and SSE2.
*/
template <>
inline
_mm_storeu_pd(out+2*i+offsets[0], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[0]), res0));
_mm_storeu_pd(out+2*i+offsets[1], _mm_add_pd(_mm_loadu_pd(out+2*i+offsets[1]), res1));
}
- const unsigned int shift = n_chunks * 2;
- for (unsigned int i=shift; i<n_entries; ++i)
+ for (unsigned int i=2*n_chunks; i<n_entries; ++i)
for (unsigned int v=0; v<2; ++v)
out[offsets[v]+i] += in[i][v];
}
_mm_storeu_pd(out+2*i+offsets[0], res0);
_mm_storeu_pd(out+2*i+offsets[1], res1);
}
- const unsigned int shift = n_chunks * 2;
- for (unsigned int i=shift; i<n_entries; ++i)
+ for (unsigned int i=2*n_chunks; i<n_entries; ++i)
for (unsigned int v=0; v<2; ++v)
out[offsets[v]+i] = in[i][v];
}
* This function can be used to set all data fields to a given scalar.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator = (const float x)
{
/**
* Access operator.
*/
+ DEAL_II_ALWAYS_INLINE
float &
operator [] (const unsigned int comp)
{
/**
* Constant access operator.
*/
+ DEAL_II_ALWAYS_INLINE
const float &
operator [] (const unsigned int comp) const
{
/**
* Addition.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator += (const VectorizedArray &vec)
{
/**
* Subtraction.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator -= (const VectorizedArray &vec)
{
/**
* Multiplication.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator *= (const VectorizedArray &vec)
{
/**
* Division.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator /= (const VectorizedArray &vec)
{
* the given address. The memory need not be aligned by 16 bytes, as opposed
* to casting a float address to VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void load (const float *ptr)
{
data = _mm_loadu_ps (ptr);
* 16 bytes, as opposed to casting a float address to
* VectorizedArray<float>*.
*/
+ DEAL_II_ALWAYS_INLINE
void store (float *ptr) const
{
_mm_storeu_ps (ptr, data);
}
+ /**
+ * Load @p n_array_elements from memory into the calling class, starting at
+ * the given address and with given offsets, each entry from the offset
+ * providing one element of the vectorized array.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * this->operator[](v) = base_ptr[offsets[v]];
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void gather (const float *base_ptr,
+ const unsigned int *offsets)
+ {
+ for (unsigned int i=0; i<4; ++i)
+ *(reinterpret_cast<float *>(&data)+i) = base_ptr[offsets[i]];
+ }
+
+ /**
+ * Write the content of the calling class into memory in form of @p
+ * n_array_elements to the given address and the given offsets, filling the
+ * elements of the vectorized array into each offset.
+ *
+ * This operation corresponds to the following code (but uses a more
+ * efficient implementation in case the hardware allows for that):
+ * @code
+ * for (unsigned int v=0; v<VectorizedArray<Number>::n_array_elements; ++v)
+ * base_ptr[offsets[v]] = this->operator[](v);
+ * @endcode
+ */
+ DEAL_II_ALWAYS_INLINE
+ void scatter (const unsigned int *offsets,
+ float *base_ptr) const
+ {
+ for (unsigned int i=0; i<4; ++i)
+ for (unsigned int j=i+1; j<4; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ for (unsigned int i=0; i<4; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data)+i);
+ }
+
/**
* Actual data field. Since this class represents a POD data type, it
* remains public.
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_sqrt () const
{
* Return the absolute value of this field. Not for use in user code. Use
* abs(x) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_abs () const
{
* Return the component-wise maximum of this field and another one. Not for
* use in user code. Use max(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_max (const VectorizedArray &other) const
{
* Return the component-wise minimum of this field and another one. Not for
* use in user code. Use min(x,y) instead.
*/
+ DEAL_II_ALWAYS_INLINE
VectorizedArray
get_min (const VectorizedArray &other) const
{
const unsigned int *offsets,
VectorizedArray<float> *out)
{
- const unsigned int n_chunks = n_entries/4, remainder = n_entries%4;
+ const unsigned int n_chunks = n_entries/4;
for (unsigned int i=0; i<n_chunks; ++i)
{
__m128 u0 = _mm_loadu_ps(in+4*i+offsets[0]);
out[4*i+2].data = _mm_shuffle_ps (v1, v3, 0x88);
out[4*i+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
}
- if (remainder > 0 && n_chunks > 0)
- {
- // simple re-load all data in the last slot
- const unsigned int final_pos = n_chunks*4-4+remainder;
- Assert(final_pos+4 == n_entries, ExcInternalError());
- __m128 u0 = _mm_loadu_ps(in+final_pos+offsets[0]);
- __m128 u1 = _mm_loadu_ps(in+final_pos+offsets[1]);
- __m128 u2 = _mm_loadu_ps(in+final_pos+offsets[2]);
- __m128 u3 = _mm_loadu_ps(in+final_pos+offsets[3]);
- __m128 v0 = _mm_shuffle_ps (u0, u1, 0x44);
- __m128 v1 = _mm_shuffle_ps (u0, u1, 0xee);
- __m128 v2 = _mm_shuffle_ps (u2, u3, 0x44);
- __m128 v3 = _mm_shuffle_ps (u2, u3, 0xee);
- out[final_pos+0].data = _mm_shuffle_ps (v0, v2, 0x88);
- out[final_pos+1].data = _mm_shuffle_ps (v0, v2, 0xdd);
- out[final_pos+2].data = _mm_shuffle_ps (v1, v3, 0x88);
- out[final_pos+3].data = _mm_shuffle_ps (v1, v3, 0xdd);
- }
-    else if (remainder > 0)
-      for (unsigned int i=0; i<n_entries; ++i)
-        for (unsigned int v=0; v<4; ++v)
-          out[i][v] = in[offsets[v]+i];
+  for (unsigned int i=4*n_chunks; i<n_entries; ++i)
+    for (unsigned int v=0; v<4; ++v)
+      out[i][v] = in[offsets[v]+i];
}
/**
- * Specialization for double and AVX.
+ * Specialization for float and SSE2.
*/
template <>
inline
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator + (const VectorizedArray<Number> &u,
const VectorizedArray<Number> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator - (const VectorizedArray<Number> &u,
const VectorizedArray<Number> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator * (const VectorizedArray<Number> &u,
const VectorizedArray<Number> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator / (const VectorizedArray<Number> &u,
const VectorizedArray<Number> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator + (const Number &u,
const VectorizedArray<Number> &v)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator + (const double &u,
const VectorizedArray<float> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator + (const VectorizedArray<Number> &v,
const Number &u)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator + (const VectorizedArray<float> &v,
const double &u)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator - (const Number &u,
const VectorizedArray<Number> &v)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator - (const double &u,
const VectorizedArray<float> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator - (const VectorizedArray<Number> &v,
const Number &u)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator - (const VectorizedArray<float> &v,
const double &u)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator * (const Number &u,
const VectorizedArray<Number> &v)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator * (const double &u,
const VectorizedArray<float> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator * (const VectorizedArray<Number> &v,
const Number &u)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator * (const VectorizedArray<float> &v,
const double &u)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator / (const Number &u,
const VectorizedArray<Number> &v)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator / (const double &u,
const VectorizedArray<float> &v)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator / (const VectorizedArray<Number> &v,
const Number &u)
*
* @relates VectorizedArray
*/
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<float>
operator / (const VectorizedArray<float> &v,
const double &u)
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator + (const VectorizedArray<Number> &u)
{
* @relates VectorizedArray
*/
template <typename Number>
-inline
+inline DEAL_II_ALWAYS_INLINE
VectorizedArray<Number>
operator - (const VectorizedArray<Number> &u)
{