#ifndef DOXYGEN
-// for safety, also check that __AVX512F__ is defined in case the user manually
-// set some conflicting compile flags which prevent compilation
-
-# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
+# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
/**
- * Specialization of VectorizedArray class for double and AVX-512.
+ * Specialization for double and SSE2.
*/
template <>
-class VectorizedArray<double, 8>
- : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
+class VectorizedArray<double, 2>
+ : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
{
public:
/**
*/
template <typename U>
VectorizedArray(const std::initializer_list<U> &list)
- : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
+ : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
{}
/**
VectorizedArray &
operator=(const double x) &
{
- data = _mm512_set1_pd(x);
+ data = _mm_set1_pd(x);
return *this;
}
-
/**
* Assign a scalar to the current object. This overload is used for
* rvalue references; because it does not make sense to assign
double &
operator[](const unsigned int comp)
{
- AssertIndexRange(comp, 8);
+ AssertIndexRange(comp, 2);
return *(reinterpret_cast<double *>(&data) + comp);
}
const double &
operator[](const unsigned int comp) const
{
- AssertIndexRange(comp, 8);
+ AssertIndexRange(comp, 2);
return *(reinterpret_cast<const double *>(&data) + comp);
}
VectorizedArray &
operator+=(const VectorizedArray &vec)
{
- // if the compiler supports vector arithmetic, we can simply use +=
- // operator on the given data type. this allows the compiler to combine
- // additions with multiplication (fused multiply-add) if those
- // instructions are available. Otherwise, we need to use the built-in
- // intrinsic command for __m512d
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data += vec.data;
# else
- data = _mm512_add_pd(data, vec.data);
+ data = _mm_add_pd(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data -= vec.data;
# else
- data = _mm512_sub_pd(data, vec.data);
+ data = _mm_sub_pd(data, vec.data);
# endif
return *this;
}
+
/**
* Multiplication.
*/
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data *= vec.data;
# else
- data = _mm512_mul_pd(data, vec.data);
+ data = _mm_mul_pd(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data /= vec.data;
# else
- data = _mm512_div_pd(data, vec.data);
+ data = _mm_div_pd(data, vec.data);
# endif
return *this;
}
/**
- * Load size() data items from memory into the calling class, starting at
- * the given address. The memory need not be aligned by 64 bytes, as opposed
+   * Load @p size() data items from memory into the calling class, starting at
+ * the given address. The memory need not be aligned by 16 bytes, as opposed
* to casting a double address to VectorizedArray<double>*.
*/
DEAL_II_ALWAYS_INLINE
void
load(const double *ptr)
{
- data = _mm512_loadu_pd(ptr);
+ data = _mm_loadu_pd(ptr);
}
DEAL_II_ALWAYS_INLINE
void
load(const float *ptr)
{
- data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int i = 0; i < 2; ++i)
+ data[i] = ptr[i];
}
/**
* Write the content of the calling class into memory in form of @p
* size() to the given address. The memory need not be aligned by
- * 64 bytes, as opposed to casting a double address to
+ * 16 bytes, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
DEAL_II_ALWAYS_INLINE
void
store(double *ptr) const
{
- _mm512_storeu_pd(ptr, data);
+ _mm_storeu_pd(ptr, data);
}
DEAL_II_ALWAYS_INLINE
void
store(float *ptr) const
{
- _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
+ DEAL_II_OPENMP_SIMD_PRAGMA
+ for (unsigned int i = 0; i < 2; ++i)
+ ptr[i] = data[i];
}
/**
* @copydoc VectorizedArray<Number>::streaming_store()
- * @note Memory must be aligned by 64 bytes.
+ * @note Memory must be aligned by 16 bytes.
*/
DEAL_II_ALWAYS_INLINE
void
streaming_store(double *ptr) const
{
- Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
+ Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
ExcMessage("Memory not aligned"));
- _mm512_stream_pd(ptr, data);
+ _mm_stream_pd(ptr, data);
}
/**
void
gather(const double *base_ptr, const unsigned int *offsets)
{
- // unfortunately, there does not appear to be a 256 bit integer load, so
- // do it by some reinterpret casts here. this is allowed because the Intel
- // API allows aliasing between different vector types.
- const __m256 index_val =
- _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
- const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
-
- // work around a warning with gcc-12 about an uninitialized initial state
- // for gather by starting with a zero guess, even though all lanes will be
- // overwritten
- __m512d zero = {};
- __mmask8 mask = 0xFF;
-
- data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
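+  // SSE2 provides no gather instruction, so read the two lanes one by one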
+ for (unsigned int i = 0; i < 2; ++i)
+ *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
}
/**
void
scatter(const unsigned int *offsets, double *base_ptr) const
{
- for (unsigned int i = 0; i < 8; ++i)
- for (unsigned int j = i + 1; j < 8; ++j)
- Assert(offsets[i] != offsets[j],
- ExcMessage("Result of scatter undefined if two offset elements"
- " point to the same position"));
-
- // unfortunately, there does not appear to be a 256 bit integer load, so
- // do it by some reinterpret casts here. this is allowed because the Intel
- // API allows aliasing between different vector types.
- const __m256 index_val =
- _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
- const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
- _mm512_i32scatter_pd(base_ptr, index, data, 8);
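+  // likewise, scatter instructions only exist from AVX-512 on, so store the
+  // lanes one by one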
+ for (unsigned int i = 0; i < 2; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
}
/**
* this->data[i]$.
*/
double
- sum();
+ sum()
+ {
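+    // horizontal add: t1 holds data[1] in both lanes, so the low lane of
+    // data + t1 equals data[0] + data[1] and is extracted by _mm_cvtsd_f64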
+ __m128d t1 = _mm_unpackhi_pd(data, data);
+ __m128d t2 = _mm_add_pd(data, t1);
+ return _mm_cvtsd_f64(t2);
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
* enable interaction with external SIMD functionality, this member is
* declared public.
*/
- __m512d data;
+ __m128d data;
private:
- /**
- * Extract lower half of data field.
- */
- DEAL_II_ALWAYS_INLINE
- __m256d
- get_lower() const
- {
- return _mm512_castpd512_pd256(data);
- }
-
- /**
- * Extract upper half of data field.
- */
- DEAL_II_ALWAYS_INLINE
- __m256d
- get_upper() const
- {
- return _mm512_extractf64x4_pd(data, 1);
- }
-
/**
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
get_sqrt() const
{
VectorizedArray res;
- res.data = _mm512_sqrt_pd(data);
+ res.data = _mm_sqrt_pd(data);
return res;
}
VectorizedArray
get_abs() const
{
- // to compute the absolute value, perform bitwise andnot with -0. This
- // will leave all value and exponent bits unchanged but force the sign
- // value to +. Since there is no andnot for AVX512, we interpret the data
- // as 64 bit integers and do the andnot on those types (note that andnot
- // is a bitwise operation so the data type does not matter)
- __m512d mask = _mm512_set1_pd(-0.);
+    // To compute the absolute value, perform a bitwise andnot with -0. This
+    // will leave all value and exponent bits unchanged but force the sign
+    // bit to +.
+ __m128d mask = _mm_set1_pd(-0.);
VectorizedArray res;
- res.data = reinterpret_cast<__m512d>(
- _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
- reinterpret_cast<__m512i>(data)));
+ res.data = _mm_andnot_pd(mask, data);
return res;
}
get_max(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm512_max_pd(data, other.data);
+ res.data = _mm_max_pd(data, other.data);
return res;
}
get_min(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm512_min_pd(data, other.data);
+ res.data = _mm_min_pd(data, other.data);
return res;
}
/**
- * Specialization for double and AVX-512.
+ * Specialization for double and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const double * in,
const unsigned int * offsets,
- VectorizedArray<double, 8> *out)
+ VectorizedArray<double, 2> *out)
{
- // do not do full transpose because the code is long and will most
- // likely not pay off because many processors have two load units
- // (for the top 8 instructions) but only 1 permute unit (for the 8
- // shuffle/unpack instructions). rather start the transposition on the
- // vectorized array of half the size with 256 bits
- const unsigned int n_chunks = n_entries / 4;
+ const unsigned int n_chunks = n_entries / 2;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m512d t0, t1, t2, t3 = {};
-
- t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
- t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
- t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
- t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
- t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
- t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
- t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
- t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
-
- __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
- __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
- __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
- __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
- out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
- out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
- out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
- out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
+ __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
+ __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
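+      // 2x2 transpose: unpacklo pairs the first entry of the two loaded
+      // rows, unpackhi the second one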
+ out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
+ out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
}
- // remainder loop of work that does not divide by 4
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- out[i].gather(in + i, offsets);
+
+ // remainder loop of work that does not divide by 2
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[i][v] = in[offsets[v] + i];
}
/**
- * Specialization for double and AVX-512.
+ * Specialization for double and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
- const std::array<double *, 8> &in,
- VectorizedArray<double, 8> * out)
+ const std::array<double *, 2> &in,
+ VectorizedArray<double, 2> * out)
{
- const unsigned int n_chunks = n_entries / 4;
+ // see the comments in the vectorized_load_and_transpose above
+
+ const unsigned int n_chunks = n_entries / 2;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m512d t0, t1, t2, t3 = {};
-
- t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
- t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
- t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
- t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
- t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
- t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
- t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
- t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
-
- __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
- __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
- __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
- __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
- out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
- out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
- out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
- out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
+ __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
+ __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
+ out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
+ out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
}
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- gather(out[i], in, i);
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[i][v] = in[v][i];
}
/**
- * Specialization for double and AVX-512.
+ * Specialization for double and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
- const VectorizedArray<double, 8> *in,
+ const VectorizedArray<double, 2> *in,
const unsigned int * offsets,
double * out)
{
- // as for the load, we split the store operations into 256 bit units to
- // better balance between code size, shuffle instructions, and stores
- const unsigned int n_chunks = n_entries / 4;
- __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
- __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
- for (unsigned int i = 0; i < n_chunks; ++i)
+ const unsigned int n_chunks = n_entries / 2;
+ if (add_into)
{
- __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
- __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
- __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
- __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
- __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
- __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
- __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
- __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
- __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
- __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
- __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
- __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
- __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
- __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
- __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
- __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
-
- // Cannot use the same store instructions in both paths of the 'if'
- // because the compiler cannot know that there is no aliasing
- // between pointers
- if (add_into)
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
- _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
- res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
- _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
- res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
- _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
- res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
- _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
- res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
- _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
- res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
- _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
- res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
- _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
- res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
- _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
+ __m128d u0 = in[2 * i + 0].data;
+ __m128d u1 = in[2 * i + 1].data;
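+          // transpose back: res0/res1 hold the two consecutive values that
+          // go to offsets[0] and offsets[1], respectively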
+ __m128d res0 = _mm_unpacklo_pd(u0, u1);
+ __m128d res1 = _mm_unpackhi_pd(u0, u1);
+ _mm_storeu_pd(out + 2 * i + offsets[0],
+ _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
+ res0));
+ _mm_storeu_pd(out + 2 * i + offsets[1],
+ _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
+ res1));
}
- else
+ // remainder loop of work that does not divide by 2
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[offsets[v] + i] += in[i][v];
+ }
+ else
+ {
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
- _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
- _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
- _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
- _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
- _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
- _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
- _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
+ __m128d u0 = in[2 * i + 0].data;
+ __m128d u1 = in[2 * i + 1].data;
+ __m128d res0 = _mm_unpacklo_pd(u0, u1);
+ __m128d res1 = _mm_unpackhi_pd(u0, u1);
+ _mm_storeu_pd(out + 2 * i + offsets[0], res0);
+ _mm_storeu_pd(out + 2 * i + offsets[1], res1);
}
+ // remainder loop of work that does not divide by 2
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[offsets[v] + i] = in[i][v];
}
-
- // remainder loop of work that does not divide by 4
- if (add_into)
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[offsets[v] + i] += in[i][v];
- else
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[offsets[v] + i] = in[i][v];
}
/**
- * Specialization for double and AVX-512.
+ * Specialization for double and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
- const VectorizedArray<double, 8> *in,
- std::array<double *, 8> & out)
+ const VectorizedArray<double, 2> *in,
+ std::array<double *, 2> & out)
{
// see the comments in the vectorized_transpose_and_store above
- const unsigned int n_chunks = n_entries / 4;
- __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
- __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
- for (unsigned int i = 0; i < n_chunks; ++i)
+ const unsigned int n_chunks = n_entries / 2;
+ if (add_into)
{
- __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
- __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
- __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
- __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
- __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
- __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
- __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
- __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
- __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
- __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
- __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
- __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
- __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
- __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
- __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
- __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
-
- if (add_into)
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
- _mm256_storeu_pd(out[0] + 4 * i, res0);
- res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
- _mm256_storeu_pd(out[1] + 4 * i, res1);
- res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
- _mm256_storeu_pd(out[2] + 4 * i, res2);
- res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
- _mm256_storeu_pd(out[3] + 4 * i, res3);
- res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
- _mm256_storeu_pd(out[4] + 4 * i, res4);
- res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
- _mm256_storeu_pd(out[5] + 4 * i, res5);
- res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
- _mm256_storeu_pd(out[6] + 4 * i, res6);
- res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
- _mm256_storeu_pd(out[7] + 4 * i, res7);
+ __m128d u0 = in[2 * i + 0].data;
+ __m128d u1 = in[2 * i + 1].data;
+ __m128d res0 = _mm_unpacklo_pd(u0, u1);
+ __m128d res1 = _mm_unpackhi_pd(u0, u1);
+ _mm_storeu_pd(out[0] + 2 * i,
+ _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
+ _mm_storeu_pd(out[1] + 2 * i,
+ _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
}
- else
+
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[v][i] += in[i][v];
+ }
+ else
+ {
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- _mm256_storeu_pd(out[0] + 4 * i, res0);
- _mm256_storeu_pd(out[1] + 4 * i, res1);
- _mm256_storeu_pd(out[2] + 4 * i, res2);
- _mm256_storeu_pd(out[3] + 4 * i, res3);
- _mm256_storeu_pd(out[4] + 4 * i, res4);
- _mm256_storeu_pd(out[5] + 4 * i, res5);
- _mm256_storeu_pd(out[6] + 4 * i, res6);
- _mm256_storeu_pd(out[7] + 4 * i, res7);
+ __m128d u0 = in[2 * i + 0].data;
+ __m128d u1 = in[2 * i + 1].data;
+ __m128d res0 = _mm_unpacklo_pd(u0, u1);
+ __m128d res1 = _mm_unpackhi_pd(u0, u1);
+ _mm_storeu_pd(out[0] + 2 * i, res0);
+ _mm_storeu_pd(out[1] + 2 * i, res1);
}
- }
- if (add_into)
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[v][i] += in[i][v];
- else
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[v][i] = in[i][v];
+ for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 2; ++v)
+ out[v][i] = in[i][v];
+ }
}
/**
- * Specialization for float and AVX512.
+ * Specialization for float and SSE2.
*/
template <>
-class VectorizedArray<float, 16>
- : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
+class VectorizedArray<float, 4>
+ : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
{
public:
/**
*/
using value_type = float;
+ /**
+ * This function can be used to set all data fields to a given scalar.
+ */
+
/**
* Default empty constructor, leaving the data in an uninitialized state
* similar to float/double.
*/
template <typename U>
VectorizedArray(const std::initializer_list<U> &list)
- : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
+ : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
{}
- /**
- * This function can be used to set all data fields to a given scalar.
- */
DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator=(const float x) &
{
- data = _mm512_set1_ps(x);
+ data = _mm_set1_ps(x);
return *this;
}
float &
operator[](const unsigned int comp)
{
- AssertIndexRange(comp, 16);
+ AssertIndexRange(comp, 4);
return *(reinterpret_cast<float *>(&data) + comp);
}
const float &
operator[](const unsigned int comp) const
{
- AssertIndexRange(comp, 16);
+ AssertIndexRange(comp, 4);
return *(reinterpret_cast<const float *>(&data) + comp);
}
VectorizedArray &
operator+=(const VectorizedArray &vec)
{
- // if the compiler supports vector arithmetic, we can simply use +=
- // operator on the given data type. this allows the compiler to combine
- // additions with multiplication (fused multiply-add) if those
- // instructions are available. Otherwise, we need to use the built-in
- // intrinsic command for __m512d
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data += vec.data;
# else
- data = _mm512_add_ps(data, vec.data);
+ data = _mm_add_ps(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data -= vec.data;
# else
- data = _mm512_sub_ps(data, vec.data);
+ data = _mm_sub_ps(data, vec.data);
# endif
return *this;
}
+
/**
* Multiplication.
*/
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data *= vec.data;
# else
- data = _mm512_mul_ps(data, vec.data);
+ data = _mm_mul_ps(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data /= vec.data;
# else
- data = _mm512_div_ps(data, vec.data);
+ data = _mm_div_ps(data, vec.data);
# endif
return *this;
}
/**
* Load @p size() from memory into the calling class, starting at
- * the given address. The memory need not be aligned by 64 bytes, as opposed
+ * the given address. The memory need not be aligned by 16 bytes, as opposed
* to casting a float address to VectorizedArray<float>*.
*/
DEAL_II_ALWAYS_INLINE
void
load(const float *ptr)
{
- data = _mm512_loadu_ps(ptr);
+ data = _mm_loadu_ps(ptr);
}
/**
* Write the content of the calling class into memory in form of @p
* size() to the given address. The memory need not be aligned by
- * 64 bytes, as opposed to casting a float address to
+ * 16 bytes, as opposed to casting a float address to
* VectorizedArray<float>*.
*/
DEAL_II_ALWAYS_INLINE
void
store(float *ptr) const
{
- _mm512_storeu_ps(ptr, data);
+ _mm_storeu_ps(ptr, data);
}
/**
* @copydoc VectorizedArray<Number>::streaming_store()
- * @note Memory must be aligned by 64 bytes.
+ * @note Memory must be aligned by 16 bytes.
*/
DEAL_II_ALWAYS_INLINE
void
streaming_store(float *ptr) const
{
- Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
+ Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
ExcMessage("Memory not aligned"));
- _mm512_stream_ps(ptr, data);
+ _mm_stream_ps(ptr, data);
}
/**
void
gather(const float *base_ptr, const unsigned int *offsets)
{
- // unfortunately, there does not appear to be a 512 bit integer load, so
- // do it by some reinterpret casts here. this is allowed because the Intel
- // API allows aliasing between different vector types.
- const __m512 index_val =
- _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
- const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
-
- // work around a warning with gcc-12 about an uninitialized initial state
- // for gather by starting with a zero guess, even though all lanes will be
- // overwritten
- __m512 zero = {};
- __mmask16 mask = 0xFFFF;
-
- data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
+ for (unsigned int i = 0; i < 4; ++i)
+ *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
}
/**
void
scatter(const unsigned int *offsets, float *base_ptr) const
{
- for (unsigned int i = 0; i < 16; ++i)
- for (unsigned int j = i + 1; j < 16; ++j)
- Assert(offsets[i] != offsets[j],
- ExcMessage("Result of scatter undefined if two offset elements"
- " point to the same position"));
-
- // unfortunately, there does not appear to be a 512 bit integer load, so
- // do it by some reinterpret casts here. this is allowed because the Intel
- // API allows aliasing between different vector types.
- const __m512 index_val =
- _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
- const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
- _mm512_i32scatter_ps(base_ptr, index, data, 4);
+ for (unsigned int i = 0; i < 4; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
}
/**
* this->data[i]$.
*/
float
- sum();
+ sum()
+ {
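+    // horizontal add: fold the upper two lanes onto the lower two, then add
+    // the two remaining partial sums and extract lane 0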
+ __m128 t1 = _mm_movehl_ps(data, data);
+ __m128 t2 = _mm_add_ps(data, t1);
+ __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
+ __m128 t4 = _mm_add_ss(t2, t3);
+ return _mm_cvtss_f32(t4);
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
* enable interaction with external SIMD functionality, this member is
* declared public.
*/
- __m512 data;
+ __m128 data;
private:
- /**
- * Extract lower half of data field.
- */
- DEAL_II_ALWAYS_INLINE
- __m256
- get_lower() const
- {
- return _mm512_castps512_ps256(data);
- }
-
- /**
- * Extract upper half of data field.
- */
- DEAL_II_ALWAYS_INLINE
- __m256
- get_upper() const
- {
- return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
- }
-
/**
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
get_sqrt() const
{
VectorizedArray res;
- res.data = _mm512_sqrt_ps(data);
+ res.data = _mm_sqrt_ps(data);
return res;
}
{
// to compute the absolute value, perform bitwise andnot with -0. This
// will leave all value and exponent bits unchanged but force the sign
- // value to +. Since there is no andnot for AVX512, we interpret the data
- // as 32 bit integers and do the andnot on those types (note that andnot
- // is a bitwise operation so the data type does not matter)
- __m512 mask = _mm512_set1_ps(-0.f);
+ // value to +.
+ __m128 mask = _mm_set1_ps(-0.f);
VectorizedArray res;
- res.data = reinterpret_cast<__m512>(
- _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
- reinterpret_cast<__m512i>(data)));
+ res.data = _mm_andnot_ps(mask, data);
return res;
}
get_max(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm512_max_ps(data, other.data);
+ res.data = _mm_max_ps(data, other.data);
return res;
}
get_min(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm512_min_ps(data, other.data);
+ res.data = _mm_min_ps(data, other.data);
return res;
}
/**
- * Specialization for float and AVX-512.
+ * Specialization for float and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_load_and_transpose(const unsigned int n_entries,
- const float * in,
- const unsigned int * offsets,
- VectorizedArray<float, 16> *out)
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const float * in,
+ const unsigned int * offsets,
+ VectorizedArray<float, 4> *out)
{
- // Similar to the double case, we perform the work on smaller entities. In
- // this case, we start from 128 bit arrays and insert them into a full 512
- // bit index. This reduces the code size and register pressure because we do
- // shuffles on 4 numbers rather than 16.
const unsigned int n_chunks = n_entries / 4;
-
- // To avoid warnings about uninitialized variables, need to initialize one
- // variable to a pre-exisiting value in out, which will never get used in
- // the end. Keep the initialization outside the loop because of a bug in
- // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
- // case t3 is initialized to zero (inside/outside of loop), see
- // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
- __m512 t0, t1, t2, t3;
- if (n_chunks > 0)
- t3 = out[0].data;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
- t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
- t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
-
- __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
- __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
- __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
- __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
-
- out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
- out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
- out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
- out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
+ __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
+ __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
+ __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
+ __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
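+      // 4x4 transpose: 0x44/0xee interleave the 64-bit halves of two rows,
+      // 0x88/0xdd then pick the even/odd elements to form the columns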
+ __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
+ __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
+ __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
+ __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
+ out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
+ out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
+ out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
+ out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
}
// remainder loop of work that does not divide by 4
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- out[i].gather(in + i, offsets);
+ for (unsigned int v = 0; v < 4; ++v)
+ out[i][v] = in[offsets[v] + i];
}
/**
- * Specialization for float and AVX-512.
+ * Specialization for float and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_load_and_transpose(const unsigned int n_entries,
- const std::array<float *, 16> &in,
- VectorizedArray<float, 16> * out)
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const std::array<float *, 4> &in,
+ VectorizedArray<float, 4> * out)
{
// see the comments in the vectorized_load_and_transpose above
const unsigned int n_chunks = n_entries / 4;
-
- __m512 t0, t1, t2, t3;
- if (n_chunks > 0)
- t3 = out[0].data;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
- t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
- t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
- t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
- t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
- t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
- t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
-
- __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
- __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
- __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
- __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
-
- out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
- out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
- out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
- out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
+ __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
+ __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
+ __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
+ __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
+ __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
+ __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
+ __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
+ __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
+ out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
+ out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
+ out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
+ out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
}
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- gather(out[i], in, i);
+ for (unsigned int v = 0; v < 4; ++v)
+ out[i][v] = in[v][i];
}
/**
- * Specialization for float and AVX-512.
+ * Specialization for float and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_transpose_and_store(const bool add_into,
- const unsigned int n_entries,
- const VectorizedArray<float, 16> *in,
- const unsigned int * offsets,
- float * out)
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<float, 4> *in,
+ const unsigned int * offsets,
+ float * out)
{
const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
- __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
- __m512 t2 =
- _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
- __m512 t3 =
- _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
- __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
- __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
- __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
- __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
-
- __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
- __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
- __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
- __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
- __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
- __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
- __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
- __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
- __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
- __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
- __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
- __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
- __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
- __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
- __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
- __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
+ __m128 u0 = in[4 * i + 0].data;
+ __m128 u1 = in[4 * i + 1].data;
+ __m128 u2 = in[4 * i + 2].data;
+ __m128 u3 = in[4 * i + 3].data;
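+      // transpose back: after the two shuffle stages, u0..u3 hold the four
+      // consecutive values destined for offsets[0] to offsets[3]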
+ __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
+ __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
+ __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
+ __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
+ u0 = _mm_shuffle_ps(t0, t2, 0x88);
+ u1 = _mm_shuffle_ps(t0, t2, 0xdd);
+ u2 = _mm_shuffle_ps(t1, t3, 0x88);
+ u3 = _mm_shuffle_ps(t1, t3, 0xdd);
// Cannot use the same store instructions in both paths of the 'if'
// because the compiler cannot know that there is no aliasing between
// pointers
if (add_into)
{
- res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
- _mm_storeu_ps(out + 4 * i + offsets[0], res0);
- res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
- _mm_storeu_ps(out + 4 * i + offsets[1], res1);
- res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
- _mm_storeu_ps(out + 4 * i + offsets[2], res2);
- res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
- _mm_storeu_ps(out + 4 * i + offsets[3], res3);
- res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
- _mm_storeu_ps(out + 4 * i + offsets[4], res4);
- res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
- _mm_storeu_ps(out + 4 * i + offsets[5], res5);
- res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
- _mm_storeu_ps(out + 4 * i + offsets[6], res6);
- res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
- _mm_storeu_ps(out + 4 * i + offsets[7], res7);
- res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
- _mm_storeu_ps(out + 4 * i + offsets[8], res8);
- res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
- _mm_storeu_ps(out + 4 * i + offsets[9], res9);
- res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
- _mm_storeu_ps(out + 4 * i + offsets[10], res10);
- res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
- _mm_storeu_ps(out + 4 * i + offsets[11], res11);
- res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
- _mm_storeu_ps(out + 4 * i + offsets[12], res12);
- res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
- _mm_storeu_ps(out + 4 * i + offsets[13], res13);
- res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
- _mm_storeu_ps(out + 4 * i + offsets[14], res14);
- res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
- _mm_storeu_ps(out + 4 * i + offsets[15], res15);
+ u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
+ _mm_storeu_ps(out + 4 * i + offsets[0], u0);
+ u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
+ _mm_storeu_ps(out + 4 * i + offsets[1], u1);
+ u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
+ _mm_storeu_ps(out + 4 * i + offsets[2], u2);
+ u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
+ _mm_storeu_ps(out + 4 * i + offsets[3], u3);
}
else
{
- _mm_storeu_ps(out + 4 * i + offsets[0], res0);
- _mm_storeu_ps(out + 4 * i + offsets[1], res1);
- _mm_storeu_ps(out + 4 * i + offsets[2], res2);
- _mm_storeu_ps(out + 4 * i + offsets[3], res3);
- _mm_storeu_ps(out + 4 * i + offsets[4], res4);
- _mm_storeu_ps(out + 4 * i + offsets[5], res5);
- _mm_storeu_ps(out + 4 * i + offsets[6], res6);
- _mm_storeu_ps(out + 4 * i + offsets[7], res7);
- _mm_storeu_ps(out + 4 * i + offsets[8], res8);
- _mm_storeu_ps(out + 4 * i + offsets[9], res9);
- _mm_storeu_ps(out + 4 * i + offsets[10], res10);
- _mm_storeu_ps(out + 4 * i + offsets[11], res11);
- _mm_storeu_ps(out + 4 * i + offsets[12], res12);
- _mm_storeu_ps(out + 4 * i + offsets[13], res13);
- _mm_storeu_ps(out + 4 * i + offsets[14], res14);
- _mm_storeu_ps(out + 4 * i + offsets[15], res15);
+ _mm_storeu_ps(out + 4 * i + offsets[0], u0);
+ _mm_storeu_ps(out + 4 * i + offsets[1], u1);
+ _mm_storeu_ps(out + 4 * i + offsets[2], u2);
+ _mm_storeu_ps(out + 4 * i + offsets[3], u3);
}
}
// remainder loop of work that does not divide by 4
if (add_into)
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 16; ++v)
+ for (unsigned int v = 0; v < 4; ++v)
out[offsets[v] + i] += in[i][v];
else
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 16; ++v)
+ for (unsigned int v = 0; v < 4; ++v)
out[offsets[v] + i] = in[i][v];
}
/**
- * Specialization for float and AVX-512.
+ * Specialization for float and SSE2.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_transpose_and_store(const bool add_into,
- const unsigned int n_entries,
- const VectorizedArray<float, 16> *in,
- std::array<float *, 16> & out)
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<float, 4> *in,
+ std::array<float *, 4> & out)
{
// see the comments in the vectorized_transpose_and_store above
const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
- __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
- __m512 t2 =
- _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
- __m512 t3 =
- _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
- __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
- __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
- __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
- __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
-
- __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
- __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
- __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
- __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
- __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
- __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
- __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
- __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
- __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
- __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
- __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
- __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
- __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
- __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
- __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
- __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
+ __m128 u0 = in[4 * i + 0].data;
+ __m128 u1 = in[4 * i + 1].data;
+ __m128 u2 = in[4 * i + 2].data;
+ __m128 u3 = in[4 * i + 3].data;
+ __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
+ __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
+ __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
+ __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
+ u0 = _mm_shuffle_ps(t0, t2, 0x88);
+ u1 = _mm_shuffle_ps(t0, t2, 0xdd);
+ u2 = _mm_shuffle_ps(t1, t3, 0x88);
+ u3 = _mm_shuffle_ps(t1, t3, 0xdd);
if (add_into)
{
- res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
- _mm_storeu_ps(out[0] + 4 * i, res0);
- res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
- _mm_storeu_ps(out[1] + 4 * i, res1);
- res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
- _mm_storeu_ps(out[2] + 4 * i, res2);
- res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
- _mm_storeu_ps(out[3] + 4 * i, res3);
- res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
- _mm_storeu_ps(out[4] + 4 * i, res4);
- res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
- _mm_storeu_ps(out[5] + 4 * i, res5);
- res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
- _mm_storeu_ps(out[6] + 4 * i, res6);
- res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
- _mm_storeu_ps(out[7] + 4 * i, res7);
- res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
- _mm_storeu_ps(out[8] + 4 * i, res8);
- res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
- _mm_storeu_ps(out[9] + 4 * i, res9);
- res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
- _mm_storeu_ps(out[10] + 4 * i, res10);
- res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
- _mm_storeu_ps(out[11] + 4 * i, res11);
- res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
- _mm_storeu_ps(out[12] + 4 * i, res12);
- res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
- _mm_storeu_ps(out[13] + 4 * i, res13);
- res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
- _mm_storeu_ps(out[14] + 4 * i, res14);
- res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
- _mm_storeu_ps(out[15] + 4 * i, res15);
+ u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
+ _mm_storeu_ps(out[0] + 4 * i, u0);
+ u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
+ _mm_storeu_ps(out[1] + 4 * i, u1);
+ u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
+ _mm_storeu_ps(out[2] + 4 * i, u2);
+ u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
+ _mm_storeu_ps(out[3] + 4 * i, u3);
}
else
{
- _mm_storeu_ps(out[0] + 4 * i, res0);
- _mm_storeu_ps(out[1] + 4 * i, res1);
- _mm_storeu_ps(out[2] + 4 * i, res2);
- _mm_storeu_ps(out[3] + 4 * i, res3);
- _mm_storeu_ps(out[4] + 4 * i, res4);
- _mm_storeu_ps(out[5] + 4 * i, res5);
- _mm_storeu_ps(out[6] + 4 * i, res6);
- _mm_storeu_ps(out[7] + 4 * i, res7);
- _mm_storeu_ps(out[8] + 4 * i, res8);
- _mm_storeu_ps(out[9] + 4 * i, res9);
- _mm_storeu_ps(out[10] + 4 * i, res10);
- _mm_storeu_ps(out[11] + 4 * i, res11);
- _mm_storeu_ps(out[12] + 4 * i, res12);
- _mm_storeu_ps(out[13] + 4 * i, res13);
- _mm_storeu_ps(out[14] + 4 * i, res14);
- _mm_storeu_ps(out[15] + 4 * i, res15);
+ _mm_storeu_ps(out[0] + 4 * i, u0);
+ _mm_storeu_ps(out[1] + 4 * i, u1);
+ _mm_storeu_ps(out[2] + 4 * i, u2);
+ _mm_storeu_ps(out[3] + 4 * i, u3);
}
}
if (add_into)
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 16; ++v)
+ for (unsigned int v = 0; v < 4; ++v)
out[v][i] += in[i][v];
else
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 16; ++v)
+ for (unsigned int v = 0; v < 4; ++v)
out[v][i] = in[i][v];
}
-# endif
+
+
+# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
* this->data[i]$.
*/
double
- sum();
+ sum()
+ {
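+    // add the lower and upper 128-bit halves and reuse the SSE2 reduction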
+ VectorizedArray<double, 2> t1;
+ t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
+ return t1.sum();
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
* this->data[i]$.
*/
float
- sum();
+ sum()
+ {
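+    // same idea as for double: add the two 128-bit halves, then let the
+    // SSE2 variant finish the reduction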
+ VectorizedArray<float, 4> t1;
+ t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
+ return t1.sum();
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
# endif
-# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
+// for safety, also check that __AVX512F__ is defined in case the user manually
+// set some conflicting compile flags which prevent compilation
+
+# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
/**
- * Specialization for double and SSE2.
+ * Specialization of VectorizedArray class for double and AVX-512.
*/
template <>
-class VectorizedArray<double, 2>
- : public VectorizedArrayBase<VectorizedArray<double, 2>, 2>
+class VectorizedArray<double, 8>
+ : public VectorizedArrayBase<VectorizedArray<double, 8>, 8>
{
public:
/**
*/
template <typename U>
VectorizedArray(const std::initializer_list<U> &list)
- : VectorizedArrayBase<VectorizedArray<double, 2>, 2>(list)
+ : VectorizedArrayBase<VectorizedArray<double, 8>, 8>(list)
{}
/**
VectorizedArray &
operator=(const double x) &
{
- data = _mm_set1_pd(x);
+ data = _mm512_set1_pd(x);
return *this;
}
+
/**
* Assign a scalar to the current object. This overload is used for
* rvalue references; because it does not make sense to assign
double &
operator[](const unsigned int comp)
{
- AssertIndexRange(comp, 2);
+ AssertIndexRange(comp, 8);
return *(reinterpret_cast<double *>(&data) + comp);
}
const double &
operator[](const unsigned int comp) const
{
- AssertIndexRange(comp, 2);
+ AssertIndexRange(comp, 8);
return *(reinterpret_cast<const double *>(&data) + comp);
}
VectorizedArray &
operator+=(const VectorizedArray &vec)
{
+    // If the compiler supports vector arithmetic, we can simply use the +=
+    // operator on the given data type. This allows the compiler to combine
+    // additions with multiplications (fused multiply-add) if those
+    // instructions are available. Otherwise, we need to use the built-in
+    // intrinsic for __m512d.
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data += vec.data;
# else
- data = _mm_add_pd(data, vec.data);
+ data = _mm512_add_pd(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data -= vec.data;
# else
- data = _mm_sub_pd(data, vec.data);
+ data = _mm512_sub_pd(data, vec.data);
# endif
return *this;
}
-
/**
* Multiplication.
*/
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data *= vec.data;
# else
- data = _mm_mul_pd(data, vec.data);
+ data = _mm512_mul_pd(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data /= vec.data;
# else
- data = _mm_div_pd(data, vec.data);
+ data = _mm512_div_pd(data, vec.data);
# endif
return *this;
}
/**
- * Load @p size() from memory into the calling class, starting at
- * the given address. The memory need not be aligned by 16 bytes, as opposed
+ * Load size() data items from memory into the calling class, starting at
+ * the given address. The memory need not be aligned by 64 bytes, as opposed
* to casting a double address to VectorizedArray<double>*.
*/
DEAL_II_ALWAYS_INLINE
void
load(const double *ptr)
{
- data = _mm_loadu_pd(ptr);
+ data = _mm512_loadu_pd(ptr);
}
DEAL_II_ALWAYS_INLINE
void
load(const float *ptr)
{
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int i = 0; i < 2; ++i)
- data[i] = ptr[i];
+ data = _mm512_cvtps_pd(_mm256_loadu_ps(ptr));
}
/**
* Write the content of the calling class into memory in form of @p
* size() to the given address. The memory need not be aligned by
- * 16 bytes, as opposed to casting a double address to
+ * 64 bytes, as opposed to casting a double address to
* VectorizedArray<double>*.
*/
DEAL_II_ALWAYS_INLINE
void
store(double *ptr) const
{
- _mm_storeu_pd(ptr, data);
+ _mm512_storeu_pd(ptr, data);
}
DEAL_II_ALWAYS_INLINE
void
store(float *ptr) const
{
- DEAL_II_OPENMP_SIMD_PRAGMA
- for (unsigned int i = 0; i < 2; ++i)
- ptr[i] = data[i];
+ _mm256_storeu_ps(ptr, _mm512_cvtpd_ps(data));
}
/**
* @copydoc VectorizedArray<Number>::streaming_store()
- * @note Memory must be aligned by 16 bytes.
+ * @note Memory must be aligned by 64 bytes.
*/
DEAL_II_ALWAYS_INLINE
void
streaming_store(double *ptr) const
{
- Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
+ Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
ExcMessage("Memory not aligned"));
- _mm_stream_pd(ptr, data);
+ _mm512_stream_pd(ptr, data);
}
/**
void
gather(const double *base_ptr, const unsigned int *offsets)
{
- for (unsigned int i = 0; i < 2; ++i)
- *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
+    // Unfortunately, there does not appear to be a 256-bit integer load, so
+    // do it by some reinterpret casts here. This is allowed because the Intel
+    // API allows aliasing between different vector types.
+ const __m256 index_val =
+ _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+
+ // work around a warning with gcc-12 about an uninitialized initial state
+ // for gather by starting with a zero guess, even though all lanes will be
+ // overwritten
+ __m512d zero = {};
+ __mmask8 mask = 0xFF;
+
+ data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
}
/**
void
scatter(const unsigned int *offsets, double *base_ptr) const
{
- for (unsigned int i = 0; i < 2; ++i)
- base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
+ for (unsigned int i = 0; i < 8; ++i)
+ for (unsigned int j = i + 1; j < 8; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+    // Unfortunately, there does not appear to be a 256-bit integer load, so
+    // do it by some reinterpret casts here. This is allowed because the Intel
+    // API allows aliasing between different vector types.
+ const __m256 index_val =
+ _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+ _mm512_i32scatter_pd(base_ptr, index, data, 8);
}
/**
* this->data[i]$.
*/
double
- sum();
+ sum()
+ {
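+    // add the lower and upper 256-bit halves and reuse the AVX reduction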
+ VectorizedArray<double, 4> t1;
+ t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
+ return t1.sum();
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
* enable interaction with external SIMD functionality, this member is
* declared public.
*/
- __m128d data;
+ __m512d data;
private:
+ /**
+ * Extract lower half of data field.
+ */
+ DEAL_II_ALWAYS_INLINE
+ __m256d
+ get_lower() const
+ {
+ return _mm512_castpd512_pd256(data);
+ }
+
+ /**
+ * Extract upper half of data field.
+ */
+ DEAL_II_ALWAYS_INLINE
+ __m256d
+ get_upper() const
+ {
+ return _mm512_extractf64x4_pd(data, 1);
+ }
+
/**
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
get_sqrt() const
{
VectorizedArray res;
- res.data = _mm_sqrt_pd(data);
+ res.data = _mm512_sqrt_pd(data);
return res;
}
VectorizedArray
get_abs() const
{
- // to compute the absolute value, perform
- // bitwise andnot with -0. This will leave all
- // value and exponent bits unchanged but force
- // the sign value to +.
- __m128d mask = _mm_set1_pd(-0.);
+    // To compute the absolute value, perform a bitwise andnot with -0. This
+    // will leave all value and exponent bits unchanged but force the sign
+    // bit to +. Since there is no andnot for AVX-512, we interpret the data
+    // as 64-bit integers and do the andnot on those types (note that andnot
+    // is a bitwise operation, so the data type does not matter).
+ __m512d mask = _mm512_set1_pd(-0.);
VectorizedArray res;
- res.data = _mm_andnot_pd(mask, data);
+ res.data = reinterpret_cast<__m512d>(
+ _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
+ reinterpret_cast<__m512i>(data)));
return res;
}
get_max(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm_max_pd(data, other.data);
+ res.data = _mm512_max_pd(data, other.data);
return res;
}
get_min(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm_min_pd(data, other.data);
+ res.data = _mm512_min_pd(data, other.data);
return res;
}
/**
- * Specialization for double and SSE2.
+ * Specialization for double and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const double * in,
const unsigned int * offsets,
- VectorizedArray<double, 2> *out)
+ VectorizedArray<double, 8> *out)
{
- const unsigned int n_chunks = n_entries / 2;
+  // Do not do a full transpose because the code is long and will most
+  // likely not pay off: many processors have two load units (for the top 8
+  // instructions) but only one permute unit (for the 8 shuffle/unpack
+  // instructions). Rather, start the transposition on vectorized arrays of
+  // half the size, i.e., 256 bits.
+ const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128d u0 = _mm_loadu_pd(in + 2 * i + offsets[0]);
- __m128d u1 = _mm_loadu_pd(in + 2 * i + offsets[1]);
- out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
- out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
- }
+ __m512d t0, t1, t2, t3 = {};
- // remainder loop of work that does not divide by 2
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[i][v] = in[offsets[v] + i];
+ t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
+ t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
+ t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
+ t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
+ t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
+ t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
+
+ __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
+ __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
+ __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
+ __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
+ out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
+ out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
+ out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
+ out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
+ }
+ // remainder loop of work that does not divide by 4
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ out[i].gather(in + i, offsets);
}
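+
+ // Illustrative usage sketch (hypothetical names, not part of the interface):
+ // to bring n entries from 8 rows of a row-major array `src` with leading
+ // dimension `ld` into struct-of-arrays form, one could write
+ //   unsigned int offsets[8];
+ //   for (unsigned int v = 0; v < 8; ++v)
+ //     offsets[v] = v * ld;
+ //   vectorized_load_and_transpose(n, src, offsets, out);
+ // after which out[i][v] == src[v * ld + i] for i < n.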
/**
- * Specialization for double and SSE2.
+ * Specialization for double and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
- const std::array<double *, 2> &in,
- VectorizedArray<double, 2> * out)
+ const std::array<double *, 8> &in,
+ VectorizedArray<double, 8> * out)
{
- // see the comments in the vectorized_load_and_transpose above
-
- const unsigned int n_chunks = n_entries / 2;
+ const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128d u0 = _mm_loadu_pd(in[0] + 2 * i);
- __m128d u1 = _mm_loadu_pd(in[1] + 2 * i);
- out[2 * i + 0].data = _mm_unpacklo_pd(u0, u1);
- out[2 * i + 1].data = _mm_unpackhi_pd(u0, u1);
+ __m512d t0, t1, t2, t3 = {};
+
+ t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[0] + 4 * i), 0);
+ t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in[2] + 4 * i), 1);
+ t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[1] + 4 * i), 0);
+ t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in[3] + 4 * i), 1);
+ t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[4] + 4 * i), 0);
+ t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in[6] + 4 * i), 1);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[5] + 4 * i), 0);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in[7] + 4 * i), 1);
+
+ __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
+ __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
+ __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
+ __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
+ out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
+ out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
+ out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
+ out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
}
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[i][v] = in[v][i];
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ gather(out[i], in, i);
}
/**
- * Specialization for double and SSE2.
+ * Specialization for double and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
- const VectorizedArray<double, 2> *in,
+ const VectorizedArray<double, 8> *in,
const unsigned int * offsets,
double * out)
{
- const unsigned int n_chunks = n_entries / 2;
- if (add_into)
+ // as for the load, we split the store operations into 256 bit units to
+ // better balance code size, shuffle instructions, and stores
+ const unsigned int n_chunks = n_entries / 4;
+ __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
+ __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
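+ // _mm512_permutex2var_pd picks dst[j] = idx[j] < 8 ? a[idx[j]] : b[idx[j]-8],
+ // so mask1 selects {a0,a1,b0,b1,a4,a5,b4,b5} and mask2 selects
+ // {a2,a3,b2,b3,a6,a7,b6,b7}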
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- for (unsigned int i = 0; i < n_chunks; ++i)
+ __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
+ __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
+ __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
+ __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
+ __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
+ __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
+ __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
+ __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
+ __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
+ __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
+ __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
+ __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
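+ // at this point, res<v> holds in[4*i + 0..3][v], i.e. the four consecutive
+ // doubles that belong at out + offsets[v] + 4*i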
+
+ // Cannot use the same store instructions in both paths of the 'if'
+ // because the compiler cannot know that there is no aliasing
+ // between pointers
+ if (add_into)
{
- __m128d u0 = in[2 * i + 0].data;
- __m128d u1 = in[2 * i + 1].data;
- __m128d res0 = _mm_unpacklo_pd(u0, u1);
- __m128d res1 = _mm_unpackhi_pd(u0, u1);
- _mm_storeu_pd(out + 2 * i + offsets[0],
- _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[0]),
- res0));
- _mm_storeu_pd(out + 2 * i + offsets[1],
- _mm_add_pd(_mm_loadu_pd(out + 2 * i + offsets[1]),
- res1));
+ res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
+ _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
+ res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
+ _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
+ res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
+ _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
+ res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
+ _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
+ res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
+ _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
+ res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
+ _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
+ res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
+ _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
+ res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
+ _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
}
- // remainder loop of work that does not divide by 2
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[offsets[v] + i] += in[i][v];
- }
- else
- {
- for (unsigned int i = 0; i < n_chunks; ++i)
+ else
{
- __m128d u0 = in[2 * i + 0].data;
- __m128d u1 = in[2 * i + 1].data;
- __m128d res0 = _mm_unpacklo_pd(u0, u1);
- __m128d res1 = _mm_unpackhi_pd(u0, u1);
- _mm_storeu_pd(out + 2 * i + offsets[0], res0);
- _mm_storeu_pd(out + 2 * i + offsets[1], res1);
+ _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
+ _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
+ _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
+ _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
+ _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
+ _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
+ _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
+ _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
}
- // remainder loop of work that does not divide by 2
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[offsets[v] + i] = in[i][v];
}
+
+ // remainder loop of work that does not divide by 4
+ if (add_into)
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[offsets[v] + i] += in[i][v];
+ else
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[offsets[v] + i] = in[i][v];
}
/**
- * Specialization for double and SSE2.
+ * Specialization for double and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
- const VectorizedArray<double, 2> *in,
- std::array<double *, 2> & out)
+ const VectorizedArray<double, 8> *in,
+ std::array<double *, 8> & out)
{
// see the comments in the vectorized_transpose_and_store above
- const unsigned int n_chunks = n_entries / 2;
- if (add_into)
+ const unsigned int n_chunks = n_entries / 4;
+ __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
+ __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- for (unsigned int i = 0; i < n_chunks; ++i)
+ __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
+ __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
+ __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
+ __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
+ __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
+ __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
+ __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
+ __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
+ __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
+ __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
+ __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
+ __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
+
+ if (add_into)
{
- __m128d u0 = in[2 * i + 0].data;
- __m128d u1 = in[2 * i + 1].data;
- __m128d res0 = _mm_unpacklo_pd(u0, u1);
- __m128d res1 = _mm_unpackhi_pd(u0, u1);
- _mm_storeu_pd(out[0] + 2 * i,
- _mm_add_pd(_mm_loadu_pd(out[0] + 2 * i), res0));
- _mm_storeu_pd(out[1] + 2 * i,
- _mm_add_pd(_mm_loadu_pd(out[1] + 2 * i), res1));
+ res0 = _mm256_add_pd(_mm256_loadu_pd(out[0] + 4 * i), res0);
+ _mm256_storeu_pd(out[0] + 4 * i, res0);
+ res1 = _mm256_add_pd(_mm256_loadu_pd(out[1] + 4 * i), res1);
+ _mm256_storeu_pd(out[1] + 4 * i, res1);
+ res2 = _mm256_add_pd(_mm256_loadu_pd(out[2] + 4 * i), res2);
+ _mm256_storeu_pd(out[2] + 4 * i, res2);
+ res3 = _mm256_add_pd(_mm256_loadu_pd(out[3] + 4 * i), res3);
+ _mm256_storeu_pd(out[3] + 4 * i, res3);
+ res4 = _mm256_add_pd(_mm256_loadu_pd(out[4] + 4 * i), res4);
+ _mm256_storeu_pd(out[4] + 4 * i, res4);
+ res5 = _mm256_add_pd(_mm256_loadu_pd(out[5] + 4 * i), res5);
+ _mm256_storeu_pd(out[5] + 4 * i, res5);
+ res6 = _mm256_add_pd(_mm256_loadu_pd(out[6] + 4 * i), res6);
+ _mm256_storeu_pd(out[6] + 4 * i, res6);
+ res7 = _mm256_add_pd(_mm256_loadu_pd(out[7] + 4 * i), res7);
+ _mm256_storeu_pd(out[7] + 4 * i, res7);
}
-
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[v][i] += in[i][v];
- }
- else
- {
- for (unsigned int i = 0; i < n_chunks; ++i)
+ else
{
- __m128d u0 = in[2 * i + 0].data;
- __m128d u1 = in[2 * i + 1].data;
- __m128d res0 = _mm_unpacklo_pd(u0, u1);
- __m128d res1 = _mm_unpackhi_pd(u0, u1);
- _mm_storeu_pd(out[0] + 2 * i, res0);
- _mm_storeu_pd(out[1] + 2 * i, res1);
+ _mm256_storeu_pd(out[0] + 4 * i, res0);
+ _mm256_storeu_pd(out[1] + 4 * i, res1);
+ _mm256_storeu_pd(out[2] + 4 * i, res2);
+ _mm256_storeu_pd(out[3] + 4 * i, res3);
+ _mm256_storeu_pd(out[4] + 4 * i, res4);
+ _mm256_storeu_pd(out[5] + 4 * i, res5);
+ _mm256_storeu_pd(out[6] + 4 * i, res6);
+ _mm256_storeu_pd(out[7] + 4 * i, res7);
}
-
- for (unsigned int i = 2 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 2; ++v)
- out[v][i] = in[i][v];
}
+
+ if (add_into)
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[v][i] += in[i][v];
+ else
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[v][i] = in[i][v];
}
/**
- * Specialization for float and SSE2.
+ * Specialization for float and AVX-512.
*/
template <>
-class VectorizedArray<float, 4>
- : public VectorizedArrayBase<VectorizedArray<float, 4>, 4>
+class VectorizedArray<float, 16>
+ : public VectorizedArrayBase<VectorizedArray<float, 16>, 16>
{
public:
/**
*/
using value_type = float;
- /**
- * This function can be used to set all data fields to a given scalar.
- */
-
/**
* Default empty constructor, leaving the data in an uninitialized state
* similar to float/double.
*/
template <typename U>
VectorizedArray(const std::initializer_list<U> &list)
- : VectorizedArrayBase<VectorizedArray<float, 4>, 4>(list)
+ : VectorizedArrayBase<VectorizedArray<float, 16>, 16>(list)
{}
+ /**
+ * This function can be used to set all data fields to a given scalar.
+ */
DEAL_II_ALWAYS_INLINE
VectorizedArray &
operator=(const float x) &
{
- data = _mm_set1_ps(x);
+ data = _mm512_set1_ps(x);
return *this;
}
float &
operator[](const unsigned int comp)
{
- AssertIndexRange(comp, 4);
+ AssertIndexRange(comp, 16);
return *(reinterpret_cast<float *>(&data) + comp);
}
const float &
operator[](const unsigned int comp) const
{
- AssertIndexRange(comp, 4);
+ AssertIndexRange(comp, 16);
return *(reinterpret_cast<const float *>(&data) + comp);
}
VectorizedArray &
operator+=(const VectorizedArray &vec)
{
+ // if the compiler supports vector arithmetic, we can simply use +=
+ // operator on the given data type. this allows the compiler to combine
+ // additions with multiplication (fused multiply-add) if those
+ // instructions are available. Otherwise, we need to use the built-in
+ // intrinsic command for __m512
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data += vec.data;
# else
- data = _mm_add_ps(data, vec.data);
+ data = _mm512_add_ps(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data -= vec.data;
# else
- data = _mm_sub_ps(data, vec.data);
+ data = _mm512_sub_ps(data, vec.data);
# endif
return *this;
}
-
/**
* Multiplication.
*/
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data *= vec.data;
# else
- data = _mm_mul_ps(data, vec.data);
+ data = _mm512_mul_ps(data, vec.data);
# endif
return *this;
}
# ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
data /= vec.data;
# else
- data = _mm_div_ps(data, vec.data);
+ data = _mm512_div_ps(data, vec.data);
# endif
return *this;
}
/**
* Load @p size() from memory into the calling class, starting at
- * the given address. The memory need not be aligned by 16 bytes, as opposed
+ * the given address. The memory need not be aligned by 64 bytes, as opposed
* to casting a float address to VectorizedArray<float>*.
*/
DEAL_II_ALWAYS_INLINE
void
load(const float *ptr)
{
- data = _mm_loadu_ps(ptr);
+ data = _mm512_loadu_ps(ptr);
}
/**
* Write the content of the calling class into memory in form of @p
* size() to the given address. The memory need not be aligned by
- * 16 bytes, as opposed to casting a float address to
+ * 64 bytes, as opposed to casting a float address to
* VectorizedArray<float>*.
*/
DEAL_II_ALWAYS_INLINE
void
store(float *ptr) const
{
- _mm_storeu_ps(ptr, data);
+ _mm512_storeu_ps(ptr, data);
}
/**
* @copydoc VectorizedArray<Number>::streaming_store()
- * @note Memory must be aligned by 16 bytes.
+ * @note Memory must be aligned by 64 bytes.
*/
DEAL_II_ALWAYS_INLINE
void
streaming_store(float *ptr) const
{
- Assert(reinterpret_cast<std::size_t>(ptr) % 16 == 0,
+ Assert(reinterpret_cast<std::size_t>(ptr) % 64 == 0,
ExcMessage("Memory not aligned"));
- _mm_stream_ps(ptr, data);
+ _mm512_stream_ps(ptr, data);
}
/**
void
gather(const float *base_ptr, const unsigned int *offsets)
{
- for (unsigned int i = 0; i < 4; ++i)
- *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
+ // unfortunately, there does not appear to be a 512 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m512 index_val =
+ _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
+
+ // work around a warning with gcc-12 about an uninitialized initial state
+ // for gather by starting with a zero guess, even though all lanes will be
+ // overwritten
+ __m512 zero = {};
+ __mmask16 mask = 0xFFFF;
+
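+ // net effect: data[v] = base_ptr[offsets[v]] for v = 0..15 (the scale of 4
+ // matches sizeof(float))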
+ data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
}
/**
void
scatter(const unsigned int *offsets, float *base_ptr) const
{
- for (unsigned int i = 0; i < 4; ++i)
- base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
+ for (unsigned int i = 0; i < 16; ++i)
+ for (unsigned int j = i + 1; j < 16; ++j)
+ Assert(offsets[i] != offsets[j],
+ ExcMessage("Result of scatter undefined if two offset elements"
+ " point to the same position"));
+
+ // unfortunately, there does not appear to be a 512 bit integer load, so
+ // do it by some reinterpret casts here. this is allowed because the Intel
+ // API allows aliasing between different vector types.
+ const __m512 index_val =
+ _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
+ _mm512_i32scatter_ps(base_ptr, index, data, 4);
}
/**
* this->data[i]$.
*/
float
- sum();
+ sum()
+ {
+ VectorizedArray<float, 8> t1;
+ t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
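+ // defer the rest of the reduction to the 256-bit specialization, which in
+ // turn reduces via the 128-bit one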
+ return t1.sum();
+ }
/**
* Actual data field. To be consistent with the standard layout type and to
* enable interaction with external SIMD functionality, this member is
* declared public.
*/
- __m128 data;
+ __m512 data;
private:
+ /**
+ * Extract lower half of data field.
+ */
+ DEAL_II_ALWAYS_INLINE
+ __m256
+ get_lower() const
+ {
+ return _mm512_castps512_ps256(data);
+ }
+
+ /**
+ * Extract upper half of data field.
+ */
+ DEAL_II_ALWAYS_INLINE
+ __m256
+ get_upper() const
+ {
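+ // plain AVX-512F has no _mm512_extractf32x8_ps (that requires AVX-512DQ),
+ // so reinterpret the data as double, extract the upper 256 bits, and cast
+ // back; the casts do not change any bits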
+ return _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(data), 1));
+ }
+
/**
* Return the square root of this field. Not for use in user code. Use
* sqrt(x) instead.
get_sqrt() const
{
VectorizedArray res;
- res.data = _mm_sqrt_ps(data);
+ res.data = _mm512_sqrt_ps(data);
return res;
}
{
// to compute the absolute value, perform bitwise andnot with -0. This
// will leave all value and exponent bits unchanged but force the sign
- // value to +.
- __m128 mask = _mm_set1_ps(-0.f);
+ // value to +. Since there is no andnot for AVX512, we interpret the data
+ // as 32 bit integers and do the andnot on those types (note that andnot
+ // is a bitwise operation so the data type does not matter)
+ __m512 mask = _mm512_set1_ps(-0.f);
VectorizedArray res;
- res.data = _mm_andnot_ps(mask, data);
+ res.data = reinterpret_cast<__m512>(
+ _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
+ reinterpret_cast<__m512i>(data)));
return res;
}
get_max(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm_max_ps(data, other.data);
+ res.data = _mm512_max_ps(data, other.data);
return res;
}
get_min(const VectorizedArray &other) const
{
VectorizedArray res;
- res.data = _mm_min_ps(data, other.data);
+ res.data = _mm512_min_ps(data, other.data);
return res;
}
/**
- * Specialization for float and SSE2.
+ * Specialization for float and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_load_and_transpose(const unsigned int n_entries,
- const float * in,
- const unsigned int * offsets,
- VectorizedArray<float, 4> *out)
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const float * in,
+ const unsigned int * offsets,
+ VectorizedArray<float, 16> *out)
{
+ // Similar to the double case, we perform the work on smaller entities. In
+ // this case, we start from 128 bit arrays and insert them into a full 512
+ // bit register. This reduces the code size and register pressure because
+ // we do shuffles on 4 numbers rather than 16.
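+ // within each 128-bit lane, the 0x44/0xee selectors pick the low/high pair
+ // from the two sources and 0x88/0xdd then pick the even/odd elements, which
+ // realizes a 4x4 transpose per lane; with rows {0,4,8,12}, {1,5,9,13},
+ // {2,6,10,14}, {3,7,11,15} placed into the lanes of t0..t3, the result is
+ // out[4*i+j][v] == in[offsets[v] + 4*i + j]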
const unsigned int n_chunks = n_entries / 4;
+
+ // To avoid warnings about uninitialized variables, we need to initialize
+ // one variable to a pre-existing value in out, which will never get used
+ // in the end. Keep the initialization outside the loop because of a bug in
+ // gcc-9.1 which generates a "vmovapd" instruction instead of "vmovupd" in
+ // case t3 is initialized to zero (inside/outside of loop), see
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90991
+ __m512 t0, t1, t2, t3;
+ if (n_chunks > 0)
+ t3 = out[0].data;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
- __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
- __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
- __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
- __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
- __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
- __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
- __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
- out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
- out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
- out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
- out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
+ t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
+ t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
+ t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
+
+ __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
+ __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
+ __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
+ __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
+
+ out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
+ out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
+ out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
+ out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
}
// remainder loop of work that does not divide by 4
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
- out[i][v] = in[offsets[v] + i];
+ out[i].gather(in + i, offsets);
}
/**
- * Specialization for float and SSE2.
+ * Specialization for float and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_load_and_transpose(const unsigned int n_entries,
- const std::array<float *, 4> &in,
- VectorizedArray<float, 4> * out)
+vectorized_load_and_transpose(const unsigned int n_entries,
+ const std::array<float *, 16> &in,
+ VectorizedArray<float, 16> * out)
{
// see the comments in the vectorized_load_and_transpose above
const unsigned int n_chunks = n_entries / 4;
+
+ __m512 t0, t1, t2, t3;
+ if (n_chunks > 0)
+ t3 = out[0].data;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128 u0 = _mm_loadu_ps(in[0] + 4 * i);
- __m128 u1 = _mm_loadu_ps(in[1] + 4 * i);
- __m128 u2 = _mm_loadu_ps(in[2] + 4 * i);
- __m128 u3 = _mm_loadu_ps(in[3] + 4 * i);
- __m128 v0 = _mm_shuffle_ps(u0, u1, 0x44);
- __m128 v1 = _mm_shuffle_ps(u0, u1, 0xee);
- __m128 v2 = _mm_shuffle_ps(u2, u3, 0x44);
- __m128 v3 = _mm_shuffle_ps(u2, u3, 0xee);
- out[4 * i + 0].data = _mm_shuffle_ps(v0, v2, 0x88);
- out[4 * i + 1].data = _mm_shuffle_ps(v0, v2, 0xdd);
- out[4 * i + 2].data = _mm_shuffle_ps(v1, v3, 0x88);
- out[4 * i + 3].data = _mm_shuffle_ps(v1, v3, 0xdd);
+ t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[0] + 4 * i), 0);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[4] + 4 * i), 1);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[8] + 4 * i), 2);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in[12] + 4 * i), 3);
+ t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[1] + 4 * i), 0);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[5] + 4 * i), 1);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[9] + 4 * i), 2);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in[13] + 4 * i), 3);
+ t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[2] + 4 * i), 0);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[6] + 4 * i), 1);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[10] + 4 * i), 2);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in[14] + 4 * i), 3);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[3] + 4 * i), 0);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[7] + 4 * i), 1);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[11] + 4 * i), 2);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in[15] + 4 * i), 3);
+
+ __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
+ __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
+ __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
+ __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
+
+ out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
+ out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
+ out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
+ out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
}
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
- out[i][v] = in[v][i];
+ gather(out[i], in, i);
}
/**
- * Specialization for float and SSE2.
+ * Specialization for float and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_transpose_and_store(const bool add_into,
- const unsigned int n_entries,
- const VectorizedArray<float, 4> *in,
- const unsigned int * offsets,
- float * out)
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<float, 16> *in,
+ const unsigned int * offsets,
+ float * out)
{
const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128 u0 = in[4 * i + 0].data;
- __m128 u1 = in[4 * i + 1].data;
- __m128 u2 = in[4 * i + 2].data;
- __m128 u3 = in[4 * i + 3].data;
- __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
- __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
- __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
- __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
- u0 = _mm_shuffle_ps(t0, t2, 0x88);
- u1 = _mm_shuffle_ps(t0, t2, 0xdd);
- u2 = _mm_shuffle_ps(t1, t3, 0x88);
- u3 = _mm_shuffle_ps(t1, t3, 0xdd);
+ __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
+ __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
+ __m512 t2 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
+ __m512 t3 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
+ __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
+ __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
+ __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
+ __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
+
+ __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
+ __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
+ __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
+ __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
+ __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
+ __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
+ __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
+ __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
+ __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
+ __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
+ __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
+ __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
+ __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
+ __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
+ __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
+ __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
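+ // each res<v> now holds in[4*i + 0..3][v], the four consecutive floats that
+ // belong at out + 4*i + offsets[v]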
// Cannot use the same store instructions in both paths of the 'if'
// because the compiler cannot know that there is no aliasing between
// pointers
if (add_into)
{
- u0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), u0);
- _mm_storeu_ps(out + 4 * i + offsets[0], u0);
- u1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), u1);
- _mm_storeu_ps(out + 4 * i + offsets[1], u1);
- u2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), u2);
- _mm_storeu_ps(out + 4 * i + offsets[2], u2);
- u3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), u3);
- _mm_storeu_ps(out + 4 * i + offsets[3], u3);
+ res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
+ _mm_storeu_ps(out + 4 * i + offsets[0], res0);
+ res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
+ _mm_storeu_ps(out + 4 * i + offsets[1], res1);
+ res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
+ _mm_storeu_ps(out + 4 * i + offsets[2], res2);
+ res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
+ _mm_storeu_ps(out + 4 * i + offsets[3], res3);
+ res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
+ _mm_storeu_ps(out + 4 * i + offsets[4], res4);
+ res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
+ _mm_storeu_ps(out + 4 * i + offsets[5], res5);
+ res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
+ _mm_storeu_ps(out + 4 * i + offsets[6], res6);
+ res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
+ _mm_storeu_ps(out + 4 * i + offsets[7], res7);
+ res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
+ _mm_storeu_ps(out + 4 * i + offsets[8], res8);
+ res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
+ _mm_storeu_ps(out + 4 * i + offsets[9], res9);
+ res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
+ _mm_storeu_ps(out + 4 * i + offsets[10], res10);
+ res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
+ _mm_storeu_ps(out + 4 * i + offsets[11], res11);
+ res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
+ _mm_storeu_ps(out + 4 * i + offsets[12], res12);
+ res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
+ _mm_storeu_ps(out + 4 * i + offsets[13], res13);
+ res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
+ _mm_storeu_ps(out + 4 * i + offsets[14], res14);
+ res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
+ _mm_storeu_ps(out + 4 * i + offsets[15], res15);
}
else
{
- _mm_storeu_ps(out + 4 * i + offsets[0], u0);
- _mm_storeu_ps(out + 4 * i + offsets[1], u1);
- _mm_storeu_ps(out + 4 * i + offsets[2], u2);
- _mm_storeu_ps(out + 4 * i + offsets[3], u3);
+ _mm_storeu_ps(out + 4 * i + offsets[0], res0);
+ _mm_storeu_ps(out + 4 * i + offsets[1], res1);
+ _mm_storeu_ps(out + 4 * i + offsets[2], res2);
+ _mm_storeu_ps(out + 4 * i + offsets[3], res3);
+ _mm_storeu_ps(out + 4 * i + offsets[4], res4);
+ _mm_storeu_ps(out + 4 * i + offsets[5], res5);
+ _mm_storeu_ps(out + 4 * i + offsets[6], res6);
+ _mm_storeu_ps(out + 4 * i + offsets[7], res7);
+ _mm_storeu_ps(out + 4 * i + offsets[8], res8);
+ _mm_storeu_ps(out + 4 * i + offsets[9], res9);
+ _mm_storeu_ps(out + 4 * i + offsets[10], res10);
+ _mm_storeu_ps(out + 4 * i + offsets[11], res11);
+ _mm_storeu_ps(out + 4 * i + offsets[12], res12);
+ _mm_storeu_ps(out + 4 * i + offsets[13], res13);
+ _mm_storeu_ps(out + 4 * i + offsets[14], res14);
+ _mm_storeu_ps(out + 4 * i + offsets[15], res15);
}
}
// remainder loop of work that does not divide by 4
if (add_into)
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
+ for (unsigned int v = 0; v < 16; ++v)
out[offsets[v] + i] += in[i][v];
else
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
+ for (unsigned int v = 0; v < 16; ++v)
out[offsets[v] + i] = in[i][v];
}
/**
- * Specialization for float and SSE2.
+ * Specialization for float and AVX-512.
*/
template <>
inline DEAL_II_ALWAYS_INLINE void
-vectorized_transpose_and_store(const bool add_into,
- const unsigned int n_entries,
- const VectorizedArray<float, 4> *in,
- std::array<float *, 4> & out)
+vectorized_transpose_and_store(const bool add_into,
+ const unsigned int n_entries,
+ const VectorizedArray<float, 16> *in,
+ std::array<float *, 16> & out)
{
// see the comments in the vectorized_transpose_and_store above
const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128 u0 = in[4 * i + 0].data;
- __m128 u1 = in[4 * i + 1].data;
- __m128 u2 = in[4 * i + 2].data;
- __m128 u3 = in[4 * i + 3].data;
- __m128 t0 = _mm_shuffle_ps(u0, u1, 0x44);
- __m128 t1 = _mm_shuffle_ps(u0, u1, 0xee);
- __m128 t2 = _mm_shuffle_ps(u2, u3, 0x44);
- __m128 t3 = _mm_shuffle_ps(u2, u3, 0xee);
- u0 = _mm_shuffle_ps(t0, t2, 0x88);
- u1 = _mm_shuffle_ps(t0, t2, 0xdd);
- u2 = _mm_shuffle_ps(t1, t3, 0x88);
- u3 = _mm_shuffle_ps(t1, t3, 0xdd);
+ __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
+ __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
+ __m512 t2 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
+ __m512 t3 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
+ __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
+ __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
+ __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
+ __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
+
+ __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
+ __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
+ __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
+ __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
+ __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
+ __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
+ __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
+ __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
+ __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
+ __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
+ __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
+ __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
+ __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
+ __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
+ __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
+ __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
if (add_into)
{
- u0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), u0);
- _mm_storeu_ps(out[0] + 4 * i, u0);
- u1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), u1);
- _mm_storeu_ps(out[1] + 4 * i, u1);
- u2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), u2);
- _mm_storeu_ps(out[2] + 4 * i, u2);
- u3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), u3);
- _mm_storeu_ps(out[3] + 4 * i, u3);
+ res0 = _mm_add_ps(_mm_loadu_ps(out[0] + 4 * i), res0);
+ _mm_storeu_ps(out[0] + 4 * i, res0);
+ res1 = _mm_add_ps(_mm_loadu_ps(out[1] + 4 * i), res1);
+ _mm_storeu_ps(out[1] + 4 * i, res1);
+ res2 = _mm_add_ps(_mm_loadu_ps(out[2] + 4 * i), res2);
+ _mm_storeu_ps(out[2] + 4 * i, res2);
+ res3 = _mm_add_ps(_mm_loadu_ps(out[3] + 4 * i), res3);
+ _mm_storeu_ps(out[3] + 4 * i, res3);
+ res4 = _mm_add_ps(_mm_loadu_ps(out[4] + 4 * i), res4);
+ _mm_storeu_ps(out[4] + 4 * i, res4);
+ res5 = _mm_add_ps(_mm_loadu_ps(out[5] + 4 * i), res5);
+ _mm_storeu_ps(out[5] + 4 * i, res5);
+ res6 = _mm_add_ps(_mm_loadu_ps(out[6] + 4 * i), res6);
+ _mm_storeu_ps(out[6] + 4 * i, res6);
+ res7 = _mm_add_ps(_mm_loadu_ps(out[7] + 4 * i), res7);
+ _mm_storeu_ps(out[7] + 4 * i, res7);
+ res8 = _mm_add_ps(_mm_loadu_ps(out[8] + 4 * i), res8);
+ _mm_storeu_ps(out[8] + 4 * i, res8);
+ res9 = _mm_add_ps(_mm_loadu_ps(out[9] + 4 * i), res9);
+ _mm_storeu_ps(out[9] + 4 * i, res9);
+ res10 = _mm_add_ps(_mm_loadu_ps(out[10] + 4 * i), res10);
+ _mm_storeu_ps(out[10] + 4 * i, res10);
+ res11 = _mm_add_ps(_mm_loadu_ps(out[11] + 4 * i), res11);
+ _mm_storeu_ps(out[11] + 4 * i, res11);
+ res12 = _mm_add_ps(_mm_loadu_ps(out[12] + 4 * i), res12);
+ _mm_storeu_ps(out[12] + 4 * i, res12);
+ res13 = _mm_add_ps(_mm_loadu_ps(out[13] + 4 * i), res13);
+ _mm_storeu_ps(out[13] + 4 * i, res13);
+ res14 = _mm_add_ps(_mm_loadu_ps(out[14] + 4 * i), res14);
+ _mm_storeu_ps(out[14] + 4 * i, res14);
+ res15 = _mm_add_ps(_mm_loadu_ps(out[15] + 4 * i), res15);
+ _mm_storeu_ps(out[15] + 4 * i, res15);
}
else
{
- _mm_storeu_ps(out[0] + 4 * i, u0);
- _mm_storeu_ps(out[1] + 4 * i, u1);
- _mm_storeu_ps(out[2] + 4 * i, u2);
- _mm_storeu_ps(out[3] + 4 * i, u3);
+ _mm_storeu_ps(out[0] + 4 * i, res0);
+ _mm_storeu_ps(out[1] + 4 * i, res1);
+ _mm_storeu_ps(out[2] + 4 * i, res2);
+ _mm_storeu_ps(out[3] + 4 * i, res3);
+ _mm_storeu_ps(out[4] + 4 * i, res4);
+ _mm_storeu_ps(out[5] + 4 * i, res5);
+ _mm_storeu_ps(out[6] + 4 * i, res6);
+ _mm_storeu_ps(out[7] + 4 * i, res7);
+ _mm_storeu_ps(out[8] + 4 * i, res8);
+ _mm_storeu_ps(out[9] + 4 * i, res9);
+ _mm_storeu_ps(out[10] + 4 * i, res10);
+ _mm_storeu_ps(out[11] + 4 * i, res11);
+ _mm_storeu_ps(out[12] + 4 * i, res12);
+ _mm_storeu_ps(out[13] + 4 * i, res13);
+ _mm_storeu_ps(out[14] + 4 * i, res14);
+ _mm_storeu_ps(out[15] + 4 * i, res15);
}
}
if (add_into)
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
+ for (unsigned int v = 0; v < 16; ++v)
out[v][i] += in[i][v];
else
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
+ for (unsigned int v = 0; v < 16; ++v)
out[v][i] = in[i][v];
}
-
-
-# endif // if DEAL_II_VECTORIZATION_WIDTH_IN_BITS > 0 && defined(__SSE2__)
+# endif // DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
# if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__ALTIVEC__) && \
defined(__VSX__)
#endif // DOXYGEN
-/**
- * sum() functions.
- */
-
-#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128 && defined(__SSE2__)
-inline double
-VectorizedArray<double, 2>::sum()
-{
- __m128d t1 = _mm_unpackhi_pd(data, data);
- __m128d t2 = _mm_add_pd(data, t1);
- return _mm_cvtsd_f64(t2);
-}
-
-
-
-inline float
-VectorizedArray<float, 4>::sum()
-{
- __m128 t1 = _mm_movehl_ps(data, data);
- __m128 t2 = _mm_add_ps(data, t1);
- __m128 t3 = _mm_shuffle_ps(t2, t2, 1);
- __m128 t4 = _mm_add_ss(t2, t3);
- return _mm_cvtss_f32(t4);
-}
-#endif
-
-
-
-#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256 && defined(__AVX__)
-inline double
-VectorizedArray<double, 4>::sum()
-{
- VectorizedArray<double, 2> t1;
- t1.data = _mm_add_pd(this->get_lower(), this->get_upper());
- return t1.sum();
-}
-
-
-
-inline float
-VectorizedArray<float, 8>::sum()
-{
- VectorizedArray<float, 4> t1;
- t1.data = _mm_add_ps(this->get_lower(), this->get_upper());
- return t1.sum();
-}
-#endif
-
-
-
-#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512 && defined(__AVX512F__)
-inline double
-VectorizedArray<double, 8>::sum()
-{
- VectorizedArray<double, 4> t1;
- t1.data = _mm256_add_pd(this->get_lower(), this->get_upper());
- return t1.sum();
-}
-
-
-
-inline float
-VectorizedArray<float, 16>::sum()
-{
- VectorizedArray<float, 8> t1;
- t1.data = _mm256_add_ps(this->get_lower(), this->get_upper());
- return t1.sum();
-}
-#endif
-
/**