* @relatesalso VectorizedArray
*/
template <typename Number>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const Number * in,
const unsigned int * offsets,
* @relatesalso VectorizedArray
*/
template <typename Number>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<Number> *in,
* Specialization for double and AVX-512.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const double * in,
const unsigned int * offsets,
VectorizedArray<double> *out)
{
+ // Do not do a full transpose because the code is long and will most
+ // likely not pay off: many processors have two load units (for the
+ // top 16 instructions) but only one permute unit (for the 8
+ // shuffle/unpack instructions). Rather, do the transposition on
+ // vectorized arrays of half the size, __m256d.
const unsigned int n_chunks = n_entries / 4;
- for (unsigned int outer = 0; outer < 8; outer += 4)
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- const double *in0 = in + offsets[0 + outer];
- const double *in1 = in + offsets[1 + outer];
- const double *in2 = in + offsets[2 + outer];
- const double *in3 = in + offsets[3 + outer];
-
- for (unsigned int i = 0; i < n_chunks; ++i)
- {
- __m256d u0 = _mm256_loadu_pd(in0 + 4 * i);
- __m256d u1 = _mm256_loadu_pd(in1 + 4 * i);
- __m256d u2 = _mm256_loadu_pd(in2 + 4 * i);
- __m256d u3 = _mm256_loadu_pd(in3 + 4 * i);
- __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
- __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
- __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
- __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
- *reinterpret_cast<__m256d *>(
- reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
- _mm256_unpacklo_pd(t0, t1);
- *reinterpret_cast<__m256d *>(
- reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
- _mm256_unpackhi_pd(t0, t1);
- *reinterpret_cast<__m256d *>(
- reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
- _mm256_unpacklo_pd(t2, t3);
- *reinterpret_cast<__m256d *>(
- reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
- _mm256_unpackhi_pd(t2, t3);
- }
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
- out[i][outer + v] = in[offsets[v + outer] + i];
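+ // As in the other specializations, one variable is zero-initialized
+ // only to avoid uninitialized-variable warnings; every lane of
+ // t0..t3 is overwritten by the inserts below.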
+ __m512d t0, t1, t2, t3 = {};
+
+ t0 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[0] + 4 * i), 0);
+ t0 = _mm512_insertf64x4(t0, _mm256_loadu_pd(in + offsets[2] + 4 * i), 1);
+ t1 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[1] + 4 * i), 0);
+ t1 = _mm512_insertf64x4(t1, _mm256_loadu_pd(in + offsets[3] + 4 * i), 1);
+ t2 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[4] + 4 * i), 0);
+ t2 = _mm512_insertf64x4(t2, _mm256_loadu_pd(in + offsets[6] + 4 * i), 1);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[5] + 4 * i), 0);
+ t3 = _mm512_insertf64x4(t3, _mm256_loadu_pd(in + offsets[7] + 4 * i), 1);
+
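+ // _mm512_shuffle_f64x2 with 0x88 picks the even 128-bit lanes of its
+ // two operands, with 0xdd the odd ones; the unpacklo/unpackhi below
+ // then interleave the individual doubles of the eight input streams.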
+ __m512d v0 = _mm512_shuffle_f64x2(t0, t2, 0x88);
+ __m512d v1 = _mm512_shuffle_f64x2(t0, t2, 0xdd);
+ __m512d v2 = _mm512_shuffle_f64x2(t1, t3, 0x88);
+ __m512d v3 = _mm512_shuffle_f64x2(t1, t3, 0xdd);
+ out[4 * i + 0].data = _mm512_unpacklo_pd(v0, v2);
+ out[4 * i + 1].data = _mm512_unpackhi_pd(v0, v2);
+ out[4 * i + 2].data = _mm512_unpacklo_pd(v1, v3);
+ out[4 * i + 3].data = _mm512_unpackhi_pd(v1, v3);
}
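+ // remaining entries that do not fill a complete chunk of 4 are read
+ // with a gather over the offsets array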
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ out[i].gather(in + i, offsets);
}
* Specialization for double and AVX-512.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<double> *in,
const unsigned int * offsets,
double * out)
{
+ // Do not do a full transpose because the code is long and will most
+ // likely not pay off. Rather, do the transposition on vectorized
+ // arrays of half the size, __m256d.
const unsigned int n_chunks = n_entries / 4;
- // do not do full transpose because the code is too long and will most
- // likely not pay off. rather do the transposition on the vectorized array
- // on size smaller, mm256d
- for (unsigned int outer = 0; outer < 8; outer += 4)
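+ // The masks drive _mm512_permutex2var_pd below: mask1 picks the first
+ // two doubles of each 256-bit half of both operands, mask2 the last
+ // two. Note that _mm512_set_epi64 lists indices from highest to
+ // lowest element.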
+ __m512i mask1 = _mm512_set_epi64(0xd, 0xc, 0x5, 0x4, 0x9, 0x8, 0x1, 0x0);
+ __m512i mask2 = _mm512_set_epi64(0xf, 0xe, 0x7, 0x6, 0xb, 0xa, 0x3, 0x2);
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- double *out0 = out + offsets[0 + outer];
- double *out1 = out + offsets[1 + outer];
- double *out2 = out + offsets[2 + outer];
- double *out3 = out + offsets[3 + outer];
- for (unsigned int i = 0; i < n_chunks; ++i)
+ __m512d t0 = _mm512_unpacklo_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t1 = _mm512_unpackhi_pd(in[i * 4].data, in[i * 4 + 1].data);
+ __m512d t2 = _mm512_unpacklo_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d t3 = _mm512_unpackhi_pd(in[i * 4 + 2].data, in[i * 4 + 3].data);
+ __m512d v0 = _mm512_permutex2var_pd(t0, mask1, t2);
+ __m512d v1 = _mm512_permutex2var_pd(t0, mask2, t2);
+ __m512d v2 = _mm512_permutex2var_pd(t1, mask1, t3);
+ __m512d v3 = _mm512_permutex2var_pd(t1, mask2, t3);
+ __m256d res0 = _mm512_extractf64x4_pd(v0, 0);
+ __m256d res4 = _mm512_extractf64x4_pd(v0, 1);
+ __m256d res1 = _mm512_extractf64x4_pd(v2, 0);
+ __m256d res5 = _mm512_extractf64x4_pd(v2, 1);
+ __m256d res2 = _mm512_extractf64x4_pd(v1, 0);
+ __m256d res6 = _mm512_extractf64x4_pd(v1, 1);
+ __m256d res3 = _mm512_extractf64x4_pd(v3, 0);
+ __m256d res7 = _mm512_extractf64x4_pd(v3, 1);
+
+ // Cannot use the same store instructions in both paths of the 'if'
+ // because the compiler cannot know that there is no aliasing
+ // between pointers
+ if (add_into)
{
- __m256d u0 = *reinterpret_cast<const __m256d *>(
- reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
- __m256d u1 = *reinterpret_cast<const __m256d *>(
- reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
- __m256d u2 = *reinterpret_cast<const __m256d *>(
- reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
- __m256d u3 = *reinterpret_cast<const __m256d *>(
- reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
- __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
- __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
- __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
- __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
- __m256d res0 = _mm256_unpacklo_pd(t0, t1);
- __m256d res1 = _mm256_unpackhi_pd(t0, t1);
- __m256d res2 = _mm256_unpacklo_pd(t2, t3);
- __m256d res3 = _mm256_unpackhi_pd(t2, t3);
-
- // Cannot use the same store instructions in both paths of the 'if'
- // because the compiler cannot know that there is no aliasing between
- // pointers
- if (add_into)
- {
- res0 = _mm256_add_pd(_mm256_loadu_pd(out0 + 4 * i), res0);
- _mm256_storeu_pd(out0 + 4 * i, res0);
- res1 = _mm256_add_pd(_mm256_loadu_pd(out1 + 4 * i), res1);
- _mm256_storeu_pd(out1 + 4 * i, res1);
- res2 = _mm256_add_pd(_mm256_loadu_pd(out2 + 4 * i), res2);
- _mm256_storeu_pd(out2 + 4 * i, res2);
- res3 = _mm256_add_pd(_mm256_loadu_pd(out3 + 4 * i), res3);
- _mm256_storeu_pd(out3 + 4 * i, res3);
- }
- else
- {
- _mm256_storeu_pd(out0 + 4 * i, res0);
- _mm256_storeu_pd(out1 + 4 * i, res1);
- _mm256_storeu_pd(out2 + 4 * i, res2);
- _mm256_storeu_pd(out3 + 4 * i, res3);
- }
+ res0 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[0]), res0);
+ _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
+ res1 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[1]), res1);
+ _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
+ res2 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[2]), res2);
+ _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
+ res3 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[3]), res3);
+ _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
+ res4 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[4]), res4);
+ _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
+ res5 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[5]), res5);
+ _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
+ res6 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[6]), res6);
+ _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
+ res7 = _mm256_add_pd(_mm256_loadu_pd(out + 4 * i + offsets[7]), res7);
+ _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
}
- if (add_into)
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
- out[offsets[v + outer] + i] += in[i][v + outer];
else
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 4; ++v)
- out[offsets[v + outer] + i] = in[i][v + outer];
+ {
+ _mm256_storeu_pd(out + 4 * i + offsets[0], res0);
+ _mm256_storeu_pd(out + 4 * i + offsets[1], res1);
+ _mm256_storeu_pd(out + 4 * i + offsets[2], res2);
+ _mm256_storeu_pd(out + 4 * i + offsets[3], res3);
+ _mm256_storeu_pd(out + 4 * i + offsets[4], res4);
+ _mm256_storeu_pd(out + 4 * i + offsets[5], res5);
+ _mm256_storeu_pd(out + 4 * i + offsets[6], res6);
+ _mm256_storeu_pd(out + 4 * i + offsets[7], res7);
+ }
}
+ if (add_into)
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[offsets[v] + i] += in[i][v];
+ else
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 8; ++v)
+ out[offsets[v] + i] = in[i][v];
}
* Specialization for float and AVX-512.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const float * in,
const unsigned int * offsets,
VectorizedArray<float> *out)
{
const unsigned int n_chunks = n_entries / 4;
- for (unsigned int outer = 0; outer < 16; outer += 8)
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- for (unsigned int i = 0; i < n_chunks; ++i)
- {
- __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0 + outer]);
- __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1 + outer]);
- __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2 + outer]);
- __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3 + outer]);
- __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4 + outer]);
- __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5 + outer]);
- __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6 + outer]);
- __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7 + outer]);
- // To avoid warnings about uninitialized variables, need to initialize
- // one variable with zero before using it.
- __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
- t0 = _mm256_insertf128_ps(t3, u0, 0);
- t0 = _mm256_insertf128_ps(t0, u4, 1);
- t1 = _mm256_insertf128_ps(t3, u1, 0);
- t1 = _mm256_insertf128_ps(t1, u5, 1);
- t2 = _mm256_insertf128_ps(t3, u2, 0);
- t2 = _mm256_insertf128_ps(t2, u6, 1);
- t3 = _mm256_insertf128_ps(t3, u3, 0);
- t3 = _mm256_insertf128_ps(t3, u7, 1);
- __m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
- __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
- __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
- __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
- *reinterpret_cast<__m256 *>(
- reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
- _mm256_shuffle_ps(v0, v2, 0x88);
- *reinterpret_cast<__m256 *>(
- reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
- _mm256_shuffle_ps(v0, v2, 0xdd);
- *reinterpret_cast<__m256 *>(
- reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
- _mm256_shuffle_ps(v1, v3, 0x88);
- *reinterpret_cast<__m256 *>(
- reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
- _mm256_shuffle_ps(v1, v3, 0xdd);
- }
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[i][v + outer] = in[offsets[v + outer] + i];
+ // To avoid warnings about uninitialized variables, initialize one
+ // variable with a value that is already available and use it as the
+ // source of the first insert into each of t0..t3.
+ __m512 t0, t1, t2, t3 = out[0].data;
+
+ t0 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[0] + 4 * i), 0);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[4] + 4 * i), 1);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[8] + 4 * i), 2);
+ t0 = _mm512_insertf32x4(t0, _mm_loadu_ps(in + offsets[12] + 4 * i), 3);
+
+ t1 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[1] + 4 * i), 0);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[5] + 4 * i), 1);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[9] + 4 * i), 2);
+ t1 = _mm512_insertf32x4(t1, _mm_loadu_ps(in + offsets[13] + 4 * i), 3);
+
+ t2 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[2] + 4 * i), 0);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[6] + 4 * i), 1);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[10] + 4 * i), 2);
+ t2 = _mm512_insertf32x4(t2, _mm_loadu_ps(in + offsets[14] + 4 * i), 3);
+
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[3] + 4 * i), 0);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[7] + 4 * i), 1);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[11] + 4 * i), 2);
+ t3 = _mm512_insertf32x4(t3, _mm_loadu_ps(in + offsets[15] + 4 * i), 3);
+
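+ // _mm512_shuffle_ps operates on each 128-bit lane separately, so the
+ // two shuffle stages below perform four 4x4 float transposes at once.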
+ __m512 v0 = _mm512_shuffle_ps(t0, t1, 0x44);
+ __m512 v1 = _mm512_shuffle_ps(t0, t1, 0xee);
+ __m512 v2 = _mm512_shuffle_ps(t2, t3, 0x44);
+ __m512 v3 = _mm512_shuffle_ps(t2, t3, 0xee);
+
+ out[4 * i + 0].data = _mm512_shuffle_ps(v0, v2, 0x88);
+ out[4 * i + 1].data = _mm512_shuffle_ps(v0, v2, 0xdd);
+ out[4 * i + 2].data = _mm512_shuffle_ps(v1, v3, 0x88);
+ out[4 * i + 3].data = _mm512_shuffle_ps(v1, v3, 0xdd);
}
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ out[i].gather(in + i, offsets);
}
* Specialization for float and AVX-512.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<float> *in,
float * out)
{
const unsigned int n_chunks = n_entries / 4;
- for (unsigned int outer = 0; outer < 16; outer += 8)
+ for (unsigned int i = 0; i < n_chunks; ++i)
{
- for (unsigned int i = 0; i < n_chunks; ++i)
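+ // transpose within each 128-bit lane by two shuffle stages, then
+ // extract the sixteen 4-float rows and store them to the output
+ // streams given by offsets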
+ __m512 t0 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0x44);
+ __m512 t1 = _mm512_shuffle_ps(in[4 * i].data, in[1 + 4 * i].data, 0xee);
+ __m512 t2 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0x44);
+ __m512 t3 =
+ _mm512_shuffle_ps(in[2 + 4 * i].data, in[3 + 4 * i].data, 0xee);
+ __m512 u0 = _mm512_shuffle_ps(t0, t2, 0x88);
+ __m512 u1 = _mm512_shuffle_ps(t0, t2, 0xdd);
+ __m512 u2 = _mm512_shuffle_ps(t1, t3, 0x88);
+ __m512 u3 = _mm512_shuffle_ps(t1, t3, 0xdd);
+
+ __m128 res0 = _mm512_extractf32x4_ps(u0, 0);
+ __m128 res4 = _mm512_extractf32x4_ps(u0, 1);
+ __m128 res8 = _mm512_extractf32x4_ps(u0, 2);
+ __m128 res12 = _mm512_extractf32x4_ps(u0, 3);
+ __m128 res1 = _mm512_extractf32x4_ps(u1, 0);
+ __m128 res5 = _mm512_extractf32x4_ps(u1, 1);
+ __m128 res9 = _mm512_extractf32x4_ps(u1, 2);
+ __m128 res13 = _mm512_extractf32x4_ps(u1, 3);
+ __m128 res2 = _mm512_extractf32x4_ps(u2, 0);
+ __m128 res6 = _mm512_extractf32x4_ps(u2, 1);
+ __m128 res10 = _mm512_extractf32x4_ps(u2, 2);
+ __m128 res14 = _mm512_extractf32x4_ps(u2, 3);
+ __m128 res3 = _mm512_extractf32x4_ps(u3, 0);
+ __m128 res7 = _mm512_extractf32x4_ps(u3, 1);
+ __m128 res11 = _mm512_extractf32x4_ps(u3, 2);
+ __m128 res15 = _mm512_extractf32x4_ps(u3, 3);
+
+ // Cannot use the same store instructions in both paths of the 'if'
+ // because the compiler cannot know that there is no aliasing between
+ // pointers
+ if (add_into)
{
- __m256 u0 = *reinterpret_cast<const __m256 *>(
- reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
- __m256 u1 = *reinterpret_cast<const __m256 *>(
- reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
- __m256 u2 = *reinterpret_cast<const __m256 *>(
- reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
- __m256 u3 = *reinterpret_cast<const __m256 *>(
- reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
- __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
- __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
- __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
- __m256 t3 = _mm256_shuffle_ps(u2, u3, 0xee);
- u0 = _mm256_shuffle_ps(t0, t2, 0x88);
- u1 = _mm256_shuffle_ps(t0, t2, 0xdd);
- u2 = _mm256_shuffle_ps(t1, t3, 0x88);
- u3 = _mm256_shuffle_ps(t1, t3, 0xdd);
- __m128 res0 = _mm256_extractf128_ps(u0, 0);
- __m128 res4 = _mm256_extractf128_ps(u0, 1);
- __m128 res1 = _mm256_extractf128_ps(u1, 0);
- __m128 res5 = _mm256_extractf128_ps(u1, 1);
- __m128 res2 = _mm256_extractf128_ps(u2, 0);
- __m128 res6 = _mm256_extractf128_ps(u2, 1);
- __m128 res3 = _mm256_extractf128_ps(u3, 0);
- __m128 res7 = _mm256_extractf128_ps(u3, 1);
-
- // Cannot use the same store instructions in both paths of the 'if'
- // because the compiler cannot know that there is no aliasing between
- // pointers
- if (add_into)
- {
- res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0 + outer]),
- res0);
- _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
- res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1 + outer]),
- res1);
- _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
- res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2 + outer]),
- res2);
- _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
- res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3 + outer]),
- res3);
- _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
- res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4 + outer]),
- res4);
- _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
- res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5 + outer]),
- res5);
- _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
- res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6 + outer]),
- res6);
- _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
- res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7 + outer]),
- res7);
- _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
- }
- else
- {
- _mm_storeu_ps(out + 4 * i + offsets[0 + outer], res0);
- _mm_storeu_ps(out + 4 * i + offsets[1 + outer], res1);
- _mm_storeu_ps(out + 4 * i + offsets[2 + outer], res2);
- _mm_storeu_ps(out + 4 * i + offsets[3 + outer], res3);
- _mm_storeu_ps(out + 4 * i + offsets[4 + outer], res4);
- _mm_storeu_ps(out + 4 * i + offsets[5 + outer], res5);
- _mm_storeu_ps(out + 4 * i + offsets[6 + outer], res6);
- _mm_storeu_ps(out + 4 * i + offsets[7 + outer], res7);
- }
+ res0 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[0]), res0);
+ _mm_storeu_ps(out + 4 * i + offsets[0], res0);
+ res1 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[1]), res1);
+ _mm_storeu_ps(out + 4 * i + offsets[1], res1);
+ res2 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[2]), res2);
+ _mm_storeu_ps(out + 4 * i + offsets[2], res2);
+ res3 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[3]), res3);
+ _mm_storeu_ps(out + 4 * i + offsets[3], res3);
+ res4 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[4]), res4);
+ _mm_storeu_ps(out + 4 * i + offsets[4], res4);
+ res5 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[5]), res5);
+ _mm_storeu_ps(out + 4 * i + offsets[5], res5);
+ res6 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[6]), res6);
+ _mm_storeu_ps(out + 4 * i + offsets[6], res6);
+ res7 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[7]), res7);
+ _mm_storeu_ps(out + 4 * i + offsets[7], res7);
+ res8 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[8]), res8);
+ _mm_storeu_ps(out + 4 * i + offsets[8], res8);
+ res9 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[9]), res9);
+ _mm_storeu_ps(out + 4 * i + offsets[9], res9);
+ res10 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[10]), res10);
+ _mm_storeu_ps(out + 4 * i + offsets[10], res10);
+ res11 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[11]), res11);
+ _mm_storeu_ps(out + 4 * i + offsets[11], res11);
+ res12 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[12]), res12);
+ _mm_storeu_ps(out + 4 * i + offsets[12], res12);
+ res13 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[13]), res13);
+ _mm_storeu_ps(out + 4 * i + offsets[13], res13);
+ res14 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[14]), res14);
+ _mm_storeu_ps(out + 4 * i + offsets[14], res14);
+ res15 = _mm_add_ps(_mm_loadu_ps(out + 4 * i + offsets[15]), res15);
+ _mm_storeu_ps(out + 4 * i + offsets[15], res15);
}
- if (add_into)
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[offsets[v + outer] + i] += in[i][v + outer];
else
- for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
- for (unsigned int v = 0; v < 8; ++v)
- out[offsets[v + outer] + i] = in[i][v + outer];
+ {
+ _mm_storeu_ps(out + 4 * i + offsets[0], res0);
+ _mm_storeu_ps(out + 4 * i + offsets[1], res1);
+ _mm_storeu_ps(out + 4 * i + offsets[2], res2);
+ _mm_storeu_ps(out + 4 * i + offsets[3], res3);
+ _mm_storeu_ps(out + 4 * i + offsets[4], res4);
+ _mm_storeu_ps(out + 4 * i + offsets[5], res5);
+ _mm_storeu_ps(out + 4 * i + offsets[6], res6);
+ _mm_storeu_ps(out + 4 * i + offsets[7], res7);
+ _mm_storeu_ps(out + 4 * i + offsets[8], res8);
+ _mm_storeu_ps(out + 4 * i + offsets[9], res9);
+ _mm_storeu_ps(out + 4 * i + offsets[10], res10);
+ _mm_storeu_ps(out + 4 * i + offsets[11], res11);
+ _mm_storeu_ps(out + 4 * i + offsets[12], res12);
+ _mm_storeu_ps(out + 4 * i + offsets[13], res13);
+ _mm_storeu_ps(out + 4 * i + offsets[14], res14);
+ _mm_storeu_ps(out + 4 * i + offsets[15], res15);
+ }
}
+ if (add_into)
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 16; ++v)
+ out[offsets[v] + i] += in[i][v];
+ else
+ for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
+ for (unsigned int v = 0; v < 16; ++v)
+ out[offsets[v] + i] = in[i][v];
}
* Specialization for double and AVX.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const double * in,
const unsigned int * offsets,
* Specialization for double and AVX.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<double> *in,
* Specialization for float and AVX.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const float * in,
const unsigned int * offsets,
const unsigned int n_chunks = n_entries / 4;
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m128 u0 = _mm_loadu_ps(in + 4 * i + offsets[0]);
- __m128 u1 = _mm_loadu_ps(in + 4 * i + offsets[1]);
- __m128 u2 = _mm_loadu_ps(in + 4 * i + offsets[2]);
- __m128 u3 = _mm_loadu_ps(in + 4 * i + offsets[3]);
- __m128 u4 = _mm_loadu_ps(in + 4 * i + offsets[4]);
- __m128 u5 = _mm_loadu_ps(in + 4 * i + offsets[5]);
- __m128 u6 = _mm_loadu_ps(in + 4 * i + offsets[6]);
- __m128 u7 = _mm_loadu_ps(in + 4 * i + offsets[7]);
// To avoid warnings about uninitialized variables, need to initialize
// one variable with zero before using it.
- __m256 t0, t1, t2, t3 = _mm256_set1_ps(0.F);
- t0 = _mm256_insertf128_ps(t3, u0, 0);
- t0 = _mm256_insertf128_ps(t0, u4, 1);
- t1 = _mm256_insertf128_ps(t3, u1, 0);
- t1 = _mm256_insertf128_ps(t1, u5, 1);
- t2 = _mm256_insertf128_ps(t3, u2, 0);
- t2 = _mm256_insertf128_ps(t2, u6, 1);
- t3 = _mm256_insertf128_ps(t3, u3, 0);
- t3 = _mm256_insertf128_ps(t3, u7, 1);
+ __m256 t0, t1, t2, t3 = {};
+ t0 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[0]), 0);
+ t0 = _mm256_insertf128_ps(t0, _mm_loadu_ps(in + 4 * i + offsets[4]), 1);
+ t1 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[1]), 0);
+ t1 = _mm256_insertf128_ps(t1, _mm_loadu_ps(in + 4 * i + offsets[5]), 1);
+ t2 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[2]), 0);
+ t2 = _mm256_insertf128_ps(t2, _mm_loadu_ps(in + 4 * i + offsets[6]), 1);
+ t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[3]), 0);
+ t3 = _mm256_insertf128_ps(t3, _mm_loadu_ps(in + 4 * i + offsets[7]), 1);
+
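+ // _mm256_shuffle_ps works per 128-bit lane, so the shuffles below run
+ // two 4x4 float transposes in parallel.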
__m256 v0 = _mm256_shuffle_ps(t0, t1, 0x44);
__m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
__m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
* Specialization for float and AVX.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<float> *in,
* Specialization for double and SSE2.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const double * in,
const unsigned int * offsets,
* Specialization for double and SSE2.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<double> *in,
* Specialization for float and SSE2.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_load_and_transpose(const unsigned int n_entries,
const float * in,
const unsigned int * offsets,
* Specialization for float and SSE2.
*/
template <>
-inline void
+inline DEAL_II_ALWAYS_INLINE void
vectorized_transpose_and_store(const bool add_into,
const unsigned int n_entries,
const VectorizedArray<float> *in,