// unfortunately, there does not appear to be a 256 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
- const __m256i index = *((__m256i *)(&index_val));
- data = _mm512_i32gather_pd(index, base_ptr, 8);
+ const __m256 index_val =
+ _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+ data = _mm512_i32gather_pd(index, base_ptr, 8);
}
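// A minimal, self-contained sketch of the gather pattern above, assuming
// AVX-512F (compile e.g. with -mavx512f). The function and buffer names are
// illustrative, and the _mm256_castps_si256 cast intrinsic stands in for the
// pointer reinterpret used in the patch; this is not the library's code.
#include <immintrin.h>
#include <cassert>

void gather_eight_doubles(const double *      base_ptr,
                          const unsigned int *offsets, // 8 offsets
                          double *            result)  // 8 doubles
{
  // Move the 8 x 32-bit offsets into a vector register via a float load; the
  // bits are only transported, never interpreted as float values.
  const __m256 index_val =
    _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
  const __m256i index = _mm256_castps_si256(index_val);
  // Gather base_ptr[offsets[0..7]] into one 512-bit register (scale 8 bytes).
  const __m512d data = _mm512_i32gather_pd(index, base_ptr, 8);
  _mm512_storeu_pd(result, data);
  // Scalar equivalent, for reference:
  for (unsigned int i = 0; i < 8; ++i)
    assert(result[i] == base_ptr[offsets[i]]);
}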
/**
// unfortunately, there does not appear to be a 256 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
- const __m256i index = *((__m256i *)(&index_val));
+ const __m256 index_val =
+ _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
_mm512_i32scatter_pd(base_ptr, index, data, 8);
}
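// Sketch of the matching scatter, under the same AVX-512F assumption. The
// function name and buffers are made up for illustration; the index trick is
// identical to the gather above, only the data now flows register -> memory.
#include <immintrin.h>

void scatter_eight_doubles(double *            base_ptr,
                           const unsigned int *offsets, // 8 distinct offsets
                           const double *      values)  // 8 doubles
{
  const __m256i index = _mm256_castps_si256(
    _mm256_loadu_ps(reinterpret_cast<const float *>(offsets)));
  const __m512d data = _mm512_loadu_pd(values);
  // Writes values[i] to base_ptr[offsets[i]] for i = 0..7 (scale 8 bytes).
  _mm512_i32scatter_pd(base_ptr, index, data, 8);
}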
// is a bitwise operation so the data type does not matter)
__m512d mask = _mm512_set1_pd(-0.);
VectorizedArray res;
- res.data = (__m512d)_mm512_andnot_epi64((__m512i)mask, (__m512i)data);
+ res.data = reinterpret_cast<__m512d>(
+ _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
+ reinterpret_cast<__m512i>(data)));
return res;
}
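// Sketch of the sign-clearing trick above, assuming AVX-512F. -0.0 has only
// the sign bit set, so andnot(mask, x) = ~mask & x clears exactly that bit,
// which yields |x| for any finite, infinite, or NaN input. The cast
// intrinsics stand in for the reinterpret_casts used in the patch.
#include <immintrin.h>
#include <cassert>
#include <cmath>

__m512d abs_pd(__m512d x)
{
  const __m512d mask = _mm512_set1_pd(-0.); // sign bit only
  return _mm512_castsi512_pd(_mm512_andnot_epi64(_mm512_castpd_si512(mask),
                                                 _mm512_castpd_si512(x)));
}

void abs_pd_check()
{
  const double in[8] = {-1.5, 2.0, -0.0, 3.25, -7.0, 0.5, -1e300, 4.0};
  double       out[8];
  _mm512_storeu_pd(out, abs_pd(_mm512_loadu_pd(in)));
  for (unsigned int i = 0; i < 8; ++i)
    assert(out[i] == std::abs(in[i]));
}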
__m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
__m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
__m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
- *(__m256d *)((double *)(&out[4 * i + 0].data) + outer) =
+ *reinterpret_cast<__m256d *>(
+ reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
_mm256_unpacklo_pd(t0, t1);
- *(__m256d *)((double *)(&out[4 * i + 1].data) + outer) =
+ *reinterpret_cast<__m256d *>(
+ reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
_mm256_unpackhi_pd(t0, t1);
- *(__m256d *)((double *)(&out[4 * i + 2].data) + outer) =
+ *reinterpret_cast<__m256d *>(
+ reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
_mm256_unpacklo_pd(t2, t3);
- *(__m256d *)((double *)(&out[4 * i + 3].data) + outer) =
+ *reinterpret_cast<__m256d *>(
+ reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
_mm256_unpackhi_pd(t2, t3);
}
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
double *out3 = out + offsets[3 + outer];
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m256d u0 =
- *(const __m256d *)((const double *)(&in[4 * i + 0].data) + outer);
- __m256d u1 =
- *(const __m256d *)((const double *)(&in[4 * i + 1].data) + outer);
- __m256d u2 =
- *(const __m256d *)((const double *)(&in[4 * i + 2].data) + outer);
- __m256d u3 =
- *(const __m256d *)((const double *)(&in[4 * i + 3].data) + outer);
+ __m256d u0 = *reinterpret_cast<const __m256d *>(
+ reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
+ __m256d u1 = *reinterpret_cast<const __m256d *>(
+ reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
+ __m256d u2 = *reinterpret_cast<const __m256d *>(
+ reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
+ __m256d u3 = *reinterpret_cast<const __m256d *>(
+ reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
__m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
__m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
__m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
// unfortunately, there does not appear to be a 512 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
- const __m512i index = *((__m512i *)(&index_val));
- data = _mm512_i32gather_ps(index, base_ptr, 4);
+ const __m512 index_val =
+ _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
+ data = _mm512_i32gather_ps(index, base_ptr, 4);
}
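// The float variant of the same pattern, sketched under the same AVX-512F
// assumption: 16 offsets now fill a full 512-bit index register and the
// gather scale drops to 4 bytes. Names below are illustrative only.
#include <immintrin.h>

__m512 gather_sixteen_floats(const float *base_ptr, const unsigned int *offsets)
{
  // Transport the 16 x 32-bit offsets through a float load, then reuse the
  // bits as integers; _mm512_castps_si512 replaces the pointer reinterpret.
  const __m512i index = _mm512_castps_si512(
    _mm512_loadu_ps(reinterpret_cast<const float *>(offsets)));
  return _mm512_i32gather_ps(index, base_ptr, 4); // base_ptr[offsets[0..15]]
}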
/**
// unfortunately, there does not appear to be a 512 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
- const __m512i index = *((__m512i *)(&index_val));
+ const __m512 index_val =
+ _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
_mm512_i32scatter_ps(base_ptr, index, data, 4);
}
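// Scatter counterpart for floats, again a sketch assuming AVX-512F and
// made-up buffer names; with distinct offsets it is equivalent to the
// commented scalar loop at the end.
#include <immintrin.h>

void scatter_sixteen_floats(float *             base_ptr,
                            const unsigned int *offsets, // 16 distinct offsets
                            const float *       values)  // 16 floats
{
  const __m512i index = _mm512_castps_si512(
    _mm512_loadu_ps(reinterpret_cast<const float *>(offsets)));
  _mm512_i32scatter_ps(base_ptr, index, _mm512_loadu_ps(values), 4);
  // Scalar equivalent:
  //   for (unsigned int i = 0; i < 16; ++i)
  //     base_ptr[offsets[i]] = values[i];
}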
// is a bitwise operation so the data type does not matter)
__m512 mask = _mm512_set1_ps(-0.f);
VectorizedArray res;
- res.data = (__m512)_mm512_andnot_epi32((__m512i)mask, (__m512i)data);
+ res.data = reinterpret_cast<__m512>(
+ _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
+ reinterpret_cast<__m512i>(data)));
return res;
}
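// The single-precision path uses the same sign-bit trick, only with 32-bit
// andnot. A scalar model of what each lane computes (illustrative, not
// library code): an IEEE-754 float carries its sign in the top bit, so
// clearing that bit yields the absolute value bit-exactly, NaNs included.
#include <cstdint>
#include <cstring>

float abs_one_lane(float x)
{
  std::uint32_t bits;
  std::memcpy(&bits, &x, sizeof(bits)); // reinterpret the float as 32 bits
  bits &= ~std::uint32_t(0x80000000u);  // andnot with the -0.f bit pattern
  std::memcpy(&x, &bits, sizeof(bits));
  return x;
}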
__m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
__m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
__m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
- *(__m256 *)((float *)(&out[4 * i + 0].data) + outer) =
+ *reinterpret_cast<__m256 *>(
+ reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
_mm256_shuffle_ps(v0, v2, 0x88);
- *(__m256 *)((float *)(&out[4 * i + 1].data) + outer) =
+ *reinterpret_cast<__m256 *>(
+ reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
_mm256_shuffle_ps(v0, v2, 0xdd);
- *(__m256 *)((float *)(&out[4 * i + 2].data) + outer) =
+ *reinterpret_cast<__m256 *>(
+ reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
_mm256_shuffle_ps(v1, v3, 0x88);
- *(__m256 *)((float *)(&out[4 * i + 3].data) + outer) =
+ *reinterpret_cast<__m256 *>(
+ reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
_mm256_shuffle_ps(v1, v3, 0xdd);
}
for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
{
for (unsigned int i = 0; i < n_chunks; ++i)
{
- __m256 u0 =
- *(const __m256 *)((const float *)(&in[4 * i + 0].data) + outer);
- __m256 u1 =
- *(const __m256 *)((const float *)(&in[4 * i + 1].data) + outer);
- __m256 u2 =
- *(const __m256 *)((const float *)(&in[4 * i + 2].data) + outer);
- __m256 u3 =
- *(const __m256 *)((const float *)(&in[4 * i + 3].data) + outer);
+ __m256 u0 = *reinterpret_cast<const __m256 *>(
+ reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
+ __m256 u1 = *reinterpret_cast<const __m256 *>(
+ reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
+ __m256 u2 = *reinterpret_cast<const __m256 *>(
+ reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
+ __m256 u3 = *reinterpret_cast<const __m256 *>(
+ reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
__m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
__m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
__m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
// unfortunately, there does not appear to be a 128 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m128 index_val = _mm_loadu_ps((const float *)offsets);
- const __m128i index = *((__m128i *)(&index_val));
- data = _mm256_i32gather_pd(base_ptr, index, 8);
+ const __m128 index_val =
+ _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
+ data = _mm256_i32gather_pd(base_ptr, index, 8);
# else
for (unsigned int i = 0; i < 4; ++i)
*(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
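// Sketch of the AVX2 branch above (compile e.g. with -mavx2): four 32-bit
// offsets travel through a 128-bit float load, and the AVX2 gather takes the
// base pointer first, unlike the AVX-512 form. The function name is
// illustrative.
#include <immintrin.h>

__m256d gather_four_doubles_avx2(const double *      base_ptr,
                                 const unsigned int *offsets)
{
  const __m128i index = _mm_castps_si128(
    _mm_loadu_ps(reinterpret_cast<const float *>(offsets)));
  return _mm256_i32gather_pd(base_ptr, index, 8); // base_ptr[offsets[0..3]]
}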
// unfortunately, there does not appear to be a 256 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
- const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
- const __m256i index = *((__m256i *)(&index_val));
- data = _mm256_i32gather_ps(base_ptr, index, 4);
+ const __m256 index_val =
+ _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+ const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+ data = _mm256_i32gather_ps(base_ptr, index, 4);
# else
for (unsigned int i = 0; i < 8; ++i)
*(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
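// Closing sketch for the AVX2 float case: the gather and the scalar fallback
// above compute the same values, which the hypothetical check below makes
// explicit (requires AVX2; names are illustrative).
#include <immintrin.h>
#include <cassert>

void gather_eight_floats_check(const float *base_ptr, const unsigned int *offsets)
{
  const __m256i index = _mm256_castps_si256(
    _mm256_loadu_ps(reinterpret_cast<const float *>(offsets)));
  const __m256 data = _mm256_i32gather_ps(base_ptr, index, 4);

  float lanes[8];
  _mm256_storeu_ps(lanes, data);
  for (unsigned int i = 0; i < 8; ++i)
    assert(lanes[i] == base_ptr[offsets[i]]); // matches the fallback loop
}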