# DEAL_II_DEFINITIONS
# DEAL_II_DEFINITIONS_DEBUG
# DEAL_II_DEFINITIONS_RELEASE
+# DEAL_II_USE_VECTORIZATION_GATHER
#
# Components and miscellaneous options:
#
)
mark_as_advanced(CMAKE_INSTALL_RPATH_USE_LINK_PATH)
+option(DEAL_II_USE_VECTORIZATION_GATHER
+ "For the x86 compilation target, the use of SIMD gather/scatter instructions can be much slower than using scalar loads. This includes a wide range of Intel hardware (in particular, server processors of the Broadwell, Skylake, Cascade Lake, and Ice Lake families released between 2015 and 2021). While the default is to aggressively use these instructions, this variable can be used to disable their use if deemed to give better performance."
+ ON
+ )
+mark_as_advanced(DEAL_II_USE_VECTORIZATION_GATHER)
+
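#
# A user who measures scalar loads to be faster on their hardware can turn
# the feature off at configure time, for example with
#   cmake -DDEAL_II_USE_VECTORIZATION_GATHER=OFF <path-to-source>
# (the <path-to-source> placeholder and any further flags are up to the user).
# The option presumably ends up as a preprocessor symbol of the same name in
# the generated configuration header, which is what the
# defined(DEAL_II_USE_VECTORIZATION_GATHER) guards in the vectorization code
# below test.
#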
########################################################################
# #
void
gather(const double *base_ptr, const unsigned int *offsets)
{
-# ifdef __AVX2__
+# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
// unfortunately, there does not appear to be a 128 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
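// (sketch of how this branch presumably continues, assuming it mirrors the
// AVX-512 variants further down in this excerpt: load the four 32-bit
// offsets as one 128-bit chunk, reinterpret them as an integer index vector,
// and gather; the intrinsics and the scalar fallback here are assumptions,
// not lines shown in the patch)
const __m128 index_val =
  _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
data = _mm256_i32gather_pd(base_ptr, index, 8);
# else
for (unsigned int i = 0; i < 4; ++i)
  *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
# endif
}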
void
gather(const float *base_ptr, const unsigned int *offsets)
{
-# ifdef __AVX2__
+# if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
// unfortunately, there does not appear to be a 256 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
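// (sketch, analogous to the double case above and likewise an assumption:
// the eight 32-bit offsets are loaded as one 256-bit chunk, reinterpreted as
// an integer index vector, and gathered, with a scalar loop as the fallback)
const __m256 index_val =
  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
data = _mm256_i32gather_ps(base_ptr, index, 4);
# else
for (unsigned int i = 0; i < 8; ++i)
  *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
# endif
}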
void
gather(const double *base_ptr, const unsigned int *offsets)
{
+# ifdef DEAL_II_USE_VECTORIZATION_GATHER
// unfortunately, there does not appear to be a 256 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
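// (the following declarations are a reconstruction, not part of the excerpt:
// `index` and `zero` used below are presumably formed by loading the eight
// 32-bit offsets as one 256-bit chunk via the reinterpret-cast trick just
// described, and by a zero-initialized start value for the masked gather)
const __m256 index_val =
  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
const __m512d zero = _mm512_setzero_pd();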
__mmask8 mask = 0xFF;
data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
+# else
+ for (unsigned int i = 0; i < 8; ++i)
+ *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
+# endif
}
/**
void
scatter(const unsigned int *offsets, double *base_ptr) const
{
+# ifdef DEAL_II_USE_VECTORIZATION_GATHER
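// (note, an inference rather than something stated in the excerpt: the
// duplicate-offset assertion lives only on this branch, presumably because a
// hardware scatter does not guarantee a useful result when two lanes target
// the same address, whereas the scalar loop in the #else branch writes the
// lanes in a well-defined order)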
for (unsigned int i = 0; i < 8; ++i)
for (unsigned int j = i + 1; j < 8; ++j)
Assert(offsets[i] != offsets[j],
const __m256 index_val =
  _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
_mm512_i32scatter_pd(base_ptr, index, data, 8);
+# else
+ for (unsigned int i = 0; i < 8; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
+# endif
}
/**
void
gather(const float *base_ptr, const unsigned int *offsets)
{
+# ifdef DEAL_II_USE_VECTORIZATION_GATHER
// unfortunately, there does not appear to be a 512 bit integer load, so
// do it by some reinterpret casts here. this is allowed because the Intel
// API allows aliasing between different vector types.
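// (reconstruction/assumption, as in the double case above: the sixteen
// 32-bit offsets are presumably loaded as one 512-bit chunk, reinterpreted
// as an integer index vector, and a zero start value is prepared)
const __m512 index_val =
  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
const __m512 zero = _mm512_setzero_ps();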
__mmask16 mask = 0xFFFF;
data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
+# else
+ for (unsigned int i = 0; i < 16; ++i)
+ *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
+# endif
}
/**
void
scatter(const unsigned int *offsets, float *base_ptr) const
{
+# ifdef DEAL_II_USE_VECTORIZATION_GATHER
for (unsigned int i = 0; i < 16; ++i)
for (unsigned int j = i + 1; j < 16; ++j)
Assert(offsets[i] != offsets[j],
const __m512 index_val =
  _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
_mm512_i32scatter_ps(base_ptr, index, data, 4);
+# else
+ for (unsigned int i = 0; i < 16; ++i)
+ base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
+# endif
}
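/**
 * Usage sketch, under the assumption that the gather()/scatter() members
 * above belong to deal.II's VectorizedArray<double, 8> specialization
 * (which requires compiling with AVX-512 support); the array sizes and
 * offsets below are made up for illustration. The semantics are the same
 * whether the hardware gather/scatter path or the scalar fallback is
 * compiled in: lane i reads from, respectively writes to, base_ptr[offsets[i]].
 *
 * @code
 * double       src[64], dst[64] = {};
 * unsigned int offsets[8];
 * for (unsigned int i = 0; i < 64; ++i)
 *   src[i] = i;
 * for (unsigned int i = 0; i < 8; ++i)
 *   offsets[i] = 8 * i; // one strided element per SIMD lane
 *
 * dealii::VectorizedArray<double, 8> v;
 * v.gather(src, offsets);  // lane i now holds src[offsets[i]]
 * v.scatter(offsets, dst); // dst[offsets[i]] now holds lane i
 * @endcode
 */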
/**