From a235cadfadfd3169616c385083ebd1fad40cd0e3 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Mon, 14 Aug 2023 12:19:20 +0200
Subject: [PATCH] SIMD: Allow to switch off vectorized gather/scatter

---
 cmake/setup_cached_variables.cmake   |  7 +++++++
 include/deal.II/base/config.h.in     |  1 +
 include/deal.II/base/vectorization.h | 24 ++++++++++++++++++++++--
 3 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/cmake/setup_cached_variables.cmake b/cmake/setup_cached_variables.cmake
index 18f1c1241f..8256b01dd1 100644
--- a/cmake/setup_cached_variables.cmake
+++ b/cmake/setup_cached_variables.cmake
@@ -46,6 +46,7 @@
 #       DEAL_II_DEFINITIONS
 #       DEAL_II_DEFINITIONS_DEBUG
 #       DEAL_II_DEFINITIONS_RELEASE
+#       DEAL_II_USE_VECTORIZATION_GATHER
 #
 # Components and miscellaneous options:
 #
@@ -175,6 +176,12 @@ set(CMAKE_INSTALL_RPATH_USE_LINK_PATH "ON" CACHE BOOL
   )
 mark_as_advanced(CMAKE_INSTALL_RPATH_USE_LINK_PATH)
 
+option(DEAL_II_USE_VECTORIZATION_GATHER
+  "For the x86 compilation target, the use of SIMD gather/scatter instructions can be much slower than using scalar loads. This includes a wide range of Intel hardware (in particular, server processors of the Broadwell, Skylake, Cascade Lake, and Ice Lake families released between 2015 and 2021). While the default is to aggressively use these instructions, this variable can be used to disable their use if deemed to give better performance."
+  ON
+  )
+mark_as_advanced(DEAL_II_USE_VECTORIZATION_GATHER)
+
 
 ########################################################################
 #                                                                      #
diff --git a/include/deal.II/base/config.h.in b/include/deal.II/base/config.h.in
index 9203616264..35ee310f82 100644
--- a/include/deal.II/base/config.h.in
+++ b/include/deal.II/base/config.h.in
@@ -70,6 +70,7 @@
 #cmakedefine DEAL_II_WITH_TRILINOS
 #cmakedefine DEAL_II_WITH_UMFPACK
 #cmakedefine DEAL_II_FEATURE_UMFPACK_BUNDLED_CONFIGURED
+#cmakedefine DEAL_II_USE_VECTORIZATION_GATHER
 #cmakedefine DEAL_II_WITH_VTK
 #cmakedefine DEAL_II_WITH_ZLIB
diff --git a/include/deal.II/base/vectorization.h b/include/deal.II/base/vectorization.h
index 20059d4f88..006876ea84 100644
--- a/include/deal.II/base/vectorization.h
+++ b/include/deal.II/base/vectorization.h
@@ -2170,7 +2170,7 @@ public:
   void
   gather(const double *base_ptr, const unsigned int *offsets)
   {
-#  ifdef __AVX2__
+#  if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
     // unfortunately, there does not appear to be a 128 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
@@ -2734,7 +2734,7 @@ public:
   void
   gather(const float *base_ptr, const unsigned int *offsets)
   {
-#  ifdef __AVX2__
+#  if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
     // unfortunately, there does not appear to be a 256 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
@@ -3350,6 +3350,7 @@ public:
   void
   gather(const double *base_ptr, const unsigned int *offsets)
   {
+#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
     // unfortunately, there does not appear to be a 256 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
@@ -3364,6 +3365,10 @@ public:
     __mmask8 mask = 0xFF;
     data = _mm512_mask_i32gather_pd(zero, mask, index, base_ptr, 8);
+#  else
+    for (unsigned int i = 0; i < 8; ++i)
+      *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
+#  endif
   }
 
   /**
@@ -3382,6 +3387,7 @@ public:
   void
   scatter(const unsigned int *offsets, double *base_ptr) const
   {
+#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
     for (unsigned int i = 0; i < 8; ++i)
       for (unsigned int j = i + 1; j < 8; ++j)
         Assert(offsets[i] != offsets[j],
@@ -3395,6 +3401,10 @@ public:
       _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
     const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
     _mm512_i32scatter_pd(base_ptr, index, data, 8);
+#  else
+    for (unsigned int i = 0; i < 8; ++i)
+      base_ptr[offsets[i]] = *(reinterpret_cast<const double *>(&data) + i);
+#  endif
   }
 
   /**
@@ -3955,6 +3965,7 @@ public:
   void
   gather(const float *base_ptr, const unsigned int *offsets)
   {
+#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
     // unfortunately, there does not appear to be a 512 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
@@ -3969,6 +3980,10 @@ public:
     __mmask16 mask = 0xFFFF;
     data = _mm512_mask_i32gather_ps(zero, mask, index, base_ptr, 4);
+#  else
+    for (unsigned int i = 0; i < 16; ++i)
+      *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
+#  endif
   }
 
   /**
@@ -3987,6 +4002,7 @@ public:
   void
   scatter(const unsigned int *offsets, float *base_ptr) const
   {
+#  ifdef DEAL_II_USE_VECTORIZATION_GATHER
     for (unsigned int i = 0; i < 16; ++i)
       for (unsigned int j = i + 1; j < 16; ++j)
         Assert(offsets[i] != offsets[j],
@@ -4000,6 +4016,10 @@ public:
       _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
     const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
     _mm512_i32scatter_ps(base_ptr, index, data, 4);
+#  else
+    for (unsigned int i = 0; i < 16; ++i)
+      base_ptr[offsets[i]] = *(reinterpret_cast<const float *>(&data) + i);
+#  endif
   }
 
   /**
-- 
2.39.5
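Note (not part of the patch): the guard pattern the patch introduces can be distilled into a short standalone C++ sketch. The snippet below applies the same #if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER) guard to a 4-wide double gather; the function name gather4 is invented for illustration, while the intrinsics (_mm_loadu_si128, _mm256_i32gather_pd, _mm256_setr_pd) are standard Intel ones. Compile with at least -mavx, and with -mavx2 -DDEAL_II_USE_VECTORIZATION_GATHER to exercise the hardware-gather branch:

#include <immintrin.h>

__m256d gather4(const double *base_ptr, const unsigned int *offsets)
{
#if defined(__AVX2__) && defined(DEAL_II_USE_VECTORIZATION_GATHER)
  // Hardware gather: 32-bit indices, scale 8, so result[i] = base_ptr[offsets[i]]
  const __m128i index =
    _mm_loadu_si128(reinterpret_cast<const __m128i *>(offsets));
  return _mm256_i32gather_pd(base_ptr, index, 8);
#else
  // Scalar fallback: four individual loads, which the option description
  // says can be faster on Broadwell through Ice Lake server processors
  return _mm256_setr_pd(base_ptr[offsets[0]],
                        base_ptr[offsets[1]],
                        base_ptr[offsets[2]],
                        base_ptr[offsets[3]]);
#endif
}

At configure time, the new behavior is selected by passing -DDEAL_II_USE_VECTORIZATION_GATHER=OFF to cmake; since the option defaults to ON, the vectorized gather/scatter paths remain enabled unless explicitly switched off.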