From: Daniel Arndt
Date: Fri, 18 Jan 2019 16:16:28 +0000 (+0100)
Subject: Fixup vectorization header
X-Git-Tag: v9.1.0-rc1~428^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fc97670ddd7657fcb265a086e22f16fb9ab0f582;p=dealii.git

Fixup vectorization header
---

diff --git a/include/deal.II/base/vectorization.h b/include/deal.II/base/vectorization.h
index 8fc387a5b3..1a6d223e62 100644
--- a/include/deal.II/base/vectorization.h
+++ b/include/deal.II/base/vectorization.h
@@ -735,9 +735,10 @@ public:
     // unfortunately, there does not appear to be a 256 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
-    const __m256i index = *((__m256i *)(&index_val));
-    data = _mm512_i32gather_pd(index, base_ptr, 8);
+    const __m256 index_val =
+      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+    data = _mm512_i32gather_pd(index, base_ptr, 8);
   }
 
   /**
@@ -765,8 +766,9 @@ public:
     // unfortunately, there does not appear to be a 256 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
-    const __m256i index = *((__m256i *)(&index_val));
+    const __m256 index_val =
+      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
     _mm512_i32scatter_pd(base_ptr, index, data, 8);
   }
 
@@ -805,7 +807,9 @@ private:
     // is a bitwise operation so the data type does not matter)
     __m512d mask = _mm512_set1_pd(-0.);
     VectorizedArray res;
-    res.data = (__m512d)_mm512_andnot_epi64((__m512i)mask, (__m512i)data);
+    res.data = reinterpret_cast<__m512d>(
+      _mm512_andnot_epi64(reinterpret_cast<__m512i>(mask),
+                          reinterpret_cast<__m512i>(data)));
     return res;
   }
 
@@ -882,13 +886,17 @@ vectorized_load_and_transpose(const unsigned int n_entries,
       __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
       __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
       __m256d t3 = _mm256_permute2f128_pd(u1, u3, 0x31);
-      *(__m256d *)((double *)(&out[4 * i + 0].data) + outer) =
+      *reinterpret_cast<__m256d *>(
+        reinterpret_cast<double *>(&out[4 * i + 0].data) + outer) =
         _mm256_unpacklo_pd(t0, t1);
-      *(__m256d *)((double *)(&out[4 * i + 1].data) + outer) =
+      *reinterpret_cast<__m256d *>(
+        reinterpret_cast<double *>(&out[4 * i + 1].data) + outer) =
         _mm256_unpackhi_pd(t0, t1);
-      *(__m256d *)((double *)(&out[4 * i + 2].data) + outer) =
+      *reinterpret_cast<__m256d *>(
+        reinterpret_cast<double *>(&out[4 * i + 2].data) + outer) =
         _mm256_unpacklo_pd(t2, t3);
-      *(__m256d *)((double *)(&out[4 * i + 3].data) + outer) =
+      *reinterpret_cast<__m256d *>(
+        reinterpret_cast<double *>(&out[4 * i + 3].data) + outer) =
         _mm256_unpackhi_pd(t2, t3);
     }
   for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
@@ -922,14 +930,14 @@ vectorized_transpose_and_store(const bool add_into,
   double *out3 = out + offsets[3 + outer];
   for (unsigned int i = 0; i < n_chunks; ++i)
     {
-      __m256d u0 =
-        *(const __m256d *)((const double *)(&in[4 * i + 0].data) + outer);
-      __m256d u1 =
-        *(const __m256d *)((const double *)(&in[4 * i + 1].data) + outer);
-      __m256d u2 =
-        *(const __m256d *)((const double *)(&in[4 * i + 2].data) + outer);
-      __m256d u3 =
-        *(const __m256d *)((const double *)(&in[4 * i + 3].data) + outer);
+      __m256d u0 = *reinterpret_cast<const __m256d *>(
+        reinterpret_cast<const double *>(&in[4 * i + 0].data) + outer);
+      __m256d u1 = *reinterpret_cast<const __m256d *>(
+        reinterpret_cast<const double *>(&in[4 * i + 1].data) + outer);
+      __m256d u2 = *reinterpret_cast<const __m256d *>(
+        reinterpret_cast<const double *>(&in[4 * i + 2].data) + outer);
+      __m256d u3 = *reinterpret_cast<const __m256d *>(
+        reinterpret_cast<const double *>(&in[4 * i + 3].data) + outer);
       __m256d t0 = _mm256_permute2f128_pd(u0, u2, 0x20);
       __m256d t1 = _mm256_permute2f128_pd(u1, u3, 0x20);
       __m256d t2 = _mm256_permute2f128_pd(u0, u2, 0x31);
@@ -1137,9 +1145,10 @@ public:
     // unfortunately, there does not appear to be a 512 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
-    const __m512i index = *((__m512i *)(&index_val));
-    data = _mm512_i32gather_ps(index, base_ptr, 4);
+    const __m512 index_val =
+      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
+    data = _mm512_i32gather_ps(index, base_ptr, 4);
   }
 
   /**
@@ -1167,8 +1176,9 @@ public:
     // unfortunately, there does not appear to be a 512 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m512 index_val = _mm512_loadu_ps((const float *)offsets);
-    const __m512i index = *((__m512i *)(&index_val));
+    const __m512 index_val =
+      _mm512_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m512i index = *reinterpret_cast<const __m512i *>(&index_val);
     _mm512_i32scatter_ps(base_ptr, index, data, 4);
   }
 
@@ -1207,7 +1217,9 @@ private:
     // is a bitwise operation so the data type does not matter)
     __m512 mask = _mm512_set1_ps(-0.f);
     VectorizedArray res;
-    res.data = (__m512)_mm512_andnot_epi32((__m512i)mask, (__m512i)data);
+    res.data = reinterpret_cast<__m512>(
+      _mm512_andnot_epi32(reinterpret_cast<__m512i>(mask),
+                          reinterpret_cast<__m512i>(data)));
     return res;
   }
 
@@ -1294,13 +1306,17 @@ vectorized_load_and_transpose(const unsigned int n_entries,
      __m256 v1 = _mm256_shuffle_ps(t0, t1, 0xee);
      __m256 v2 = _mm256_shuffle_ps(t2, t3, 0x44);
      __m256 v3 = _mm256_shuffle_ps(t2, t3, 0xee);
-      *(__m256 *)((float *)(&out[4 * i + 0].data) + outer) =
+      *reinterpret_cast<__m256 *>(
+        reinterpret_cast<float *>(&out[4 * i + 0].data) + outer) =
         _mm256_shuffle_ps(v0, v2, 0x88);
-      *(__m256 *)((float *)(&out[4 * i + 1].data) + outer) =
+      *reinterpret_cast<__m256 *>(
+        reinterpret_cast<float *>(&out[4 * i + 1].data) + outer) =
         _mm256_shuffle_ps(v0, v2, 0xdd);
-      *(__m256 *)((float *)(&out[4 * i + 2].data) + outer) =
+      *reinterpret_cast<__m256 *>(
+        reinterpret_cast<float *>(&out[4 * i + 2].data) + outer) =
         _mm256_shuffle_ps(v1, v3, 0x88);
-      *(__m256 *)((float *)(&out[4 * i + 3].data) + outer) =
+      *reinterpret_cast<__m256 *>(
+        reinterpret_cast<float *>(&out[4 * i + 3].data) + outer) =
         _mm256_shuffle_ps(v1, v3, 0xdd);
     }
   for (unsigned int i = 4 * n_chunks; i < n_entries; ++i)
@@ -1327,14 +1343,14 @@ vectorized_transpose_and_store(const bool add_into,
   {
     for (unsigned int i = 0; i < n_chunks; ++i)
       {
-        __m256 u0 =
-          *(const __m256 *)((const float *)(&in[4 * i + 0].data) + outer);
-        __m256 u1 =
-          *(const __m256 *)((const float *)(&in[4 * i + 1].data) + outer);
-        __m256 u2 =
-          *(const __m256 *)((const float *)(&in[4 * i + 2].data) + outer);
-        __m256 u3 =
-          *(const __m256 *)((const float *)(&in[4 * i + 3].data) + outer);
+        __m256 u0 = *reinterpret_cast<const __m256 *>(
+          reinterpret_cast<const float *>(&in[4 * i + 0].data) + outer);
+        __m256 u1 = *reinterpret_cast<const __m256 *>(
+          reinterpret_cast<const float *>(&in[4 * i + 1].data) + outer);
+        __m256 u2 = *reinterpret_cast<const __m256 *>(
+          reinterpret_cast<const float *>(&in[4 * i + 2].data) + outer);
+        __m256 u3 = *reinterpret_cast<const __m256 *>(
+          reinterpret_cast<const float *>(&in[4 * i + 3].data) + outer);
         __m256 t0 = _mm256_shuffle_ps(u0, u1, 0x44);
         __m256 t1 = _mm256_shuffle_ps(u0, u1, 0xee);
         __m256 t2 = _mm256_shuffle_ps(u2, u3, 0x44);
@@ -1573,9 +1589,10 @@ public:
     // unfortunately, there does not appear to be a 128 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m128 index_val = _mm_loadu_ps((const float *)offsets);
-    const __m128i index = *((__m128i *)(&index_val));
-    data = _mm256_i32gather_pd(base_ptr, index, 8);
+    const __m128 index_val =
+      _mm_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m128i index = *reinterpret_cast<const __m128i *>(&index_val);
+    data = _mm256_i32gather_pd(base_ptr, index, 8);
 # else
     for (unsigned int i = 0; i < 4; ++i)
       *(reinterpret_cast<double *>(&data) + i) = base_ptr[offsets[i]];
@@ -1952,9 +1969,10 @@ public:
     // unfortunately, there does not appear to be a 256 bit integer load, so
     // do it by some reinterpret casts here. this is allowed because the Intel
     // API allows aliasing between different vector types.
-    const __m256 index_val = _mm256_loadu_ps((const float *)offsets);
-    const __m256i index = *((__m256i *)(&index_val));
-    data = _mm256_i32gather_ps(base_ptr, index, 4);
+    const __m256 index_val =
+      _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
+    const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);
+    data = _mm256_i32gather_ps(base_ptr, index, 4);
 # else
     for (unsigned int i = 0; i < 8; ++i)
       *(reinterpret_cast<float *>(&data) + i) = base_ptr[offsets[i]];
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index 84eb0b5f92..ae0ec7c961 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -911,7 +911,7 @@ namespace LinearAlgebra
         {
           Number *new_val;
           Utilities::System::posix_memalign(
-            (void **)&new_val,
+            reinterpret_cast<void **>(&new_val),
             64,
             sizeof(Number) * partitioner->n_import_indices());
           import_data.values.reset(new_val);
@@ -926,7 +926,7 @@ namespace LinearAlgebra
           // uses a view of the array and thus we need the data on the host to
           // outlive the scope of the function.
           Number *new_val;
-          Utilities::System::posix_memalign((void **)&new_val,
+          Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
                                             64,
                                             sizeof(Number) * allocated_size);
@@ -1071,7 +1071,7 @@ namespace LinearAlgebra
         {
           Number *new_val;
           Utilities::System::posix_memalign(
-            (void **)&new_val,
+            reinterpret_cast<void **>(&new_val),
             64,
             sizeof(Number) * partitioner->n_import_indices());
           import_data.values.reset(new_val);
@@ -1086,7 +1086,7 @@ namespace LinearAlgebra
           // uses a view of the array and thus we need the data on the host to
           // outlive the scope of the function.
           Number *new_val;
-          Utilities::System::posix_memalign((void **)&new_val,
+          Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
                                             64,
                                             sizeof(Number) * allocated_size);
diff --git a/source/lac/sparse_direct.cc b/source/lac/sparse_direct.cc
index b66fcd1480..276310897c 100644
--- a/source/lac/sparse_direct.cc
+++ b/source/lac/sparse_direct.cc
@@ -370,8 +370,8 @@ SparseDirectUMFPACK::solve(const Matrix & matrix,
 SparseDirectUMFPACK::SparseDirectUMFPACK()
   : _m(0)
   , _n(0)
-  , symbolic_decomposition(0)
-  , numeric_decomposition(0)
+  , symbolic_decomposition(nullptr)
+  , numeric_decomposition(nullptr)
   , control(0)
 {}
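
Note (not part of the patch above): the hunks only swap C-style casts for
reinterpret_cast and literal 0 pointers for nullptr; the behavior is
unchanged. What follows is a minimal, self-contained sketch of the gather
idiom the comments in vectorization.h describe, using the same
reinterpret_cast pattern the patch introduces. It assumes an AVX-512F-capable
compiler and CPU (e.g. g++ -std=c++11 -mavx512f); the names src, offsets and
out are made up for illustration and do not appear in deal.II.

#include <immintrin.h>
#include <cstdio>

int main()
{
  double       src[16];
  unsigned int offsets[8] = {0, 2, 4, 6, 8, 10, 12, 14};
  for (unsigned int i = 0; i < 16; ++i)
    src[i] = i;

  // Load the eight 32-bit offsets through a float load and reinterpret the
  // register as __m256i, relying on the vector-type aliasing that the
  // comments above say the Intel API permits.
  const __m256 index_val =
    _mm256_loadu_ps(reinterpret_cast<const float *>(offsets));
  const __m256i index = *reinterpret_cast<const __m256i *>(&index_val);

  // Gather src[offsets[0]], ..., src[offsets[7]]; scale 8 == sizeof(double).
  const __m512d gathered = _mm512_i32gather_pd(index, src, 8);

  double out[8];
  _mm512_storeu_pd(out, gathered);
  for (unsigned int i = 0; i < 8; ++i)
    std::printf("%g ", out[i]); // expected: 0 2 4 6 8 10 12 14
  std::printf("\n");
}

The float load is used only because, as the original comments note, no
suitable 256-bit integer load intrinsic appeared to be available for this
code path; the reinterpret_cast merely relabels the register contents.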