From: Daniel Arndt
Date: Thu, 1 Dec 2022 22:15:38 +0000 (+0000)
Subject: Rename variables
X-Git-Tag: v9.5.0-rc1~697^2~38
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5d3c75684a6c493d409c70de72a4b445b3383ad5;p=dealii.git

Rename variables
---

diff --git a/include/deal.II/base/memory_space_data.h b/include/deal.II/base/memory_space_data.h
index 6daecffd5a..cc6d03a182 100644
--- a/include/deal.II/base/memory_space_data.h
+++ b/include/deal.II/base/memory_space_data.h
@@ -121,14 +121,14 @@ namespace MemorySpace
     using MemorySpace = Host;

     MemorySpaceData()
-      : values((dealii::Impl::ensure_kokkos_initialized(),
+      : values_dev((dealii::Impl::ensure_kokkos_initialized(),
                 Kokkos::View("host data", 0)))
     {}

     void copy_to(T *begin, std::size_t n_elements)
     {
-      Assert(n_elements <= values.extent(0),
+      Assert(n_elements <= values_dev.extent(0),
              ExcMessage("n_elements greater than the size of values."));
       using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;
       Kokkos::
@@ -137,14 +137,14 @@ namespace MemorySpace
       Kokkos::deep_copy(
         ExecutionSpace{},
         begin_view,
-        Kokkos::subview(values, Kokkos::make_pair(std::size_t(0), n_elements)));
+        Kokkos::subview(values_dev, Kokkos::make_pair(std::size_t(0), n_elements)));
       ExecutionSpace{}.fence();
     }

     void copy_from(T *begin, std::size_t n_elements)
     {
-      Assert(n_elements <= values.extent(0),
+      Assert(n_elements <= values_dev.extent(0),
              ExcMessage("n_elements greater than the size of values."));
       using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;

     Kokkos::View values;
-    // unused
+    Kokkos::View values_host_buffer;
+    Kokkos::View values_dev;

     std::shared_ptr values_sm_ptr;

@@ -186,7 +186,7 @@ namespace MemorySpace
     using MemorySpace = Device;

     MemorySpaceData()
-      : values((dealii::Impl::ensure_kokkos_initialized(),
+      : values_host_buffer((dealii::Impl::ensure_kokkos_initialized(),
                 Kokkos::View("host data", 0)))
       , values_dev(Kokkos::View(
           "memoryspace data",
@@ -226,7 +226,7 @@ namespace MemorySpace
       ExecutionSpace{}.fence();
     }

-    Kokkos::View values;
+    Kokkos::View values_host_buffer;

     Kokkos::View values_dev;

diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index 6b7e23f468..09bcefad0d 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -1474,7 +1474,7 @@ namespace LinearAlgebra
       begin(::dealii::MemorySpace::
               MemorySpaceData &data)
       {
-        return data.values.data();
+        return data.values_dev.data();
       }

       static inline
@@ -1482,14 +1482,14 @@ namespace LinearAlgebra
       begin(const ::dealii::MemorySpace::
               MemorySpaceData &data)
       {
-        return data.values.data();
+        return data.values_dev.data();
       }

       static inline Number *
       get_values(::dealii::MemorySpace::
                    MemorySpaceData &data)
       {
-        return data.values.data();
+        return data.values_dev.data();
       }
     };

@@ -1652,7 +1652,7 @@ namespace LinearAlgebra
             vector_is_ghosted == true,
             ExcMessage("You tried to read a ghost element of this vector, "
                        "but it has not imported its ghost values."));
-      return data.values[partitioner->global_to_local(global_index)];
+      return data.values_dev[partitioner->global_to_local(global_index)];
     }

@@ -1679,7 +1679,7 @@ namespace LinearAlgebra
       // (then, the compiler picks this method according to the C++ rule book
       // even if a human would pick the const method when this subsequent use
       // is just a read)
-      return data.values[partitioner->global_to_local(global_index)];
+      return data.values_dev[partitioner->global_to_local(global_index)];
     }
@@ -1718,7 +1718,7 @@ namespace LinearAlgebra
             ExcMessage("You tried to read a ghost element of this vector, "
                        "but it has not imported its ghost values."));
-      return data.values[local_index];
+      return data.values_dev[local_index];
     }

@@ -1735,7 +1735,7 @@ namespace LinearAlgebra
                  partitioner->locally_owned_size() +
                    partitioner->n_ghost_indices());
-      return data.values[local_index];
+      return data.values_dev[local_index];
     }

diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index b6fd2efb7b..78298e8e5b 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -135,12 +135,12 @@ namespace LinearAlgebra
       {
         if (comm_shared == MPI_COMM_SELF)
           {
-            Kokkos::resize(data.values, new_alloc_size);
+            Kokkos::resize(data.values_dev, new_alloc_size);
             allocated_size = new_alloc_size;
             data.values_sm = {
-              ArrayView(data.values.data(), new_alloc_size)};
+              ArrayView(data.values_dev.data(), new_alloc_size)};
           }
         else
           {
@@ -224,7 +224,7 @@ namespace LinearAlgebra
             data.values_sm[i] =
               ArrayView(others[i], new_alloc_sizes[i]);

-            data.values =
+            data.values_dev =
               Kokkos::View>(
@@ -312,7 +312,7 @@ namespace LinearAlgebra
       {
         for (size_type i = 0; i < size; ++i)
           max =
-            std::max(numbers::NumberTraits::abs(data.values[i]), max);
+            std::max(numbers::NumberTraits::abs(data.values_dev[i]), max);
       }
     };

@@ -528,7 +528,7 @@ namespace LinearAlgebra
     resize_val(size, comm_sm);

     // delete previous content in import data
-    Kokkos::resize(import_data.values, 0);
+    Kokkos::resize(import_data.values_host_buffer, 0);
     Kokkos::resize(import_data.values_dev, 0);

     // set partitioner to serial version
@@ -559,7 +559,7 @@ namespace LinearAlgebra
     resize_val(local_size + ghost_size, comm_sm);

     // delete previous content in import data
-    Kokkos::resize(import_data.values, 0);
+    Kokkos::resize(import_data.values_host_buffer, 0);
     Kokkos::resize(import_data.values_dev, 0);

     // create partitioner
@@ -605,7 +605,7 @@ namespace LinearAlgebra
         // is only used as temporary storage for compress() and
         // update_ghost_values, and we might have vectors where we never
         // call these methods and hence do not need to have the storage.
-        Kokkos::resize(import_data.values, 0);
+        Kokkos::resize(import_data.values_host_buffer, 0);
         Kokkos::resize(import_data.values_dev, 0);

         thread_loop_partitioner = v.thread_loop_partitioner;
@@ -668,7 +668,7 @@ namespace LinearAlgebra
         // is only used as temporary storage for compress() and
         // update_ghost_values, and we might have vectors where we never
         // call these methods and hence do not need to have the storage.
-        Kokkos::resize(import_data.values, 0);
+        Kokkos::resize(import_data.values_host_buffer, 0);
         Kokkos::resize(import_data.values_dev, 0);

         vector_is_ghosted = false;
@@ -930,12 +930,10 @@ namespace LinearAlgebra
   void
   Vector::zero_out_ghost_values() const
   {
-    if (data.values.size() != 0)
-      std::fill_n(data.values.data() + partitioner->locally_owned_size(),
-                  partitioner->n_ghost_indices(),
-                  Number());
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
     if (data.values_dev.size() != 0)
+      {
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+        if (std::is_same_v)
       {
         const cudaError_t cuda_error_code =
           cudaMemset(data.values_dev.data() +
@@ -944,7 +942,14 @@ namespace LinearAlgebra
                      partitioner->n_ghost_indices() * sizeof(Number));
         AssertCuda(cuda_error_code);
       }
+        else
 #endif
+          {
+            std::fill_n(data.values_dev.data() + partitioner->locally_owned_size(),
+                        partitioner->n_ghost_indices(),
+                        Number());
+          }
+      }

     vector_is_ghosted = false;
   }
@@ -969,11 +974,11 @@ namespace LinearAlgebra
     if (partitioner->n_import_indices() > 0)
       {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
         if (std::is_same::value)
           {
-            if (import_data.values_dev.size() == 0)
-              Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
+            if (import_data.values_host_buffer.size() == 0)
+              Kokkos::resize(import_data.values_host_buffer, partitioner->n_import_indices());
           }
         else
 #  endif
@@ -984,8 +989,8 @@ namespace LinearAlgebra
               std::is_same::value,
               "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 #  endif
-            if (import_data.values.size() == 0)
-              Kokkos::resize(import_data.values, partitioner->n_import_indices());
+            if (import_data.values_dev.size() == 0)
+              Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
           }
       }
@@ -997,22 +1002,15 @@ namespace LinearAlgebra
         // device. We use values to store the elements because the function
         // uses a view of the array and thus we need the data on the host to
         // outlive the scope of the function.
-          data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
-        }
-#  endif
-
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-    if (std::is_same::value)
-      {
+          data.values_host_buffer = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
           partitioner->import_from_ghosted_array_start(
             operation,
             communication_channel,
-            ArrayView(
-              data.values_dev.data() + partitioner->locally_owned_size(),
+            ArrayView(
+              data.values_host_buffer.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
-            ArrayView(
-              import_data.values_dev.data(), partitioner->n_import_indices()),
+            ArrayView(
+              import_data.values_host_buffer.data(), partitioner->n_import_indices()),
             compress_requests);
       }
     else
 #  endif
       {
         partitioner->import_from_ghosted_array_start(
           operation,
           communication_channel,
-          ArrayView(
-            data.values.data() + partitioner->locally_owned_size(),
+          ArrayView(
+            data.values_dev.data() + partitioner->locally_owned_size(),
             partitioner->n_ghost_indices()),
-          ArrayView(
-            import_data.values.data(), partitioner->n_import_indices()),
+          ArrayView(
+            import_data.values_dev.data(), partitioner->n_import_indices()),
           compress_requests);
       }
 #else
     (void)operation;
 #endif
@@ -1051,59 +1049,54 @@ namespace LinearAlgebra
     // make this function thread safe
     std::lock_guard lock(mutex);
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
     if (std::is_same::value)
       {
         Assert(partitioner->n_import_indices() == 0 ||
-                 import_data.values_dev.size() != 0,
-               ExcNotInitialized());
-        partitioner
-          ->import_from_ghosted_array_finish(
-            operation,
-            ArrayView(
-              import_data.values_dev.data(), partitioner->n_import_indices()),
-            ArrayView(
-              data.values_dev.data(), partitioner->locally_owned_size()),
-            ArrayView(
-              data.values_dev.data() + partitioner->locally_owned_size(),
-              partitioner->n_ghost_indices()),
-            compress_requests);
-      }
-    else
-#  endif
-      {
-        Assert(partitioner->n_import_indices() == 0 ||
-                 import_data.values.size() != 0,
+                 import_data.values_host_buffer.size() != 0,
                ExcNotInitialized());
         partitioner
           ->import_from_ghosted_array_finish(
             operation,
             ArrayView(
-              import_data.values.data(), partitioner->n_import_indices()),
+              import_data.values_host_buffer.data(), partitioner->n_import_indices()),
             ArrayView(
-              data.values.data(), partitioner->locally_owned_size()),
+              data.values_host_buffer.data(), partitioner->locally_owned_size()),
             ArrayView(
-              data.values.data() + partitioner->locally_owned_size(),
+              data.values_host_buffer.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
             compress_requests);
-      }
-#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
-    // The communication is done on the host, so we need to
-    // move the data back to the device.
-    if (std::is_same::value)
-      {
-        cudaError_t cuda_error_code =
+        // The communication is done on the host, so we need to
+        // move the data back to the device.
+        cudaError_t cuda_error_code =
           cudaMemcpy(data.values_dev.data(),
-                     data.values.data(),
+                     data.values_host_buffer.data(),
                      allocated_size * sizeof(Number),
                      cudaMemcpyHostToDevice);
         AssertCuda(cuda_error_code);
-        Kokkos::resize(data.values, 0);
+        Kokkos::resize(data.values_host_buffer, 0);
       }
+    else
 #  endif
+      {
+        Assert(partitioner->n_import_indices() == 0 ||
+                 import_data.values_dev.size() != 0,
+               ExcNotInitialized());
+        partitioner
+          ->import_from_ghosted_array_finish(
+            operation,
+            ArrayView(
+              import_data.values_dev.data(), partitioner->n_import_indices()),
+            ArrayView(
+              data.values_dev.data(), partitioner->locally_owned_size()),
+            ArrayView(
+              data.values_dev.data() + partitioner->locally_owned_size(),
+              partitioner->n_ghost_indices()),
+            compress_requests);
+      }
+
 #else
     (void)operation;
 #endif
@@ -1130,22 +1123,22 @@ namespace LinearAlgebra
     if (partitioner->n_import_indices() > 0)
       {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-        Assert(
-          (std::is_same::value),
-          ExcMessage(
-            "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
-        if (import_data.values_dev.size() == 0)
-          Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
-# else
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+        if (std::is_same_v)
+          {
+            if (import_data.values_host_buffer.size() == 0)
+              Kokkos::resize(import_data.values_host_buffer, partitioner->n_import_indices());
+          }
+        else
+#  endif
+          {
 #  ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
             static_assert(
               std::is_same::value,
               "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 #  endif
-            if (import_data.values.size() == 0)
-              Kokkos::resize(import_data.values, partitioner->n_import_indices());
-# endif
+            if (import_data.values_dev.size() == 0)
+              Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
+          }
       }

 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
     !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
@@ -1155,38 +1148,32 @@ namespace LinearAlgebra
         // device. We use values to store the elements because the function
         // uses a view of the array and thus we need the data on the host to
         // outlive the scope of the function.
-          data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
-        }
-#  endif
-
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-    if (std::is_same_v)
       {
-        partitioner->export_to_ghosted_array_start(
+          data.values_host_buffer = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
+
+          partitioner->export_to_ghosted_array_start(
             communication_channel,
-            ArrayView(
-              data.values_dev.data(), partitioner->locally_owned_size()),
-            ArrayView(import_data.values_dev.data(),
+            ArrayView(
+              data.values_host_buffer.data(), partitioner->locally_owned_size()),
+            ArrayView(import_data.values_host_buffer.data(),
                       partitioner->n_import_indices()),
-            ArrayView(
-              data.values_dev.data() + partitioner->locally_owned_size(),
+            ArrayView(
+              data.values_host_buffer.data() + partitioner->locally_owned_size(),
              partitioner->n_ghost_indices()),
            update_ghost_values_requests);
       }
     else
-#else
+#endif
       {
-        partitioner->export_to_ghosted_array_start(
+        partitioner->export_to_ghosted_array_start(
           communication_channel,
-          ArrayView(
-            data.values.data(), partitioner->locally_owned_size()),
-          ArrayView(import_data.values.data(),
+          ArrayView(
+            data.values_dev.data(), partitioner->locally_owned_size()),
+          ArrayView(import_data.values_dev.data(),
                     partitioner->n_import_indices()),
-          ArrayView(
-            data.values.data() + partitioner->locally_owned_size(),
+          ArrayView(
+            data.values_dev.data() + partitioner->locally_owned_size(),
            partitioner->n_ghost_indices()),
          update_ghost_values_requests);
       }
-# endif

 #else
     (void)communication_channel;
@@ -1211,43 +1198,36 @@ namespace LinearAlgebra
     std::lock_guard lock(mutex);

 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-      defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
     if (std::is_same::value)
       {
         partitioner->export_to_ghosted_array_finish(
-          ArrayView(
-            data.values_dev.data() + partitioner->locally_owned_size(),
-            partitioner->n_ghost_indices()),
-          update_ghost_values_requests);
-      }
    else
-#else
-      {
-        partitioner->export_to_ghosted_array_finish(
           ArrayView(
-            data.values.data() + partitioner->locally_owned_size(),
+            data.values_host_buffer.data() + partitioner->locally_owned_size(),
             partitioner->n_ghost_indices()),
           update_ghost_values_requests);
-      }
-# endif
-  }
-# if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
-    // The communication is done on the host, so we need to
+        // The communication is done on the host, so we need to
         // move the data back to the device.
-    if (std::is_same::value)
-      {
-        cudaError_t cuda_error_code =
+        cudaError_t cuda_error_code =
           cudaMemcpy(data.values_dev.data() +
                        partitioner->locally_owned_size(),
-                     data.values.data() + partitioner->locally_owned_size(),
+                     data.values_host_buffer.data() + partitioner->locally_owned_size(),
                      partitioner->n_ghost_indices() * sizeof(Number),
                      cudaMemcpyHostToDevice);
         AssertCuda(cuda_error_code);
-        Kokkos::resize(data.values, 0);
+        Kokkos::resize(data.values_host_buffer, 0);
+      }
    else
+#endif
+      {
+        partitioner->export_to_ghosted_array_finish(
+          ArrayView(
+            data.values_dev.data() + partitioner->locally_owned_size(),
+            partitioner->n_ghost_indices()),
+          update_ghost_values_requests);
+      }
   }
-# endif
 #endif

     vector_is_ghosted = true;
@@ -2053,7 +2033,7 @@ namespace LinearAlgebra
     if (partitioner.use_count() > 0)
       memory +=
         partitioner->memory_consumption() / partitioner.use_count() + 1;
-    if (import_data.values.size() != 0 || import_data.values_dev.size() != 0)
+    if (import_data.values_host_buffer.size() != 0 || import_data.values_dev.size() != 0)
       memory += (static_cast(partitioner->n_import_indices()) *
                  sizeof(Number));
     return memory;
diff --git a/include/deal.II/lac/vector_operations_internal.h b/include/deal.II/lac/vector_operations_internal.h
index 20cb26af29..80754c2412 100644
--- a/include/deal.II/lac/vector_operations_internal.h
+++ b/include/deal.II/lac/vector_operations_internal.h
@@ -1734,8 +1734,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vector_copy copier(v_data.values.data(),
-                           data.values.data());
+        Vector_copy copier(v_data.values_dev.data(),
+                           data.values_dev.data());
         parallel_for(copier, 0, size, thread_loop_partitioner);
      }

@@ -1748,7 +1748,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vector_set setter(s, data.values.data());
+        Vector_set setter(s, data.values_dev.data());
         parallel_for(setter, 0, size, thread_loop_partitioner);
      }

@@ -1763,8 +1763,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_add_v vector_add(data.values.data(),
-                                       v_data.values.data());
+        Vectorization_add_v vector_add(data.values_dev.data(),
+                                       v_data.values_dev.data());
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

@@ -1779,8 +1779,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_subtract_v vector_subtract(data.values.data(),
-                                                 v_data.values.data());
+        Vectorization_subtract_v vector_subtract(data.values_dev.data(),
+                                                 v_data.values_dev.data());
         parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
      }

@@ -1794,7 +1794,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_add_factor vector_add(data.values.data(), a);
+        Vectorization_add_factor vector_add(data.values_dev.data(), a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

@@ -1809,8 +1809,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_add_av vector_add(data.values.data(),
-                                        v_data.values.data(),
+        Vectorization_add_av vector_add(data.values_dev.data(),
+                                        v_data.values_dev.data(),
                                         a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

@@ -1831,7 +1831,7 @@ namespace internal
                        &data)
      {
         Vectorization_add_avpbw vector_add(
-          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), a, b);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
      }

@@ -1847,8 +1847,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_sadd_xv vector_sadd(data.values.data(),
-                                          v_data.values.data(),
+        Vectorization_sadd_xv vector_sadd(data.values_dev.data(),
+                                          v_data.values_dev.data(),
                                           x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
      }

@@ -1866,8 +1866,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_sadd_xav vector_sadd(data.values.data(),
-                                           v_data.values.data(),
+        Vectorization_sadd_xav vector_sadd(data.values_dev.data(),
+                                           v_data.values_dev.data(),
                                            a,
                                            x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);

@@ -1890,7 +1890,7 @@ namespace internal
                        &data)
      {
         Vectorization_sadd_xavbw vector_sadd(
-          data.values.data(), v_data.values.data(), w_data.values.data(), x, a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), x, a, b);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
      }

@@ -1904,7 +1904,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_multiply_factor vector_multiply(data.values.data(),
+        Vectorization_multiply_factor vector_multiply(data.values_dev.data(),
                                                       factor);
         parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
      }

@@ -1919,8 +1919,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_scale vector_scale(data.values.data(),
-                                         v_data.values.data());
+        Vectorization_scale vector_scale(data.values_dev.data(),
+                                         v_data.values_dev.data());
         parallel_for(vector_scale, 0, size, thread_loop_partitioner);
      }

@@ -1935,8 +1935,8 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Vectorization_equ_au vector_equ(data.values.data(),
-                                        v_data.values.data(),
+        Vectorization_equ_au vector_equ(data.values_dev.data(),
+                                        v_data.values_dev.data(),
                                         a);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
      }

@@ -1957,7 +1957,7 @@ namespace internal
                        &data)
      {
         Vectorization_equ_aubv vector_equ(
-          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), a, b);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
      }

@@ -1973,7 +1973,7 @@ namespace internal
      {
         Number sum;
         dealii::internal::VectorOperations::Dot dot(
-          data.values.data(), v_data.values.data());
+          data.values_dev.data(), v_data.values_dev.data());
         dealii::internal::VectorOperations::parallel_reduce(
           dot, 0, size, sum, thread_loop_partitioner);
         AssertIsFinite(sum);

@@ -1991,7 +1991,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Norm2 norm2(data.values.data());
+        Norm2 norm2(data.values_dev.data());
         parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
      }

@@ -2004,7 +2004,7 @@ namespace internal
                      MemorySpaceData &data)
      {
         Number sum;
-        MeanValue mean(data.values.data());
+        MeanValue mean(data.values_dev.data());
         parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);

         return sum;
@@ -2020,7 +2020,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        Norm1 norm1(data.values.data());
+        Norm1 norm1(data.values_dev.data());
         parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
      }

@@ -2035,7 +2035,7 @@ namespace internal
                        ::dealii::MemorySpace::Host> &data)
      {
-        NormP normp(data.values.data(), p);
+        NormP normp(data.values_dev.data(), p);
         parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
      }

@@ -2054,9 +2054,9 @@ namespace internal
                        &data)
      {
         Number sum;
-        AddAndDot adder(data.values.data(),
-                        v_data.values.data(),
-                        w_data.values.data(),
+        AddAndDot adder(data.values_dev.data(),
+                        v_data.values_dev.data(),
+                        w_data.values_dev.data(),
                         a);
         parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);

@@ -2112,7 +2112,7 @@ namespace internal
      {
         if (operation == VectorOperation::insert)
           {
-            cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+            cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
                                                      v_data.values_dev.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyDeviceToHost);

@@ -2630,7 +2630,7 @@ namespace internal
         if (operation == VectorOperation::insert)
           {
             cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
-                                                     v_data.values.data(),
+                                                     v_data.values_dev.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyHostToDevice);
             AssertCuda(cuda_error_code);
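
Note (illustrative sketch, not part of the patch): the rename establishes one naming scheme for both MemorySpaceData specializations: values_dev is the vector's actual storage in the memory space the vector was instantiated for, and values_host_buffer is a host-side staging area that is only populated around compress()/update_ghost_values() when MPI is not CUDA-aware. The self-contained Kokkos program below sketches that staging pattern under those assumptions; the names device_data, host_buffer, and exchange_on_host are invented for the example and do not exist in deal.II.

    // Sketch of the host-buffer staging pattern (assumed simplification).
    #include <Kokkos_Core.hpp>

    #include <algorithm>
    #include <cstdio>

    // Stand-in for the MPI exchange that runs on host memory when the MPI
    // implementation cannot read device pointers (hypothetical helper).
    void exchange_on_host(double *buffer, std::size_t n)
    {
      std::fill(buffer, buffer + n, 42.0);
    }

    int main(int argc, char **argv)
    {
      Kokkos::ScopeGuard guard(argc, argv);

      // Plays the role of values_dev: the storage the vector actually uses.
      Kokkos::View<double *, Kokkos::DefaultExecutionSpace::memory_space>
        device_data("device data", 8);

      // Plays the role of values_host_buffer: a host copy that exists only
      // for the duration of the communication step.
      auto host_buffer =
        Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, device_data);

      // Communicate on the host ...
      exchange_on_host(host_buffer.data(), host_buffer.extent(0));

      // ... then move the result back to the device, as the patch does with
      // cudaMemcpy after import_from_ghosted_array_finish().
      Kokkos::deep_copy(device_data, host_buffer);

      std::printf("exchanged %d entries\n",
                  static_cast<int>(device_data.extent(0)));
      return 0;
    }

When the default execution space is a host backend, create_mirror_view_and_copy simply hands back the original view and the final deep_copy is a no-op, so the same code covers the host-only build as well.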
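Note (illustrative sketch, not part of the patch): the patch repeatedly calls Kokkos::resize(import_data.values_host_buffer, 0) and Kokkos::resize(data.values_host_buffer, 0) to "delete previous content in import data" or drop the staging buffer after communication. The minimal program below shows that idiom in isolation; the view name import_buffer is made up for the example.

    // Sketch of the resize-to-zero idiom used to release temporary storage.
    #include <Kokkos_Core.hpp>

    #include <cstdio>

    int main(int argc, char **argv)
    {
      Kokkos::ScopeGuard guard(argc, argv);

      Kokkos::View<double *, Kokkos::HostSpace> import_buffer("import buffer", 1024);
      std::printf("before: %zu entries\n", import_buffer.extent(0));

      // Shrinking to zero frees the old allocation while keeping the view
      // object (and its label) valid, so it can be re-grown on the next
      // communication step if needed.
      Kokkos::resize(import_buffer, 0);
      std::printf("after:  %zu entries\n", import_buffer.extent(0));

      return 0;
    }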