From: Daniel Arndt
Date: Wed, 30 Nov 2022 20:40:06 +0000 (+0000)
Subject: Fixes
X-Git-Tag: v9.5.0-rc1~697^2~41
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=877de79fda1e10bfee812322bdc7a3a8c637f2cd;p=dealii.git

Fixes
---

diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index 3ba62fd927..6b7e23f468 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -1474,7 +1474,7 @@ namespace LinearAlgebra
       begin(::dealii::MemorySpace::
               MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
       {
-        return data.values.get();
+        return data.values.data();
       }

       static inline
@@ -1482,14 +1482,14 @@ namespace LinearAlgebra
       begin(const ::dealii::MemorySpace::
               MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
       {
-        return data.values.get();
+        return data.values.data();
       }

       static inline Number *
       get_values(::dealii::MemorySpace::
                    MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
       {
-        return data.values.get();
+        return data.values.data();
       }
     };

@@ -1503,7 +1503,7 @@ namespace LinearAlgebra
       begin(::dealii::MemorySpace::
               MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
       {
-        return data.values_dev.get();
+        return data.values_dev.data();
       }

       static inline
@@ -1511,14 +1511,14 @@ namespace LinearAlgebra
       begin(const ::dealii::MemorySpace::
               MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
       {
-        return data.values_dev.get();
+        return data.values_dev.data();
       }

       static inline Number *
       get_values(::dealii::MemorySpace::
                    MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
       {
-        return data.values_dev.get();
+        return data.values_dev.data();
       }
     };
   } // namespace internal
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index 596855e072..4f0375f03e 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -135,17 +135,12 @@ namespace LinearAlgebra
     {
       if (comm_shared == MPI_COMM_SELF)
         {
-          Number *new_val;
-          Utilities::System::posix_memalign(
-            reinterpret_cast<void **>(&new_val),
-            64,
-            sizeof(Number) * new_alloc_size);
-          data.values = {new_val, [](Number *data) { std::free(data); }};
+          Kokkos::resize(data.values, new_alloc_size);

           allocated_size = new_alloc_size;

           data.values_sm = {
-            ArrayView<const Number>(data.values.get(), new_alloc_size)};
+            ArrayView<const Number>(data.values.data(), new_alloc_size)};
         }
       else
         {
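The hunk above is the recurring pattern in this commit: a manual posix_memalign allocation tied to a custom deleter becomes an owning Kokkos::View that is (re)allocated with Kokkos::resize and exposes its storage through data(). A minimal, self-contained sketch of that idiom (plain Kokkos, not deal.II code; the names values and new_alloc_size are only illustrative):

    #include <Kokkos_Core.hpp>
    #include <cstddef>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        // Owning, reference-counted host array, playing the role of
        // MemorySpaceData::values after this patch.
        Kokkos::View<double *, Kokkos::HostSpace> values("values", 0);

        // Allocates (or reallocates, keeping the common prefix of the old
        // content); the memory is freed automatically when the last View
        // referencing it goes out of scope.
        const std::size_t new_alloc_size = 1000;
        Kokkos::resize(values, new_alloc_size);

        // values.data() now plays the role of the former raw pointer.
        values.data()[0] = 3.14;
      }
      Kokkos::finalize();
    }
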
@@ -229,13 +224,25 @@ namespace LinearAlgebra
             data.values_sm[i] =
               ArrayView<const Number>(others[i], new_alloc_sizes[i]);

-          data.values = {ptr_aligned, [mpi_window](Number *) mutable {
-                           // note: we are creating here a copy of the
-                           // window other approaches led to segmentation
-                           // faults
-                           const auto ierr = MPI_Win_free(&mpi_window);
-                           AssertThrowMPI(ierr);
-                         }};
+          data.values =
+            Kokkos::View<Number *,
+                         Kokkos::HostSpace,
+                         Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
+              ptr_aligned, new_alloc_size);
+
+          // Kokkos will not free the memory because the memory is
+          // unmanaged. Instead we use a shared pointer to take care of
+          // that.
+          data.values_sm_ptr = {ptr_aligned,
+                                [mpi_window](Number *) mutable {
+                                  // note: we are creating here a copy of
+                                  // the window other approaches led to
+                                  // segmentation faults
+                                  const auto ierr =
+                                    MPI_Win_free(&mpi_window);
+                                  AssertThrowMPI(ierr);
+                                }};
+
 #else
           Assert(false, ExcInternalError());
 #endif
@@ -332,19 +339,17 @@ namespace LinearAlgebra

       if (new_alloc_size > allocated_size)
         {
-          Assert(((allocated_size > 0 && data.values_dev != nullptr) ||
-                  data.values_dev == nullptr),
+          Assert(((allocated_size > 0 && data.values_dev.size() != 0) ||
+                  data.values_dev.size() == 0),
                  ExcInternalError());

-          Number *new_val_dev;
-          Utilities::CUDA::malloc(new_val_dev, new_alloc_size);
-          data.values_dev.reset(new_val_dev);
+          Kokkos::resize(data.values_dev, new_alloc_size);

           allocated_size = new_alloc_size;
         }
       else if (new_alloc_size == 0)
         {
-          data.values_dev.reset();
+          Kokkos::resize(data.values_dev, 0);
           allocated_size = 0;
         }
     }
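In the shared-memory branch replaced above, the storage is owned by an MPI window rather than by the vector, so the View must not deallocate it: the patch therefore pairs an unmanaged Kokkos::View with a separate std::shared_ptr (values_sm_ptr) whose deleter frees the window. A self-contained sketch of the same ownership split, with std::malloc/std::free standing in for MPI_Win_allocate_shared/MPI_Win_free (illustrative names, not the deal.II ones):

    #include <Kokkos_Core.hpp>
    #include <cstddef>
    #include <cstdlib>
    #include <memory>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        const std::size_t n = 100;
        double *ptr_aligned =
          static_cast<double *>(std::malloc(n * sizeof(double)));

        // Unmanaged view: Kokkos neither allocates nor frees this memory,
        // it only provides indexed access to it.
        Kokkos::View<double *,
                     Kokkos::HostSpace,
                     Kokkos::MemoryTraits<Kokkos::Unmanaged>>
          values(ptr_aligned, n);

        // Lifetime is handled by a separate owner whose deleter runs when
        // the last copy disappears -- the role of values_sm_ptr above.
        std::shared_ptr<double> owner(ptr_aligned,
                                      [](double *p) { std::free(p); });

        values(0) = 1.0; // use the view as usual
      }
      Kokkos::finalize();
    }
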
@@ -418,14 +423,14 @@ namespace LinearAlgebra
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated<
           Number><<<n_blocks, block_size>>>(
           indices_dev,
-          data.values_dev.get(),
+          data.values_dev.data(),
           tmp_vector.begin(),
           tmp_n_elements);
       else
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<
           Number><<<n_blocks, block_size>>>(
           indices_dev,
-          data.values_dev.get(),
+          data.values_dev.data(),
           tmp_vector.begin(),
           tmp_n_elements);

@@ -455,7 +460,7 @@ namespace LinearAlgebra
           Number,
           ::dealii::LinearAlgebra::CUDAWrappers::kernel::LInfty<Number>>
           <<<dim3(n_blocks, 1), dim3(block_size)>>>(
-            result_device, data.values_dev.get(), size);
+            result_device, data.values_dev.data(), size);

       // Copy the result back to the host
       error_code = cudaMemcpy(&result,
@@ -523,8 +528,8 @@ namespace LinearAlgebra
       resize_val(size, comm_sm);

       // delete previous content in import data
-      import_data.values.reset();
-      import_data.values_dev.reset();
+      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_dev, 0);

       // set partitioner to serial version
       partitioner = std::make_shared<Utilities::MPI::Partitioner>(size);
@@ -554,8 +559,8 @@ namespace LinearAlgebra
       resize_val(local_size + ghost_size, comm_sm);

       // delete previous content in import data
-      import_data.values.reset();
-      import_data.values_dev.reset();
+      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_dev, 0);

       // create partitioner
       partitioner = std::make_shared<Utilities::MPI::Partitioner>(local_size,
@@ -600,8 +605,8 @@ namespace LinearAlgebra
       // is only used as temporary storage for compress() and
       // update_ghost_values, and we might have vectors where we never
       // call these methods and hence do not need to have the storage.
-      import_data.values.reset();
-      import_data.values_dev.reset();
+      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_dev, 0);

       thread_loop_partitioner = v.thread_loop_partitioner;
     }
@@ -663,8 +668,8 @@ namespace LinearAlgebra
       // is only used as temporary storage for compress() and
       // update_ghost_values, and we might have vectors where we never
       // call these methods and hence do not need to have the storage.
-      import_data.values.reset();
-      import_data.values_dev.reset();
+      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_dev, 0);

       vector_is_ghosted = false;
     }
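All of the reset()/nullptr logic rewritten above relies on one correspondence: a default-constructed Kokkos::View has extent zero, so view.size() == 0 is the new "unallocated" test and Kokkos::resize(view, 0) the new reset(). Illustrative only:

    #include <Kokkos_Core.hpp>
    #include <cassert>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        Kokkos::View<double *, Kokkos::HostSpace> v; // default constructed
        assert(v.size() == 0);  // the former "v == nullptr" state

        Kokkos::resize(v, 32);  // the former allocate-and-assign
        assert(v.size() == 32);

        Kokkos::resize(v, 0);   // the former v.reset()
        assert(v.size() == 0);
      }
      Kokkos::finalize();
    }
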
@@ -925,15 +930,15 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::zero_out_ghost_values() const
     {
-      if (data.values != nullptr)
-        std::fill_n(data.values.get() + partitioner->locally_owned_size(),
+      if (data.values.size() != 0)
+        std::fill_n(data.values.data() + partitioner->locally_owned_size(),
                     partitioner->n_ghost_indices(),
                     Number());
 #ifdef DEAL_II_COMPILER_CUDA_AWARE
-      if (data.values_dev != nullptr)
+      if (data.values_dev.size() != 0)
         {
           const cudaError_t cuda_error_code =
-            cudaMemset(data.values_dev.get() +
+            cudaMemset(data.values_dev.data() +
                          partitioner->locally_owned_size(),
                        0,
                        partitioner->n_ghost_indices() * sizeof(Number));
@@ -967,10 +972,8 @@ namespace LinearAlgebra
   defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
-          if (import_data.values_dev == nullptr)
-            import_data.values_dev.reset(
-              Utilities::CUDA::allocate_device_data<Number>(
-                partitioner->n_import_indices()));
+          if (import_data.values_dev.size() == 0)
+            Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
         }
       else
 # endif
         {
 # ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 # endif
-          if (import_data.values == nullptr)
-            {
-              Number *new_val;
-              Utilities::System::posix_memalign(
-                reinterpret_cast<void **>(&new_val),
-                64,
-                sizeof(Number) * partitioner->n_import_indices());
-              import_data.values.reset(new_val);
-            }
+          if (import_data.values.size() == 0)
+            Kokkos::resize(import_data.values, partitioner->n_import_indices());
         }
     }

@@ -1001,19 +997,7 @@ namespace LinearAlgebra
           // device. We use values to store the elements because the function
           // uses a view of the array and thus we need the data on the host to
           // outlive the scope of the function.
-          Number *new_val;
-          Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
-                                            64,
-                                            sizeof(Number) * allocated_size);
-
-          data.values = {new_val, [](Number *data) { std::free(data); }};
-
-          cudaError_t cuda_error_code =
-            cudaMemcpy(data.values.get(),
-                       data.values_dev.get(),
-                       allocated_size * sizeof(Number),
-                       cudaMemcpyDeviceToHost);
-          AssertCuda(cuda_error_code);
+          data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
         }
 # endif

@@ -1025,10 +1009,10 @@ namespace LinearAlgebra
             operation,
             communication_channel,
             ArrayView<Number, MemorySpace::CUDA>(
-              data.values_dev.get() + partitioner->locally_owned_size(),
+              data.values_dev.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
             ArrayView<Number, MemorySpace::CUDA>(
-              import_data.values_dev.get(), partitioner->n_import_indices()),
+              import_data.values_dev.data(), partitioner->n_import_indices()),
             compress_requests);
         }
       else
         {
           partitioner->import_from_ghosted_array_start(
             operation,
             communication_channel,
             ArrayView<Number, MemorySpace::Host>(
-              data.values.get() + partitioner->locally_owned_size(),
+              data.values.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
             ArrayView<Number, MemorySpace::Host>(
-              import_data.values.get(), partitioner->n_import_indices()),
+              import_data.values.data(), partitioner->n_import_indices()),
             compress_requests);
         }
 #else
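The twelve removed lines above (aligned host allocation, custom deleter, cudaMemcpy) collapse into one Kokkos::create_mirror_view_and_copy call, which allocates a host copy of a device view and deep-copies into it. A minimal sketch using Kokkos' default device execution space (not tied to deal.II's MemorySpaceData):

    #include <Kokkos_Core.hpp>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        // Device array (CUDA if enabled, otherwise another backend).
        Kokkos::View<double *> values_dev("values_dev", 1000);
        Kokkos::deep_copy(values_dev, 42.0);

        // Replaces: posix_memalign + cudaMemcpy(..., cudaMemcpyDeviceToHost).
        auto values =
          Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, values_dev);

        // values.data() is a plain host pointer to the copied elements,
        // and the copy stays alive as long as the view does.
        const double first = values.data()[0]; // == 42.0
        (void)first;
      }
      Kokkos::finalize();
    }
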
@@ -1071,17 +1055,17 @@ namespace LinearAlgebra
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
           Assert(partitioner->n_import_indices() == 0 ||
-                   import_data.values_dev != nullptr,
+                   import_data.values_dev.size() != 0,
                  ExcNotInitialized());
           partitioner
             ->import_from_ghosted_array_finish(
               operation,
               ArrayView<const Number, MemorySpace::CUDA>(
-                import_data.values_dev.get(), partitioner->n_import_indices()),
+                import_data.values_dev.data(), partitioner->n_import_indices()),
               ArrayView<Number, MemorySpace::CUDA>(
-                data.values_dev.get(), partitioner->locally_owned_size()),
+                data.values_dev.data(), partitioner->locally_owned_size()),
               ArrayView<Number, MemorySpace::CUDA>(
-                data.values_dev.get() + partitioner->locally_owned_size(),
+                data.values_dev.data() + partitioner->locally_owned_size(),
                 partitioner->n_ghost_indices()),
               compress_requests);
         }
       else
 # endif
         {
           Assert(partitioner->n_import_indices() == 0 ||
-                   import_data.values != nullptr,
+                   import_data.values.size() != 0,
                  ExcNotInitialized());
           partitioner
             ->import_from_ghosted_array_finish(
               operation,
               ArrayView<const Number, MemorySpace::Host>(
-                import_data.values.get(), partitioner->n_import_indices()),
+                import_data.values.data(), partitioner->n_import_indices()),
               ArrayView<Number, MemorySpace::Host>(
-                data.values.get(), partitioner->locally_owned_size()),
+                data.values.data(), partitioner->locally_owned_size()),
               ArrayView<Number, MemorySpace::Host>(
-                data.values.get() + partitioner->locally_owned_size(),
+                data.values.data() + partitioner->locally_owned_size(),
                 partitioner->n_ghost_indices()),
               compress_requests);
         }
@@ -1111,13 +1095,13 @@ namespace LinearAlgebra
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
           cudaError_t cuda_error_code =
-            cudaMemcpy(data.values_dev.get(),
-                       data.values.get(),
+            cudaMemcpy(data.values_dev.data(),
+                       data.values.data(),
                        allocated_size * sizeof(Number),
                        cudaMemcpyHostToDevice);
           AssertCuda(cuda_error_code);

-          data.values.reset();
+          Kokkos::resize(data.values, 0);
         }
 # endif
@@ -1151,25 +1135,16 @@ namespace LinearAlgebra
             (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value),
             ExcMessage(
               "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
-          if (import_data.values_dev == nullptr)
-            import_data.values_dev.reset(
-              Utilities::CUDA::allocate_device_data<Number>(
-                partitioner->n_import_indices()));
+          if (import_data.values_dev.size() == 0)
+            Kokkos::resize(import_data.values_dev,
+                           partitioner->n_import_indices());
 # else
 # ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 # endif
-          if (import_data.values == nullptr)
-            {
-              Number *new_val;
-              Utilities::System::posix_memalign(
-                reinterpret_cast<void **>(&new_val),
-                64,
-                sizeof(Number) * partitioner->n_import_indices());
-              import_data.values.reset(new_val);
-            }
+          if (import_data.values.size() == 0)
+            Kokkos::resize(import_data.values, partitioner->n_import_indices());
 # endif
         }

@@ -1179,18 +1154,7 @@ namespace LinearAlgebra
       // device. We use values to store the elements because the function
       // uses a view of the array and thus we need the data on the host to
       // outlive the scope of the function.
-      Number *new_val;
-      Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
-                                        64,
-                                        sizeof(Number) * allocated_size);
-
-      data.values = {new_val, [](Number *data) { std::free(data); }};
-
-      cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
-                                               data.values_dev.get(),
-                                               allocated_size * sizeof(Number),
-                                               cudaMemcpyDeviceToHost);
-      AssertCuda(cuda_error_code);
+      data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
 # endif

 # if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
@@ -1198,22 +1162,22 @@ namespace LinearAlgebra
       partitioner->export_to_ghosted_array_start(
         communication_channel,
         ArrayView<const Number, MemorySpace::Host>(
-          data.values.get(), partitioner->locally_owned_size()),
-        ArrayView<Number, MemorySpace::Host>(import_data.values.get(),
+          data.values.data(), partitioner->locally_owned_size()),
+        ArrayView<Number, MemorySpace::Host>(import_data.values.data(),
                                              partitioner->n_import_indices()),
         ArrayView<Number, MemorySpace::Host>(
-          data.values.get() + partitioner->locally_owned_size(),
+          data.values.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
 # else
       partitioner->export_to_ghosted_array_start(
         communication_channel,
         ArrayView<const Number, MemorySpace::CUDA>(
-          data.values_dev.get(), partitioner->locally_owned_size()),
-        ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.get(),
+          data.values_dev.data(), partitioner->locally_owned_size()),
+        ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.data(),
                                              partitioner->n_import_indices()),
         ArrayView<Number, MemorySpace::CUDA>(
-          data.values_dev.get() + partitioner->locally_owned_size(),
+          data.values_dev.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
 # endif
@@ -1244,13 +1208,13 @@ namespace LinearAlgebra
           defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
       partitioner->export_to_ghosted_array_finish(
         ArrayView<Number, MemorySpace::Host>(
-          data.values.get() + partitioner->locally_owned_size(),
+          data.values.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
 # else
       partitioner->export_to_ghosted_array_finish(
         ArrayView<Number, MemorySpace::CUDA>(
-          data.values_dev.get() + partitioner->locally_owned_size(),
+          data.values_dev.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
 # endif
@@ -1263,14 +1227,14 @@ namespace LinearAlgebra
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
           cudaError_t cuda_error_code =
-            cudaMemcpy(data.values_dev.get() +
+            cudaMemcpy(data.values_dev.data() +
                          partitioner->locally_owned_size(),
-                       data.values.get() + partitioner->locally_owned_size(),
+                       data.values.data() + partitioner->locally_owned_size(),
                        partitioner->n_ghost_indices() * sizeof(Number),
                        cudaMemcpyHostToDevice);
           AssertCuda(cuda_error_code);

-          data.values.reset();
+          Kokkos::resize(data.values, 0);
         }
 # endif

@@ -2078,7 +2042,7 @@ namespace LinearAlgebra
       if (partitioner.use_count() > 0)
         memory +=
           partitioner->memory_consumption() / partitioner.use_count() + 1;
-      if (import_data.values != nullptr || import_data.values_dev != nullptr)
+      if (import_data.values.size() != 0 || import_data.values_dev.size() != 0)
         memory += (static_cast<std::size_t>(partitioner->n_import_indices()) *
                    sizeof(Number));
       return memory;
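Note that the MPI exchange routines above keep their ArrayView-based interface; the only change is that the pointer now comes from View::data() instead of unique_ptr::get(). The underlying idiom is a non-owning window of "n elements starting at this pointer", for example onto the ghost range at the end of the locally stored array. A sketch under the assumption that values is a host view (the partitioner calls themselves are the surrounding deal.II API and are not reproduced):

    #include <deal.II/base/array_view.h>
    #include <Kokkos_Core.hpp>

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        Kokkos::View<double *, Kokkos::HostSpace> values("values", 100);
        const unsigned int locally_owned_size = 80;
        const unsigned int n_ghost_indices    = 20;

        // Non-owning view of the ghost part, exactly how the compress()
        // and update_ghost_values() buffers above are built.
        dealii::ArrayView<double> ghost_part(values.data() + locally_owned_size,
                                             n_ghost_indices);
        ghost_part[0] = 0.;
      }
      Kokkos::finalize();
    }
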
diff --git a/include/deal.II/lac/vector_operations_internal.h b/include/deal.II/lac/vector_operations_internal.h
index 10ecc4f860..20cb26af29 100644
--- a/include/deal.II/lac/vector_operations_internal.h
+++ b/include/deal.II/lac/vector_operations_internal.h
@@ -1734,8 +1734,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vector_copy<Number, Number2> copier(v_data.values.get(),
-                                            data.values.get());
+        Vector_copy<Number, Number2> copier(v_data.values.data(),
+                                            data.values.data());
         parallel_for(copier, 0, size, thread_loop_partitioner);
       }

@@ -1748,7 +1748,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vector_set<Number> setter(s, data.values.get());
+        Vector_set<Number> setter(s, data.values.data());

         parallel_for(setter, 0, size, thread_loop_partitioner);
       }

@@ -1763,8 +1763,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_add_v<Number> vector_add(data.values.get(),
-                                               v_data.values.get());
+        Vectorization_add_v<Number> vector_add(data.values.data(),
+                                               v_data.values.data());
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }

@@ -1779,8 +1779,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_subtract_v<Number> vector_subtract(data.values.get(),
-                                                         v_data.values.get());
+        Vectorization_subtract_v<Number> vector_subtract(data.values.data(),
+                                                         v_data.values.data());
         parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
       }

@@ -1794,7 +1794,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_add_factor<Number> vector_add(data.values.get(), a);
+        Vectorization_add_factor<Number> vector_add(data.values.data(), a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }

@@ -1809,8 +1809,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_add_av<Number> vector_add(data.values.get(),
-                                                v_data.values.get(),
+        Vectorization_add_av<Number> vector_add(data.values.data(),
+                                                v_data.values.data(),
                                                 a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }

@@ -1831,7 +1831,7 @@ namespace internal
         Vectorization_add_avpbw<Number> vector_add(
-          data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
+          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }

@@ -1847,8 +1847,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_sadd_xv<Number> vector_sadd(data.values.get(),
-                                                  v_data.values.get(),
+        Vectorization_sadd_xv<Number> vector_sadd(data.values.data(),
+                                                  v_data.values.data(),
                                                   x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
       }

@@ -1866,8 +1866,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_sadd_xav<Number> vector_sadd(data.values.get(),
-                                                   v_data.values.get(),
+        Vectorization_sadd_xav<Number> vector_sadd(data.values.data(),
+                                                   v_data.values.data(),
                                                    a,
                                                    x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
       }

@@ -1890,7 +1890,7 @@ namespace internal
         Vectorization_sadd_xavbw<Number> vector_sadd(
-          data.values.get(), v_data.values.get(), w_data.values.get(), x, a, b);
+          data.values.data(), v_data.values.data(), w_data.values.data(), x, a, b);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
       }

@@ -1904,7 +1904,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_multiply_factor<Number> vector_multiply(data.values.get(),
+        Vectorization_multiply_factor<Number> vector_multiply(data.values.data(),
                                                               factor);
         parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
       }

@@ -1919,8 +1919,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_scale<Number> vector_scale(data.values.get(),
-                                                 v_data.values.get());
+        Vectorization_scale<Number> vector_scale(data.values.data(),
+                                                 v_data.values.data());
         parallel_for(vector_scale, 0, size, thread_loop_partitioner);
       }

@@ -1935,8 +1935,8 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Vectorization_equ_au<Number> vector_equ(data.values.get(),
-                                                v_data.values.get(),
+        Vectorization_equ_au<Number> vector_equ(data.values.data(),
+                                                v_data.values.data(),
                                                 a);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
       }

@@ -1957,7 +1957,7 @@ namespace internal
         Vectorization_equ_aubv<Number> vector_equ(
-          data.values.get(), v_data.values.get(), w_data.values.get(), a, b);
+          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
       }

@@ -1973,7 +1973,7 @@ namespace internal
       {
         Number sum;
         dealii::internal::VectorOperations::Dot<Number, Number2> dot(
-          data.values.get(), v_data.values.get());
+          data.values.data(), v_data.values.data());
         dealii::internal::VectorOperations::parallel_reduce(
           dot, 0, size, sum, thread_loop_partitioner);
         AssertIsFinite(sum);

@@ -1991,7 +1991,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Norm2<Number, RealType> norm2(data.values.get());
+        Norm2<Number, RealType> norm2(data.values.data());
         parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
       }

@@ -2004,7 +2004,7 @@ namespace internal
           MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
       {
         Number sum;
-        MeanValue<Number> mean(data.values.get());
+        MeanValue<Number> mean(data.values.data());
         parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);

         return sum;

@@ -2020,7 +2020,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        Norm1<Number, RealType> norm1(data.values.get());
+        Norm1<Number, RealType> norm1(data.values.data());
         parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
       }

@@ -2035,7 +2035,7 @@ namespace internal
             ::dealii::MemorySpace::Host> &data)
       {
-        NormP<Number, RealType> normp(data.values.get(), p);
+        NormP<Number, RealType> normp(data.values.data(), p);
         parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
       }

@@ -2054,9 +2054,9 @@ namespace internal
       {
         Number sum;
-        AddAndDot<Number> adder(data.values.get(),
-                                v_data.values.get(),
-                                w_data.values.get(),
+        AddAndDot<Number> adder(data.values.data(),
+                                v_data.values.data(),
+                                w_data.values.data(),
                                 a);
         parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);

@@ -2112,8 +2112,8 @@ namespace internal
       {
         if (operation == VectorOperation::insert)
           {
-            cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
-                                                     v_data.values_dev.get(),
+            cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+                                                     v_data.values_dev.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyDeviceToHost);
             AssertCuda(cuda_error_code);

@@ -2147,8 +2147,8 @@ namespace internal
             ::dealii::MemorySpace::CUDA> &data)
       {
-        cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
-                                                 v_data.values_dev.get(),
+        cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
+                                                 v_data.values_dev.data(),
                                                  size * sizeof(Number),
                                                  cudaMemcpyDeviceToDevice);
         AssertCuda(cuda_error_code);

@@ -2164,7 +2164,7 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::set<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(), s, size);
+          <<<n_blocks, block_size>>>(data.values_dev.data(), s, size);
         AssertCudaKernel();
       }

@@ -2180,9 +2180,9 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      1.,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2199,9 +2199,9 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      -1.,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2217,7 +2217,7 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_add<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(), a, size);
+          <<<n_blocks, block_size>>>(data.values_dev.data(), a, size);
         AssertCudaKernel();
       }

@@ -2234,9 +2234,9 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aV<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      a,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2257,11 +2257,11 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_aVbW<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      a,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      b,
-                                     w_data.values_dev.get(),
+                                     w_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2280,7 +2280,7 @@ namespace internal
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
           <<<n_blocks, block_size>>>(
-            x, data.values_dev.get(), 1., v_data.values_dev.get(), size);
+            x, data.values_dev.data(), 1., v_data.values_dev.data(), size);
         AssertCudaKernel();
       }

@@ -2299,7 +2299,7 @@ namespace internal
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
           <<<n_blocks, block_size>>>(
-            x, data.values_dev.get(), a, v_data.values_dev.get(), size);
+            x, data.values_dev.data(), a, v_data.values_dev.data(), size);
         AssertCudaKernel();
       }

@@ -2321,11 +2321,11 @@ namespace internal
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::sadd<Number>
           <<<n_blocks, block_size>>>(x,
-                                     data.values_dev.get(),
+                                     data.values_dev.data(),
                                      a,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      b,
-                                     w_data.values_dev.get(),
+                                     w_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2341,7 +2341,7 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::vec_scale<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(), factor, size);
+          <<<n_blocks, block_size>>>(data.values_dev.data(), factor, size);
         AssertCudaKernel();
       }

@@ -2357,8 +2357,8 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::scale<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
-                                     v_data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
+                                     v_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2376,9 +2376,9 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      a,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2399,11 +2399,11 @@ namespace internal
       {
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::equ<Number>
-          <<<n_blocks, block_size>>>(data.values_dev.get(),
+          <<<n_blocks, block_size>>>(data.values_dev.data(),
                                      a,
-                                     v_data.values_dev.get(),
+                                     v_data.values_dev.data(),
                                      b,
-                                     w_data.values_dev.get(),
+                                     w_data.values_dev.data(),
                                      size);
         AssertCudaKernel();
       }

@@ -2428,8 +2428,8 @@ namespace internal
             Number,
             ::dealii::LinearAlgebra::CUDAWrappers::kernel::DotProduct<Number>>
             <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
-                                                      data.values_dev.get(),
-                                                      v_data.values_dev.get(),
+                                                      data.values_dev.data(),
+                                                      v_data.values_dev.data(),
                                                       static_cast<unsigned int>(
                                                         size));
         AssertCudaKernel();

@@ -2480,7 +2480,7 @@ namespace internal
             Number,
             ::dealii::LinearAlgebra::CUDAWrappers::kernel::ElemSum<Number>>
             <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
-                                                      data.values_dev.get(),
+                                                      data.values_dev.data(),
                                                       size);

         // Copy the result back to the host
@@ -2517,7 +2517,7 @@ namespace internal
             Number,
             ::dealii::LinearAlgebra::CUDAWrappers::kernel::L1Norm<Number>>
             <<<dim3(n_blocks, 1), dim3(block_size)>>>(result_device,
-                                                      data.values_dev.get(),
+                                                      data.values_dev.data(),
                                                       size);

         // Copy the result back to the host
@@ -2566,9 +2566,9 @@ namespace internal
         const int n_blocks = 1 + size / (chunk_size * block_size);
         ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_and_dot<Number>
           <<<dim3(n_blocks, 1), dim3(block_size)>>>(res_d,
-                                                    data.values_dev.get(),
-                                                    v_data.values_dev.get(),
-                                                    w_data.values_dev.get(),
+                                                    data.values_dev.data(),
+                                                    v_data.values_dev.data(),
+                                                    w_data.values_dev.data(),
                                                     a,
                                                     size);

@@ -2629,8 +2629,8 @@ namespace internal
       {
         if (operation == VectorOperation::insert)
           {
-            cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.get(),
-                                                     v_data.values.get(),
+            cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
+                                                     v_data.values.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyHostToDevice);
             AssertCuda(cuda_error_code);
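
Throughout the CUDA specializations above, .get() on the old smart pointer becomes .data() on the View; both return the raw device pointer, so the existing cudaMemcpy calls and hand-written kernels are otherwise untouched. A sketch of that interoperability (assumes a CUDA-enabled Kokkos build compiled with nvcc; the scale kernel is made up for illustration and is not one of the deal.II kernels):

    #include <Kokkos_Core.hpp>
    #include <cuda_runtime.h>

    __global__ void scale(double *values, double factor, unsigned int size)
    {
      const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < size)
        values[i] *= factor;
    }

    int main(int argc, char *argv[])
    {
      Kokkos::initialize(argc, argv);
      {
        const unsigned int size = 1024;
        Kokkos::View<double *, Kokkos::CudaSpace> values_dev("values_dev", size);

        // View::data() hands the raw device pointer to plain CUDA code,
        // just like the kernel launches in the hunks above.
        const unsigned int block_size = 128;
        const unsigned int n_blocks   = 1 + size / block_size;
        scale<<<n_blocks, block_size>>>(values_dev.data(), 2., size);

        cudaDeviceSynchronize();
      }
      Kokkos::finalize();
    }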