From 74f6d67b50fa2d1b066a852d3f431132f45c4d3e Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Tue, 23 Jul 2019 13:17:01 +0000
Subject: [PATCH] Fix a bug in LA::d::Vector::compress on the host

compress() would fail if it was called in a CUDA file, for a
LA::d::Vector with MemorySpace::Host, and
deal.II/lac/la_parallel_vector.templates.h was included
---
 doc/news/changes/minor/20190723BrunoTurcksin  |   7 +
 .../lac/la_parallel_vector.templates.h        | 144 ++++++++----
 tests/cuda/parallel_vector_26.cu              |  81 ++++++++++
 tests/cuda/parallel_vector_26.mpirun=2.output |   2 +
 4 files changed, 170 insertions(+), 64 deletions(-)
 create mode 100644 doc/news/changes/minor/20190723BrunoTurcksin
 create mode 100644 tests/cuda/parallel_vector_26.cu
 create mode 100644 tests/cuda/parallel_vector_26.mpirun=2.output

diff --git a/doc/news/changes/minor/20190723BrunoTurcksin b/doc/news/changes/minor/20190723BrunoTurcksin
new file mode 100644
index 0000000000..b7090f9aa1
--- /dev/null
+++ b/doc/news/changes/minor/20190723BrunoTurcksin
@@ -0,0 +1,7 @@
+Fixed: When compiling with CUDA support, the call to
+LinearAlgebra::distributed::Vector::compress() would fail if the vector's
+memory space was MemorySpace::Host, the header
+deal.II/lac/la_parallel_vector.templates.h was included, and the call was done
+in a .cu file.
+<br>
+(Bruno Turcksin, 2019/07/23)
diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index ca648d76a5..743aac8780 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -920,46 +920,55 @@ namespace LinearAlgebra
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
     !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-      // Move the data to the host and then move it back to the
-      // device. We use values to store the elements because the function
-      // uses a view of the array and thus we need the data on the host to
-      // outlive the scope of the function.
-      Number *new_val;
-      Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
-                                        64,
-                                        sizeof(Number) * allocated_size);
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          // Move the data to the host and then move it back to the
+          // device. We use values to store the elements because the function
+          // uses a view of the array and thus we need the data on the host to
+          // outlive the scope of the function.
+          Number *new_val;
+          Utilities::System::posix_memalign(reinterpret_cast<void **>(&new_val),
+                                            64,
+                                            sizeof(Number) * allocated_size);
 
-      data.values.reset(new_val);
+          data.values.reset(new_val);
 
-      cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
-                                               data.values_dev.get(),
-                                               allocated_size * sizeof(Number),
-                                               cudaMemcpyDeviceToHost);
-      AssertCuda(cuda_error_code);
+          cudaError_t cuda_error_code =
+            cudaMemcpy(data.values.get(),
+                       data.values_dev.get(),
+                       allocated_size * sizeof(Number),
+                       cudaMemcpyDeviceToHost);
+          AssertCuda(cuda_error_code);
+        }
 #  endif
 
-#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
-      partitioner->import_from_ghosted_array_start(
-        operation,
-        counter,
-        ArrayView<Number, MemorySpace::Host>(data.values.get() +
-                                               partitioner->local_size(),
-                                             partitioner->n_ghost_indices()),
-        ArrayView<Number, MemorySpace::Host>(import_data.values.get(),
-                                             partitioner->n_import_indices()),
-        compress_requests);
-#  else
-      partitioner->import_from_ghosted_array_start(
-        operation,
-        counter,
-        ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get() +
-                                               partitioner->local_size(),
-                                             partitioner->n_ghost_indices()),
-        ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.get(),
-                                             partitioner->n_import_indices()),
-        compress_requests);
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          partitioner->import_from_ghosted_array_start(
+            operation,
+            counter,
+            ArrayView<Number, MemorySpace::CUDA>(
+              data.values_dev.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            ArrayView<Number, MemorySpace::CUDA>(
+              import_data.values_dev.get(), partitioner->n_import_indices()),
+            compress_requests);
+        }
+      else
 #  endif
+        {
+          partitioner->import_from_ghosted_array_start(
+            operation,
+            counter,
+            ArrayView<Number, MemorySpace::Host>(
+              data.values.get() + partitioner->local_size(),
+              partitioner->n_ghost_indices()),
+            ArrayView<Number, MemorySpace::Host>(
+              import_data.values.get(), partitioner->n_import_indices()),
+            compress_requests);
+        }
 #endif
     }
 
@@ -979,36 +988,43 @@ namespace LinearAlgebra
       // make this function thread safe
       std::lock_guard<std::mutex> lock(mutex);
 
-#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
-      Assert(partitioner->n_import_indices() == 0 ||
-               import_data.values != nullptr,
-             ExcNotInitialized());
-      partitioner->import_from_ghosted_array_finish(
-        operation,
-        ArrayView<const Number, MemorySpace::Host>(
-          import_data.values.get(), partitioner->n_import_indices()),
-        ArrayView<Number, MemorySpace::Host>(data.values.get(),
-                                             partitioner->local_size()),
-        ArrayView<Number, MemorySpace::Host>(data.values.get() +
-                                               partitioner->local_size(),
-                                             partitioner->n_ghost_indices()),
-        compress_requests);
-#  else
-      Assert(partitioner->n_import_indices() == 0 ||
-               import_data.values_dev != nullptr,
-             ExcNotInitialized());
-      partitioner->import_from_ghosted_array_finish(
-        operation,
-        ArrayView<const Number, MemorySpace::CUDA>(
-          import_data.values_dev.get(), partitioner->n_import_indices()),
-        ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get(),
-                                             partitioner->local_size()),
-        ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get() +
-                                               partitioner->local_size(),
-                                             partitioner->n_ghost_indices()),
-        compress_requests);
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+        {
+          Assert(partitioner->n_import_indices() == 0 ||
+                   import_data.values_dev != nullptr,
+                 ExcNotInitialized());
+          partitioner
+            ->import_from_ghosted_array_finish<Number, MemorySpace::CUDA>(
+              operation,
+              ArrayView<const Number, MemorySpace::CUDA>(
+                import_data.values_dev.get(), partitioner->n_import_indices()),
+              ArrayView<Number, MemorySpace::CUDA>(data.values_dev.get(),
+                                                   partitioner->local_size()),
+              ArrayView<Number, MemorySpace::CUDA>(
+                data.values_dev.get() + partitioner->local_size(),
+                partitioner->n_ghost_indices()),
+              compress_requests);
+        }
+      else
 #  endif
+        {
+          Assert(partitioner->n_import_indices() == 0 ||
+                   import_data.values != nullptr,
+                 ExcNotInitialized());
+          partitioner
+            ->import_from_ghosted_array_finish<Number, MemorySpace::Host>(
+              operation,
+              ArrayView<const Number, MemorySpace::Host>(
+                import_data.values.get(), partitioner->n_import_indices()),
+              ArrayView<Number, MemorySpace::Host>(data.values.get(),
+                                                   partitioner->local_size()),
+              ArrayView<Number, MemorySpace::Host>(
+                data.values.get() + partitioner->local_size(),
+                partitioner->n_ghost_indices()),
+              compress_requests);
+        }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
     !defined DEAL_II_MPI_WITH_CUDA_SUPPORT
diff --git a/tests/cuda/parallel_vector_26.cu b/tests/cuda/parallel_vector_26.cu
new file mode 100644
index 0000000000..10f43db185
--- /dev/null
+++ b/tests/cuda/parallel_vector_26.cu
@@ -0,0 +1,81 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Check for a bug where compress cannot be called for a LA::d::Vector in
+// a CUDA file
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.templates.h>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  unsigned int       rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  const unsigned int total_size = 100;
+  const unsigned int ghost_size = 75;
+  const unsigned int local_size = 50;
+
+  IndexSet local_owned(total_size);
+  if (rank == 0)
+    local_owned.add_range(0, local_size);
+  else
+    local_owned.add_range(total_size - local_size, total_size);
+
+  IndexSet ghost_indices(total_size);
+  if (rank == 0)
+    ghost_indices.add_range(0, ghost_size);
+  else
+    ghost_indices.add_range(total_size - ghost_size, total_size);
+
+  LinearAlgebra::distributed::Vector<double> v(
+    local_owned, ghost_indices, MPI_COMM_WORLD);
+
+  for (unsigned int i = 0; i < ghost_size; ++i)
+    v.local_element(i) = i;
+  v.compress(VectorOperation::add);
+
+
+  if (rank == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(rank));
+
+  init_cuda(true);
+
+  if (rank == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+      test();
+    }
+  else
+    test();
+}
diff --git a/tests/cuda/parallel_vector_26.mpirun=2.output b/tests/cuda/parallel_vector_26.mpirun=2.output
new file mode 100644
index 0000000000..be8d055f86
--- /dev/null
+++ b/tests/cuda/parallel_vector_26.mpirun=2.output
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
-- 
2.39.5
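
The heart of the fix is worth restating: the old code selected between the host and
device code paths purely with the preprocessor, so a CUDA-aware build without
CUDA-aware MPI always compiled in, and executed, the device-to-host staging code,
even for vectors whose memory space is MemorySpace::Host. The new code keeps the
preprocessor guards but adds a run-time check on the memory-space template
parameter. The standalone sketch below isolates that dispatch pattern; the tag
types Host and CUDA and the function compress_sketch() are illustrative
stand-ins, not deal.II API.

#include <iostream>
#include <type_traits>

// Illustrative stand-ins for deal.II's MemorySpace::Host and MemorySpace::CUDA.
struct Host
{};
struct CUDA
{};

template <typename MemorySpaceType>
void
compress_sketch()
{
  // Both branches are compiled for every MemorySpaceType (so both must be
  // valid code), but the condition is a compile-time constant and only the
  // matching branch runs. This is why the patched compress_start() no longer
  // touches values_dev when the memory space is Host.
  if (std::is_same<MemorySpaceType, CUDA>::value)
    std::cout << "device path: stage values_dev on the host first\n";
  else
    std::cout << "host path: operate on values directly\n";
}

int
main()
{
  compress_sketch<Host>(); // prints the host path
  compress_sketch<CUDA>(); // prints the device path
}

Because the condition is a compile-time constant, the compiler can eliminate the
dead branch. In C++17 the same intent could be written with if constexpr, but the
plain if with both branches compilable works in C++11, which is the idiom the
patched library code uses.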