From: Daniel Arndt Date: Thu, 1 Nov 2018 11:54:29 +0000 (+0100) Subject: Update Partitioner::(import|export)_to_ghosted_array_(start|finish) X-Git-Tag: v9.1.0-rc1~464^2~5 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c5514ab6eba79a23afa41119ef670da960009f45;p=dealii.git Update Partitioner::(import|export)_to_ghosted_array_(start|finish) --- diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h index e44238ebb0..ec65709bb5 100644 --- a/include/deal.II/base/partitioner.h +++ b/include/deal.II/base/partitioner.h @@ -446,7 +446,7 @@ namespace Utilities * This functionality is used in * LinearAlgebra::distributed::Vector::update_ghost_values(). */ - template + template void export_to_ghosted_array_finish(const ArrayView & ghost_array, std::vector &requests) const; @@ -487,7 +487,7 @@ namespace Utilities * This functionality is used in * LinearAlgebra::distributed::Vector::compress(). */ - template + template void import_from_ghosted_array_start( const VectorOperation::values vector_operation, diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h index 62d1cd30ae..5e9d5cc8e4 100644 --- a/include/deal.II/base/partitioner.templates.h +++ b/include/deal.II/base/partitioner.templates.h @@ -150,7 +150,7 @@ namespace Utilities - template + template void Partitioner::export_to_ghosted_array_finish( const ArrayView & ghost_array, @@ -182,28 +182,60 @@ namespace Utilities unsigned int offset = n_ghost_indices_in_larger_set - n_ghost_indices(); // must copy ghost data into extended ghost array - for (std::vector>:: - const_iterator my_ghosts = ghost_indices_subset_data.begin(); - my_ghosts != ghost_indices_subset_data.end(); - ++my_ghosts) - if (offset > my_ghosts->first) - for (unsigned int j = my_ghosts->first; j < my_ghosts->second; - ++j, ++offset) + for (const auto ghost_range : ghost_indices_subset_data) + { + if (offset > ghost_range.first) { - ghost_array[j] = ghost_array[offset]; - ghost_array[offset] = Number(); + const unsigned int chunk_size = + ghost_range.second - ghost_range.first; + if (std::is_same::value) + { + std::copy(ghost_array.data() + offset, + ghost_array.data() + offset + chunk_size, + ghost_array.data() + ghost_range.first); + std::fill(ghost_array.data() + + std::max(ghost_range.second, offset), + ghost_array.data() + offset + chunk_size, + Number{}); + } + else + { +# if defined(DEAL_II_COMPILER_CUDA_AWARE) + cudaError_t cuda_error = + cudaMemcpy(ghost_array.data() + ghost_range.first, + ghost_array.data() + offset, + chunk_size * sizeof(Number), + cudaMemcpyDeviceToDevice); + AssertCuda(cuda_error); + cuda_error = + cudaMemset(ghost_array.data() + + std::max(ghost_range.second, offset), + 0, + (offset + chunk_size - + std::max(ghost_range.second, offset)) * + sizeof(Number)); + AssertCuda(cuda_error); +# else + Assert( + false, + ExcMessage( + "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!")); +# endif + } + offset += chunk_size; } - else - { - AssertDimension(offset, my_ghosts->first); - break; - } + else + { + AssertDimension(offset, ghost_range.first); + break; + } + } } } - template + template void Partitioner::import_from_ghosted_array_start( const VectorOperation::values vector_operation, @@ -294,16 +326,52 @@ namespace Utilities ghost_indices_subset_chunks_by_rank_data[i + 1]; unsigned int offset = 0; for (; my_ghosts != end_my_ghosts; ++my_ghosts) - if (ghost_array_ptr + offset != - ghost_array.data() + my_ghosts->first) 
- for (unsigned int j = my_ghosts->first; j < my_ghosts->second; - ++j, ++offset) + { + const unsigned int chunk_size = + my_ghosts->second - my_ghosts->first; + if (ghost_array_ptr + offset != + ghost_array.data() + my_ghosts->first) { - ghost_array_ptr[offset] = ghost_array[j]; - ghost_array[j] = Number(); + if (std::is_same::value) + { + std::copy(ghost_array.data() + my_ghosts->first, + ghost_array.data() + my_ghosts->second, + ghost_array_ptr + offset); + std::fill( + std::max(ghost_array.data() + my_ghosts->first, + ghost_array_ptr + offset + chunk_size), + ghost_array.data() + my_ghosts->second, + Number{}); + } + else + { +# if defined(DEAL_II_COMPILER_CUDA_AWARE) + cudaError_t cuda_error = + cudaMemcpy(ghost_array_ptr + offset, + ghost_array.data() + my_ghosts->first, + chunk_size * sizeof(Number), + cudaMemcpyDeviceToDevice); + AssertCuda(cuda_error); + cuda_error = cudaMemset( + std::max(ghost_array.data() + my_ghosts->first, + ghost_array_ptr + offset + chunk_size), + 0, + (ghost_array.data() + my_ghosts->second - + std::max(ghost_array.data() + my_ghosts->first, + ghost_array_ptr + offset + chunk_size)) * + sizeof(Number)); + AssertCuda(cuda_error); +# else + Assert( + false, + ExcMessage( + "If the compiler doesn't understand CUDA code, only MemorySpace::Host is allowed!")); +# endif + } } - else - offset += my_ghosts->second - my_ghosts->first; + offset += chunk_size; + } AssertDimension(offset, ghost_targets_data[i].second); } @@ -526,7 +594,41 @@ namespace Utilities read_position += chunk_size; } } - else + else if (vector_operation == dealii::VectorOperation::min) + for (const auto &import_range : import_indices_data) + { + const auto chunk_size = + import_range.second - import_range.first; + const int n_blocks = + 1 + (chunk_size - 1) / (::dealii::CUDAWrappers::chunk_size * + ::dealii::CUDAWrappers::block_size); + dealii::LinearAlgebra::CUDAWrappers::kernel::vector_bin_op< + Number, + dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Min> + <<>>( + locally_owned_array.data() + import_range.first, + read_position, + chunk_size); + read_position += chunk_size; + } + else if (vector_operation == dealii::VectorOperation::max) + for (const auto &import_range : import_indices_data) + { + const auto chunk_size = + import_range.second - import_range.first; + const int n_blocks = + 1 + (chunk_size - 1) / (::dealii::CUDAWrappers::chunk_size * + ::dealii::CUDAWrappers::block_size); + dealii::LinearAlgebra::CUDAWrappers::kernel::vector_bin_op< + Number, + dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Max> + <<>>( + locally_owned_array.data() + import_range.first, + read_position, + chunk_size); + read_position += chunk_size; + } + else // TODO for (const auto &import_range : import_indices_data) { const auto chunk_size = @@ -539,13 +641,6 @@ namespace Utilities AssertCuda(cuda_error_code); read_position += chunk_size; } - - static_assert( - std::is_same::value, - "If we are using the CPU implementation, we should not trigger the restriction"); - Assert(vector_operation == dealii::VectorOperation::insert || - vector_operation == dealii::VectorOperation::add, - ExcNotImplemented()); # endif AssertDimension(read_position - temporary_storage.data(), n_import_indices()); diff --git a/include/deal.II/lac/cuda_kernels.h b/include/deal.II/lac/cuda_kernels.h index b375af82bd..6ce0f431f3 100644 --- a/include/deal.II/lac/cuda_kernels.h +++ b/include/deal.II/lac/cuda_kernels.h @@ -73,11 +73,11 @@ namespace LinearAlgebra struct Binop_Addition> { __device__ static inline std::complex - 
operation(const std::complex a, const std::complex b) + operation(const std::complex a, const std::complex) { printf("This function is not implemented for std::complex!"); assert(false); - return {}; + return a; } }; @@ -98,6 +98,76 @@ namespace LinearAlgebra } }; + template + struct Binop_Subtraction> + { + __device__ static inline std::complex + operation(const std::complex a, const std::complex b) + { + printf("This function is not implemented for std::complex!"); + assert(false); + return a; + } + }; + + + + /** + * Functor defining the maximum of two Numbers. + * + * @ingroup CUDAWrappers + */ + template + struct Binop_Max + { + __device__ static inline Number + operation(const Number a, const Number b) + { + return a > b ? a : b; + } + }; + + template + struct Binop_Max> + { + __device__ static inline std::complex + operation(const std::complex a, const std::complex) + { + printf("This function is not implemented for std::complex!"); + assert(false); + return a; + } + }; + + + + /** + * Functor defining the maximum of two Numbers. + * + * @ingroup CUDAWrappers + */ + template + struct Binop_Min + { + __device__ static inline Number + operation(const Number a, const Number b) + { + return a > b ? b : a; + } + }; + + template + struct Binop_Min> + { + __device__ static inline std::complex + operation(const std::complex a, const std::complex) + { + printf("This function is not implemented for std::complex!"); + assert(false); + return a; + } + }; + /** @@ -112,8 +182,8 @@ namespace LinearAlgebra /** - * Structure implementing the functions used to add elements when using a - * reduction. + * Structure implementing the functions used to add elements when + * using a reduction. * * @ingroup CUDAWrappers */ @@ -136,8 +206,8 @@ namespace LinearAlgebra /** - * Structure implementing the functions used to compute the L1 norm when - * using a reduction. + * Structure implementing the functions used to compute the L1 norm + * when using a reduction. * * @ingroup CUDAWrappers */ @@ -195,8 +265,8 @@ namespace LinearAlgebra /** - * Structure implementing the functions used to compute the dot product - * norm when using a double vector reduction. + * Structure implementing the functions used to compute the dot + * product norm when using a double vector reduction. * * @ingroup CUDAWrappers */ @@ -276,8 +346,8 @@ namespace LinearAlgebra /** - * Scaling and simple addition of a multiple of a vector, i.e. val = - * = s*val + a*V_val + * Scaling and simple addition of a multiple of a vector, i.e. val + * = = s*val + a*V_val * * @ingroup CUDAWrappers */ @@ -310,8 +380,8 @@ namespace LinearAlgebra /** - * Scale each element of this vector by the corresponding element in the - * argument. + * Scale each element of this vector by the corresponding element in + * the argument. 
* * @ingroup CUDAWrappers */ diff --git a/source/base/partitioner.cuda.inst.in b/source/base/partitioner.cuda.inst.in index c590984dae..d6b6ab7f6c 100644 --- a/source/base/partitioner.cuda.inst.in +++ b/source/base/partitioner.cuda.inst.in @@ -25,6 +25,19 @@ for (SCALAR : MPI_SCALARS) const ArrayView &, const ArrayView &, std::vector &) const; + + template void Utilities::MPI::Partitioner:: + export_to_ghosted_array_finish( + const ArrayView &, std::vector &) const; + + template void Utilities::MPI::Partitioner::import_from_ghosted_array_start< + SCALAR, + MemorySpace::CUDA>(const VectorOperation::values, + const unsigned int, + const ArrayView &, + const ArrayView &, + std::vector &) const; + template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish< SCALAR, MemorySpace::CUDA>(const VectorOperation::values, diff --git a/source/base/partitioner.inst.in b/source/base/partitioner.inst.in index f20ad8a6a5..c1b1696302 100644 --- a/source/base/partitioner.inst.in +++ b/source/base/partitioner.inst.in @@ -25,14 +25,16 @@ for (SCALAR : MPI_SCALARS) const ArrayView &, const ArrayView &, std::vector &) const; - template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish< - SCALAR>(const ArrayView &, std::vector &) const; + template void Utilities::MPI::Partitioner:: + export_to_ghosted_array_finish( + const ArrayView &, std::vector &) const; template void Utilities::MPI::Partitioner::import_from_ghosted_array_start< - SCALAR>(const VectorOperation::values, - const unsigned int, - const ArrayView &, - const ArrayView &, - std::vector &) const; + SCALAR, + MemorySpace::Host>(const VectorOperation::values, + const unsigned int, + const ArrayView &, + const ArrayView &, + std::vector &) const; template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish< SCALAR, MemorySpace::Host>(const VectorOperation::values, diff --git a/tests/cuda/parallel_partitioner_06.cu b/tests/cuda/parallel_partitioner_06.cu new file mode 100644 index 0000000000..14c1096346 --- /dev/null +++ b/tests/cuda/parallel_partitioner_06.cu @@ -0,0 +1,216 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. 
+// +// --------------------------------------------------------------------- + + +// test for the Partitioner with a smaller ghost index set within a larger one +// regarding the export_to_ghosted_array() calls +#include +#include +#include + +#include +#include +#include + +#include "../tests.h" + +template +void +print_cuda_view(const ArrayView cuda_view) +{ + std::vector cpu_values(cuda_view.size()); + Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + for (Number value : cpu_values) + deallog << value << " "; + deallog << std::endl; +} + + + +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + Assert(numproc > 2, ExcNotImplemented()); + + const unsigned int set = 200; + AssertIndexRange(numproc, set - 2); + const unsigned int local_size = set - myid; + types::global_dof_index global_size = 0; + types::global_dof_index my_start = 0; + for (unsigned int i = 0; i < numproc; ++i) + { + global_size += set - i; + if (i < myid) + my_start += set - i; + } + + // each processor owns some indices and all are ghosting elements from three + // processors (the second). some entries are right around the border between + // two processors + IndexSet local_owned(global_size); + local_owned.add_range(my_start, my_start + local_size); + IndexSet local_relevant_1(global_size), local_relevant_2(global_size); + local_relevant_1 = local_owned; + + types::global_dof_index ghost_indices[10] = {1, + 2, + 13, + set - 2, + set - 1, + set, + set + 1, + 2 * set, + 2 * set + 1, + 2 * set + 3}; + + local_relevant_1.add_indices(&ghost_indices[0], ghost_indices + 10); + if (myid > 0) + local_relevant_1.add_range(my_start - 10, my_start); + if (myid < numproc - 1) + local_relevant_1.add_range(my_start + local_size, + my_start + local_size + 10); + + local_relevant_2 = local_owned; + local_relevant_2.add_indices(&ghost_indices[0], ghost_indices + 10); + if (myid > 0) + local_relevant_2.add_index(my_start - 10); + if (myid < numproc - 1) + local_relevant_2.add_index(my_start + local_size + 9); + + Utilities::MPI::Partitioner v(local_owned, local_relevant_1, MPI_COMM_WORLD); + Utilities::MPI::Partitioner w(local_owned, MPI_COMM_WORLD); + w.set_ghost_indices(local_relevant_2, v.ghost_indices()); + + IndexSet local_relevant_3(global_size); + local_relevant_3.add_index(2); + if (myid > 0 && my_start > 0) + local_relevant_3.add_range(my_start - 10, my_start); + Utilities::MPI::Partitioner x(local_owned, MPI_COMM_WORLD); + x.set_ghost_indices(local_relevant_3, v.ghost_indices()); + + // set up a locally owned array with some entries + std::vector cpu_locally_owned_data(local_size); + for (unsigned int i = 0; i < local_size; ++i) + cpu_locally_owned_data[i] = my_start + i; + std::unique_ptr locally_owned_data( + nullptr, Utilities::CUDA::delete_device_data); + locally_owned_data.reset( + Utilities::CUDA::allocate_device_data(local_size)); + ArrayView locally_owned_data_view(locally_owned_data.get(), + local_size); + Utilities::CUDA::copy_to_dev(cpu_locally_owned_data, + locally_owned_data.get()); + + // set up a ghost array + std::unique_ptr ghosts( + nullptr, Utilities::CUDA::delete_device_data); + ghosts.reset( + Utilities::CUDA::allocate_device_data(v.n_ghost_indices())); + ArrayView ghosts_view(ghosts.get(), v.n_ghost_indices()); + + std::unique_ptr temp_array( + nullptr, Utilities::CUDA::delete_device_data); + temp_array.reset( + Utilities::CUDA::allocate_device_data(v.n_import_indices())); + 
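+  // Scratch buffer for the outgoing data: export_to_ghosted_array_start()
+  // packs the locally owned entries requested by other ranks into this
+  // array before posting the MPI sends, so it needs room for
+  // n_import_indices() values. The same allocation is reused below (as
+  // sub-views) for the smaller partitioners w and x.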
ArrayView temp_array_view(temp_array.get(), + v.n_import_indices()); + + std::vector requests; + + // send the full array + v.export_to_ghosted_array_start( + 3, locally_owned_data_view, temp_array_view, ghosts_view, requests); + v.export_to_ghosted_array_finish(ghosts_view, + requests); + deallog << "All ghosts: "; + print_cuda_view(ghosts_view); + + // send only the array in w + cudaError_t cuda_error = + cudaMemset(ghosts_view.data(), + 0, + ghosts_view.size() * sizeof(unsigned int)); + AssertCuda(cuda_error); + + Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError()); + ArrayView temp_array_view_w(temp_array_view.data(), + w.n_import_indices()); + w.export_to_ghosted_array_start( + 3, locally_owned_data_view, temp_array_view_w, ghosts_view, requests); + + // start a second send operation for the x partitioner in parallel to make + // sure communication does not get messed up + std::unique_ptr temp_array2( + nullptr, Utilities::CUDA::delete_device_data); + temp_array2.reset( + Utilities::CUDA::allocate_device_data(x.n_import_indices())); + ArrayView temp_array2_view(temp_array2.get(), + x.n_import_indices()); + + std::unique_ptr ghosts2( + nullptr, Utilities::CUDA::delete_device_data); + ghosts2.reset( + Utilities::CUDA::allocate_device_data(x.n_ghost_indices())); + ArrayView ghosts2_view(ghosts2.get(), x.n_ghost_indices()); + + std::vector requests2; + x.export_to_ghosted_array_start( + 4, locally_owned_data_view, temp_array2_view, ghosts2_view, requests2); + + w.export_to_ghosted_array_finish(ghosts_view, + requests); + deallog << "Ghosts on reduced 1: "; + print_cuda_view(ghosts_view); + + cuda_error = cudaMemset(ghosts_view.data(), + 0, + ghosts_view.size() * sizeof(unsigned int)); + AssertCuda(cuda_error); + + Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError()); + ArrayView temp_array_view_x(temp_array_view.data(), + x.n_import_indices()); + x.export_to_ghosted_array_start( + 3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests); + x.export_to_ghosted_array_finish(ghosts_view, + requests); + deallog << "Ghosts on reduced 2: "; + print_cuda_view(ghosts_view); + + x.export_to_ghosted_array_finish( + ghosts2_view, requests2); + deallog << "Ghosts on reduced 2 without excess entries: "; + print_cuda_view(ghosts2_view); + + x.export_to_ghosted_array_start( + 3, locally_owned_data_view, temp_array_view_x, ghosts_view, requests); + x.export_to_ghosted_array_finish(ghosts_view, + requests); + deallog << "Ghosts on reduced 2: "; + print_cuda_view(ghosts_view); +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi(argc, argv); + MPILogInitAll log; + init_cuda(true); + test(); +} diff --git a/tests/cuda/parallel_partitioner_06.with_cuda_aware_mpi=on.mpirun=4.output b/tests/cuda/parallel_partitioner_06.with_cuda_aware_mpi=on.mpirun=4.output new file mode 100644 index 0000000000..1564165b12 --- /dev/null +++ b/tests/cuda/parallel_partitioner_06.with_cuda_aware_mpi=on.mpirun=4.output @@ -0,0 +1,27 @@ + +DEAL:0::All ghosts: 200 201 202 203 204 205 206 207 208 209 400 401 403 +DEAL:0::Ghosts on reduced 1: 200 201 0 0 0 0 0 0 0 209 400 401 403 +DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:0::Ghosts on reduced 2 without excess entries: +DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 + +DEAL:1::All ghosts: 1 2 13 190 191 192 193 194 195 196 197 198 199 399 400 401 402 403 404 405 406 407 408 +DEAL:1::Ghosts on reduced 1: 1 2 13 190 0 0 0 0 0 0 0 198 199 0 400 401 0 403 0 0 0 0 408 
+DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0 +DEAL:1::Ghosts on reduced 2 without excess entries: 2 190 191 192 193 194 195 196 197 198 199 +DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0 + + +DEAL:2::All ghosts: 1 2 13 198 199 200 201 389 390 391 392 393 394 395 396 397 398 597 598 599 600 601 602 603 604 605 606 +DEAL:2::Ghosts on reduced 1: 1 2 13 198 199 200 201 389 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 606 +DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0 +DEAL:2::Ghosts on reduced 2 without excess entries: 2 389 390 391 392 393 394 395 396 397 398 +DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0 + + +DEAL:3::All ghosts: 1 2 13 198 199 200 201 400 401 403 587 588 589 590 591 592 593 594 595 596 +DEAL:3::Ghosts on reduced 1: 1 2 13 198 199 200 201 400 401 403 587 0 0 0 0 0 0 0 0 0 +DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596 +DEAL:3::Ghosts on reduced 2 without excess entries: 2 587 588 589 590 591 592 593 594 595 596 +DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596 + diff --git a/tests/cuda/parallel_partitioner_07.cu b/tests/cuda/parallel_partitioner_07.cu new file mode 100644 index 0000000000..1372d1f4c6 --- /dev/null +++ b/tests/cuda/parallel_partitioner_07.cu @@ -0,0 +1,263 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + +// test for the Partitioner with a smaller ghost index set within a larger one +// regarding the import_from_ghosted_array() calls +#include +#include +#include + +#include +#include +#include + +#include "../tests.h" + +template +void +print_cuda_view(const ArrayView cuda_view) +{ + std::vector cpu_values(cuda_view.size()); + Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + for (Number value : cpu_values) + deallog << value << " "; + deallog << std::endl; +} + + +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + Assert(numproc > 2, ExcNotImplemented()); + + const unsigned int set = 50; + AssertIndexRange(numproc, set - 2); + const unsigned int local_size = set - myid; + types::global_dof_index global_size = 0; + types::global_dof_index my_start = 0; + for (unsigned int i = 0; i < numproc; ++i) + { + global_size += set - i; + if (i < myid) + my_start += set - i; + } + + // each processor owns some indices and all are ghosting elements from three + // processors (the second). 
some entries are right around the border between + // two processors + IndexSet local_owned(global_size); + local_owned.add_range(my_start, my_start + local_size); + IndexSet local_relevant_1(global_size), local_relevant_2(global_size); + local_relevant_1 = local_owned; + types::global_dof_index ghost_indices[10] = {1, + 2, + 13, + set - 2, + set - 1, + set, + set + 1, + 2 * set, + 2 * set + 1, + 2 * set + 3}; + local_relevant_1.add_indices(&ghost_indices[0], ghost_indices + 10); + if (myid > 0) + local_relevant_1.add_range(my_start - 10, my_start); + if (myid < numproc - 1) + local_relevant_1.add_range(my_start + local_size, + my_start + local_size + 10); + + local_relevant_2 = local_owned; + local_relevant_2.add_indices(&ghost_indices[0], ghost_indices + 10); + if (myid > 0) + local_relevant_2.add_index(my_start - 10); + if (myid < numproc - 1) + local_relevant_2.add_index(my_start + local_size + 9); + + Utilities::MPI::Partitioner v(local_owned, local_relevant_1, MPI_COMM_WORLD); + Utilities::MPI::Partitioner w(local_owned, MPI_COMM_WORLD); + w.set_ghost_indices(local_relevant_2, v.ghost_indices()); + + IndexSet local_relevant_3(global_size); + local_relevant_3.add_index(2); + if (myid > 0 && my_start > 0) + local_relevant_3.add_range(my_start - 10, my_start); + Utilities::MPI::Partitioner x(local_owned, MPI_COMM_WORLD); + x.set_ghost_indices(local_relevant_3, v.ghost_indices()); + + // set up a ghost array with some entries + std::vector cpu_ghost_array(v.n_ghost_indices(), 1); + std::unique_ptr ghost_array( + Utilities::CUDA::allocate_device_data(cpu_ghost_array.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghost_array_view(ghost_array.get(), + cpu_ghost_array.size()); + Utilities::CUDA::copy_to_dev(cpu_ghost_array, ghost_array.get()); + + // set up other arrays + std::unique_ptr locally_owned_array( + Utilities::CUDA::allocate_device_data(local_size), + Utilities::CUDA::delete_device_data); + ArrayView locally_owned_array_view(locally_owned_array.get(), + local_size); + + std::unique_ptr temp_array( + Utilities::CUDA::allocate_device_data(v.n_import_indices()), + Utilities::CUDA::delete_device_data); + ArrayView temp_array_view(temp_array.get(), + v.n_import_indices()); + + std::vector requests; + + // send the full array + { + std::unique_ptr ghosts( + Utilities::CUDA::allocate_device_data( + ghost_array_view.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghosts_view(ghosts.get(), ghost_array_view.size()); + const cudaError_t cuda_error = + cudaMemcpy(ghosts.get(), + ghost_array_view.data(), + ghost_array_view.size() * sizeof(unsigned int), + cudaMemcpyDeviceToDevice); + AssertCuda(cuda_error); + + v.import_from_ghosted_array_start( + VectorOperation::add, 3, ghosts_view, temp_array_view, requests); + v.import_from_ghosted_array_finish( + VectorOperation::add, + temp_array_view, + locally_owned_array_view, + ghosts_view, + requests); + // check that the ghost entries are zeroed out in these calls + deallog << "v ghost entries (should be zero up to index " + << v.n_ghost_indices() - 1 << "):" << std::endl; + print_cuda_view(ghosts_view); + } + deallog << "From all ghosts: "; + print_cuda_view(locally_owned_array_view); + + // send only the array in w + cudaError_t cuda_error = + cudaMemset(locally_owned_array_view.data(), + 0, + locally_owned_array_view.size() * sizeof(unsigned int)); + AssertCuda(cuda_error); + Assert(temp_array_view.size() >= w.n_import_indices(), ExcInternalError()); + ArrayView temp_array_view_w(temp_array_view.data(), + 
w.n_import_indices()); + { + std::unique_ptr ghosts( + Utilities::CUDA::allocate_device_data( + ghost_array_view.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghosts_view(ghosts.get(), ghost_array_view.size()); + const cudaError_t cuda_error = + cudaMemcpy(ghosts.get(), + ghost_array_view.data(), + ghost_array_view.size() * sizeof(unsigned int), + cudaMemcpyDeviceToDevice); + AssertCuda(cuda_error); + + w.import_from_ghosted_array_start( + VectorOperation::add, 3, ghosts_view, temp_array_view_w, requests); + w.import_from_ghosted_array_finish( + VectorOperation::add, + temp_array_view_w, + locally_owned_array_view, + ghosts_view, + requests); + + // check that the ghost entries are zeroed out in these calls + deallog << "w ghost entries (should be zero up to index " + << w.n_ghost_indices() - 1 << "):" << std::endl; + print_cuda_view(ghosts_view); + } + deallog << "From reduced ghosts 1: "; + print_cuda_view(locally_owned_array_view); + + // send only the array in x + cuda_error = + cudaMemset(locally_owned_array_view.data(), + 0, + locally_owned_array_view.size() * sizeof(unsigned int)); + AssertCuda(cuda_error); + Assert(temp_array_view.size() >= x.n_import_indices(), ExcInternalError()); + ArrayView temp_array_view_x(temp_array_view.data(), + x.n_import_indices()); + { + std::unique_ptr ghosts( + Utilities::CUDA::allocate_device_data( + ghost_array_view.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghosts_view(ghosts.get(), ghost_array_view.size()); + const cudaError_t cuda_error = + cudaMemcpy(ghosts.get(), + ghost_array_view.data(), + ghost_array_view.size() * sizeof(unsigned int), + cudaMemcpyDeviceToDevice); + AssertCuda(cuda_error); + + x.import_from_ghosted_array_start( + VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests); + x.import_from_ghosted_array_finish( + VectorOperation::add, + temp_array_view_x, + locally_owned_array_view, + ghosts_view, + requests); + + // check that the ghost entries are zeroed out in these calls + deallog << "x ghost entries (should be zero up to index " + << x.n_ghost_indices() << "):" << std::endl; + print_cuda_view(ghosts_view); + } + deallog << "From reduced ghosts 2: "; + print_cuda_view(locally_owned_array_view); + + // now send a tight array from x and add into the existing entries + std::vector cpu_ghosts(x.n_ghost_indices(), 1); + std::unique_ptr ghosts( + Utilities::CUDA::allocate_device_data(cpu_ghosts.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghosts_view(ghosts.get(), cpu_ghosts.size()); + Utilities::CUDA::copy_to_dev(cpu_ghosts, ghosts.get()); + + x.import_from_ghosted_array_start( + VectorOperation::add, 3, ghosts_view, temp_array_view_x, requests); + x.import_from_ghosted_array_finish( + VectorOperation::add, + temp_array_view_x, + locally_owned_array_view, + ghosts_view, + requests); + deallog << "From tight reduced ghosts 2: "; + print_cuda_view(locally_owned_array_view); +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi(argc, argv); + MPILogInitAll log; + init_cuda(true); + test(); +} diff --git a/tests/cuda/parallel_partitioner_07.with_cuda_aware_mpi=on.mpirun=4.output b/tests/cuda/parallel_partitioner_07.with_cuda_aware_mpi=on.mpirun=4.output new file mode 100644 index 0000000000..60cc237fa6 --- /dev/null +++ b/tests/cuda/parallel_partitioner_07.with_cuda_aware_mpi=on.mpirun=4.output @@ -0,0 +1,47 @@ + +DEAL:0::v ghost entries (should be zero up to index 12): +DEAL:0::0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:0::From all ghosts: 0 3 3 0 0 0 0 0 
0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 3 3 +DEAL:0::w ghost entries (should be zero up to index 5): +DEAL:0::0 0 0 0 0 0 1 1 1 0 0 0 0 +DEAL:0::From reduced ghosts 1: 0 3 3 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 3 3 +DEAL:0::x ghost entries (should be zero up to index 0): +DEAL:0::1 1 1 1 1 1 1 1 1 1 1 1 1 +DEAL:0::From reduced ghosts 2: 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:0::From tight reduced ghosts 2: 0 0 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2 + +DEAL:1::v ghost entries (should be zero up to index 22): +DEAL:1::0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:1::From all ghosts: 3 3 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:1::w ghost entries (should be zero up to index 9): +DEAL:1::0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 1 0 1 1 1 1 0 +DEAL:1::From reduced ghosts 1: 3 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 +DEAL:1::x ghost entries (should be zero up to index 11): +DEAL:1::0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:1::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:1::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2 + + +DEAL:2::v ghost entries (should be zero up to index 26): +DEAL:2::0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:2::From all ghosts: 1 3 3 1 3 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:2::w ghost entries (should be zero up to index 8): +DEAL:2::0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 +DEAL:2::From reduced ghosts 1: 0 3 3 0 3 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 +DEAL:2::x ghost entries (should be zero up to index 11): +DEAL:2::0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:2::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 +DEAL:2::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2 + + +DEAL:3::v ghost entries (should be zero up to index 19): +DEAL:3::0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:3::From all ghosts: 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:3::w ghost entries (should be zero up to index 10): +DEAL:3::0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 +DEAL:3::From reduced ghosts 1: 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:3::x ghost entries (should be zero up to index 11): +DEAL:3::0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:3::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +DEAL:3::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + diff --git a/tests/cuda/parallel_partitioner_08.cu b/tests/cuda/parallel_partitioner_08.cu new file mode 100644 index 0000000000..65a0fbd4f2 --- /dev/null +++ b/tests/cuda/parallel_partitioner_08.cu @@ -0,0 +1,192 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 
2017 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// test MPI::Partitioner update_ghosts() and compress() in case we have +// empty owned DoFs + +#include +#include +#include +#include + +#include "../tests.h" + +template +void +print_cuda_view(const ArrayView cuda_view) +{ + std::vector cpu_values(cuda_view.size()); + Utilities::CUDA::copy_to_host(cuda_view.data(), cpu_values); + for (Number value : cpu_values) + deallog << value << " "; + deallog << std::endl; +} + +__global__ void +set_value(double *values_dev, unsigned int index, double val) +{ + values_dev[index] = val; +} + + +template +void +test() +{ + const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + + // setup index sets + // subset: is2 + // ghost: 8 9 10 11 is3 + // rank 0 : 00 01 02 03 04 05 06 07 00 00 00 00 + // rank 1 : 00 00 00 00 00 00 00 00 00 00 00 00 + // ghost: 0 1 2 3 is3 + // subset: 1 2 is2 + // + // expected result update ghosts() + // + // rank 0 : 00 01 02 03 04 05 06 07 00 00 00 00 + // rank 1 : 00 00 00 00 00 00 00 00 00 01 02 00 + // + // compress(insert) -- does not change anything but zero ghosts + // + // set rank1 ghosts to: 00 10 20 00 + // compress(add) + // + // rank 0 : 00 11 22 03 04 05 06 07 00 00 00 00 + // rank 1 : 00 00 00 00 00 00 00 00 00 10 20 00 + + + IndexSet is1(16), is2(16), is3(16); + + if (rank == 0) + { + is1.add_range(0, 8); + // note: empty is2 + is3.add_range(8, 12); + } + else if (rank == 1) + { + is1.add_range(8, 16); + is2.add_index(1); + is2.add_index(2); + is3.add_range(0, 4); + } + + // create partitioner + std::shared_ptr partitioner( + new Utilities::MPI::Partitioner(is1, MPI_COMM_WORLD)); + partitioner->set_ghost_indices(is3); + std::shared_ptr tight_partitioner( + new Utilities::MPI::Partitioner(is1, MPI_COMM_WORLD)); + tight_partitioner->set_ghost_indices(is2, is3); + + // create vector + std::vector cpu_owned(rank == 0 ? 8 : 0); + for (int i = 0; i < cpu_owned.size(); ++i) + cpu_owned[i] = i; + std::unique_ptr owned( + Utilities::CUDA::allocate_device_data(cpu_owned.size()), + Utilities::CUDA::delete_device_data); + ArrayView owned_view(owned.get(), cpu_owned.size()); + Utilities::CUDA::copy_to_dev(cpu_owned, owned.get()); + + std::vector cpu_ghost(4, 0); + std::unique_ptr ghost( + Utilities::CUDA::allocate_device_data(cpu_ghost.size()), + Utilities::CUDA::delete_device_data); + ArrayView ghost_view(ghost.get(), cpu_ghost.size()); + Utilities::CUDA::copy_to_dev(cpu_ghost, ghost.get()); + + // update ghost values + // vector of requests + std::vector requests; + std::vector compress_requests; + + // allocate temporal array + std::unique_ptr tmp_data( + Utilities::CUDA::allocate_device_data( + tight_partitioner->n_import_indices()), + Utilities::CUDA::delete_device_data); + ArrayView tmp_data_view(tmp_data.get(), + tight_partitioner->n_import_indices()); + + // begin exchange, and ... + tight_partitioner->export_to_ghosted_array_start( + 0, owned_view, tmp_data_view, ghost_view, requests); + + // ... 
finish exchange + tight_partitioner->export_to_ghosted_array_finish( + ghost_view, requests); + + auto print = [&]() { + deallog << "owned:" << std::endl; + print_cuda_view(owned_view); + deallog << "ghost:" << std::endl; + print_cuda_view(ghost_view); + }; + + deallog << "update ghosts()" << std::endl; + print(); + + std::unique_ptr import_data( + Utilities::CUDA::allocate_device_data( + tight_partitioner->n_import_indices()), + Utilities::CUDA::delete_device_data); + ArrayView import_data_view(tmp_data.get(), + tight_partitioner->n_import_indices()); + + // now do insert: + auto compress = [&](VectorOperation::values operation) { + const unsigned int counter = 0; + tight_partitioner + ->import_from_ghosted_array_start( + operation, counter, ghost_view, import_data_view, compress_requests); + + tight_partitioner + ->import_from_ghosted_array_finish( + operation, import_data_view, owned_view, ghost_view, compress_requests); + }; + + deallog << "compress(insert)" << std::endl; + compress(VectorOperation::insert); + print(); + + if (rank == 1) + { + set_value<<<1, 1>>>(ghost.get(), 1, 10); + set_value<<<1, 1>>>(ghost.get(), 2, 20); + } + + deallog << "compress(add)" << std::endl; + compress(VectorOperation::add); + print(); +} + +int +main(int argc, char **argv) +{ + using namespace dealii; + + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + MPILogInitAll log; + + init_cuda(true); + + test(); + + return 0; +} diff --git a/tests/cuda/parallel_partitioner_08.with_cuda_aware_mpi=on.mpirun=2.output b/tests/cuda/parallel_partitioner_08.with_cuda_aware_mpi=on.mpirun=2.output new file mode 100644 index 0000000000..d60bc99626 --- /dev/null +++ b/tests/cuda/parallel_partitioner_08.with_cuda_aware_mpi=on.mpirun=2.output @@ -0,0 +1,33 @@ + +DEAL:0::update ghosts() +DEAL:0::owned: +DEAL:0::0.00000 1.00000 2.00000 3.00000 4.00000 5.00000 6.00000 7.00000 +DEAL:0::ghost: +DEAL:0::0.00000 0.00000 0.00000 0.00000 +DEAL:0::compress(insert) +DEAL:0::owned: +DEAL:0::0.00000 1.00000 2.00000 3.00000 4.00000 5.00000 6.00000 7.00000 +DEAL:0::ghost: +DEAL:0::0.00000 0.00000 0.00000 0.00000 +DEAL:0::compress(add) +DEAL:0::owned: +DEAL:0::0.00000 11.0000 22.0000 3.00000 4.00000 5.00000 6.00000 7.00000 +DEAL:0::ghost: +DEAL:0::0.00000 0.00000 0.00000 0.00000 + +DEAL:1::update ghosts() +DEAL:1::owned: +DEAL:1:: +DEAL:1::ghost: +DEAL:1::0.00000 1.00000 2.00000 0.00000 +DEAL:1::compress(insert) +DEAL:1::owned: +DEAL:1:: +DEAL:1::ghost: +DEAL:1::0.00000 0.00000 0.00000 0.00000 +DEAL:1::compress(add) +DEAL:1::owned: +DEAL:1:: +DEAL:1::ghost: +DEAL:1::0.00000 0.00000 0.00000 0.00000 + diff --git a/tests/cuda/parallel_vector_22.cu b/tests/cuda/parallel_vector_22.cu new file mode 100644 index 0000000000..22cd2d1622 --- /dev/null +++ b/tests/cuda/parallel_vector_22.cu @@ -0,0 +1,196 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + +// check LA::Vector::compress(VectorOperation::min/max) from ghosts + +#include +#include + +#include + +#include +#include + +#include "../tests.h" + + +__global__ void +set_value(double *values_dev, unsigned int index, double val) +{ + values_dev[index] = val; +} + + +template +double +print_value(Number *values_dev, unsigned int index) +{ + static std::vector cpu_value(1); + Utilities::CUDA::copy_to_host(values_dev + index, cpu_value); + return cpu_value[0]; +} + + + +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + + if (myid == 0) + deallog << "numproc=" << numproc << std::endl; + + + // each processor owns 2 indices and all + // are ghosting element 1 (the second) + IndexSet local_owned(numproc * 2); + local_owned.add_range(myid * 2, myid * 2 + 2); + IndexSet local_relevant(numproc * 2); + local_relevant = local_owned; + local_relevant.add_range(1, 2); + + // create vector + LinearAlgebra::distributed::Vector v( + local_owned, local_relevant, MPI_COMM_WORLD); + const auto &partitioner = v.get_partitioner(); + + // set local values + set_value<<<1, 1>>>(v.get_values(), + partitioner->global_to_local(myid * 2), + myid * 2.0); + set_value<<<1, 1>>>(v.get_values(), + partitioner->global_to_local(myid * 2 + 1), + myid * 2.0 + 1.0); + v.compress(VectorOperation::add); + v *= 2.0; + + // check setup of vectors + deallog << myid << ":" + << "first owned entry: " + << print_value(v.get_values(), partitioner->global_to_local(myid * 2)) + << std::endl; + deallog << myid << ":" + << "second owned entry: " + << print_value(v.get_values(), + partitioner->global_to_local(myid * 2 + 1)) + << std::endl; + + // set ghost dof on owning processor and maximize + if (myid != 0) + set_value<<<1, 1>>>(v.get_values(), + partitioner->global_to_local(1), + 7. * myid); + v.compress(VectorOperation::max); + + // import ghosts onto all procs + v.update_ghost_values(); + + // check + deallog << myid << ":" + << "ghost entry after max from owner: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + // ghosts are set to zero + v.zero_out_ghosts(); + + // minimize + v.compress(VectorOperation::min); + v.update_ghost_values(); + + // check + deallog << myid << ":" + << "ghost entry after min from zero: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + // set ghost dof on non-owning processors and minimize + v.zero_out_ghosts(); + if (myid == 0) + set_value<<<1, 1>>>(v.get_values(), partitioner->global_to_local(1), -1.); + v.compress(VectorOperation::min); + v.update_ghost_values(); + + // check + deallog << myid << ":" + << "ghost entry after min from : " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + // set vector to 1, zeros in ghosts except on owner where -1. is set + v.zero_out_ghosts(); + v = 1.0; + if (myid == 0) + set_value<<<1, 1>>>(v.get_values(), partitioner->global_to_local(1), -1.); + + // maximize + v.compress(VectorOperation::max); + v.update_ghost_values(); + + // even if only one value is set (-1. 
on owner), the other values + // contribute a "0" and maximization receives zero and returns it + deallog << myid << ":" + << "ghost entry after max and partly init: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + // however, if the ghost value is set on all processors, the + // maximum is -1: + v.zero_out_ghosts(); + v = 1.0; + set_value<<<1, 1>>>(v.get_values(), partitioner->global_to_local(1), -1.); + v.compress(VectorOperation::max); + v.update_ghost_values(); + deallog << myid << ":" + << "ghost entry after max and full init: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + // what happens in case max is called two times and all values were smaller + // than zero + v.zero_out_ghosts(); + v = -1.0; + set_value<<<1, 1>>>(v.get_values(), partitioner->global_to_local(1), -1.); + v.compress(VectorOperation::max); + deallog << myid << ":" + << "ghost entry after first max: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + v.compress(VectorOperation::max); + deallog << myid << ":" + << "ghost entry after second max: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + if (myid == 0) + deallog << "OK" << std::endl; +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization( + argc, argv, testing_max_num_threads()); + + MPILogInitAll log; + + init_cuda(true); + + test(); +} diff --git a/tests/cuda/parallel_vector_22.mpirun=4.output b/tests/cuda/parallel_vector_22.mpirun=4.output new file mode 100644 index 0000000000..f1940df5e2 --- /dev/null +++ b/tests/cuda/parallel_vector_22.mpirun=4.output @@ -0,0 +1,45 @@ + +DEAL:0::numproc=4 +DEAL:0::0:first owned entry: 0.00000 +DEAL:0::0:second owned entry: 2.00000 +DEAL:0::0:ghost entry after max from owner: 21.0000 +DEAL:0::0:ghost entry after min from zero: 0.00000 +DEAL:0::0:ghost entry after min from : -1.00000 +DEAL:0::0:ghost entry after max and partly init: 0.00000 +DEAL:0::0:ghost entry after max and full init: -1.00000 +DEAL:0::0:ghost entry after first max: -1.00000 +DEAL:0::0:ghost entry after second max: 0.00000 +DEAL:0::OK + +DEAL:1::1:first owned entry: 4.00000 +DEAL:1::1:second owned entry: 6.00000 +DEAL:1::1:ghost entry after max from owner: 21.0000 +DEAL:1::1:ghost entry after min from zero: 0.00000 +DEAL:1::1:ghost entry after min from : -1.00000 +DEAL:1::1:ghost entry after max and partly init: 0.00000 +DEAL:1::1:ghost entry after max and full init: -1.00000 +DEAL:1::1:ghost entry after first max: 0.00000 +DEAL:1::1:ghost entry after second max: 0.00000 + + +DEAL:2::2:first owned entry: 8.00000 +DEAL:2::2:second owned entry: 10.0000 +DEAL:2::2:ghost entry after max from owner: 21.0000 +DEAL:2::2:ghost entry after min from zero: 0.00000 +DEAL:2::2:ghost entry after min from : -1.00000 +DEAL:2::2:ghost entry after max and partly init: 0.00000 +DEAL:2::2:ghost entry after max and full init: -1.00000 +DEAL:2::2:ghost entry after first max: 0.00000 +DEAL:2::2:ghost entry after second max: 0.00000 + + +DEAL:3::3:first owned entry: 12.0000 +DEAL:3::3:second owned entry: 14.0000 +DEAL:3::3:ghost entry after max from owner: 21.0000 +DEAL:3::3:ghost entry after min from zero: 0.00000 +DEAL:3::3:ghost entry after min from : -1.00000 +DEAL:3::3:ghost entry after max and partly init: 0.00000 +DEAL:3::3:ghost entry after max and full init: -1.00000 +DEAL:3::3:ghost entry after first max: 0.00000 +DEAL:3::3:ghost entry after second max: 0.00000 + diff --git 
a/tests/cuda/parallel_vector_23.cu b/tests/cuda/parallel_vector_23.cu new file mode 100644 index 0000000000..a9bb9c5d9c --- /dev/null +++ b/tests/cuda/parallel_vector_23.cu @@ -0,0 +1,116 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +// check LA::Vector::compress(VectorOperation::min/max) from ghosts + +#include +#include + +#include +#include + +#include +#include + +#include "../tests.h" + + +__global__ void +set_value(double *values_dev, unsigned int index, double val) +{ + values_dev[index] = val; +} + + +template +double +print_value(Number *values_dev, unsigned int index) +{ + static std::vector cpu_value(1); + Utilities::CUDA::copy_to_host(values_dev + index, cpu_value); + return cpu_value[0]; +} + + + +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + + if (myid == 0) + deallog << "numproc=" << numproc << std::endl; + + + // each processor owns 2 indices and all + // are ghosting element 1 (the second) + IndexSet local_owned(numproc * 2); + local_owned.add_range(myid * 2, myid * 2 + 2); + IndexSet local_relevant(numproc * 2); + local_relevant = local_owned; + local_relevant.add_range(1, 2); + + // create vector + LinearAlgebra::distributed::Vector v( + local_owned, local_relevant, MPI_COMM_WORLD); + const auto &partitioner = v.get_partitioner(); + + // the read write vector additionally has ghost elements + IndexSet read_write_owned(numproc * 2); + LinearAlgebra::ReadWriteVector read_write_vector(local_relevant); + + read_write_vector.local_element(0) = myid; + read_write_vector(1) = 2. 
* myid; + + v.import(read_write_vector, VectorOperation::max); + v.update_ghost_values(); + + deallog << myid << ":" + << "ghost entry after max: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + if (myid == 0) + read_write_vector(1) = -1.0; + + v.import(read_write_vector, VectorOperation::min); + v.update_ghost_values(); + + deallog << myid << ":" + << "ghost entry after min: " + << print_value(v.get_values(), partitioner->global_to_local(1)) + << std::endl; + + + if (myid == 0) + deallog << "OK" << std::endl; +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization( + argc, argv, testing_max_num_threads()); + + MPILogInitAll log; + + init_cuda(true); + + test(); +} diff --git a/tests/cuda/parallel_vector_23.mpirun=4.output b/tests/cuda/parallel_vector_23.mpirun=4.output new file mode 100644 index 0000000000..d1ab3a1164 --- /dev/null +++ b/tests/cuda/parallel_vector_23.mpirun=4.output @@ -0,0 +1,17 @@ + +DEAL:0::numproc=4 +DEAL:0::0:ghost entry after max: 6.00000 +DEAL:0::0:ghost entry after min: -1.00000 +DEAL:0::OK + +DEAL:1::1:ghost entry after max: 6.00000 +DEAL:1::1:ghost entry after min: -1.00000 + + +DEAL:2::2:ghost entry after max: 6.00000 +DEAL:2::2:ghost entry after min: -1.00000 + + +DEAL:3::3:ghost entry after max: 6.00000 +DEAL:3::3:ghost entry after min: -1.00000 +
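
For reference, the same calls can be driven from host memory. The following is a
minimal sketch, not part of the patch: the helper name update_ghosts, the choice
of double, and the explicit <double, MemorySpace::Host> template arguments are
illustrative assumptions; only the Partitioner member functions themselves come
from the interface updated above.

#include <deal.II/base/array_view.h>
#include <deal.II/base/memory_space.h>
#include <deal.II/base/partitioner.h>

#include <vector>

using namespace dealii;

// One ghost-value update on host memory: 'owned' holds the locally owned
// values, 'ghosts' provides room for part.n_ghost_indices() entries.
void update_ghosts(const Utilities::MPI::Partitioner &part,
                   const std::vector<double> &        owned,
                   std::vector<double> &              ghosts)
{
  // Buffer for the values that have to be shipped to other ranks.
  std::vector<double>      temp(part.n_import_indices());
  std::vector<MPI_Request> requests;

  // Post the non-blocking sends and receives on communication channel 0 ...
  part.export_to_ghosted_array_start<double, MemorySpace::Host>(
    0,
    ArrayView<const double>(owned.data(), owned.size()),
    ArrayView<double>(temp.data(), temp.size()),
    ArrayView<double>(ghosts.data(), ghosts.size()),
    requests);

  // ... and wait for them to finish; afterwards 'ghosts' contains the value
  // stored by the owning rank for every ghost index.
  part.export_to_ghosted_array_finish<double, MemorySpace::Host>(
    ArrayView<double>(ghosts.data(), ghosts.size()), requests);
}

The MemorySpace template parameter only selects the implementation: the
std::copy/std::fill host path or the cudaMemcpy/cudaMemset device path added in
partitioner.templates.h above. The call sites stay the same, which is what lets
the new CUDA tests in this patch mirror the existing host tests.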