From: Daniel Arndt
Date: Wed, 31 Oct 2018 18:42:46 +0000 (+0100)
Subject: Use CUDA-aware MPI in Vector::compress*
X-Git-Tag: v9.1.0-rc1~464^2~6
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9a4ad63454f925229aabc8edc3c1b4b3e9fae90b;p=dealii.git

Use CUDA-aware MPI in Vector::compress*
---

diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index d09de8cda3..d6d7295389 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -81,19 +81,19 @@ namespace LinearAlgebra
       // Resize the underlying array on the host or on the device
-      template <typename Number, typename MemorySpace>
+      template <typename Number, typename MemorySpaceType>
       struct la_parallel_vector_templates_functions
       {
-        static_assert(
-          std::is_same<MemorySpace, ::dealii::MemorySpace::Host>::value ||
-            std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value,
-          "MemorySpace should be Host or CUDA");
+        static_assert(std::is_same<MemorySpaceType, ::dealii::MemorySpace::Host>::value ||
+                        std::is_same<MemorySpaceType, ::dealii::MemorySpace::CUDA>::value,
+                      "MemorySpace should be Host or CUDA");

         static void
-        resize_val(const types::global_dof_index /*new_alloc_size*/,
-                   types::global_dof_index & /*allocated_size*/,
-                   ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace>
-                     & /*data*/)
+        resize_val(
+          const types::global_dof_index /*new_alloc_size*/,
+          types::global_dof_index & /*allocated_size*/,
+          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
+            & /*data*/)
         {}

         static void
@@ -103,14 +103,14 @@ namespace LinearAlgebra
           const std::shared_ptr<const Utilities::MPI::Partitioner> & /*communication_pattern*/,
           const IndexSet & /*locally_owned_elem*/,
-          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace>
+          ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
             & /*data*/)
         {}

         template <typename RealType>
         static void
         linfty_norm_local(
-          const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpace>
+          const ::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
             & /*data*/,
           const unsigned int /*size*/,
           RealType & /*max*/)
@@ -375,9 +375,9 @@ namespace LinearAlgebra
   } // namespace internal


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::clear_mpi_requests()
+  Vector<Number, MemorySpaceType>::clear_mpi_requests()
   {
 #ifdef DEAL_II_WITH_MPI
     for (size_type j = 0; j < compress_requests.size(); j++)
@@ -397,12 +397,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::resize_val(const size_type new_alloc_size)
+  Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size)
   {
-    internal::la_parallel_vector_templates_functions<Number, MemorySpace>::
-      resize_val(new_alloc_size, allocated_size, data);
+    internal::la_parallel_vector_templates_functions<
+      Number,
+      MemorySpaceType>::resize_val(new_alloc_size, allocated_size, data);

     thread_loop_partitioner =
       std::make_shared<::dealii::parallel::internal::TBBPartitioner>();
@@ -410,10 +411,10 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::reinit(const size_type size,
-                                      const bool      omit_zeroing_entries)
+  Vector<Number, MemorySpaceType>::reinit(const size_type size,
+                                          const bool omit_zeroing_entries)
   {
     clear_mpi_requests();
@@ -436,11 +437,12 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   template <typename Number2>
   void
-  Vector<Number, MemorySpace>::reinit(const Vector<Number2, MemorySpace> &v,
-                                      const bool omit_zeroing_entries)
+  Vector<Number, MemorySpaceType>::reinit(
+    const Vector<Number2, MemorySpaceType> &v,
+    const bool                              omit_zeroing_entries)
   {
     clear_mpi_requests();
     Assert(v.partitioner.get() != nullptr, ExcNotInitialized());
@@ -474,11 +476,12 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::reinit(const IndexSet &locally_owned_indices,
-                                      const IndexSet &ghost_indices,
-                                      const MPI_Comm  communicator)
+  Vector<Number, MemorySpaceType>::reinit(
+    const IndexSet &locally_owned_indices,
+    const IndexSet &ghost_indices,
+    const MPI_Comm  communicator)
   {
     // set up parallel partitioner with index sets and communicator
     std::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner(
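[Editor's note: the reinit() overloads above only describe the parallel layout: each process names the range it owns plus the off-process ("ghost") entries it wants to read, and a Utilities::MPI::Partitioner is built from that. A minimal, standalone usage sketch — not part of this patch, and the two-rank layout is invented:]

```cpp
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>

// Each rank owns 42 consecutive entries and additionally ghosts the last
// entry of its left neighbor. These index sets are exactly what the
// reinit(locally_owned_indices, ghost_indices, communicator) overload
// above consumes.
void setup_ghosted_vector(const unsigned int my_rank,
                          const unsigned int n_ranks)
{
  const dealii::types::global_dof_index n_local = 42;

  dealii::IndexSet owned(n_ranks * n_local);
  owned.add_range(my_rank * n_local, (my_rank + 1) * n_local);

  dealii::IndexSet ghosted(owned.size());
  if (my_rank > 0)
    ghosted.add_index(my_rank * n_local - 1);

  dealii::LinearAlgebra::distributed::Vector<double> v;
  v.reinit(owned, ghosted, MPI_COMM_WORLD);
}
```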
@@ -490,10 +493,11 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::reinit(const IndexSet &locally_owned_indices,
-                                      const MPI_Comm  communicator)
+  Vector<Number, MemorySpaceType>::reinit(
+    const IndexSet &locally_owned_indices,
+    const MPI_Comm  communicator)
   {
     // set up parallel partitioner with index sets and communicator
     std::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner(
@@ -503,9 +507,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::reinit(
+  Vector<Number, MemorySpaceType>::reinit(
     const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in)
   {
     clear_mpi_requests();
@@ -532,8 +536,8 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector()
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector()
     : partitioner(new Utilities::MPI::Partitioner())
     , allocated_size(0)
   {
@@ -542,8 +546,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector(const Vector<Number, MemorySpace> &v)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector(
+    const Vector<Number, MemorySpaceType> &v)
     : Subscriptor()
     , allocated_size(0)
     , vector_is_ghosted(false)
@@ -556,17 +561,17 @@ namespace LinearAlgebra
     if (this_size > 0)
       {
         dealii::internal::VectorOperations::
-          functions<Number, Number, MemorySpace>::copy(
+          functions<Number, Number, MemorySpaceType>::copy(
             thread_loop_partitioner, partitioner->local_size(), v.data, data);
       }
   }

-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector(const IndexSet &local_range,
-                                      const IndexSet &ghost_indices,
-                                      const MPI_Comm  communicator)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
+                                          const IndexSet &ghost_indices,
+                                          const MPI_Comm  communicator)
     : allocated_size(0)
     , vector_is_ghosted(false)
   {
@@ -575,9 +580,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector(const IndexSet &local_range,
-                                      const MPI_Comm  communicator)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector(const IndexSet &local_range,
+                                          const MPI_Comm  communicator)
     : allocated_size(0)
     , vector_is_ghosted(false)
   {
@@ -586,8 +591,8 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector(const size_type size)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector(const size_type size)
     : allocated_size(0)
     , vector_is_ghosted(false)
   {
@@ -596,8 +601,8 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace>::Vector(
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType>::Vector(
     const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
     : allocated_size(0)
     , vector_is_ghosted(false)
@@ -607,8 +612,8 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  inline Vector<Number, MemorySpace>::~Vector()
+  template <typename Number, typename MemorySpaceType>
+  inline Vector<Number, MemorySpaceType>::~Vector()
   {
     try
       {
@@ -620,9 +625,10 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  inline Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator=(const Vector<Number, MemorySpace> &c)
+  template <typename Number, typename MemorySpaceType>
+  inline Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::
+  operator=(const Vector<Number, MemorySpaceType> &c)
   {
 #ifdef _MSC_VER
     return this->operator=<Number>(c);
@@ -633,11 +639,11 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   template <typename Number2>
-  inline Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::
-  operator=(const Vector<Number2, MemorySpace> &c)
+  inline Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::
+  operator=(const Vector<Number2, MemorySpaceType> &c)
   {
     Assert(c.partitioner.get() != nullptr, ExcNotInitialized());

@@ -689,7 +695,7 @@ namespace LinearAlgebra
     if (this_size > 0)
       {
         dealii::internal::VectorOperations::
-          functions<Number, Number2, MemorySpace>::copy(
+          functions<Number, Number2, MemorySpaceType>::copy(
             thread_loop_partitioner, this_size, c.data, data);
       }

@@ -702,17 +708,17 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   template <typename Number2>
   void
-  Vector<Number, MemorySpace>::copy_locally_owned_data_from(
-    const Vector<Number2, MemorySpace> &src)
+  Vector<Number, MemorySpaceType>::copy_locally_owned_data_from(
+    const Vector<Number2, MemorySpaceType> &src)
   {
     AssertDimension(partitioner->local_size(), src.partitioner->local_size());
     if (partitioner->local_size() > 0)
       {
         dealii::internal::VectorOperations::
-          functions<Number, Number2, MemorySpace>::copy(
+          functions<Number, Number2, MemorySpaceType>::copy(
             thread_loop_partitioner,
             partitioner->local_size(),
             src.data,
@@ -754,9 +760,9 @@ namespace LinearAlgebra
       }
     } // namespace petsc_helpers

-    template <typename Number, typename MemorySpace>
-    Vector<Number, MemorySpace> &
-    Vector<Number, MemorySpace>::
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
     operator=(const PETScWrappers::MPI::Vector &petsc_vec)
     {
       // TODO: We would like to use the same compact infrastructure as for the
@@ -797,9 +803,9 @@ namespace LinearAlgebra
 #ifdef DEAL_II_WITH_TRILINOS

-    template <typename Number, typename MemorySpace>
-    Vector<Number, MemorySpace> &
-    Vector<Number, MemorySpace>::
+    template <typename Number, typename MemorySpaceType>
+    Vector<Number, MemorySpaceType> &
+    Vector<Number, MemorySpaceType>::
     operator=(const TrilinosWrappers::MPI::Vector &trilinos_vec)
     {
 #  ifdef DEAL_II_WITH_MPI
@@ -822,9 +828,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::compress(
+  Vector<Number, MemorySpaceType>::compress(
     ::dealii::VectorOperation::values operation)
   {
     compress_start(0, operation);
@@ -833,9 +839,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::update_ghost_values() const
+  Vector<Number, MemorySpaceType>::update_ghost_values() const
   {
     update_ghost_values_start();
     update_ghost_values_finish();
@@ -843,9 +849,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::zero_out_ghosts() const
+  Vector<Number, MemorySpaceType>::zero_out_ghosts() const
   {
     if (data.values != nullptr)
       std::fill_n(data.values.get() + partitioner->local_size(),
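[Editor's note: a semantics reminder for the two operations reworked by this patch. compress(VectorOperation::add) moves every ghost contribution to the owning process and adds it there; update_ghost_values() copies owner values back into the ghost entries. A usage sketch — not part of the patch; assumes v was created with owned plus ghost index sets and that ghost_row is a global index this process ghosts but does not own:]

```cpp
#include <deal.II/lac/la_parallel_vector.h>

void assemble_and_exchange(
  dealii::LinearAlgebra::distributed::Vector<double> &v,
  const dealii::types::global_dof_index               ghost_row)
{
  v = 0.;
  v(ghost_row) += 1.;                       // contribution to a foreign row
  v.compress(dealii::VectorOperation::add); // ghost entries -> owner, summed
  v.update_ghost_values();                  // owners -> ghost entries
}
```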
@@ -867,9 +873,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::compress_start(
+  Vector<Number, MemorySpaceType>::compress_start(
     const unsigned int                counter,
     ::dealii::VectorOperation::values operation)
   {
@@ -883,37 +889,58 @@ namespace LinearAlgebra
     std::lock_guard<std::mutex> lock(mutex);

     // allocate import_data in case it is not set up yet
-    if (import_data.values == nullptr && partitioner->n_import_indices() > 0)
+    if (partitioner->n_import_indices() > 0)
       {
-        Number *new_val;
-        Utilities::System::posix_memalign((void **)&new_val,
-                                          64,
-                                          sizeof(Number) *
-                                            partitioner->n_import_indices());
-        import_data.values.reset(new_val);
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+        Assert(
+          (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value),
+          ExcMessage(
+            "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
+        if (import_data.values_dev == nullptr)
+          import_data.values_dev.reset(
+            Utilities::CUDA::allocate_device_data<Number>(
+              partitioner->n_import_indices()));
+#  else
+#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+        static_assert(
+          std::is_same<MemorySpaceType, MemorySpace::Host>::value,
+          "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
+#    endif
+        if (import_data.values == nullptr)
+          {
+            Number *new_val;
+            Utilities::System::posix_memalign(
+              (void **)&new_val,
+              64,
+              sizeof(Number) * partitioner->n_import_indices());
+            import_data.values.reset(new_val);
+          }
+#  endif
       }

-#  ifdef DEAL_II_COMPILER_CUDA_AWARE
-    // TODO: for now move the data to the host and then move it back to the
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    // Move the data to the host and then move it back to the
     // device. We use values to store the elements because the function
     // uses a view of the array and thus we need the data on the host to
     // outlive the scope of the function.
-    if (std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value)
-      {
-        Number *new_val;
-        Utilities::System::posix_memalign((void **)&new_val,
-                                          64,
-                                          sizeof(Number) * allocated_size);
-        data.values.reset(new_val);
-
-        cudaError_t cuda_error_code =
-          cudaMemcpy(data.values.get(),
-                     data.values_dev.get(),
-                     allocated_size * sizeof(Number),
-                     cudaMemcpyDeviceToHost);
-        AssertCuda(cuda_error_code);
-      }
+    Number *new_val;
+    Utilities::System::posix_memalign((void **)&new_val,
+                                      64,
+                                      sizeof(Number) * allocated_size);
+
+    data.values.reset(new_val);
+
+    cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
+                                             data.values_dev.get(),
+                                             allocated_size * sizeof(Number),
+                                             cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error_code);
 #  endif
+
+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
     partitioner->import_from_ghosted_array_start(
       operation,
       counter,
       ArrayView<Number>(data.values.get() + partitioner->local_size(),
                         partitioner->n_ghost_indices()),
       ArrayView<Number>(import_data.values.get(),
                         partitioner->n_import_indices()),
       compress_requests);
+#  else
+    partitioner->import_from_ghosted_array_start(
+      operation,
+      counter,
+      ArrayView<Number>(data.values_dev.get() + partitioner->local_size(),
+                        partitioner->n_ghost_indices()),
+      ArrayView<Number>(import_data.values_dev.get(),
+                        partitioner->n_import_indices()),
+      compress_requests);
+#  endif
 #endif
   }
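[Editor's note: the two preprocessor paths above are the heart of this patch. A CUDA-aware MPI implementation accepts device pointers directly in communication calls; without one, the data must be staged through a host buffer. Stripped of the deal.II machinery, the distinction looks like the following sketch — HAVE_CUDA_AWARE_MPI, peer, and the buffers are placeholders, not deal.II names:]

```cpp
#include <cuda_runtime.h>
#include <mpi.h>

// Post the send side of a ghost exchange from GPU memory. With a
// CUDA-aware MPI the device pointer goes straight into MPI_Isend;
// otherwise the values are copied to a host buffer first.
void isend_from_device(const double *buf_dev,
                       double *      buf_host,
                       const int     n,
                       const int     peer,
                       MPI_Request * request)
{
#ifdef HAVE_CUDA_AWARE_MPI // stand-in for DEAL_II_WITH_CUDA_AWARE_MPI
  MPI_Isend(buf_dev, n, MPI_DOUBLE, peer, /*tag=*/0, MPI_COMM_WORLD, request);
#else
  cudaMemcpy(buf_host, buf_dev, n * sizeof(double), cudaMemcpyDeviceToHost);
  MPI_Isend(buf_host, n, MPI_DOUBLE, peer, /*tag=*/0, MPI_COMM_WORLD, request);
#endif
}
```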


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::compress_finish(
+  Vector<Number, MemorySpaceType>::compress_finish(
     ::dealii::VectorOperation::values operation)
   {
 #ifdef DEAL_II_WITH_MPI
@@ -941,11 +978,12 @@ namespace LinearAlgebra

     // make this function thread safe
     std::lock_guard<std::mutex> lock(mutex);
-
+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
     Assert(partitioner->n_import_indices() == 0 ||
              import_data.values != nullptr,
            ExcNotInitialized());
-    partitioner->import_from_ghosted_array_finish(
+    partitioner->import_from_ghosted_array_finish(
       operation,
       ArrayView<const Number>(import_data.values.get(),
                               partitioner->n_import_indices()),
       ArrayView<Number>(data.values.get(), partitioner->local_size()),
       ArrayView<Number>(data.values.get() + partitioner->local_size(),
                         partitioner->n_ghost_indices()),
       compress_requests);
+#  else
+    Assert(partitioner->n_import_indices() == 0 ||
+             import_data.values_dev != nullptr,
+           ExcNotInitialized());
+    partitioner->import_from_ghosted_array_finish(
+      operation,
+      ArrayView<const Number>(import_data.values_dev.get(),
+                              partitioner->n_import_indices()),
+      ArrayView<Number>(data.values_dev.get(), partitioner->local_size()),
+      ArrayView<Number>(data.values_dev.get() + partitioner->local_size(),
+                        partitioner->n_ghost_indices()),
+      compress_requests);
+#  endif

-#  ifdef DEAL_II_COMPILER_CUDA_AWARE
-    // TODO For now, the communication is done on the host, so we need to
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined DEAL_II_WITH_CUDA_AWARE_MPI
+    // The communication is done on the host, so we need to
     // move the data back to the device.
-    if (std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value)
+    if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
       {
         cudaError_t cuda_error_code =
           cudaMemcpy(data.values_dev.get(),
                      data.values.get(),
                      allocated_size * sizeof(Number),
                      cudaMemcpyHostToDevice);
         AssertCuda(cuda_error_code);

         data.values.reset();
       }
 #  endif
 #endif
   }
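[Editor's note: the temporary import buffers above come from two different allocators. The host branch uses Utilities::System::posix_memalign on a 64-byte boundary (cache-line friendly); the CUDA-aware branch allocates directly on the device via Utilities::CUDA::allocate_device_data<Number>. The same two patterns in isolation — helper names and the unique_ptr deleters below are ours, not deal.II's:]

```cpp
#include <cstdlib> // POSIX posix_memalign
#include <cuda_runtime.h>
#include <memory>
#include <new>

// Host side: 64-byte aligned allocation owned by a unique_ptr, mirroring
// how import_data.values is set up above.
std::unique_ptr<double[], void (*)(void *)>
allocate_aligned(const std::size_t n_elements)
{
  void *ptr = nullptr;
  if (posix_memalign(&ptr, 64, n_elements * sizeof(double)) != 0)
    throw std::bad_alloc();
  return {static_cast<double *>(ptr), &std::free};
}

// Device side: cudaMalloc with RAII ownership, analogous to the
// import_data.values_dev setup above.
struct DeviceFree
{
  void operator()(double *p) const { cudaFree(p); }
};

std::unique_ptr<double[], DeviceFree>
allocate_on_device(const std::size_t n_elements)
{
  double *p = nullptr;
  cudaMalloc(&p, n_elements * sizeof(double));
  return std::unique_ptr<double[], DeviceFree>(p);
}
```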
@@ -976,9 +1028,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::update_ghost_values_start(
+  Vector<Number, MemorySpaceType>::update_ghost_values_start(
     const unsigned int counter) const
   {
 #ifdef DEAL_II_WITH_MPI
@@ -991,40 +1043,77 @@ namespace LinearAlgebra
     std::lock_guard<std::mutex> lock(mutex);

     // allocate import_data in case it is not set up yet
-    if (import_data == nullptr && partitioner->n_import_indices() > 0)
-      import_data =
-        std_cxx14::make_unique<Number[]>(partitioner->n_import_indices());
+    if (partitioner->n_import_indices() > 0)
+      {
+#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+    defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+        Assert(
+          (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value),
+          ExcMessage(
+            "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
+        if (import_data.values_dev == nullptr)
+          import_data.values_dev.reset(
+            Utilities::CUDA::allocate_device_data<Number>(
+              partitioner->n_import_indices()));
+#  else
+#    ifdef DEAL_II_WITH_CUDA_AWARE_MPI
+        static_assert(
+          std::is_same<MemorySpaceType, MemorySpace::Host>::value,
+          "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
+#    endif
+        if (import_data.values == nullptr)
+          {
+            Number *new_val;
+            Utilities::System::posix_memalign(
+              (void **)&new_val,
+              64,
+              sizeof(Number) * partitioner->n_import_indices());
+            import_data.values.reset(new_val);
+          }
+#  endif
+      }

-#  ifdef DEAL_II_COMPILER_CUDA_AWARE
-    // TODO: for now move the data to the host and then move it back to the
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+    // Move the data to the host and then move it back to the
     // device. We use values to store the elements because the function
     // uses a view of the array and thus we need the data on the host to
     // outlive the scope of the function.
-    if (std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value)
-      {
-        Number *new_val;
-        Utilities::System::posix_memalign((void **)&new_val,
-                                          64,
-                                          sizeof(Number) * allocated_size);
-
-        data.values.reset(new_val);
-
-        cudaError_t cuda_error_code =
-          cudaMemcpy(data.values.get(),
-                     data.values_dev.get(),
-                     allocated_size * sizeof(Number),
-                     cudaMemcpyDeviceToHost);
-        AssertCuda(cuda_error_code);
-      }
+    Number *new_val;
+    Utilities::System::posix_memalign((void **)&new_val,
+                                      64,
+                                      sizeof(Number) * allocated_size);
+
+    data.values.reset(new_val);
+
+    cudaError_t cuda_error_code = cudaMemcpy(data.values.get(),
+                                             data.values_dev.get(),
+                                             allocated_size * sizeof(Number),
+                                             cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error_code);
 #  endif

-    partitioner->export_to_ghosted_array_start(
+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
+    partitioner->export_to_ghosted_array_start(
       counter,
       ArrayView<const Number>(data.values.get(), partitioner->local_size()),
-      ArrayView<Number>(import_data.get(), partitioner->n_import_indices()),
+      ArrayView<Number>(import_data.values.get(),
+                        partitioner->n_import_indices()),
       ArrayView<Number>(data.values.get() + partitioner->local_size(),
                         partitioner->n_ghost_indices()),
       update_ghost_values_requests);
+#  else
+    partitioner->export_to_ghosted_array_start(
+      counter,
+      ArrayView<const Number>(data.values_dev.get(),
+                              partitioner->local_size()),
+      ArrayView<Number>(import_data.values_dev.get(),
+                        partitioner->n_import_indices()),
+      ArrayView<Number>(data.values_dev.get() + partitioner->local_size(),
+                        partitioner->n_ghost_indices()),
+      update_ghost_values_requests);
+#  endif

 #else
     (void)counter;
 #endif
   }
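[Editor's note: update_ghost_values() and compress() are deliberately split into *_start() and *_finish() members so that the MPI transfers initiated above can overlap with local computation. A usage sketch under that assumption; do_local_work is a placeholder:]

```cpp
// Overlap the ghost exchange with work on locally owned entries; only
// after *_finish() returns may ghost entries be read.
template <typename VectorType>
void ghosted_work(VectorType &v)
{
  v.update_ghost_values_start();
  // ... do_local_work(v): touch only locally owned entries here ...
  v.update_ghost_values_finish();
  // ghost entries of v are now up to date
}
```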
@@ -1033,9 +1122,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::update_ghost_values_finish() const
+  Vector<Number, MemorySpaceType>::update_ghost_values_finish() const
   {
 #ifdef DEAL_II_WITH_MPI
     // wait for both sends and receives to complete, even though only
@@ -1048,15 +1137,25 @@ namespace LinearAlgebra
         // make this function thread safe
         std::lock_guard<std::mutex> lock(mutex);

+#  if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+        defined(DEAL_II_WITH_CUDA_AWARE_MPI))
         partitioner->export_to_ghosted_array_finish(
           ArrayView<Number>(data.values.get() + partitioner->local_size(),
                             partitioner->n_ghost_indices()),
           update_ghost_values_requests);
+#  else
+        partitioner->export_to_ghosted_array_finish(
+          ArrayView<Number>(data.values_dev.get() + partitioner->local_size(),
+                            partitioner->n_ghost_indices()),
+          update_ghost_values_requests);
+#  endif
       }
-
-#  ifdef DEAL_II_COMPILER_CUDA_AWARE
-    // TODO For now, the communication is done on the host, so we need to
+
+#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
+    !defined DEAL_II_WITH_CUDA_AWARE_MPI
+    // The communication is done on the host, so we need to
     // move the data back to the device.
-    if (std::is_same<MemorySpace, ::dealii::MemorySpace::CUDA>::value)
+    if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
      {
        cudaError_t cuda_error_code =
          cudaMemcpy(data.values_dev.get() + partitioner->local_size(),
                     data.values.get() + partitioner->local_size(),
                     partitioner->n_ghost_indices() * sizeof(Number),
                     cudaMemcpyHostToDevice);
        AssertCuda(cuda_error_code);

        data.values.reset();
      }
 #  endif
+
 #endif

     vector_is_ghosted = true;
   }


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::import(
+  Vector<Number, MemorySpaceType>::import(
     const ReadWriteVector<Number> &                 V,
     VectorOperation::values                         operation,
     std::shared_ptr<const CommunicationPatternBase> communication_pattern)
   {
@@ -1103,7 +1203,7 @@ namespace LinearAlgebra
                     ExcMessage("The communication pattern is not of type "
                                "Utilities::MPI::Partitioner."));
       }
-    Vector<Number, ::dealii::MemorySpace::Host> tmp_vector(comm_pattern);
+    Vector<Number, MemorySpace::Host> tmp_vector(comm_pattern);

     data.copy_to(tmp_vector.begin(), local_size());

@@ -1157,9 +1257,9 @@ namespace LinearAlgebra
     data.copy_from(tmp_vector.begin(), local_size());
   }

-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::swap(Vector<Number, MemorySpace> &v)
+  Vector<Number, MemorySpaceType>::swap(Vector<Number, MemorySpaceType> &v)
   {
 #ifdef DEAL_II_WITH_MPI

@@ -1209,18 +1309,16 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator=(const Number s)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::operator=(const Number s)
   {
     const size_type this_size = local_size();
     if (this_size > 0)
       {
         dealii::internal::VectorOperations::
-          functions<Number, Number, MemorySpace>::set(thread_loop_partitioner,
-                                                      this_size,
-                                                      s,
-                                                      data);
+          functions<Number, Number, MemorySpaceType>::set(
+            thread_loop_partitioner, this_size, s, data);
       }

     // if we call Vector::operator=0, we want to zero out all the entries
@@ -1233,13 +1331,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::reinit(const VectorSpaceVector<Number> &V,
-                                      const bool omit_zeroing_entries)
+  Vector<Number, MemorySpaceType>::reinit(const VectorSpaceVector<Number> &V,
+                                          const bool omit_zeroing_entries)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&V) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &down_V = dynamic_cast<const VectorType &>(V);
@@ -1249,12 +1347,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator+=(const VectorSpaceVector<Number> &vv)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::
+  operator+=(const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1262,7 +1361,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::add_vector(
+      functions<Number, Number, MemorySpaceType>::add_vector(
         thread_loop_partitioner, partitioner->local_size(), v.data, data);

     if (vector_is_ghosted)
@@ -1273,12 +1372,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator-=(const VectorSpaceVector<Number> &vv)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::
+  operator-=(const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1286,7 +1386,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::subtract_vector(
+      functions<Number, Number, MemorySpaceType>::subtract_vector(
         thread_loop_partitioner, partitioner->local_size(), v.data, data);

     if (vector_is_ghosted)
@@ -1297,14 +1397,14 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::add(const Number a)
+  Vector<Number, MemorySpaceType>::add(const Number a)
   {
     AssertIsFinite(a);

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::add_factor(
+      functions<Number, Number, MemorySpaceType>::add_factor(
         thread_loop_partitioner, partitioner->local_size(), a, data);

     if (vector_is_ghosted)
@@ -1313,13 +1413,14 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::add_local(const Number a,
-                                         const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::add_local(
+    const Number                     a,
+    const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1332,16 +1433,16 @@ namespace LinearAlgebra
       return;

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::add_av(
+      functions<Number, Number, MemorySpaceType>::add_av(
         thread_loop_partitioner, partitioner->local_size(), a, v.data, data);
   }


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::add(const Number a,
-                                   const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::add(const Number                     a,
+                                       const VectorSpaceVector<Number> &vv)
   {
     add_local(a, vv);

@@ -1351,15 +1452,15 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::add(const Number a,
-                                   const VectorSpaceVector<Number> &vv,
-                                   const Number b,
-                                   const VectorSpaceVector<Number> &ww)
+  Vector<Number, MemorySpaceType>::add(const Number                     a,
+                                       const VectorSpaceVector<Number> &vv,
+                                       const Number                     b,
+                                       const VectorSpaceVector<Number> &ww)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1374,7 +1475,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), w.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::add_avpbw(
+      functions<Number, Number, MemorySpaceType>::add_avpbw(
         thread_loop_partitioner,
         partitioner->local_size(),
         a,
@@ -1389,10 +1490,10 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::add(const std::vector<size_type> &indices,
-                                   const std::vector<Number> &   values)
+  Vector<Number, MemorySpaceType>::add(const std::vector<size_type> &indices,
+                                       const std::vector<Number> &   values)
   {
     for (std::size_t i = 0; i < indices.size(); ++i)
       {
@@ -1402,16 +1503,17 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::sadd(const Number x,
-                                    const Vector<Number, MemorySpace> &v)
+  Vector<Number, MemorySpaceType>::sadd(
+    const Number                            x,
+    const Vector<Number, MemorySpaceType> &v)
   {
     AssertIsFinite(x);
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::sadd_xv(
+      functions<Number, Number, MemorySpaceType>::sadd_xv(
         thread_loop_partitioner, partitioner->local_size(), x, v.data, data);

     if (vector_is_ghosted)
@@ -1420,14 +1522,15 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::sadd_local(const Number x,
-                                          const Number a,
-                                          const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::sadd_local(
+    const Number                     x,
+    const Number                     a,
+    const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1437,7 +1540,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::sadd_xav(
+      functions<Number, Number, MemorySpaceType>::sadd_xav(
         thread_loop_partitioner,
         partitioner->local_size(),
         x,
@@ -1448,11 +1551,11 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::sadd(const Number x,
-                                    const Number a,
-                                    const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::sadd(const Number                     x,
+                                        const Number                     a,
+                                        const VectorSpaceVector<Number> &vv)
   {
     sadd_local(x, a, vv);

@@ -1462,13 +1565,14 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::sadd(const Number x,
-                                    const Number a,
-                                    const Vector<Number, MemorySpace> &v,
-                                    const Number b,
-                                    const Vector<Number, MemorySpace> &w)
+  Vector<Number, MemorySpaceType>::sadd(
+    const Number                           x,
+    const Number                           a,
+    const Vector<Number, MemorySpaceType> &v,
+    const Number                           b,
+    const Vector<Number, MemorySpaceType> &w)
   {
     AssertIsFinite(x);
     AssertIsFinite(a);
@@ -1478,7 +1582,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), w.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::sadd_xavbw(
+      functions<Number, Number, MemorySpaceType>::sadd_xavbw(
         thread_loop_partitioner,
         partitioner->local_size(),
         x,
@@ -1494,14 +1598,14 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator*=(const Number factor)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::operator*=(const Number factor)
   {
     AssertIsFinite(factor);

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::multiply_factor(
+      functions<Number, Number, MemorySpaceType>::multiply_factor(
         thread_loop_partitioner, partitioner->local_size(), factor, data);

     if (vector_is_ghosted)
@@ -1512,9 +1616,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  Vector<Number, MemorySpace> &
-  Vector<Number, MemorySpace>::operator/=(const Number factor)
+  template <typename Number, typename MemorySpaceType>
+  Vector<Number, MemorySpaceType> &
+  Vector<Number, MemorySpaceType>::operator/=(const Number factor)
   {
     operator*=(static_cast<Number>(1.) / factor);
     return *this;
@@ -1522,12 +1626,12 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::scale(const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::scale(const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1535,10 +1639,8 @@ namespace LinearAlgebra
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::scale(thread_loop_partitioner,
-                                                    local_size(),
-                                                    v.data,
-                                                    data);
+      functions<Number, Number, MemorySpaceType>::scale(
+        thread_loop_partitioner, local_size(), v.data, data);

     if (vector_is_ghosted)
       update_ghost_values();
@@ -1546,13 +1648,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::equ(const Number a,
-                                   const VectorSpaceVector<Number> &vv)
+  Vector<Number, MemorySpaceType>::equ(const Number                     a,
+                                       const VectorSpaceVector<Number> &vv)
   {
     // Downcast. Throws an exception if invalid.
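[Editor's note: a reminder of the conventions in these hunks: sadd(x, a, w) scales the calling vector first, v = x*v + a*w, while equ(a, v) overwrites it, this = a*v. A small usage sketch with invented values, not taken from the patch:]

```cpp
#include <deal.II/lac/la_parallel_vector.h>

template <typename Number>
void combine(dealii::LinearAlgebra::distributed::Vector<Number> &      v,
             const dealii::LinearAlgebra::distributed::Vector<Number> &w)
{
  v.sadd(Number(2.), Number(3.), w); // v <- 2 v + 3 w
  v *= Number(0.5);                  // v <- v / 2
}
```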
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert(dynamic_cast<const VectorType *>(&vv) != nullptr,
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1561,7 +1663,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), v.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::equ_au(
+      functions<Number, Number, MemorySpaceType>::equ_au(
         thread_loop_partitioner, partitioner->local_size(), a, v.data, data);

@@ -1571,12 +1673,13 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::equ(const Number a,
-                                   const Vector<Number, MemorySpace> &v,
-                                   const Number b,
-                                   const Vector<Number, MemorySpace> &w)
+  Vector<Number, MemorySpaceType>::equ(
+    const Number                           a,
+    const Vector<Number, MemorySpaceType> &v,
+    const Number                           b,
+    const Vector<Number, MemorySpaceType> &w)
   {
     AssertIsFinite(a);
     AssertIsFinite(b);
@@ -1585,7 +1688,7 @@ namespace LinearAlgebra
     AssertDimension(local_size(), w.local_size());

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::equ_aubv(
+      functions<Number, Number, MemorySpaceType>::equ_aubv(
        thread_loop_partitioner,
        partitioner->local_size(),
        a,
@@ -1600,20 +1703,20 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   bool
-  Vector<Number, MemorySpace>::all_zero() const
+  Vector<Number, MemorySpaceType>::all_zero() const
   {
     return (linfty_norm() == 0) ? true : false;
   }


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   template <typename Number2>
   Number
-  Vector<Number, MemorySpace>::inner_product_local(
-    const Vector<Number2, MemorySpace> &v) const
+  Vector<Number, MemorySpaceType>::inner_product_local(
+    const Vector<Number2, MemorySpaceType> &v) const
   {
     if (PointerComparison::equal(this, &v))
       return norm_sqr_local();
@@ -1621,20 +1724,18 @@ namespace LinearAlgebra
     AssertDimension(partitioner->local_size(), v.partitioner->local_size());

     return dealii::internal::VectorOperations::
-      functions<Number, Number2, MemorySpace>::dot(thread_loop_partitioner,
-                                                   partitioner->local_size(),
-                                                   v.data,
-                                                   data);
+      functions<Number, Number2, MemorySpaceType>::dot(
+        thread_loop_partitioner, partitioner->local_size(), v.data, data);
   }


-  template <typename Number, typename MemorySpace>
-  Number Vector<Number, MemorySpace>::
+  template <typename Number, typename MemorySpaceType>
+  Number Vector<Number, MemorySpaceType>::
   operator*(const VectorSpaceVector<Number> &vv) const
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1649,15 +1750,15 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::norm_sqr_local() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::norm_sqr_local() const
   {
     real_type sum;

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::norm_2(
+      functions<Number, Number, MemorySpaceType>::norm_2(
        thread_loop_partitioner, partitioner->local_size(), sum, data);

     AssertIsFinite(sum);
@@ -1667,9 +1768,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   Number
-  Vector<Number, MemorySpace>::mean_value_local() const
+  Vector<Number, MemorySpaceType>::mean_value_local() const
   {
     Assert(size() != 0, ExcEmptyObject());

@@ -1677,7 +1778,7 @@ namespace LinearAlgebra
       return Number();

     Number sum = ::dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::mean_value(
+      functions<Number, Number, MemorySpaceType>::mean_value(
         thread_loop_partitioner, partitioner->local_size(), data);

     return sum / real_type(partitioner->local_size());
@@ -1685,9 +1786,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   Number
-  Vector<Number, MemorySpace>::mean_value() const
+  Vector<Number, MemorySpaceType>::mean_value() const
   {
     Number local_result = mean_value_local();
     if (partitioner->n_mpi_processes() > 1)
@@ -1701,14 +1802,14 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::l1_norm_local() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::l1_norm_local() const
   {
     real_type sum;

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::norm_1(
+      functions<Number, Number, MemorySpaceType>::norm_1(
         thread_loop_partitioner, partitioner->local_size(), sum, data);

     return sum;
@@ -1716,9 +1817,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::l1_norm() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::l1_norm() const
   {
     real_type local_result = l1_norm_local();
     if (partitioner->n_mpi_processes() > 1)
@@ -1730,9 +1831,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::norm_sqr() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::norm_sqr() const
   {
     real_type local_result = norm_sqr_local();
     if (partitioner->n_mpi_processes() > 1)
@@ -1744,23 +1845,23 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::l2_norm() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::l2_norm() const
   {
     return std::sqrt(norm_sqr());
   }
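[Editor's note: all of these norms follow the same pattern: a *_local() kernel reduces the locally owned range, and the public function combines the per-process results over the communicator before the final operation, e.g. the square root in l2_norm(). The shape of that reduction in bare MPI, as a standalone sketch:]

```cpp
#include <cmath>
#include <mpi.h>

// Sum the per-process partial sums of squares, then take the root once,
// globally -- the pattern behind norm_sqr()/l2_norm() above.
double parallel_l2_norm(const double local_sum_of_squares, MPI_Comm comm)
{
  double global_sum = 0.;
  MPI_Allreduce(
    &local_sum_of_squares, &global_sum, 1, MPI_DOUBLE, MPI_SUM, comm);
  return std::sqrt(global_sum);
}
```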


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::lp_norm_local(const real_type p) const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::lp_norm_local(const real_type p) const
   {
     real_type sum = 0.;

     dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::norm_p(
+      functions<Number, Number, MemorySpaceType>::norm_p(
        thread_loop_partitioner, partitioner->local_size(), sum, p, data);

     return std::pow(sum, 1. / p);
   }


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::lp_norm(const real_type p) const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::lp_norm(const real_type p) const
   {
     const real_type local_result = lp_norm_local(p);
     if (partitioner->n_mpi_processes() > 1)
@@ -1784,24 +1885,25 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
-  typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::linfty_norm_local() const
+  template <typename Number, typename MemorySpaceType>
+  typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::linfty_norm_local() const
   {
     real_type max = 0.;

     const size_type local_size = partitioner->local_size();
-    internal::la_parallel_vector_templates_functions<Number, MemorySpace>::
-      linfty_norm_local(data, local_size, max);
+    internal::la_parallel_vector_templates_functions<
+      Number,
+      MemorySpaceType>::linfty_norm_local(data, local_size, max);

     return max;
   }


-  template <typename Number, typename MemorySpace>
-  inline typename Vector<Number, MemorySpace>::real_type
-  Vector<Number, MemorySpace>::linfty_norm() const
+  template <typename Number, typename MemorySpaceType>
+  inline typename Vector<Number, MemorySpaceType>::real_type
+  Vector<Number, MemorySpaceType>::linfty_norm() const
   {
     const real_type local_result = linfty_norm_local();
     if (partitioner->n_mpi_processes() > 1)
@@ -1813,19 +1915,19 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   Number
-  Vector<Number, MemorySpace>::add_and_dot_local(
-    const Number a,
-    const Vector<Number, MemorySpace> &v,
-    const Vector<Number, MemorySpace> &w)
+  Vector<Number, MemorySpaceType>::add_and_dot_local(
+    const Number                           a,
+    const Vector<Number, MemorySpaceType> &v,
+    const Vector<Number, MemorySpaceType> &w)
   {
     const size_type vec_size = partitioner->local_size();
     AssertDimension(vec_size, v.local_size());
     AssertDimension(vec_size, w.local_size());

     Number sum = dealii::internal::VectorOperations::
-      functions<Number, Number, MemorySpace>::add_and_dot(
+      functions<Number, Number, MemorySpaceType>::add_and_dot(
        thread_loop_partitioner, vec_size, a, v.data, w.data, data);

     AssertIsFinite(sum);
@@ -1835,15 +1937,15 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   Number
-  Vector<Number, MemorySpace>::add_and_dot(
+  Vector<Number, MemorySpaceType>::add_and_dot(
     const Number                     a,
     const VectorSpaceVector<Number> &vv,
     const VectorSpaceVector<Number> &ww)
   {
     // Downcast. Throws an exception if invalid.
-    using VectorType = Vector<Number, MemorySpace>;
+    using VectorType = Vector<Number, MemorySpaceType>;
     Assert((dynamic_cast<const VectorType *>(&vv) != nullptr),
            ExcVectorTypeNotCompatible());
     const VectorType &v = dynamic_cast<const VectorType &>(vv);
@@ -1861,9 +1963,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   inline bool
-  Vector<Number, MemorySpace>::partitioners_are_compatible(
+  Vector<Number, MemorySpaceType>::partitioners_are_compatible(
     const Utilities::MPI::Partitioner &part) const
   {
     return partitioner->is_compatible(part);
@@ -1871,9 +1973,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   inline bool
-  Vector<Number, MemorySpace>::partitioners_are_globally_compatible(
+  Vector<Number, MemorySpaceType>::partitioners_are_globally_compatible(
     const Utilities::MPI::Partitioner &part) const
   {
     return partitioner->is_globally_compatible(part);
@@ -1881,9 +1983,9 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   std::size_t
-  Vector<Number, MemorySpace>::memory_consumption() const
+  Vector<Number, MemorySpaceType>::memory_consumption() const
   {
     std::size_t memory = sizeof(*this);
     memory += sizeof(Number) * static_cast<std::size_t>(allocated_size);
@@ -1894,7 +1996,7 @@ namespace LinearAlgebra
     if (partitioner.use_count() > 0)
       memory +=
        partitioner->memory_consumption() / partitioner.use_count() + 1;
-    if (import_data != nullptr)
+    if (import_data.values != nullptr || import_data.values_dev != nullptr)
       memory += (static_cast<std::size_t>(partitioner->n_import_indices()) *
                  sizeof(Number));
     return memory;
@@ -1902,12 +2004,12 @@ namespace LinearAlgebra


-  template <typename Number, typename MemorySpace>
+  template <typename Number, typename MemorySpaceType>
   void
-  Vector<Number, MemorySpace>::print(std::ostream &     out,
-                                     const unsigned int precision,
-                                     const bool         scientific,
-                                     const bool         across) const
+  Vector<Number, MemorySpaceType>::print(std::ostream &     out,
+                                         const unsigned int precision,
+                                         const bool         scientific,
+                                         const bool         across) const
   {
     Assert(partitioner.get() != nullptr, ExcInternalError());
     AssertThrow(out, ExcIO());
@@ -1987,8 +2089,8 @@ namespace LinearAlgebra
     out.precision(old_precision);
   }

-  } // namespace distributed
-} // namespace LinearAlgebra
+  } // end of namespace distributed
+} // end of namespace LinearAlgebra

 DEAL_II_NAMESPACE_CLOSE
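[Editor's note: the new test below places the owned ranges 40 entries under 2^32 and extends past it, i.e. exactly the regime where 32-bit index arithmetic would wrap. A two-line illustration of the failure mode that the 64-bit types::global_dof_index avoids — standalone, not part of the patch:]

```cpp
#include <cstdint>
#include <iostream>

int main()
{
  const std::uint32_t near_max = 0xffffffffU - 39; // 4294967256
  const std::uint64_t wide     = near_max;

  std::cout << near_max + 42u << '\n'; // wraps around to 2
  std::cout << wide + 42u << '\n';     // 4294967298, as intended
}
```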
diff --git a/tests/cuda/parallel_vector_16.cu b/tests/cuda/parallel_vector_16.cu
new file mode 100644
index 0000000000..d310e0e134
--- /dev/null
+++ b/tests/cuda/parallel_vector_16.cu
@@ -0,0 +1,118 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// build a vector whose elements exceed the size of unsigned int in case of 64
+// bit indices. To avoid excessive memory consumption, let the vector start at
+// a number close to the maximum of unsigned int but extend past the last
+// index
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+__global__ void
+set_value(double *values_dev, unsigned int index, double val)
+{
+  values_dev[index] = val;
+}
+
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+  if (myid == 0)
+    deallog << "numproc=" << numproc << std::endl;
+
+  types::global_dof_index min_index  = 0xffffffffU - 39;
+  types::global_dof_index local_size = 42;
+  IndexSet                local_owned(min_index + numproc * local_size);
+  local_owned.add_range(min_index + myid * local_size,
+                        min_index + (myid + 1) * local_size);
+
+  // all processors ghost some entries around invalid_unsigned_int and on the
+  // border between two processors
+  IndexSet local_relevant(local_owned.size());
+  local_relevant = local_owned;
+  local_relevant.add_range(min_index + 38, min_index + 40);
+  local_relevant.add_range(min_index + 41, min_index + 43);
+
+  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v(
+    local_owned, local_relevant, MPI_COMM_WORLD);
+
+  deallog << "Local range of proc 0: " << v.local_range().first << " "
+          << v.local_range().second << std::endl;
+
+  // set local values
+  for (types::global_dof_index i = 0; i < local_size; ++i)
+    {
+      double *values_dev = v.get_values();
+      set_value<<<1, 1>>>(values_dev, i, min_index + myid * local_size + i);
+    }
+
+  deallog << "vector norm: " << v.l2_norm() << std::endl;
+
+  // check ghost values
+  v.print(deallog.get_file_stream(), 12, false, false);
+  v.update_ghost_values();
+  v.print(deallog.get_file_stream(), 12, false, false);
+
+  v.zero_out_ghosts();
+  double *    values_dev  = v.get_values();
+  const auto &partitioner = v.get_partitioner();
+  set_value<<<1, 1>>>(values_dev,
+                      partitioner->global_to_local(min_index + 38),
+                      min_index);
+  set_value<<<1, 1>>>(values_dev,
+                      partitioner->global_to_local(min_index + 39),
+                      min_index * 2);
+  set_value<<<1, 1>>>(values_dev,
+                      partitioner->global_to_local(min_index + 41),
+                      min_index + 7);
+  set_value<<<1, 1>>>(values_dev,
+                      partitioner->global_to_local(min_index + 42),
+                      -static_cast<double>(min_index));
+  v.compress(VectorOperation::add);
+  v.update_ghost_values();
+  v.print(deallog.get_file_stream(), 12, false, false);
+
+  deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  MPILogInitAll mpilog;
+
+  init_cuda(true);
+
+  deallog << std::setprecision(12);
+
+  test();
+}
diff --git a/tests/cuda/parallel_vector_16.with_64bit_indices=on.mpirun=4.output b/tests/cuda/parallel_vector_16.with_64bit_indices=on.mpirun=4.output
new file mode 100644
index 0000000000..0bf5c6db2b
--- /dev/null
+++ b/tests/cuda/parallel_vector_16.with_64bit_indices=on.mpirun=4.output
@@ -0,0 +1,612 @@
+
+DEAL:0::numproc=4
+DEAL:0::Local range of proc 0: 4294967256 4294967298
+DEAL:0::vector norm: 55669139270.9
+Process #0
+Local range: [4294967256, 4294967298), global size: 4294967424
+Vector data:
+4294967256.000000000000
+4294967257.000000000000
+4294967258.000000000000
+4294967259.000000000000
+4294967260.000000000000
+4294967261.000000000000
+4294967262.000000000000
+4294967263.000000000000
+4294967264.000000000000
+4294967265.000000000000
+4294967266.000000000000
+4294967267.000000000000
+4294967268.000000000000
+4294967269.000000000000
+4294967270.000000000000 +4294967271.000000000000 +4294967272.000000000000 +4294967273.000000000000 +4294967274.000000000000 +4294967275.000000000000 +4294967276.000000000000 +4294967277.000000000000 +4294967278.000000000000 +4294967279.000000000000 +4294967280.000000000000 +4294967281.000000000000 +4294967282.000000000000 +4294967283.000000000000 +4294967284.000000000000 +4294967285.000000000000 +4294967286.000000000000 +4294967287.000000000000 +4294967288.000000000000 +4294967289.000000000000 +4294967290.000000000000 +4294967291.000000000000 +4294967292.000000000000 +4294967293.000000000000 +4294967294.000000000000 +4294967295.000000000000 +4294967296.000000000000 +4294967297.000000000000 + +Process #0 +Local range: [4294967256, 4294967298), global size: 4294967424 +Vector data: +4294967256.000000000000 +4294967257.000000000000 +4294967258.000000000000 +4294967259.000000000000 +4294967260.000000000000 +4294967261.000000000000 +4294967262.000000000000 +4294967263.000000000000 +4294967264.000000000000 +4294967265.000000000000 +4294967266.000000000000 +4294967267.000000000000 +4294967268.000000000000 +4294967269.000000000000 +4294967270.000000000000 +4294967271.000000000000 +4294967272.000000000000 +4294967273.000000000000 +4294967274.000000000000 +4294967275.000000000000 +4294967276.000000000000 +4294967277.000000000000 +4294967278.000000000000 +4294967279.000000000000 +4294967280.000000000000 +4294967281.000000000000 +4294967282.000000000000 +4294967283.000000000000 +4294967284.000000000000 +4294967285.000000000000 +4294967286.000000000000 +4294967287.000000000000 +4294967288.000000000000 +4294967289.000000000000 +4294967290.000000000000 +4294967291.000000000000 +4294967292.000000000000 +4294967293.000000000000 +4294967294.000000000000 +4294967295.000000000000 +4294967296.000000000000 +4294967297.000000000000 + +Ghost entries (global index / value): +(4294967298/4294967298.000000000000) + +Process #0 +Local range: [4294967256, 4294967298), global size: 4294967424 +Vector data: +4294967256.000000000000 +4294967257.000000000000 +4294967258.000000000000 +4294967259.000000000000 +4294967260.000000000000 +4294967261.000000000000 +4294967262.000000000000 +4294967263.000000000000 +4294967264.000000000000 +4294967265.000000000000 +4294967266.000000000000 +4294967267.000000000000 +4294967268.000000000000 +4294967269.000000000000 +4294967270.000000000000 +4294967271.000000000000 +4294967272.000000000000 +4294967273.000000000000 +4294967274.000000000000 +4294967275.000000000000 +4294967276.000000000000 +4294967277.000000000000 +4294967278.000000000000 +4294967279.000000000000 +4294967280.000000000000 +4294967281.000000000000 +4294967282.000000000000 +4294967283.000000000000 +4294967284.000000000000 +4294967285.000000000000 +4294967286.000000000000 +4294967287.000000000000 +4294967288.000000000000 +4294967289.000000000000 +4294967290.000000000000 +4294967291.000000000000 +4294967292.000000000000 +4294967293.000000000000 +17179869024.000000000000 +34359738048.000000000000 +4294967296.000000000000 +17179869052.000000000000 + +Ghost entries (global index / value): +(4294967298/-17179869024.000000000000) + +DEAL:0::OK + +DEAL:1::Local range of proc 0: 4294967298 4294967340 +DEAL:1::vector norm: 55669139270.9 +Process #1 +Local range: [4294967298, 4294967340), global size: 4294967424 +Vector data: +4294967298.000000000000 +4294967299.000000000000 +4294967300.000000000000 +4294967301.000000000000 +4294967302.000000000000 +4294967303.000000000000 +4294967304.000000000000 +4294967305.000000000000 
+4294967306.000000000000 +4294967307.000000000000 +4294967308.000000000000 +4294967309.000000000000 +4294967310.000000000000 +4294967311.000000000000 +4294967312.000000000000 +4294967313.000000000000 +4294967314.000000000000 +4294967315.000000000000 +4294967316.000000000000 +4294967317.000000000000 +4294967318.000000000000 +4294967319.000000000000 +4294967320.000000000000 +4294967321.000000000000 +4294967322.000000000000 +4294967323.000000000000 +4294967324.000000000000 +4294967325.000000000000 +4294967326.000000000000 +4294967327.000000000000 +4294967328.000000000000 +4294967329.000000000000 +4294967330.000000000000 +4294967331.000000000000 +4294967332.000000000000 +4294967333.000000000000 +4294967334.000000000000 +4294967335.000000000000 +4294967336.000000000000 +4294967337.000000000000 +4294967338.000000000000 +4294967339.000000000000 + +Process #1 +Local range: [4294967298, 4294967340), global size: 4294967424 +Vector data: +4294967298.000000000000 +4294967299.000000000000 +4294967300.000000000000 +4294967301.000000000000 +4294967302.000000000000 +4294967303.000000000000 +4294967304.000000000000 +4294967305.000000000000 +4294967306.000000000000 +4294967307.000000000000 +4294967308.000000000000 +4294967309.000000000000 +4294967310.000000000000 +4294967311.000000000000 +4294967312.000000000000 +4294967313.000000000000 +4294967314.000000000000 +4294967315.000000000000 +4294967316.000000000000 +4294967317.000000000000 +4294967318.000000000000 +4294967319.000000000000 +4294967320.000000000000 +4294967321.000000000000 +4294967322.000000000000 +4294967323.000000000000 +4294967324.000000000000 +4294967325.000000000000 +4294967326.000000000000 +4294967327.000000000000 +4294967328.000000000000 +4294967329.000000000000 +4294967330.000000000000 +4294967331.000000000000 +4294967332.000000000000 +4294967333.000000000000 +4294967334.000000000000 +4294967335.000000000000 +4294967336.000000000000 +4294967337.000000000000 +4294967338.000000000000 +4294967339.000000000000 + +Ghost entries (global index / value): +(4294967294/4294967294.000000000000) +(4294967295/4294967295.000000000000) +(4294967297/4294967297.000000000000) + +Process #1 +Local range: [4294967298, 4294967340), global size: 4294967424 +Vector data: +-17179869024.000000000000 +4294967299.000000000000 +4294967300.000000000000 +4294967301.000000000000 +4294967302.000000000000 +4294967303.000000000000 +4294967304.000000000000 +4294967305.000000000000 +4294967306.000000000000 +4294967307.000000000000 +4294967308.000000000000 +4294967309.000000000000 +4294967310.000000000000 +4294967311.000000000000 +4294967312.000000000000 +4294967313.000000000000 +4294967314.000000000000 +4294967315.000000000000 +4294967316.000000000000 +4294967317.000000000000 +4294967318.000000000000 +4294967319.000000000000 +4294967320.000000000000 +4294967321.000000000000 +4294967322.000000000000 +4294967323.000000000000 +4294967324.000000000000 +4294967325.000000000000 +4294967326.000000000000 +4294967327.000000000000 +4294967328.000000000000 +4294967329.000000000000 +4294967330.000000000000 +4294967331.000000000000 +4294967332.000000000000 +4294967333.000000000000 +4294967334.000000000000 +4294967335.000000000000 +4294967336.000000000000 +4294967337.000000000000 +4294967338.000000000000 +4294967339.000000000000 + +Ghost entries (global index / value): +(4294967294/17179869024.000000000000) +(4294967295/34359738048.000000000000) +(4294967297/17179869052.000000000000) + +DEAL:1::OK + + +DEAL:2::Local range of proc 0: 4294967340 4294967382 +DEAL:2::vector norm: 
55669139270.9 +Process #2 +Local range: [4294967340, 4294967382), global size: 4294967424 +Vector data: +4294967340.000000000000 +4294967341.000000000000 +4294967342.000000000000 +4294967343.000000000000 +4294967344.000000000000 +4294967345.000000000000 +4294967346.000000000000 +4294967347.000000000000 +4294967348.000000000000 +4294967349.000000000000 +4294967350.000000000000 +4294967351.000000000000 +4294967352.000000000000 +4294967353.000000000000 +4294967354.000000000000 +4294967355.000000000000 +4294967356.000000000000 +4294967357.000000000000 +4294967358.000000000000 +4294967359.000000000000 +4294967360.000000000000 +4294967361.000000000000 +4294967362.000000000000 +4294967363.000000000000 +4294967364.000000000000 +4294967365.000000000000 +4294967366.000000000000 +4294967367.000000000000 +4294967368.000000000000 +4294967369.000000000000 +4294967370.000000000000 +4294967371.000000000000 +4294967372.000000000000 +4294967373.000000000000 +4294967374.000000000000 +4294967375.000000000000 +4294967376.000000000000 +4294967377.000000000000 +4294967378.000000000000 +4294967379.000000000000 +4294967380.000000000000 +4294967381.000000000000 + +Process #2 +Local range: [4294967340, 4294967382), global size: 4294967424 +Vector data: +4294967340.000000000000 +4294967341.000000000000 +4294967342.000000000000 +4294967343.000000000000 +4294967344.000000000000 +4294967345.000000000000 +4294967346.000000000000 +4294967347.000000000000 +4294967348.000000000000 +4294967349.000000000000 +4294967350.000000000000 +4294967351.000000000000 +4294967352.000000000000 +4294967353.000000000000 +4294967354.000000000000 +4294967355.000000000000 +4294967356.000000000000 +4294967357.000000000000 +4294967358.000000000000 +4294967359.000000000000 +4294967360.000000000000 +4294967361.000000000000 +4294967362.000000000000 +4294967363.000000000000 +4294967364.000000000000 +4294967365.000000000000 +4294967366.000000000000 +4294967367.000000000000 +4294967368.000000000000 +4294967369.000000000000 +4294967370.000000000000 +4294967371.000000000000 +4294967372.000000000000 +4294967373.000000000000 +4294967374.000000000000 +4294967375.000000000000 +4294967376.000000000000 +4294967377.000000000000 +4294967378.000000000000 +4294967379.000000000000 +4294967380.000000000000 +4294967381.000000000000 + +Ghost entries (global index / value): +(4294967294/4294967294.000000000000) +(4294967295/4294967295.000000000000) +(4294967297/4294967297.000000000000) +(4294967298/4294967298.000000000000) + +Process #2 +Local range: [4294967340, 4294967382), global size: 4294967424 +Vector data: +4294967340.000000000000 +4294967341.000000000000 +4294967342.000000000000 +4294967343.000000000000 +4294967344.000000000000 +4294967345.000000000000 +4294967346.000000000000 +4294967347.000000000000 +4294967348.000000000000 +4294967349.000000000000 +4294967350.000000000000 +4294967351.000000000000 +4294967352.000000000000 +4294967353.000000000000 +4294967354.000000000000 +4294967355.000000000000 +4294967356.000000000000 +4294967357.000000000000 +4294967358.000000000000 +4294967359.000000000000 +4294967360.000000000000 +4294967361.000000000000 +4294967362.000000000000 +4294967363.000000000000 +4294967364.000000000000 +4294967365.000000000000 +4294967366.000000000000 +4294967367.000000000000 +4294967368.000000000000 +4294967369.000000000000 +4294967370.000000000000 +4294967371.000000000000 +4294967372.000000000000 +4294967373.000000000000 +4294967374.000000000000 +4294967375.000000000000 +4294967376.000000000000 +4294967377.000000000000 
+4294967378.000000000000 +4294967379.000000000000 +4294967380.000000000000 +4294967381.000000000000 + +Ghost entries (global index / value): +(4294967294/17179869024.000000000000) +(4294967295/34359738048.000000000000) +(4294967297/17179869052.000000000000) +(4294967298/-17179869024.000000000000) + +DEAL:2::OK + + +DEAL:3::Local range of proc 0: 4294967382 4294967424 +DEAL:3::vector norm: 55669139270.9 +Process #3 +Local range: [4294967382, 4294967424), global size: 4294967424 +Vector data: +4294967382.000000000000 +4294967383.000000000000 +4294967384.000000000000 +4294967385.000000000000 +4294967386.000000000000 +4294967387.000000000000 +4294967388.000000000000 +4294967389.000000000000 +4294967390.000000000000 +4294967391.000000000000 +4294967392.000000000000 +4294967393.000000000000 +4294967394.000000000000 +4294967395.000000000000 +4294967396.000000000000 +4294967397.000000000000 +4294967398.000000000000 +4294967399.000000000000 +4294967400.000000000000 +4294967401.000000000000 +4294967402.000000000000 +4294967403.000000000000 +4294967404.000000000000 +4294967405.000000000000 +4294967406.000000000000 +4294967407.000000000000 +4294967408.000000000000 +4294967409.000000000000 +4294967410.000000000000 +4294967411.000000000000 +4294967412.000000000000 +4294967413.000000000000 +4294967414.000000000000 +4294967415.000000000000 +4294967416.000000000000 +4294967417.000000000000 +4294967418.000000000000 +4294967419.000000000000 +4294967420.000000000000 +4294967421.000000000000 +4294967422.000000000000 +4294967423.000000000000 + +Process #3 +Local range: [4294967382, 4294967424), global size: 4294967424 +Vector data: +4294967382.000000000000 +4294967383.000000000000 +4294967384.000000000000 +4294967385.000000000000 +4294967386.000000000000 +4294967387.000000000000 +4294967388.000000000000 +4294967389.000000000000 +4294967390.000000000000 +4294967391.000000000000 +4294967392.000000000000 +4294967393.000000000000 +4294967394.000000000000 +4294967395.000000000000 +4294967396.000000000000 +4294967397.000000000000 +4294967398.000000000000 +4294967399.000000000000 +4294967400.000000000000 +4294967401.000000000000 +4294967402.000000000000 +4294967403.000000000000 +4294967404.000000000000 +4294967405.000000000000 +4294967406.000000000000 +4294967407.000000000000 +4294967408.000000000000 +4294967409.000000000000 +4294967410.000000000000 +4294967411.000000000000 +4294967412.000000000000 +4294967413.000000000000 +4294967414.000000000000 +4294967415.000000000000 +4294967416.000000000000 +4294967417.000000000000 +4294967418.000000000000 +4294967419.000000000000 +4294967420.000000000000 +4294967421.000000000000 +4294967422.000000000000 +4294967423.000000000000 + +Ghost entries (global index / value): +(4294967294/4294967294.000000000000) +(4294967295/4294967295.000000000000) +(4294967297/4294967297.000000000000) +(4294967298/4294967298.000000000000) + +Process #3 +Local range: [4294967382, 4294967424), global size: 4294967424 +Vector data: +4294967382.000000000000 +4294967383.000000000000 +4294967384.000000000000 +4294967385.000000000000 +4294967386.000000000000 +4294967387.000000000000 +4294967388.000000000000 +4294967389.000000000000 +4294967390.000000000000 +4294967391.000000000000 +4294967392.000000000000 +4294967393.000000000000 +4294967394.000000000000 +4294967395.000000000000 +4294967396.000000000000 +4294967397.000000000000 +4294967398.000000000000 +4294967399.000000000000 +4294967400.000000000000 +4294967401.000000000000 +4294967402.000000000000 +4294967403.000000000000 +4294967404.000000000000 
+4294967405.000000000000 +4294967406.000000000000 +4294967407.000000000000 +4294967408.000000000000 +4294967409.000000000000 +4294967410.000000000000 +4294967411.000000000000 +4294967412.000000000000 +4294967413.000000000000 +4294967414.000000000000 +4294967415.000000000000 +4294967416.000000000000 +4294967417.000000000000 +4294967418.000000000000 +4294967419.000000000000 +4294967420.000000000000 +4294967421.000000000000 +4294967422.000000000000 +4294967423.000000000000 + +Ghost entries (global index / value): +(4294967294/17179869024.000000000000) +(4294967295/34359738048.000000000000) +(4294967297/17179869052.000000000000) +(4294967298/-17179869024.000000000000) + +DEAL:3::OK +
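
[Editor's note: for reference, the modified entries in the final block of each log follow directly from the test source. With min_index = 0xffffffff - 39 = 4294967256, every one of the four ranks writes a contribution to the same four global indices before compress(VectorOperation::add), so the owners end up with

  index min_index + 38:  4 * 4294967256       = 17179869024
  index min_index + 39:  4 * 2 * 4294967256   = 34359738048
  index min_index + 41:  4 * (4294967256 + 7) = 17179869052
  index min_index + 42:  4 * (-4294967256)    = -17179869024

and these are exactly the values that the subsequent update_ghost_values() mirrors back into the ghost entries printed above.]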