From 96bb2bf1bc1567a453d4bb43c77f22e2e26f75d1 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Mon, 21 Sep 2015 19:24:58 +0200
Subject: [PATCH] Fix bugs in partitioner for indices beyond 2^32. Enable use
 of distributed vector with non-zero starting range. Fix 64 bit bug in dof
 handler MG levels

---
 include/deal.II/base/partitioner.h            |  18 +--
 .../deal.II/lac/parallel_vector.templates.h   |  30 ++---
 source/base/partitioner.cc                    |  26 +++--
 source/dofs/dof_handler_policy.cc             |   4 +-
 tests/mpi/parallel_vector_16.cc               | 107 ++++++++++++++++++
 ...r_16.with_64bit_indices=on.mpirun=4.output |  11 ++
 6 files changed, 161 insertions(+), 35 deletions(-)
 create mode 100644 tests/mpi/parallel_vector_16.cc
 create mode 100644 tests/mpi/parallel_vector_16.with_64bit_indices=on.mpirun=4.output

diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index 5d856e5e03..129659aaa4 100644
--- a/include/deal.II/base/partitioner.h
+++ b/include/deal.II/base/partitioner.h
@@ -182,7 +182,7 @@ namespace Utilities
        * of freedom for the individual processor on the ghost elements present
        * (second entry).
        */
-      const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+      const std::vector<std::pair<unsigned int, unsigned int> > &
       ghost_targets() const;
 
       /**
@@ -191,7 +191,7 @@ namespace Utilities
        * structure as in an IndexSet, but tailored to be iterated over, and
       * some indices may be duplicates.
        */
-      const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
+      const std::vector<std::pair<unsigned int, unsigned int> > &
       import_indices() const;
 
       /**
@@ -206,7 +206,7 @@ namespace Utilities
        * entry), i.e., locally owned indices that are ghosts on other
        * processors.
        */
-      const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+      const std::vector<std::pair<unsigned int, unsigned int> > &
       import_targets() const;
 
       /**
@@ -306,7 +306,7 @@ namespace Utilities
       * Contains information which processors my ghost indices belong to and
       * how many those indices are
       */
-      std::vector<std::pair<unsigned int, types::global_dof_index> > ghost_targets_data;
+      std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_data;
 
      /**
       * The set of (local) indices that we are importing during compress(),
@@ -314,7 +314,7 @@ namespace Utilities
       * structure as in an IndexSet, but tailored to be iterated over, and
       * some indices may be duplicates.
       */
-      std::vector<std::pair<types::global_dof_index, types::global_dof_index> > import_indices_data;
+      std::vector<std::pair<unsigned int, unsigned int> > import_indices_data;
 
      /**
       * Caches the number of ghost indices. It would be expensive to compute
@@ -326,7 +326,7 @@ namespace Utilities
       * The set of processors and length of data field which send us their
       * ghost data
       */
-      std::vector<std::pair<unsigned int, types::global_dof_index> > import_targets_data;
+      std::vector<std::pair<unsigned int, unsigned int> > import_targets_data;
 
      /**
       * The ID of the current processor in the MPI network
@@ -466,7 +466,7 @@ namespace Utilities
 
 
    inline
-    const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+    const std::vector<std::pair<unsigned int, unsigned int> > &
    Partitioner::ghost_targets() const
    {
      return ghost_targets_data;
@@ -474,7 +474,7 @@ namespace Utilities
 
 
    inline
-    const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
+    const std::vector<std::pair<unsigned int, unsigned int> > &
    Partitioner::import_indices() const
    {
      return import_indices_data;
@@ -492,7 +492,7 @@ namespace Utilities
 
 
    inline
-    const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+    const std::vector<std::pair<unsigned int, unsigned int> > &
    Partitioner::import_targets() const
    {
      return import_targets_data;
diff --git a/include/deal.II/lac/parallel_vector.templates.h b/include/deal.II/lac/parallel_vector.templates.h
index cce0c112a3..125719894b 100644
--- a/include/deal.II/lac/parallel_vector.templates.h
+++ b/include/deal.II/lac/parallel_vector.templates.h
@@ -360,8 +360,8 @@ namespace parallel
      // make this function thread safe
      Threads::Mutex::ScopedLock lock (mutex);
 
-      const size_type n_import_targets = part.import_targets().size();
-      const size_type n_ghost_targets  = part.ghost_targets().size();
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets  = part.ghost_targets().size();
 
      // Need to send and receive the data. Use non-blocking communication,
      // where it is generally less overhead to first initiate the receive and
@@ -376,7 +376,7 @@ namespace parallel
          // allocate import_data in case it is not set up yet
          if (import_data == 0)
            import_data = new Number[part.n_import_indices()];
-          for (size_type i=0; i<n_import_targets; i++)
+          for (unsigned int i=0; i<n_import_targets; i++)
            {
              MPI_Recv_init (&import_data[current_index_start],
                             part.import_targets()[i].second*sizeof(Number),
@@ -392,7 +392,7 @@ namespace parallel
          AssertDimension (current_index_start, part.n_import_indices());
 
          current_index_start = part.local_size();
-          for (size_type i=0; i<n_ghost_targets; i++)
+          for (unsigned int i=0; i<n_ghost_targets; i++)
            {
              MPI_Send_init (&this->val[current_index_start],
                             part.ghost_targets()[i].second*sizeof(Number),
@@ -446,8 +446,8 @@ namespace parallel
      // make this function thread safe
      Threads::Mutex::ScopedLock lock (mutex);
 
-      const size_type n_import_targets = part.import_targets().size();
-      const size_type n_ghost_targets  = part.ghost_targets().size();
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets  = part.ghost_targets().size();
 
      if (operation != dealii::VectorOperation::insert)
        AssertDimension (n_ghost_targets+n_import_targets,
@@ -462,7 +462,7 @@ namespace parallel
          Assert (ierr == MPI_SUCCESS, ExcInternalError());
 
          Number *read_position = import_data;
-          std::vector<std::pair<size_type, size_type> >::const_iterator
+          std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
            my_imports = part.import_indices().begin();
 
          // If the operation is no insertion, add the imported data to the
          // locally owned array, otherwise set the vector entries directly to
          // the ones already present
          if (operation != dealii::VectorOperation::insert)
            for ( ; my_imports!=part.import_indices().end(); ++my_imports)
-              for (size_type j=my_imports->first; j<my_imports->second; j++)
+              for (unsigned int j=my_imports->first; j<my_imports->second; j++)
                local_element(j) += *read_position++;
          else
            for ( ; my_imports!=part.import_indices().end(); ++my_imports)
-              for (size_type j=my_imports->first; j<my_imports->second;
+              for (unsigned int j=my_imports->first; j<my_imports->second;
                   j++, read_position++)
                Assert(*read_position == 0. ||
                       std::abs(local_element(j) - *read_position) <=
@@ -519,8 +519,8 @@ namespace parallel
      // make this function thread safe
      Threads::Mutex::ScopedLock lock (mutex);
 
-      const size_type n_import_targets = part.import_targets().size();
-      const size_type n_ghost_targets  = part.ghost_targets().size();
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets  = part.ghost_targets().size();
 
      // Need to send and receive the data. Use non-blocking communication,
      // where it is generally less overhead to first initiate the receive and
@@ -531,7 +531,7 @@ namespace parallel
                  ExcInternalError());
      size_type current_index_start = part.local_size();
      update_ghost_values_requests.resize (n_import_targets+n_ghost_targets);
-      for (size_type i=0; i<n_ghost_targets; i++)
+      for (unsigned int i=0; i<n_ghost_targets; i++)
        {
          MPI_Recv_init (&this->val[current_index_start],
                         part.ghost_targets()[i].second*sizeof(Number),
@@ -546,7 +546,7 @@ namespace parallel
      if (import_data == 0 && part.n_import_indices() > 0)
        import_data = new Number[part.n_import_indices()];
      current_index_start = 0;
-      for (size_type i=0; i<n_import_targets; i++)
+      for (unsigned int i=0; i<n_import_targets; i++)
        {
          MPI_Send_init (&import_data[current_index_start],
                         part.import_targets()[i].second*sizeof(Number),
@@ -560,7 +560,7 @@ namespace parallel
      Number *write_position = import_data;
-      std::vector<std::pair<size_type, size_type> >::const_iterator
+      std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
        my_imports = part.import_indices().begin();
      for ( ; my_imports!=part.import_indices().end(); ++my_imports)
-        for (size_type j=my_imports->first; j<my_imports->second; j++)
+        for (unsigned int j=my_imports->first; j<my_imports->second; j++)
          *write_position++ = local_element(j);
    }
diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index b39efd0207..8cfc73e9b2 100644
--- a/source/base/partitioner.cc
+++ b/source/base/partitioner.cc
@@ -161,7 +161,13 @@ namespace Utilities
        }
 
      std::vector<types::global_dof_index> first_index (n_procs+1);
-      first_index[0] = 0;
+      // Allow non-zero start index for the vector. send this data to all
+      // processors
+      first_index[0] = local_range_data.first;
+      MPI_Bcast(&first_index[0], sizeof(types::global_dof_index), MPI_BYTE,
+                0, communicator);
+
+      // Get the end-of-local_range for all processors
      MPI_Allgather(&local_range_data.second, sizeof(types::global_dof_index),
                    MPI_BYTE, &first_index[1], sizeof(types::global_dof_index),
                    MPI_BYTE, communicator);
@@ -203,11 +209,11 @@ namespace Utilities
          // vector filled with push_back might actually be too long.
          unsigned int current_proc = 0;
          ghost_indices_data.fill_index_vector (expanded_ghost_indices);
-          unsigned int current_index = expanded_ghost_indices[0];
+          types::global_dof_index current_index = expanded_ghost_indices[0];
          while (current_index >= first_index[current_proc+1])
            current_proc++;
-          std::vector<std::pair<unsigned int, types::global_dof_index> > ghost_targets_temp
-            (1, std::pair<unsigned int, types::global_dof_index>(current_proc, 0));
+          std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_temp
+            (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
          n_ghost_targets++;
          for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
            {
@@ -219,7 +225,7 @@ namespace Utilities
              if (ghost_targets_temp[n_ghost_targets-1].first < current_proc)
                {
                  ghost_targets_temp.push_back (std::pair<unsigned int,
-                                                types::global_dof_index>(current_proc,iterator));
+                                                unsigned int>(current_proc,iterator));
                  n_ghost_targets++;
                }
            }
@@ -240,14 +246,14 @@ namespace Utilities
                    MPI_INT, communicator);
 
      // allocate memory for import data
-      std::vector<std::pair<unsigned int, types::global_dof_index> > import_targets_temp;
+      std::vector<std::pair<unsigned int, unsigned int> > import_targets_temp;
      n_import_indices_data = 0;
      for (unsigned int i=0; i<n_procs; i++)
        if (receive_buffer[i] > 0)
          {
            n_import_indices_data += receive_buffer[i];
            import_targets_temp.push_back(std::pair<unsigned int,
-                                          types::global_dof_index> (i, receive_buffer[i]));
+                                          unsigned int> (i, receive_buffer[i]));
          }
      import_targets_data = import_targets_temp;
    }
@@ -289,7 +295,7 @@ namespace Utilities
      // contiguous indices in form of ranges
      {
        types::global_dof_index last_index = numbers::invalid_dof_index-1;
-        std::vector<std::pair<unsigned int, unsigned int> > compressed_import_indices;
+        std::vector<std::pair<unsigned int, unsigned int> > compressed_import_indices;
        for (unsigned int i=0; i<n_import_indices_data; i++)
          {
            Assert (expanded_import_indices[i] >= local_range_data.first &&
@@ -298,12 +304,14 @@ namespace Utilities
                                  local_range_data.second));
            types::global_dof_index new_index = (expanded_import_indices[i] -
                                                 local_range_data.first);
+            Assert(new_index < numbers::invalid_unsigned_int,
+                   ExcNotImplemented());
            if (new_index == last_index+1)
              compressed_import_indices.back().second++;
            else
              {
                compressed_import_indices.push_back
-                  (std::pair<types::global_dof_index,types::global_dof_index>(new_index,new_index+1));
+                  (std::pair<unsigned int,unsigned int>(new_index,new_index+1));
              }
            last_index = new_index;
          }
diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc
index 9209468334..dd3eaff84e 100644
--- a/source/dofs/dof_handler_policy.cc
+++ b/source/dofs/dof_handler_policy.cc
@@ -2404,7 +2404,7 @@ namespace internal
                           number_cache
                           .n_locally_owned_dofs_per_processor.begin()
                           + tr->locally_owned_subdomain(),
-                           0);
+                           static_cast<dealii::types::global_dof_index>(0));
        for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
             it!=renumbering.end(); ++it)
          if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
@@ -2425,7 +2425,7 @@ namespace internal
                           .n_locally_owned_dofs_per_processor.begin(),
                           number_cache
                           .n_locally_owned_dofs_per_processor.end(),
-                           0);
+                           static_cast<dealii::types::global_dof_index>(0));
 
        number_cache.locally_owned_dofs = IndexSet(number_cache.n_global_dofs);
        number_cache.locally_owned_dofs
diff --git a/tests/mpi/parallel_vector_16.cc b/tests/mpi/parallel_vector_16.cc
new file mode 100644
index 0000000000..98199ee505
--- /dev/null
+++ b/tests/mpi/parallel_vector_16.cc
@@ -0,0 +1,107 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// build a vector whose elements exceed the size of unsigned int in case of 64
+// bit indices. To avoid excessive memory consumption, let the vector start at
+// a number close to the maximum of unsigned int but extend past the last
+// index
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  types::global_dof_index min_index = 0xffffffffU - 39;
+  types::global_dof_index local_size = 42;
+  IndexSet local_owned(min_index+numproc*local_size);
+  local_owned.add_range(min_index+myid*local_size, min_index+(myid+1)*local_size);
+
+  // all processors ghost some entries around invalid_unsigned_int and on the
+  // border between two processors
+  IndexSet local_relevant(local_owned.size());
+  local_relevant = local_owned;
+  local_relevant.add_range(min_index+38,min_index+40);
+  local_relevant.add_range(min_index+41,min_index+43);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+  deallog << "Local range of proc 0: " << v.local_range().first << " "
+          << v.local_range().second << std::endl;
+
+  // set local values
+  for (types::global_dof_index i=min_index+myid*local_size;
+       i