From: Martin Kronbichler
Date: Thu, 26 Nov 2015 15:16:49 +0000 (+0100)
Subject: Fix overflow issue with parallel::distributed::Vector
X-Git-Tag: v8.4.0-rc2~201^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1d606a7e8cf880080f8001ec1d446b37babc963b;p=dealii.git

Fix overflow issue with parallel::distributed::Vector
---

diff --git a/doc/news/changes.h b/doc/news/changes.h
index b9d8d2e7bf..545f7031d6 100644
--- a/doc/news/changes.h
+++ b/doc/news/changes.h
@@ -437,6 +437,13 @@ inconvenience this causes.
+  <li> Fixed: parallel::distributed::Vector now detects if the size of MPI
+  messages exceeds 2GB or if the local range exceeds the size of 32-bit
+  integers, and throws an exception about the unsupported range of operation.
+  <br>
+  (Martin Kronbichler, 2015/11/26)
+  </li>
+
   <li> Fixed: GridGenerator::extract_boundary_mesh() in 3d could generate
   surface cells that did not uniformly have a right- or left-handed coordinate
   system associated with them when viewed from one side of the surface. This

diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index 129659aaa4..415782de2d 100644
--- a/include/deal.II/base/partitioner.h
+++ b/include/deal.II/base/partitioner.h
@@ -441,7 +441,7 @@ namespace Utilities
     {
       AssertIndexRange (local_index, local_size() + n_ghost_indices_data);
       if (local_index < local_size())
-        return local_range_data.first + local_index;
+        return local_range_data.first + types::global_dof_index(local_index);
       else
         return ghost_indices_data.nth_index_in_set (local_index-local_size());
     }
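The partitioner.h hunk above converts the 32-bit local index explicitly to the
global index type before adding it to the start of the locally owned range, so
that the local-to-global mapping is carried out in the wider type. As a minimal
standalone sketch of that pattern (the type aliases and the function
local_to_global below are hypothetical stand-ins, not deal.II code):

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for the two index widths: the global index may be
// 64 bit wide, the local (per-rank) index is a 32-bit unsigned integer.
using global_index_type = std::uint64_t;
using local_index_type  = std::uint32_t;

// Map a locally owned index to its global number; the addition is performed
// in the global index type, mirroring the change in the hunk above.
global_index_type local_to_global(global_index_type local_range_begin,
                                  local_index_type  local_index)
{
  return local_range_begin + global_index_type(local_index);
}

int main()
{
  // A rank whose locally owned range starts beyond the 32-bit limit.
  const global_index_type begin = 5000000000ULL;        // 5 * 10^9
  std::cout << local_to_global(begin, 42u) << '\n';     // prints 5000000042
}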

diff --git a/include/deal.II/lac/parallel_vector.h b/include/deal.II/lac/parallel_vector.h
index b00d3b1c76..a6881ace24 100644
--- a/include/deal.II/lac/parallel_vector.h
+++ b/include/deal.II/lac/parallel_vector.h
@@ -140,6 +140,31 @@ namespace parallel
    * multiple threads. This may or may not be desired when working also with
    * MPI.
    *
+   * <h4>Limitations regarding the vector size</h4>
+   *
+   * This vector class is based on two different number types for indexing.
+   * The so-called global index type encodes the overall size of the vector.
+   * Its type is types::global_dof_index. The largest possible value is
+   * 2^32-1 or approximately four billion in case 64 bit integers are
+   * disabled at configuration of deal.II (the default case), or 2^64-1 or
+   * approximately 10^19 if 64 bit integers are enabled (see the glossary
+   * entry on @ref GlobalDoFIndex for further information).
+   *
+   * The second relevant index type is the local index used within one MPI
+   * rank. As opposed to the global index, the implementation assumes 32-bit
+   * unsigned integers unconditionally. In other words, to actually use a
+   * vector with more than four billion entries, you need to use MPI with
+   * more than one rank (which in general is a safe assumption since four
+   * billion entries consume at least 16 GB of memory for floats or 32 GB
+   * of memory for doubles). If more than four billion local elements are
+   * present, the implementation tries to detect this, in which case an
+   * exception is thrown and the program aborts. Note, however, that the
+   * detection of overflow is tricky and the detection mechanism might fail
+   * in some circumstances. Therefore, it is strongly recommended not to
+   * rely on this class to detect the case of more than four billion local
+   * elements.
+   *
    * @author Katharina Kormann, Martin Kronbichler, 2010, 2011
    */
   template <typename Number>
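The restriction documented above — the global size is only bounded by
types::global_dof_index, while each MPI rank may own at most 2^32-1 elements —
can be illustrated with a small check. This is a hedged sketch; the helper
check_local_size is hypothetical and not part of deal.II:

#include <cstdint>
#include <limits>
#include <stdexcept>

// Reject local sizes that do not fit into the 32-bit local index type; the
// global size may be larger as long as it is distributed over enough ranks.
void check_local_size(std::uint64_t n_locally_owned_elements)
{
  if (n_locally_owned_elements >=
      static_cast<std::uint64_t>(std::numeric_limits<std::uint32_t>::max()))
    throw std::runtime_error("More than 2^32-1 elements on one MPI rank are "
                             "not supported; distribute the vector over "
                             "more ranks.");
}

For example, a vector with 10^10 entries is fine as long as it is split over at
least three ranks of roughly equal size, since each local range then stays well
below the 32-bit limit.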
diff --git a/include/deal.II/lac/parallel_vector.templates.h b/include/deal.II/lac/parallel_vector.templates.h
index 5171807f70..a3d585fe85 100644
--- a/include/deal.II/lac/parallel_vector.templates.h
+++ b/include/deal.II/lac/parallel_vector.templates.h
@@ -378,6 +378,12 @@ namespace parallel
         import_data = new Number[part.n_import_indices()];
       for (unsigned int i=0; i<part.n_import_targets(); i++)
         {
+          AssertThrow (static_cast<std::size_t>(part.import_targets()[i].second)*
+                       sizeof(Number) <
+                       static_cast<std::size_t>(std::numeric_limits<int>::max()),
+                       ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+                                  "The number of ghost entries times the size of 'Number' "
+                                  "exceeds this value. This is not supported."));
           MPI_Recv_init (&import_data[current_index_start],
                          part.import_targets()[i].second*sizeof(Number),
                          MPI_BYTE,
@@ -394,6 +400,12 @@ namespace parallel
       current_index_start = part.local_size();
       for (unsigned int i=0; i<part.n_ghost_targets(); i++)
         {
+          AssertThrow (static_cast<std::size_t>(part.ghost_targets()[i].second)*
+                       sizeof(Number) <
+                       static_cast<std::size_t>(std::numeric_limits<int>::max()),
+                       ExcMessage("Index overflow: Maximum message size in MPI is 2GB. "
+                                  "The number of ghost entries times the size of 'Number' "
+                                  "exceeds this value. This is not supported."));
           MPI_Send_init (&this->val[current_index_start],
                          part.ghost_targets()[i].second*sizeof(Number),
                          MPI_BYTE,

diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index 57c1b96a0f..3e44d8b2a3 100644
--- a/source/base/partitioner.cc
+++ b/source/base/partitioner.cc
@@ -113,6 +113,9 @@ namespace Utilities
         (locally_owned_indices.nth_index_in_set(0),
          locally_owned_indices.nth_index_in_set(0) +
          locally_owned_indices.n_elements());
+      AssertThrow (local_range_data.second-local_range_data.first <
+                   static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
+                   ExcMessage("Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
       locally_owned_range_data.set_size (locally_owned_indices.size());
       locally_owned_range_data.add_range (local_range_data.first, local_range_data.second);
       locally_owned_range_data.compress();
@@ -138,6 +141,9 @@ namespace Utilities
       ghost_indices_data.set_size(locally_owned_range_data.size());
       ghost_indices_data.subtract_set (locally_owned_range_data);
       ghost_indices_data.compress();
+      AssertThrow (ghost_indices_data.n_elements() <
+                   static_cast<types::global_dof_index>(std::numeric_limits<unsigned int>::max()),
+                   ExcMessage("Index overflow: This class supports at most 2^32-1 ghost elements"));
       n_ghost_indices_data = ghost_indices_data.n_elements();
       have_ghost_indices =
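The AssertThrow calls added in parallel_vector.templates.h guard against the
MPI message-size limit: MPI_Send_init and MPI_Recv_init take the transfer size
as an int number of bytes, so the number of transferred entries times
sizeof(Number) must stay below roughly 2GB. A minimal sketch of the same check
outside deal.II (check_mpi_message_size is a hypothetical helper, not the
library's API):

#include <cstddef>
#include <limits>
#include <stdexcept>

// A single MPI message is described by an 'int' byte count, so the total
// number of bytes must stay below std::numeric_limits<int>::max() (~2GB).
void check_mpi_message_size(std::size_t n_entries, std::size_t bytes_per_entry)
{
  const std::size_t message_bytes = n_entries * bytes_per_entry;
  if (message_bytes >=
      static_cast<std::size_t>(std::numeric_limits<int>::max()))
    throw std::runtime_error("Index overflow: a single MPI message would "
                             "exceed 2GB, which is not supported.");
}

For instance, check_mpi_message_size(300000000, sizeof(double)) would throw,
since 3*10^8 doubles amount to 2.4*10^9 bytes and exceed the int range.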