Enable use of the distributed vector with a non-zero starting range.
Fix a 64-bit bug in the DoF handler MG levels.
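
For context, a minimal usage sketch of what the first change enables, mirroring the new test added at the end of this patch: a parallel::distributed::Vector whose locally owned range starts at a non-zero global offset. The function name and concrete sizes are illustrative, and a 64-bit index build is assumed, as in the test.

#include <deal.II/base/index_set.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/parallel_vector.h>

void make_offset_vector ()
{
  const unsigned int myid    = dealii::Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
  const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);

  // locally owned ranges start just below 2^32 instead of at zero
  const dealii::types::global_dof_index offset     = 0xffffffffULL - 39;
  const dealii::types::global_dof_index local_size = 42;

  dealii::IndexSet owned (offset + n_procs*local_size);
  owned.add_range (offset + myid*local_size, offset + (myid+1)*local_size);

  // additionally ghost the first entry of the neighboring processor
  dealii::IndexSet relevant (owned.size());
  relevant = owned;
  relevant.add_index (offset + ((myid+1)%n_procs)*local_size);

  dealii::parallel::distributed::Vector<double> v (owned, relevant, MPI_COMM_WORLD);
  v.update_ghost_values ();
}
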
* of freedom for the individual processor on the ghost elements present
* (second entry).
*/
- const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
ghost_targets() const;
/**
* structure as in an IndexSet, but tailored to be iterated over, and
* some indices may be duplicates.
*/
- const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
import_indices() const;
/**
* entry), i.e., locally owned indices that are ghosts on other
* processors.
*/
- const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
import_targets() const;
/**
* Contains information on which processors my ghost indices belong to and
* how many of those indices there are
*/
- std::vector<std::pair<unsigned int, types::global_dof_index> > ghost_targets_data;
+ std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_data;
/**
* The set of (local) indices that we are importing during compress(),
* structure as in an IndexSet, but tailored to be iterated over, and
* some indices may be duplicates.
*/
- std::vector<std::pair<types::global_dof_index, types::global_dof_index> > import_indices_data;
+ std::vector<std::pair<unsigned int, unsigned int> > import_indices_data;
/**
* Caches the number of ghost indices. It would be expensive to compute
* The set of processors that send us their ghost data, together with the
* length of the data field for each of them
*/
- std::vector<std::pair<unsigned int,types::global_dof_index> > import_targets_data;
+ std::vector<std::pair<unsigned int, unsigned int> > import_targets_data;
/**
* The ID of the current processor in the MPI network
inline
- const std::vector<std::pair<unsigned int, types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
Partitioner::ghost_targets() const
{
return ghost_targets_data;
inline
- const std::vector<std::pair<types::global_dof_index, types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
Partitioner::import_indices() const
{
return import_indices_data;
inline
- const std::vector<std::pair<unsigned int,types::global_dof_index> > &
+ const std::vector<std::pair<unsigned int, unsigned int> > &
Partitioner::import_targets() const
{
return import_targets_data;
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const size_type n_import_targets = part.import_targets().size();
- const size_type n_ghost_targets = part.ghost_targets().size();
+ const unsigned int n_import_targets = part.import_targets().size();
+ const unsigned int n_ghost_targets = part.ghost_targets().size();
// Need to send and receive the data. Use non-blocking communication,
// where it is generally less overhead to first initiate the receive and
// allocate import_data in case it is not set up yet
if (import_data == 0)
import_data = new Number[part.n_import_indices()];
- for (size_type i=0; i<n_import_targets; i++)
+ for (unsigned int i=0; i<n_import_targets; i++)
{
MPI_Recv_init (&import_data[current_index_start],
part.import_targets()[i].second*sizeof(Number),
Assert (part.local_size() == vector_view.size(), ExcInternalError());
current_index_start = part.local_size();
- for (size_type i=0; i<n_ghost_targets; i++)
+ for (unsigned int i=0; i<n_ghost_targets; i++)
{
MPI_Send_init (&this->val[current_index_start],
part.ghost_targets()[i].second*sizeof(Number),
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const size_type n_import_targets = part.import_targets().size();
- const size_type n_ghost_targets = part.ghost_targets().size();
+ const unsigned int n_import_targets = part.import_targets().size();
+ const unsigned int n_ghost_targets = part.ghost_targets().size();
if (operation != dealii::VectorOperation::insert)
AssertDimension (n_ghost_targets+n_import_targets,
Assert (ierr == MPI_SUCCESS, ExcInternalError());
Number *read_position = import_data;
- std::vector<std::pair<size_type, size_type> >::const_iterator
+ std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
my_imports = part.import_indices().begin();
// If the operation is not an insertion, add the imported data to
// the ones already present
if (operation != dealii::VectorOperation::insert)
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (size_type j=my_imports->first; j<my_imports->second; j++)
+ for (unsigned int j=my_imports->first; j<my_imports->second; j++)
local_element(j) += *read_position++;
else
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (size_type j=my_imports->first; j<my_imports->second;
+ for (unsigned int j=my_imports->first; j<my_imports->second;
j++, read_position++)
Assert(*read_position == 0. ||
std::abs(local_element(j) - *read_position) <=
// make this function thread safe
Threads::Mutex::ScopedLock lock (mutex);
- const size_type n_import_targets = part.import_targets().size();
- const size_type n_ghost_targets = part.ghost_targets().size();
+ const unsigned int n_import_targets = part.import_targets().size();
+ const unsigned int n_ghost_targets = part.ghost_targets().size();
// Need to send and receive the data. Use non-blocking communication,
// where it is generally less overhead to first initiate the receive and
ExcInternalError());
size_type current_index_start = part.local_size();
update_ghost_values_requests.resize (n_import_targets+n_ghost_targets);
- for (size_type i=0; i<n_ghost_targets; i++)
+ for (unsigned int i=0; i<n_ghost_targets; i++)
{
// allow writing into ghost indices even though we are in a
// const function
if (import_data == 0 && part.n_import_indices() > 0)
import_data = new Number[part.n_import_indices()];
current_index_start = 0;
- for (size_type i=0; i<n_import_targets; i++)
+ for (unsigned int i=0; i<n_import_targets; i++)
{
MPI_Send_init (&import_data[current_index_start],
part.import_targets()[i].second*sizeof(Number),
{
Assert (import_data != 0, ExcInternalError());
Number *write_position = import_data;
- std::vector<std::pair<size_type, size_type> >::const_iterator
+ std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
my_imports = part.import_indices().begin();
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
- for (size_type j=my_imports->first; j<my_imports->second; j++)
+ for (unsigned int j=my_imports->first; j<my_imports->second; j++)
*write_position++ = local_element(j);
}
}
std::vector<types::global_dof_index> first_index (n_procs+1);
- first_index[0] = 0;
+ // Allow a non-zero start index for the vector. Send this start index to
+ // all processors
+ first_index[0] = local_range_data.first;
+ MPI_Bcast(&first_index[0], sizeof(types::global_dof_index), MPI_BYTE,
+ 0, communicator);
+
+ // Get the end-of-local_range for all processors
MPI_Allgather(&local_range_data.second, sizeof(types::global_dof_index),
MPI_BYTE, &first_index[1], sizeof(types::global_dof_index),
MPI_BYTE, communicator);
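
As a note on the hunk above: after the change, first_index[p] holds the global start of processor p's locally owned range and first_index[n_procs] holds the end of the last range, provided the ranges are contiguous. A standalone sketch of the same Bcast/Allgather pattern in plain MPI (illustrative values, not the deal.II code):

#include <mpi.h>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

int main (int argc, char **argv)
{
  MPI_Init (&argc, &argv);
  int rank, n_procs;
  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
  MPI_Comm_size (MPI_COMM_WORLD, &n_procs);

  // hypothetical contiguous ranges: rank r owns [offset + 42*r, offset + 42*(r+1))
  const std::uint64_t offset = 4294967256ULL; // just below 2^32, as in the test below
  std::pair<std::uint64_t,std::uint64_t> local_range (offset + 42ULL*rank,
                                                      offset + 42ULL*(rank+1));

  std::vector<std::uint64_t> first_index (n_procs+1);
  // the start of rank 0's range need not be zero, so broadcast it ...
  first_index[0] = local_range.first;
  MPI_Bcast (&first_index[0], sizeof(std::uint64_t), MPI_BYTE, 0, MPI_COMM_WORLD);
  // ... and gather the end of every rank's range; entry p+1 equals the start of
  // rank p+1's range because the ranges are contiguous
  MPI_Allgather (&local_range.second, sizeof(std::uint64_t), MPI_BYTE,
                 &first_index[1], sizeof(std::uint64_t), MPI_BYTE, MPI_COMM_WORLD);

  if (rank == 0)
    for (int p = 0; p <= n_procs; ++p)
      std::printf ("first_index[%d] = %llu\n", p, (unsigned long long)first_index[p]);

  MPI_Finalize ();
}
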
// vector filled with push_back might actually be too long.
unsigned int current_proc = 0;
ghost_indices_data.fill_index_vector (expanded_ghost_indices);
- unsigned int current_index = expanded_ghost_indices[0];
+ types::global_dof_index current_index = expanded_ghost_indices[0];
while (current_index >= first_index[current_proc+1])
current_proc++;
- std::vector<std::pair<unsigned int,types::global_dof_index> > ghost_targets_temp
- (1, std::pair<unsigned int, types::global_dof_index>(current_proc, 0));
+ std::vector<std::pair<unsigned int, unsigned int> > ghost_targets_temp
+ (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
n_ghost_targets++;
for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
ghost_targets_temp[n_ghost_targets-1].second =
iterator - ghost_targets_temp[n_ghost_targets-1].second;
ghost_targets_temp.push_back(std::pair<unsigned int,
- types::global_dof_index>(current_proc,iterator));
+ unsigned int>(current_proc,iterator));
n_ghost_targets++;
}
}
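
The loop in the hunk above walks the sorted ghost indices once and records, for each owning processor, how many ghost indices fall into its range. A standalone sketch of the same idea (illustrative names, not the deal.II code; the patched code accumulates running offsets and converts them into counts afterwards, but it ends up with the same (owner, count) pairs):

#include <cstdint>
#include <utility>
#include <vector>

// first_index has n_procs+1 entries: the start of each rank's range plus the global end
std::vector<std::pair<unsigned int, unsigned int> >
owners_and_counts (const std::vector<std::uint64_t> &sorted_ghost_indices,
                   const std::vector<std::uint64_t> &first_index)
{
  std::vector<std::pair<unsigned int, unsigned int> > targets;
  unsigned int proc = 0;
  for (const std::uint64_t g : sorted_ghost_indices)
    {
      while (g >= first_index[proc+1]) // advance to the owning processor
        ++proc;
      if (targets.empty() || targets.back().first != proc)
        targets.push_back (std::make_pair (proc, 0u));
      ++targets.back().second;
    }
  return targets;
}
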
MPI_INT, communicator);
// allocate memory for import data
- std::vector<std::pair<unsigned int,types::global_dof_index> > import_targets_temp;
+ std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
n_import_indices_data = 0;
for (unsigned int i=0; i<n_procs; i++)
if (receive_buffer[i] > 0)
{
n_import_indices_data += receive_buffer[i];
import_targets_temp.push_back(std::pair<unsigned int,
- types::global_dof_index> (i, receive_buffer[i]));
+ unsigned int> (i, receive_buffer[i]));
}
import_targets_data = import_targets_temp;
}
// contiguous indices in form of ranges
{
types::global_dof_index last_index = numbers::invalid_dof_index-1;
- std::vector<std::pair<types::global_dof_index,types::global_dof_index> > compressed_import_indices;
+ std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
for (unsigned int i=0; i<n_import_indices_data; i++)
{
Assert (expanded_import_indices[i] >= local_range_data.first &&
local_range_data.second));
types::global_dof_index new_index = (expanded_import_indices[i] -
local_range_data.first);
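+ // the import index shifted into the local range must fit into the unsigned int pairs built below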
+ Assert(new_index<numbers::invalid_unsigned_int,
+ ExcNotImplemented());
if (new_index == last_index+1)
compressed_import_indices.back().second++;
else
{
compressed_import_indices.push_back
- (std::pair<types::global_dof_index,types::global_dof_index>(new_index,new_index+1));
+ (std::pair<unsigned int,unsigned int>(new_index,new_index+1));
}
last_index = new_index;
}
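
The compression in the hunk above turns a sorted, possibly repeating list of local indices into half-open [begin, end) pairs of unsigned int. A standalone sketch of the same idea (illustrative name, not the deal.II code):

#include <utility>
#include <vector>

std::vector<std::pair<unsigned int, unsigned int> >
compress_to_ranges (const std::vector<unsigned int> &sorted_local_indices)
{
  std::vector<std::pair<unsigned int, unsigned int> > ranges;
  for (const unsigned int idx : sorted_local_indices)
    {
      if (!ranges.empty() && idx == ranges.back().second)
        ++ranges.back().second;                            // extend the current range
      else
        ranges.push_back (std::make_pair (idx, idx+1));    // open a new range [idx, idx+1)
    }
  // e.g. {3, 4, 5, 9, 10, 20} becomes {[3,6), [9,11), [20,21)};
  // a duplicate index simply opens another one-element range, as in the patch
  return ranges;
}
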
number_cache
.n_locally_owned_dofs_per_processor.begin()
+ tr->locally_owned_subdomain(),
- 0);
+ static_cast<dealii::types::global_dof_index>(0));
for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
it!=renumbering.end(); ++it)
if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
.n_locally_owned_dofs_per_processor.begin(),
number_cache
.n_locally_owned_dofs_per_processor.end(),
- 0);
+ static_cast<dealii::types::global_dof_index>(0));
number_cache.locally_owned_dofs = IndexSet(number_cache.n_global_dofs);
number_cache.locally_owned_dofs
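
The two casts in the dof_handler_policy.cc hunk above matter because these appear to be std::accumulate calls, and std::accumulate deduces its accumulator type from the initial value: with a plain 0 the partial sums of 64-bit DoF counts pass through int and get truncated. A standalone sketch of the pitfall (illustrative numbers):

#include <cstdint>
#include <cstdio>
#include <numeric>
#include <vector>

int main ()
{
  const std::vector<std::uint64_t> dofs_per_proc (4, 2000000000ULL); // 2e9 DoFs each

  // with a plain 0 the accumulator is int, so the 8e9 sum would be truncated:
  //   std::accumulate (dofs_per_proc.begin(), dofs_per_proc.end(), 0);

  // with a 64-bit initial value the sum is exact
  const std::uint64_t n_global_dofs =
    std::accumulate (dofs_per_proc.begin(), dofs_per_proc.end(),
                     static_cast<std::uint64_t>(0));

  std::printf ("%llu\n", (unsigned long long)n_global_dofs); // prints 8000000000
}
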
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2011 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// build a vector whose global indices exceed the range of unsigned int in case
+// of 64-bit indices. To avoid excessive memory consumption, let the vector
+// start at a number close to the maximum of unsigned int and extend past it
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+ types::global_dof_index min_index = 0xffffffffU - 39;
+ types::global_dof_index local_size = 42;
+ IndexSet local_owned(min_index+numproc*local_size);
+ local_owned.add_range(min_index+myid*local_size, min_index+(myid+1)*local_size);
+
+ // all processors ghost some entries around invalid_unsigned_int and on the
+ // border between two processors
+ IndexSet local_relevant(local_owned.size());
+ local_relevant = local_owned;
+ local_relevant.add_range(min_index+38,min_index+40);
+ local_relevant.add_range(min_index+41,min_index+43);
+
+ parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+ deallog << "Local range of proc 0: " << v.local_range().first << " "
+ << v.local_range().second << std::endl;
+
+ // set local values
+ for (types::global_dof_index i=min_index+myid*local_size;
+ i<min_index+(myid+1)*local_size; ++i)
+ v(i) = (double)i;
+
+ deallog << "vector norm: " << v.l2_norm() << std::endl;
+
+ // check ghost values
+ deallog << "v(42) = " << v(min_index+42) << std::endl;
+ v.update_ghost_values();
+ deallog << "v(42) = " << v(min_index+42) << std::endl;
+ Assert(v(min_index+41) == min_index+41, ExcInternalError());
+ Assert(v(min_index+39) == min_index+39, ExcInternalError());
+ Assert(v(min_index+38) == min_index+38, ExcInternalError());
+
+ v.zero_out_ghosts();
+ v(min_index+38) = min_index;
+ v(min_index+39) = min_index*2;
+ v(min_index+41) = min_index+7;
+ v(min_index+42) = -static_cast<double>(min_index);
+ v.compress(VectorOperation::add);
+ v.update_ghost_values();
+ deallog << "v(38) = " << v(min_index+38) << std::endl;
+ deallog << "v(39) = " << v(min_index+39) << std::endl;
+ deallog << "v(41) = " << v(min_index+41) << std::endl;
+ deallog << "v(42) = " << v(min_index+42) << std::endl;
+
+ deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog << std::setprecision(12);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ test();
+ }
+ else
+ test();
+
+}
--- /dev/null
+
+DEAL:0::numproc=4
+DEAL:0::Local range of proc 0: 4294967256 4294967298
+DEAL:0::vector norm: 55669139270.9
+DEAL:0::v(42) = 0
+DEAL:0::v(42) = 4294967298.00
+DEAL:0::v(38) = 17179869024.0
+DEAL:0::v(39) = 34359738048.0
+DEAL:0::v(41) = 17179869052.0
+DEAL:0::v(42) = -17179869024.0
+DEAL:0::OK