* components.
*/
std::size_t
- local_size() const;
+ locally_owned_size() const;
/**
* Return an index set that describes which elements of this vector are
template <class VectorType>
inline std::size_t
-BlockVectorBase<VectorType>::local_size() const
+BlockVectorBase<VectorType>::locally_owned_size() const
{
std::size_t local_size = 0;
for (unsigned int b = 0; b < n_blocks(); ++b)
- local_size += block(b).local_size();
+ local_size += block(b).locally_owned_size();
return local_size;
}
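To make the summation above concrete, here is a minimal usage sketch (illustrative only; the single-rank partition and the two identical blocks are assumptions, mirroring the block-vector test at the end of this patch):

// hypothetical partition: this rank owns all 4 rows of each block
IndexSet local_indices(4);
local_indices.add_range(0, 4);
local_indices.compress();

std::vector<IndexSet> partitioning{local_indices, local_indices};
LinearAlgebra::distributed::BlockVector<double> v(partitioning,
                                                  MPI_COMM_WORLD);

// the block vector reports the sum of the per-block owned sizes
Assert(v.locally_owned_size() ==
         v.block(0).locally_owned_size() + v.block(1).locally_owned_size(),
       ExcInternalError());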
&start_ptr);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- const size_type vec_size = this->block(i).local_size();
+ const size_type vec_size = this->block(i).locally_owned_size();
petsc_helpers::copy_petsc_vector(start_ptr,
start_ptr + vec_size,
this->block(i).begin());
* <li> Besides the usual global access operator() it is also possible to
* access vector entries in the local index space with the function @p
* local_element(). Locally owned indices are placed first, [0,
- * local_size()), and then all ghost indices follow after them
- * contiguously, [local_size(), local_size()+n_ghost_entries()).
+ * locally_owned_size()), and then all ghost indices follow after them
+ * contiguously, [locally_owned_size(),
+ * locally_owned_size()+n_ghost_entries()); see the sketch below.
* </ul>
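+ *
+ * As a purely illustrative sketch of this layout (@p v stands for a
+ * ghosted vector of this class, and do_something() is a placeholder):
+ * @code
+ * // locally owned entries come first ...
+ * for (unsigned int i = 0; i < v.locally_owned_size(); ++i)
+ *   do_something(v.local_element(i));
+ * // ... followed contiguously by the ghost entries
+ * for (unsigned int i = v.locally_owned_size();
+ *      i < v.locally_owned_size() + v.n_ghost_entries();
+ *      ++i)
+ *   do_something(v.local_element(i));
+ * @endcode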
*
* Functions related to parallel functionality:
/**
* Return the local size of the vector, i.e., the number of indices
* owned locally.
+ *
+ * @deprecated use locally_owned_size() instead.
*/
+ DEAL_II_DEPRECATED
size_type
local_size() const;
+ /**
+ * Return the local size of the vector, i.e., the number of indices
+ * owned locally.
+ */
+ size_type
+ locally_owned_size() const;
+
/**
* Return true if the given global index is in the local range of this
* processor.
+ template <typename Number, typename MemorySpace>
+ inline typename Vector<Number, MemorySpace>::size_type
+ Vector<Number, MemorySpace>::locally_owned_size() const
+ {
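+ // the partitioner only stores the locally owned range, so its
+ // local_size() is exactly the number of locally owned entries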
+ return partitioner->local_size();
+ }
+
+
+
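A short usage sketch for the distributed vector (hedged: `locally_owned` and `ghost_indices` are hypothetical IndexSet objects, set up as in the tests further down):

LinearAlgebra::distributed::Vector<double> v(locally_owned,
                                             ghost_indices,
                                             MPI_COMM_WORLD);

// only owned entries are counted ...
Assert(v.locally_owned_size() == locally_owned.n_elements(),
       ExcInternalError());

// ... so, since the owned ranges partition the index space, the
// per-rank counts add up to the global size()
Assert(Utilities::MPI::sum(v.locally_owned_size(), MPI_COMM_WORLD) ==
         v.size(),
       ExcInternalError());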
template <typename Number, typename MemorySpace>
inline bool
Vector<Number, MemorySpace>::in_local_range(
*
* To figure out which elements exactly are stored locally, use
* local_range().
+ *
+ * @deprecated use locally_owned_size() instead.
*/
+ DEAL_II_DEPRECATED
size_type
local_size() const;
+ /**
+ * Return the local dimension of the vector, i.e. the number of elements
+ * stored on the present MPI process. For sequential vectors, this number
+ * is the same as size(), but for parallel vectors it may be smaller.
+ *
+ * To figure out which elements exactly are stored locally, use
+ * local_range().
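+ *
+ * Illustrative only (with @p v standing for a vector of this type):
+ * the length of the half-open range returned by local_range() equals
+ * this value,
+ * @code
+ * const auto range = v.local_range();
+ * Assert(range.second - range.first == v.locally_owned_size(),
+ *        ExcInternalError());
+ * @endcode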
+ */
+ size_type
+ locally_owned_size() const;
+
/**
* Return a pair of indices indicating which elements of this vector are
* stored locally. The first number is the index of the first element
* equal to the dimension of the vector space that is modeled by an object
* of this kind. This dimension is returned by size().
*
- * @deprecated use local_size() instead.
+ * @deprecated use locally_owned_size() instead.
*/
DEAL_II_DEPRECATED
size_type
* owned locally.
*/
size_type
- local_size() const;
+ locally_owned_size() const;

/**
* Return the IndexSet that represents the indices of the elements stored.
template <typename Number>
inline typename ReadWriteVector<Number>::size_type
- ReadWriteVector<Number>::local_size() const
+ ReadWriteVector<Number>::locally_owned_size() const
{
return stored_elements.n_elements();
}
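Since the function simply reports the number of elements in the stored index set, the relationship is direct; a small sketch (the 16-element set is a made-up example):

IndexSet stored(16);
stored.add_range(0, 4);
stored.compress();

LinearAlgebra::ReadWriteVector<double> rw(stored);
// equal to stored.n_elements(), i.e. 4 here
Assert(rw.locally_owned_size() == 4, ExcInternalError());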
VecGetArray(static_cast<const Vec &>(petsc_vec), &start_ptr);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- const size_type vec_size = petsc_vec.local_size();
+ const size_type vec_size = petsc_vec.locally_owned_size();
internal::copy_petsc_vector(start_ptr, start_ptr + vec_size, begin());
// restore the representation of the vector
* owned locally.
*/
size_type
- local_size() const;
+ locally_owned_size() const;
/**
* Return the MPI communicator object in use with this object.
* owned locally.
*/
size_type
- local_size() const;
+ locally_owned_size() const;
/**
* Return the MPI communicator object in use with this object.
template <typename Number>
typename Vector<Number>::size_type
- Vector<Number>::local_size() const
+ Vector<Number>::locally_owned_size() const
{
return vector->getLocalLength();
}
*
* If the vector contains ghost elements, they are included in this
* number.
+ *
+ * @deprecated use locally_owned_size() instead, which never counts
+ * ghost elements.
*/
+ DEAL_II_DEPRECATED
size_type
local_size() const;
+ /**
+ * Return the local size of the vector, i.e., the number of indices
+ * owned locally.
+ */
+ size_type
+ locally_owned_size() const;
+
/**
* Return a pair of indices indicating which elements of this vector are
* stored locally. The first number is the index of the first element
+ inline Vector::size_type
+ Vector::locally_owned_size() const
+ {
+ return owned_elements.n_elements();
+ }
+
+
+
inline std::pair<Vector::size_type, Vector::size_type>
Vector::local_range() const
{
* LinearAlgebra::ReadWriteVector.
*/
size_type
- local_size() const;
+ locally_owned_size() const;
/**
* Return whether the vector contains only elements with value zero. This
template <typename Number>
inline typename Vector<Number>::size_type
-Vector<Number>::local_size() const
+Vector<Number>::locally_owned_size() const
{
return values.size();
}
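For the serial Vector class the new name is simply another way of asking for the full length, which is what check_serial() below exercises; as a sketch:

Vector<double> v(4);
// a serial vector owns all of its elements
Assert(v.locally_owned_size() == v.size(), ExcInternalError());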
+ VectorBase::size_type
+ VectorBase::locally_owned_size() const
+ {
+ PetscInt sz;
+ const PetscErrorCode ierr = VecGetLocalSize(vector, &sz);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
+
+ return sz;
+ }
+
+
+
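A usage sketch for the PETSc wrappers (hypothetical `local_indices`/`ghost_indices`, mirroring check_ghosted_parallel() below); the point of the new function is that ghost entries no longer contribute to the count, which is also what the updated test output at the end reflects:

PETScWrappers::MPI::Vector v(local_indices, ghost_indices, MPI_COMM_WORLD);

// ghost entries are stored locally but not owned, so they are not counted
Assert(v.locally_owned_size() == local_indices.n_elements(),
       ExcInternalError());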
VectorBase::size_type
VectorBase::local_size() const
{
out.setf(std::ios::fixed, std::ios::floatfield);
if (across)
- for (size_type i = 0; i < local_size(); ++i)
+ for (size_type i = 0; i < locally_owned_size(); ++i)
out << val[i] << ' ';
else
- for (size_type i = 0; i < local_size(); ++i)
+ for (size_type i = 0; i < locally_owned_size(); ++i)
out << val[i] << std::endl;
out << std::endl;
// TH: I am relatively sure that PETSc is
// storing the local data in a contiguous
// block without indices:
- mem += local_size() * sizeof(PetscScalar);
+ mem += locally_owned_size() * sizeof(PetscScalar);
// assume that PETSc is storing one index
// and one double per ghost element
if (ghosted)
Vector::size_type
- Vector::local_size() const
+ Vector::locally_owned_size() const
{
return vector->MyLength();
}
//
// ---------------------------------------------------------------------
-// check Vector::local_size() for all supported vector types
+// check Vector::locally_owned_size() for all supported vector types
#include <deal.II/base/index_set.h>
#include <deal.II/base/utilities.h>
check_serial()
{
const auto dofs_per_proc = 4;
- VEC vec(dofs_per_proc);
+ VEC        vec(dofs_per_proc);
deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
- deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "local size: " << vec.locally_owned_size() << std::endl;
deallog << "size: " << vec.size() << std::endl;
}
const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const auto dofs_per_proc = 4;
- const auto n_dofs = dofs_per_proc*n_procs;
+ const auto n_dofs = dofs_per_proc * n_procs;
- IndexSet local_indices(n_dofs);
- const auto my_dofs_begin = dofs_per_proc*my_rank;
- const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ IndexSet   local_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc * my_rank;
+ const auto my_dofs_end   = dofs_per_proc * (my_rank + 1);
local_indices.add_range(my_dofs_begin, my_dofs_end);
local_indices.compress();
VEC vec(local_indices, MPI_COMM_WORLD);
deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
deallog << "index set size: " << local_indices.n_elements() << std::endl;
- deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "local size: " << vec.locally_owned_size() << std::endl;
deallog << "size: " << vec.size() << std::endl;
}
const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const auto dofs_per_proc = 4;
- const auto n_dofs = dofs_per_proc*n_procs;
+ const auto n_dofs = dofs_per_proc * n_procs;
- IndexSet local_indices(n_dofs);
- IndexSet ghost_indices(n_dofs);
- const auto my_dofs_begin = dofs_per_proc*my_rank;
- const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ IndexSet   local_indices(n_dofs);
+ IndexSet   ghost_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc * my_rank;
+ const auto my_dofs_end   = dofs_per_proc * (my_rank + 1);
local_indices.add_range(my_dofs_begin, my_dofs_end);
local_indices.compress();
if (my_rank == 0)
VEC vec(local_indices, ghost_indices, MPI_COMM_WORLD);
deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
deallog << "index set size: " << local_indices.n_elements() << std::endl;
- deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "local size: " << vec.locally_owned_size() << std::endl;
deallog << "size: " << vec.size() << std::endl;
}
const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
const auto dofs_per_proc = 4;
- const auto n_dofs = dofs_per_proc*n_procs;
+ const auto n_dofs = dofs_per_proc * n_procs;
- IndexSet local_indices(n_dofs);
- IndexSet ghost_indices(n_dofs);
- const auto my_dofs_begin = dofs_per_proc*my_rank;
- const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ IndexSet   local_indices(n_dofs);
+ IndexSet   ghost_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc * my_rank;
+ const auto my_dofs_end   = dofs_per_proc * (my_rank + 1);
local_indices.add_range(my_dofs_begin, my_dofs_end);
local_indices.compress();
if (my_rank == 0)
ghost_indices.add_index(my_dofs_end % n_dofs);
}
- std::vector<IndexSet> local_blocks {local_indices, local_indices};
+ std::vector<IndexSet> local_blocks{local_indices, local_indices};
// for variety do not ghost the second component
- std::vector<IndexSet> ghost_blocks {ghost_indices, IndexSet()};
+ std::vector<IndexSet> ghost_blocks{ghost_indices, IndexSet()};
VEC vec(local_blocks, ghost_blocks, MPI_COMM_WORLD);
deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
- deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "local size: " << vec.locally_owned_size() << std::endl;
deallog << "size: " << vec.size() << std::endl;
}
check_ghosted_parallel<TrilinosWrappers::MPI::Vector>();
// block vectors:
- check_ghosted_parallel_block<LinearAlgebra::distributed::BlockVector<double>>();
+ check_ghosted_parallel_block<
+ LinearAlgebra::distributed::BlockVector<double>>();
check_ghosted_parallel_block<PETScWrappers::MPI::BlockVector>();
check_ghosted_parallel_block<TrilinosWrappers::MPI::BlockVector>();
}
DEAL:0::size: 16
DEAL:0::type: dealii::TrilinosWrappers::MPI::Vector
DEAL:0::index set size: 4
-DEAL:0::local size: 5
+DEAL:0::local size: 4
DEAL:0::size: 16
DEAL:0::type: dealii::LinearAlgebra::distributed::BlockVector<double>
DEAL:0::local size: 8
DEAL:0::local size: 8
DEAL:0::size: 32
DEAL:0::type: dealii::TrilinosWrappers::MPI::BlockVector
-DEAL:0::local size: 9
+DEAL:0::local size: 8
DEAL:0::size: 32
DEAL:1::type: dealii::Vector<double>
DEAL:1::size: 16
DEAL:1::type: dealii::TrilinosWrappers::MPI::Vector
DEAL:1::index set size: 4
-DEAL:1::local size: 6
+DEAL:1::local size: 4
DEAL:1::size: 16
DEAL:1::type: dealii::LinearAlgebra::distributed::BlockVector<double>
DEAL:1::local size: 8
DEAL:1::local size: 8
DEAL:1::size: 32
DEAL:1::type: dealii::TrilinosWrappers::MPI::BlockVector
-DEAL:1::local size: 10
+DEAL:1::local size: 8
DEAL:1::size: 32
DEAL:2::size: 16
DEAL:2::type: dealii::TrilinosWrappers::MPI::Vector
DEAL:2::index set size: 4
-DEAL:2::local size: 6
+DEAL:2::local size: 4
DEAL:2::size: 16
DEAL:2::type: dealii::LinearAlgebra::distributed::BlockVector<double>
DEAL:2::local size: 8
DEAL:2::local size: 8
DEAL:2::size: 32
DEAL:2::type: dealii::TrilinosWrappers::MPI::BlockVector
-DEAL:2::local size: 10
+DEAL:2::local size: 8
DEAL:2::size: 32
DEAL:3::size: 16
DEAL:3::type: dealii::TrilinosWrappers::MPI::Vector
DEAL:3::index set size: 4
-DEAL:3::local size: 6
+DEAL:3::local size: 4
DEAL:3::size: 16
DEAL:3::type: dealii::LinearAlgebra::distributed::BlockVector<double>
DEAL:3::local size: 8
DEAL:3::local size: 8
DEAL:3::size: 32
DEAL:3::type: dealii::TrilinosWrappers::MPI::BlockVector
-DEAL:3::local size: 10
+DEAL:3::local size: 8
DEAL:3::size: 32