#endif
/**
- * Given the number of locally owned elements @p local_size,
- * create a 1:1 partitioning of the of elements across the MPI communicator @p comm.
- * The total size of elements is the sum of @p local_size across the MPI communicator.
- * Each process will store contiguous subset of indices, and the index set
- * on process p+1 starts at the index one larger than the last one stored on
- * process p.
+ * Given the number of locally owned elements @p locally_owned_size,
+ * create a 1:1 partitioning of the elements across the MPI
+ * communicator @p comm. The total number of elements is the sum of
+ * @p locally_owned_size across the MPI communicator. Each process
+ * will store a contiguous subset of indices, and the index set on
+ * process p+1 starts at the index one larger than the last one stored
+ * on process p.
*/
std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm & comm,
- const IndexSet::size_type local_size);
+ const IndexSet::size_type locally_owned_size);
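(Illustrative sketch, not part of the patch: assuming this is
Utilities::MPI::create_ascending_partitioning() and that
Utilities::MPI::this_mpi_process() is available, a caller passes its own
element count and receives one contiguous IndexSet per rank.)

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>

  // Every rank asks for 100 locally owned elements; the result covers
  // [0, 100 * n_ranks) with ascending, contiguous ranges.
  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const std::vector<IndexSet> owned =
    Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD, 100);
  // owned[my_rank] holds the half-open range [100 * my_rank, 100 * (my_rank + 1)).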
/**
* Given the total number of elements @p total_size, create an evenly
* the C++ standard library by returning iterators to the start and end
* of the <i>locally owned</i> elements of this vector.
*
- * It holds that end() - begin() == local_size().
+ * It holds that end() - begin() == locally_owned_size().
*
* @note For the CUDA memory space, the iterator points to memory on the
* device.
/**
* Read access to the data field specified by @p local_index. Locally
* owned indices can be accessed with indices
- * <code>[0,local_size)</code>, and ghost indices with indices
- * <code>[local_size,local_size+ n_ghost_entries]</code>.
+ * <code>[0,locally_owned_size)</code>, and ghost indices with indices
+ * <code>[locally_owned_size,locally_owned_size+n_ghost_entries)</code>.
*
* Performance: Direct array access (fast).
*/
/**
* Read and write access to the data field specified by @p local_index.
* Locally owned indices can be accessed with indices
- * <code>[0,local_size)</code>, and ghost indices with indices
- * <code>[local_size,local_size+n_ghosts]</code>.
+ * <code>[0,locally_owned_size())</code>, and ghost indices with indices
+ * <code>[locally_owned_size(),locally_owned_size()+n_ghosts)</code>.
*
* Performance: Direct array access (fast).
*/
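(Illustrative sketch, not part of the patch: assuming these comments document
local_element() of a distributed vector such as a
LinearAlgebra::distributed::Vector<double> named v, the two local index
ranges are used like this.)

  // Owned entries occupy local indices [0, locally_owned_size()).
  for (unsigned int i = 0; i < v.locally_owned_size(); ++i)
    v.local_element(i) = 2.0 * v.local_element(i);

  // Ghost entries follow at local indices locally_owned_size() and above;
  // refresh them after owned entries change on other ranks.
  v.update_ghost_values();
  const double g = v.local_element(v.locally_owned_size()); // first ghost entry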
/**
* Constructor. Generate a block vector with @p n_blocks blocks, each of
* which is a parallel vector across @p communicator with @p block_size
- * elements of which @p local_size elements are stored on the present
- * process.
+ * elements, of which @p locally_owned_size elements are stored on the
+ * present process.
*/
explicit BlockVector(const unsigned int n_blocks,
const MPI_Comm & communicator,
const size_type block_size,
- const size_type local_size);
+ const size_type locally_owned_size);
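(Illustrative sketch, not part of the patch: assuming this is the constructor
of PETScWrappers::MPI::BlockVector, the last two arguments give the global and
the per-process size of every block.)

  // Three blocks, each a parallel vector of global size 100 of which this
  // process stores 25 entries (the example assumes 4 MPI ranks).
  PETScWrappers::MPI::BlockVector bv(3, MPI_COMM_WORLD, 100, 25);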
/**
* Copy constructor. Set all the properties of the parallel vector to
/**
* Reinitialize the BlockVector to contain @p n_blocks of size @p
- * block_size, each of which stores @p local_size elements locally. The
- * @p communicator argument denotes which MPI channel each of these
- * blocks shall communicate.
+ * block_size, each of which stores @p locally_owned_size elements
+ * locally. The @p communicator argument denotes the MPI channel over
+ * which each of these blocks shall communicate.
*
* If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
* zeros.
reinit(const unsigned int n_blocks,
const MPI_Comm & communicator,
const size_type block_size,
- const size_type local_size,
+ const size_type locally_owned_size,
const bool omit_zeroing_entries = false);
/**
* Reinitialize the BlockVector such that it contains
* <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
* dimension <tt>block_sizes[i]</tt>. Each of them stores
- * <tt>local_sizes[i]</tt> elements on the present process.
+ * <tt>locally_owned_sizes[i]</tt> elements on the present process.
*
* If the number of blocks is the same as before this function was
* called, all vectors remain the same and reinit() is called for each
void
reinit(const std::vector<size_type> &block_sizes,
const MPI_Comm & communicator,
- const std::vector<size_type> &local_sizes,
+ const std::vector<size_type> &locally_owned_sizes,
const bool omit_zeroing_entries = false);
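(Illustrative sketch, not part of the patch, reusing the bv from the
constructor sketch above: blocks of different sizes get their global and
locally owned sizes as parallel arrays. The values below assume exactly
2 MPI ranks, so that the locally owned sizes sum to the block sizes.)

  using size_type = PETScWrappers::MPI::BlockVector::size_type;
  const std::vector<size_type> block_sizes         = {50, 70};
  const std::vector<size_type> locally_owned_sizes = {25, 35};
  bv.reinit(block_sizes, MPI_COMM_WORLD, locally_owned_sizes);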
/**
inline BlockVector::BlockVector(const unsigned int n_blocks,
const MPI_Comm & communicator,
const size_type block_size,
- const size_type local_size)
+ const size_type locally_owned_size)
{
- reinit(n_blocks, communicator, block_size, local_size);
+ reinit(n_blocks, communicator, block_size, locally_owned_size);
}
BlockVector::reinit(const unsigned int n_blocks,
const MPI_Comm & communicator,
const size_type block_size,
- const size_type local_size,
+ const size_type locally_owned_size,
const bool omit_zeroing_entries)
{
reinit(std::vector<size_type>(n_blocks, block_size),
communicator,
- std::vector<size_type>(n_blocks, local_size),
+ std::vector<size_type>(n_blocks, locally_owned_size),
omit_zeroing_entries);
}
inline void
BlockVector::reinit(const std::vector<size_type> &block_sizes,
const MPI_Comm & communicator,
- const std::vector<size_type> &local_sizes,
+ const std::vector<size_type> &locally_owned_sizes,
const bool omit_zeroing_entries)
{
this->block_indices.reinit(block_sizes);
for (unsigned int i = 0; i < this->n_blocks(); ++i)
this->components[i].reinit(communicator,
block_sizes[i],
- local_sizes[i],
+ locally_owned_sizes[i],
omit_zeroing_entries);
}
* Constructor. Set dimension to @p n and initialize all elements with
* zero.
*
- * @arg local_size denotes the size of the chunk that shall be stored on
- * the present process.
+ * @arg locally_owned_size denotes the size of the chunk that shall be
+ * stored on the present process.
*
* @arg communicator denotes the MPI communicator over which the
* different parts of the vector shall communicate
*/
explicit Vector(const MPI_Comm &communicator,
const size_type n,
- const size_type local_size);
-
+ const size_type locally_owned_size);
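(Illustrative sketch, not part of the patch: assuming this is
PETScWrappers::MPI::Vector, the constructor takes the communicator, the
global size, and the part stored locally.)

  // Global size 1000, of which this process stores 250 entries
  // (the example assumes 4 MPI ranks).
  PETScWrappers::MPI::Vector v(MPI_COMM_WORLD, 1000, 250);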
/**
* Copy-constructor from deal.II vectors. Sets the dimension to that of
* the given vector, and copies all elements.
*
- * @arg local_size denotes the size of the chunk that shall be stored on
- * the present process.
+ * @arg locally_owned_size denotes the size of the chunk that shall be
+ * stored on the present process.
*
* @arg communicator denotes the MPI communicator over which the
* different parts of the vector shall communicate
template <typename Number>
explicit Vector(const MPI_Comm & communicator,
const dealii::Vector<Number> &v,
- const size_type local_size);
+ const size_type locally_owned_size);
/**
* actually also reduces memory consumption, or if for efficiency the
* same amount of memory is used
*
- * @p local_size denotes how many of the @p N values shall be stored
- * locally on the present process. for less data.
+ * @p locally_owned_size denotes how many of the @p N values shall be
+ * stored locally on the present process.
*
* @p communicator denotes the MPI communicator henceforth to be used
* for this vector.
void
reinit(const MPI_Comm &communicator,
const size_type N,
- const size_type local_size,
+ const size_type locally_owned_size,
const bool omit_zeroing_entries = false);
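(Illustrative sketch, not part of the patch, reusing the vector v from the
constructor sketch above: reinit() resizes the vector in place.)

  // Grow to global size 2000 with 500 locally owned entries; the entries
  // are zeroed because omit_zeroing_entries defaults to false.
  v.reinit(MPI_COMM_WORLD, 2000, 500);

  // Same resize, but skip zeroing the entries.
  v.reinit(MPI_COMM_WORLD, 2000, 500, /*omit_zeroing_entries=*/true);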
/**
* The same applies as for the other @p reinit function.
*
* The elements of @p v are not copied, i.e. this function is the same
- * as calling <tt>reinit(v.size(), v.local_size(),
+ * as calling <tt>reinit(v.size(), v.locally_owned_size(),
* omit_zeroing_entries)</tt>.
*/
void
/**
* Create a vector of length @p n. For this class, we create a parallel
* vector. @p n denotes the total size of the vector to be created. @p
- * local_size denotes how many of these elements shall be stored
+ * locally_owned_size denotes how many of these elements shall be stored
* locally.
*/
virtual void
- create_vector(const size_type n, const size_type local_size);
+ create_vector(const size_type n, const size_type locally_owned_size);
/**
- * Create a vector of global length @p n, local size @p local_size and
- * with the specified ghost indices. Note that you need to call
- * update_ghost_values() before accessing those.
+ * Create a vector of global length @p n, local size
+ * @p locally_owned_size, and with the specified ghost indices. Note
+ * that you need to call update_ghost_values() before accessing those.
*/
virtual void
create_vector(const size_type n,
- const size_type local_size,
+ const size_type locally_owned_size,
const IndexSet &ghostnodes);
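(Illustrative sketch, not part of the patch: create_vector() is normally
reached through the IndexSet-based interface; the constructor used below,
Vector(const IndexSet &, const IndexSet &, const MPI_Comm &), is assumed to
exist in this wrapper and to end up in this overload.)

  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

  // 200 global entries split across 2 ranks, plus one ghost entry that is
  // owned by the other rank.
  IndexSet locally_owned(200);
  locally_owned.add_range(100 * my_rank, 100 * (my_rank + 1));
  IndexSet ghosts(200);
  ghosts.add_index((100 * (my_rank + 1)) % 200);

  PETScWrappers::MPI::Vector ghosted(locally_owned, ghosts, MPI_COMM_WORLD);
  ghosted.update_ghost_values(); // required before reading ghost entries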
template <typename number>
Vector::Vector(const MPI_Comm & communicator,
const dealii::Vector<number> &v,
- const size_type local_size)
+ const size_type locally_owned_size)
: communicator(communicator)
{
- Vector::create_vector(v.size(), local_size);
+ Vector::create_vector(v.size(), locally_owned_size);
*this = v;
}
* stored, the second the index of the one past the last one that is
* stored locally. If this is a sequential vector, then the result will be
* the pair (0,N), otherwise it will be a pair (i,i+n), where
- * <tt>n=local_size()</tt>.
+ * <tt>n=locally_owned_size()</tt>.
*/
std::pair<size_type, size_type>
local_range() const;
template <typename Number>
__global__ void
set_initial_guess_kernel(const types::global_dof_index offset,
- const unsigned int local_size,
+ const unsigned int locally_owned_size,
Number * values)
{
const unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
- if (index < local_size)
+ if (index < locally_owned_size)
values[index] = (index + offset) % 11;
}
std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm & comm,
- const IndexSet::size_type local_size)
+ const IndexSet::size_type locally_owned_size)
{
const unsigned int n_proc = n_mpi_processes(comm);
const std::vector<IndexSet::size_type> sizes =
- all_gather(comm, local_size);
+ all_gather(comm, locally_owned_size);
const auto total_size =
std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
std::vector<IndexSet>
create_ascending_partitioning(const MPI_Comm & /*comm*/,
- const IndexSet::size_type local_size)
+ const IndexSet::size_type locally_owned_size)
{
- return std::vector<IndexSet>(1, complete_index_set(local_size));
+ return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
}
IndexSet
&dof_handler.get_triangulation()))
{
#ifdef DEAL_II_WITH_MPI
- types::global_dof_index local_size =
+ types::global_dof_index locally_owned_size =
dof_handler.locally_owned_dofs().n_elements();
- MPI_Exscan(&local_size,
+ MPI_Exscan(&locally_owned_size,
&my_starting_index,
1,
DEAL_II_DOF_INDEX_MPI_TYPE,
Vector::Vector(const MPI_Comm &communicator,
const size_type n,
- const size_type local_size)
+ const size_type locally_owned_size)
: communicator(communicator)
{
- Vector::create_vector(n, local_size);
+ Vector::create_vector(n, locally_owned_size);
}
Vector::Vector(const MPI_Comm & communicator,
const VectorBase &v,
- const size_type local_size)
+ const size_type locally_owned_size)
: VectorBase(v)
, communicator(communicator)
{
//
// For the sake of backwards compatibility, preserve the behavior of the
// copy, but correct the ownership bug. Note that in both this (and the
- // original) implementation local_size is ultimately unused.
- (void)local_size;
+ // original) implementation locally_owned_size is ultimately unused.
+ (void)locally_owned_size;
}
void
- Vector::create_vector(const size_type n, const size_type local_size)
+ Vector::create_vector(const size_type n, const size_type locally_owned_size)
{
(void)n;
- AssertIndexRange(local_size, n + 1);
+ AssertIndexRange(locally_owned_size, n + 1);
ghosted = false;
const PetscErrorCode ierr =
- VecCreateMPI(communicator, local_size, PETSC_DETERMINE, &vector);
+ VecCreateMPI(communicator, locally_owned_size, PETSC_DETERMINE, &vector);
AssertThrow(ierr == 0, ExcPETScError(ierr));
Assert(size() == n, ExcDimensionMismatch(size(), n));
void
Vector::create_vector(const size_type n,
- const size_type local_size,
+ const size_type locally_owned_size,
const IndexSet &ghostnodes)
{
(void)n;
- AssertIndexRange(local_size, n + 1);
+ AssertIndexRange(locally_owned_size, n + 1);
ghosted = true;
ghost_indices = ghostnodes;
nullptr);
PetscErrorCode ierr = VecCreateGhost(communicator,
- local_size,
+ locally_owned_size,
PETSC_DETERMINE,
ghostindices.size(),
ptr,
ierr = VecGetOwnershipRange(vector, &begin, &end);
AssertThrow(ierr == 0, ExcPETScError(ierr));
- AssertDimension(local_size, static_cast<size_type>(end - begin));
+ AssertDimension(locally_owned_size, static_cast<size_type>(end - begin));
Vec l;
ierr = VecGhostGetLocalForm(vector, &l);
// see https://code.google.com/p/dealii/issues/detail?id=233
# if DEAL_II_PETSC_VERSION_LT(3, 6, 0)
PETScWrappers::MPI::Vector zero;
- zero.reinit(communicator, this->size(), local_size);
+ zero.reinit(communicator, this->size(), locally_owned_size);
*this = zero;
# endif
}