From 55f1a635087e4fec9fa98d3ec25ac2530a651087 Mon Sep 17 00:00:00 2001
From: David Wells
Date: Mon, 25 May 2020 12:14:14 -0400
Subject: [PATCH] Rename local_size -> locally_owned_size in docs

---
 include/deal.II/base/mpi.h               | 14 ++++-----
 include/deal.II/lac/la_parallel_vector.h | 10 +++----
 include/deal.II/lac/petsc_block_vector.h | 30 +++++++++----------
 include/deal.II/lac/petsc_vector.h       | 37 ++++++++++++------------
 include/deal.II/lac/petsc_vector_base.h  |  2 +-
 include/deal.II/lac/precondition.h       |  4 +--
 source/base/mpi.cc                       |  8 ++---
 source/dofs/dof_renumbering.cc           |  4 +--
 source/lac/petsc_parallel_vector.cc      | 26 ++++++++---------
 9 files changed, 67 insertions(+), 68 deletions(-)

diff --git a/include/deal.II/base/mpi.h b/include/deal.II/base/mpi.h
index b138d3433f..00df4cbebf 100644
--- a/include/deal.II/base/mpi.h
+++ b/include/deal.II/base/mpi.h
@@ -445,16 +445,16 @@ namespace Utilities
 #endif

     /**
-     * Given the number of locally owned elements @p local_size,
-     * create a 1:1 partitioning of the of elements across the MPI communicator @p comm.
-     * The total size of elements is the sum of @p local_size across the MPI communicator.
-     * Each process will store contiguous subset of indices, and the index set
-     * on process p+1 starts at the index one larger than the last one stored on
-     * process p.
+     * Given the number of locally owned elements @p locally_owned_size,
+     * create a 1:1 partitioning of the elements across the MPI
+     * communicator @p comm. The total size of elements is the sum of @p
+     * locally_owned_size across the MPI communicator. Each process will
+     * store a contiguous subset of indices, and the index set on process p+1
+     * starts at the index one larger than the last one stored on process p.
      */
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm &          comm,
-                                  const IndexSet::size_type local_size);
+                                  const IndexSet::size_type locally_owned_size);

     /**
      * Given the total number of elements @p total_size, create an evenly
diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index 107b9a2e79..a9f50291b5 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -950,7 +950,7 @@ namespace LinearAlgebra
      * the C++ standard library by returning iterators to the start and end
      * of the locally owned elements of this vector.
      *
-     * It holds that end() - begin() == local_size().
+     * It holds that end() - begin() == locally_owned_size().
      *
      * @note For the CUDA memory space, the iterator points to memory on the
      * device.
@@ -1032,8 +1032,8 @@
     /**
      * Read access to the data field specified by @p local_index. Locally
      * owned indices can be accessed with indices
-     * [0,local_size), and ghost indices with indices
-     * [local_size,local_size+ n_ghost_entries].
+     * [0,locally_owned_size), and ghost indices with indices
+     * [locally_owned_size,locally_owned_size+ n_ghost_entries].
      *
      * Performance: Direct array access (fast).
      */
@@ -1043,8 +1043,8 @@
     /**
      * Read and write access to the data field specified by @p local_index.
      * Locally owned indices can be accessed with indices
-     * [0,local_size), and ghost indices with indices
-     * [local_size,local_size+n_ghosts].
+     * [0,locally_owned_size()), and ghost indices with indices
+     * [locally_owned_size(), locally_owned_size()+n_ghosts].
      *
      * Performance: Direct array access (fast).
      */
diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h
index 9689d093c0..b92f5c3e5d 100644
--- a/include/deal.II/lac/petsc_block_vector.h
+++ b/include/deal.II/lac/petsc_block_vector.h
@@ -90,13 +90,13 @@ namespace PETScWrappers
     /**
      * Constructor. Generate a block vector with @p n_blocks blocks, each of
      * which is a parallel vector across @p communicator with @p block_size
-     * elements of which @p local_size elements are stored on the present
-     * process.
+     * elements of which @p locally_owned_size elements are stored on the
+     * present process.
      */
     explicit BlockVector(const unsigned int n_blocks,
                          const MPI_Comm &   communicator,
                          const size_type    block_size,
-                         const size_type    local_size);
+                         const size_type    locally_owned_size);

     /**
      * Copy constructor. Set all the properties of the parallel vector to
@@ -151,9 +151,9 @@ namespace PETScWrappers
     /**
      * Reinitialize the BlockVector to contain @p n_blocks of size @p
-     * block_size, each of which stores @p local_size elements locally. The
-     * @p communicator argument denotes which MPI channel each of these
-     * blocks shall communicate.
+     * block_size, each of which stores @p locally_owned_size elements
+     * locally. The @p communicator argument denotes which MPI channel each
+     * of these blocks shall communicate.
      *
      * If omit_zeroing_entries==false, the vector is filled with
      * zeros.
@@ -162,14 +162,14 @@
     void
     reinit(const unsigned int n_blocks,
            const MPI_Comm &   communicator,
            const size_type    block_size,
-           const size_type    local_size,
+           const size_type    locally_owned_size,
            const bool         omit_zeroing_entries = false);

     /**
      * Reinitialize the BlockVector such that it contains
      * block_sizes.size() blocks. Each block is reinitialized to
      * dimension block_sizes[i]. Each of them stores
-     * local_sizes[i] elements on the present process.
+     * locally_owned_sizes[i] elements on the present process.
      *
      * If the number of blocks is the same as before this function was
      * called, all vectors remain the same and reinit() is called for each
@@ -188,7 +188,7 @@ namespace PETScWrappers
     void
     reinit(const std::vector<size_type> &block_sizes,
            const MPI_Comm &              communicator,
-           const std::vector<size_type> &local_sizes,
+           const std::vector<size_type> &locally_owned_sizes,
            const bool                    omit_zeroing_entries = false);

     /**
@@ -292,9 +292,9 @@ namespace PETScWrappers
   inline BlockVector::BlockVector(const unsigned int n_blocks,
                                   const MPI_Comm &   communicator,
                                   const size_type    block_size,
-                                  const size_type    local_size)
+                                  const size_type    locally_owned_size)
   {
-    reinit(n_blocks, communicator, block_size, local_size);
+    reinit(n_blocks, communicator, block_size, locally_owned_size);
   }

@@ -365,12 +365,12 @@ namespace PETScWrappers
   BlockVector::reinit(const unsigned int n_blocks,
                       const MPI_Comm &   communicator,
                       const size_type    block_size,
-                      const size_type    local_size,
+                      const size_type    locally_owned_size,
                       const bool         omit_zeroing_entries)
   {
     reinit(std::vector<size_type>(n_blocks, block_size),
            communicator,
-           std::vector<size_type>(n_blocks, local_size),
+           std::vector<size_type>(n_blocks, locally_owned_size),
            omit_zeroing_entries);
   }

@@ -379,7 +379,7 @@ namespace PETScWrappers
   inline void
   BlockVector::reinit(const std::vector<size_type> &block_sizes,
                       const MPI_Comm &              communicator,
-                      const std::vector<size_type> &local_sizes,
+                      const std::vector<size_type> &locally_owned_sizes,
                       const bool                    omit_zeroing_entries)
   {
     this->block_indices.reinit(block_sizes);
@@ -389,7 +389,7 @@
     for (unsigned int i = 0; i < this->n_blocks(); ++i)
       this->components[i].reinit(communicator,
                                  block_sizes[i],
-                                 local_sizes[i],
+                                 locally_owned_sizes[i],
                                  omit_zeroing_entries);
   }
diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h
index 0b85363bf9..25ccf1a8fa 100644
--- a/include/deal.II/lac/petsc_vector.h
+++ b/include/deal.II/lac/petsc_vector.h
@@ -170,8 +170,8 @@ namespace PETScWrappers
      * Constructor. Set dimension to @p n and initialize all elements with
      * zero.
      *
-     * @arg local_size denotes the size of the chunk that shall be stored on
-     * the present process.
+     * @arg locally_owned_size denotes the size of the chunk that shall be
+     * stored on the present process.
      *
      * @arg communicator denotes the MPI communicator over which the
      * different parts of the vector shall communicate
      */
     explicit Vector(const MPI_Comm &communicator,
                     const size_type n,
-                    const size_type local_size);
-
+                    const size_type locally_owned_size);

     /**
      * Copy-constructor from deal.II vectors. Sets the dimension to that of
      * the given vector, and copies all elements.
      *
-     * @arg local_size denotes the size of the chunk that shall be stored on
-     * the present process.
+     * @arg locally_owned_size denotes the size of the chunk that shall be
+     * stored on the present process.
      *
      * @arg communicator denotes the MPI communicator over which the
      * different parts of the vector shall communicate
      */
     template <typename number>
     explicit Vector(const MPI_Comm &              communicator,
                     const dealii::Vector<number> &v,
-                    const size_type               local_size);
+                    const size_type               locally_owned_size);

     /**
@@ -308,8 +307,8 @@
      * actually also reduces memory consumption, or if for efficiency the
      * same amount of memory is used
      *
-     * @p local_size denotes how many of the @p N values shall be stored
-     * locally on the present process. for less data.
+     * @p locally_owned_size denotes how many of the @p N values shall be
+     * stored locally on the present process. for less data.
      *
      * @p communicator denotes the MPI communicator henceforth to be used
      * for this vector.
      *
@@ -320,7 +319,7 @@
     void
     reinit(const MPI_Comm &communicator,
            const size_type N,
-           const size_type local_size,
+           const size_type locally_owned_size,
            const bool      omit_zeroing_entries = false);

     /**
@@ -329,7 +328,7 @@
      * The same applies as for the other @p reinit function.
      *
      * The elements of @p v are not copied, i.e. this function is the same
-     * as calling reinit(v.size(), v.local_size(),
+     * as calling reinit(v.size(), v.locally_owned_size(),
      * omit_zeroing_entries).
      */
     void
@@ -394,22 +393,22 @@
     /**
      * Create a vector of length @p n. For this class, we create a parallel
      * vector. @p n denotes the total size of the vector to be created. @p
-     * local_size denotes how many of these elements shall be stored
+     * locally_owned_size denotes how many of these elements shall be stored
      * locally.
      */
     virtual void
-    create_vector(const size_type n, const size_type local_size);
+    create_vector(const size_type n, const size_type locally_owned_size);

     /**
-     * Create a vector of global length @p n, local size @p local_size and
-     * with the specified ghost indices. Note that you need to call
-     * update_ghost_values() before accessing those.
+     * Create a vector of global length @p n, local size @p
+     * locally_owned_size and with the specified ghost indices. Note that
+     * you need to call update_ghost_values() before accessing those.
      */
     virtual void
     create_vector(const size_type n,
-                  const size_type local_size,
+                  const size_type locally_owned_size,
                   const IndexSet &ghostnodes);

@@ -443,10 +442,10 @@
   template <typename number>
   Vector::Vector(const MPI_Comm &              communicator,
                  const dealii::Vector<number> &v,
-                 const size_type               local_size)
+                 const size_type               locally_owned_size)
     : communicator(communicator)
   {
-    Vector::create_vector(v.size(), local_size);
+    Vector::create_vector(v.size(), locally_owned_size);

     *this = v;
   }
diff --git a/include/deal.II/lac/petsc_vector_base.h b/include/deal.II/lac/petsc_vector_base.h
index 5ec8af7888..6b7b6b12a1 100644
--- a/include/deal.II/lac/petsc_vector_base.h
+++ b/include/deal.II/lac/petsc_vector_base.h
@@ -381,7 +381,7 @@ namespace PETScWrappers
      * stored, the second the index of the one past the last one that is
      * stored locally. If this is a sequential vector, then the result will be
      * the pair (0,N), otherwise it will be a pair (i,i+n), where
-     * n=local_size().
+     * n=locally_owned_size().
      */
     std::pair<size_type, size_type>
     local_range() const;
diff --git a/include/deal.II/lac/precondition.h b/include/deal.II/lac/precondition.h
index cebff5da61..d620a1e259 100644
--- a/include/deal.II/lac/precondition.h
+++ b/include/deal.II/lac/precondition.h
@@ -2123,12 +2123,12 @@ namespace internal
     template <typename Number>
     __global__ void
     set_initial_guess_kernel(const types::global_dof_index offset,
-                             const unsigned int            local_size,
+                             const unsigned int            locally_owned_size,
                              Number *                      values)
     {
       const unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
-      if (index < local_size)
+      if (index < locally_owned_size)
         values[index] = (index + offset) % 11;
     }
diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index 03e9d552a6..800fb89ad2 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -260,11 +260,11 @@ namespace Utilities
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm &          comm,
-                                  const IndexSet::size_type local_size)
+                                  const IndexSet::size_type locally_owned_size)
     {
       const unsigned int                     n_proc = n_mpi_processes(comm);
       const std::vector<IndexSet::size_type> sizes =
-        all_gather(comm, local_size);
+        all_gather(comm, locally_owned_size);
       const auto total_size =
         std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
@@ -716,9 +716,9 @@ namespace Utilities
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm & /*comm*/,
-                                  const IndexSet::size_type local_size)
+                                  const IndexSet::size_type locally_owned_size)
     {
-      return std::vector<IndexSet>(1, complete_index_set(local_size));
+      return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
     }

     IndexSet
diff --git a/source/dofs/dof_renumbering.cc b/source/dofs/dof_renumbering.cc
index 9ea1a95e33..f572a09eee 100644
--- a/source/dofs/dof_renumbering.cc
+++ b/source/dofs/dof_renumbering.cc
@@ -1382,9 +1382,9 @@ namespace DoFRenumbering
             &dof_handler.get_triangulation()))
       {
 #ifdef DEAL_II_WITH_MPI
-        types::global_dof_index local_size =
+        types::global_dof_index locally_owned_size =
           dof_handler.locally_owned_dofs().n_elements();
-        MPI_Exscan(&local_size,
+        MPI_Exscan(&locally_owned_size,
                    &my_starting_index,
                    1,
                    DEAL_II_DOF_INDEX_MPI_TYPE,
diff --git a/source/lac/petsc_parallel_vector.cc b/source/lac/petsc_parallel_vector.cc
index 0651a33839..31539c03da 100644
--- a/source/lac/petsc_parallel_vector.cc
+++ b/source/lac/petsc_parallel_vector.cc
@@ -41,17 +41,17 @@ namespace PETScWrappers
     Vector::Vector(const MPI_Comm &communicator,
                    const size_type n,
-                   const size_type local_size)
+                   const size_type locally_owned_size)
      : communicator(communicator)
     {
-      Vector::create_vector(n, local_size);
+      Vector::create_vector(n, locally_owned_size);
     }


     Vector::Vector(const MPI_Comm &  communicator,
                    const VectorBase &v,
-                   const size_type   local_size)
+                   const size_type   locally_owned_size)
      : VectorBase(v)
      , communicator(communicator)
     {
@@ -63,8 +63,8 @@
      //
      // For the sake of backwards compatibility, preserve the behavior of the
      // copy, but correct the ownership bug. Note that in both this (and the
-     // original) implementation local_size is ultimately unused.
-     (void)local_size;
+     // original) implementation locally_owned_size is ultimately unused.
+     (void)locally_owned_size;
    }
@@ -251,14 +251,14 @@
    void
-   Vector::create_vector(const size_type n, const size_type local_size)
+   Vector::create_vector(const size_type n, const size_type locally_owned_size)
    {
      (void)n;
-     AssertIndexRange(local_size, n + 1);
+     AssertIndexRange(locally_owned_size, n + 1);
      ghosted = false;

      const PetscErrorCode ierr =
-       VecCreateMPI(communicator, local_size, PETSC_DETERMINE, &vector);
+       VecCreateMPI(communicator, locally_owned_size, PETSC_DETERMINE, &vector);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

      Assert(size() == n, ExcDimensionMismatch(size(), n));
@@ -268,11 +268,11 @@
    void
    Vector::create_vector(const size_type n,
-                         const size_type local_size,
+                         const size_type locally_owned_size,
                          const IndexSet &ghostnodes)
    {
      (void)n;
-     AssertIndexRange(local_size, n + 1);
+     AssertIndexRange(locally_owned_size, n + 1);
      ghosted       = true;
      ghost_indices = ghostnodes;
@@ -285,7 +285,7 @@
                           nullptr);

      PetscErrorCode ierr = VecCreateGhost(communicator,
-                                          local_size,
+                                          locally_owned_size,
                                           PETSC_DETERMINE,
                                           ghostindices.size(),
                                           ptr,
@@ -302,7 +302,7 @@
      ierr = VecGetOwnershipRange(vector, &begin, &end);
      AssertThrow(ierr == 0, ExcPETScError(ierr));

-     AssertDimension(local_size, static_cast<size_type>(end - begin));
+     AssertDimension(locally_owned_size, static_cast<size_type>(end - begin));

      Vec l;
      ierr = VecGhostGetLocalForm(vector, &l);
@@ -329,7 +329,7 @@
      // see https://code.google.com/p/dealii/issues/detail?id=233
 #  if DEAL_II_PETSC_VERSION_LT(3, 6, 0)
      PETScWrappers::MPI::Vector zero;
-     zero.reinit(communicator, this->size(), local_size);
+     zero.reinit(communicator, this->size(), locally_owned_size);
      *this = zero;
 #  endif
    }
-- 
2.39.5
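
For reference, a minimal caller-side sketch of the renamed interface follows; it is not part of the patch. It assumes a deal.II build with MPI and PETSc enabled, and the per-process count of 100 elements as well as the use of MPI_COMM_WORLD are illustrative choices only.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/petsc_vector.h>

#include <vector>

int main(int argc, char **argv)
{
  // Set up MPI (and PETSc, assuming deal.II was configured with it).
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const MPI_Comm comm = MPI_COMM_WORLD;

  // locally_owned_size is the per-process chunk; the global size is the sum
  // of this value over all ranks, as the renamed documentation describes.
  const dealii::IndexSet::size_type locally_owned_size = 100;

  // 1:1 ascending partitioning: one contiguous IndexSet per MPI rank.
  const std::vector<dealii::IndexSet> partitioning =
    dealii::Utilities::MPI::create_ascending_partitioning(comm,
                                                          locally_owned_size);

  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const dealii::IndexSet &my_dofs = partitioning[my_rank];

  // A PETSc-backed parallel vector whose local chunk matches the partition:
  // arguments are (communicator, global size n, locally_owned_size).
  dealii::PETScWrappers::MPI::Vector vec(comm,
                                         my_dofs.size(),
                                         my_dofs.n_elements());

  return 0;
}

On every rank, my_dofs.n_elements() equals locally_owned_size and my_dofs.size() equals the sum of the locally owned sizes over all ranks, which is exactly the 1:1 partitioning described in the mpi.h hunk above.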