https://gitweb.dealii.org/ - dealii.git/commitdiff
Rename local_size -> locally_owned_size in docs
author David Wells <drwells@email.unc.edu>
Mon, 25 May 2020 16:14:14 +0000 (12:14 -0400)
committer Daniel Arndt <arndtd@ornl.gov>
Thu, 11 Feb 2021 16:48:50 +0000 (11:48 -0500)
include/deal.II/base/mpi.h
include/deal.II/lac/la_parallel_vector.h
include/deal.II/lac/petsc_block_vector.h
include/deal.II/lac/petsc_vector.h
include/deal.II/lac/petsc_vector_base.h
include/deal.II/lac/precondition.h
source/base/mpi.cc
source/dofs/dof_renumbering.cc
source/lac/petsc_parallel_vector.cc

index b138d3433fc915c5a4bd0be8cba053268c62c2fd..00df4cbebf6e603dd97cc6cc995a4175805218ad 100644 (file)
@@ -445,16 +445,16 @@ namespace Utilities
 #endif
 
     /**
-     * Given the number of locally owned elements @p local_size,
-     * create a 1:1 partitioning of the of elements across the MPI communicator @p comm.
-     * The total size of elements is the sum of @p local_size across the MPI communicator.
-     * Each process will store contiguous subset of indices, and the index set
-     * on process p+1 starts at the index one larger than the last one stored on
-     * process p.
+     * Given the number of locally owned elements @p locally_owned_size,
+     * create a 1:1 partitioning of the elements across the MPI
+     * communicator @p comm. The total number of elements is the sum of
+     * @p locally_owned_size across the MPI communicator. Each process
+     * will store a contiguous subset of indices, and the index set on
+     * process p+1 starts at the index one larger than the last one
+     * stored on process p.
      */
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm &          comm,
-                                  const IndexSet::size_type local_size);
+                                  const IndexSet::size_type locally_owned_size);
 
     /**
      * Given the total number of elements @p total_size, create an evenly
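For illustration, a minimal sketch of how the renamed interface is typically called; the per-rank count of 100 and the wrapper function name are hypothetical:

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>

    #include <vector>

    // Build the 1:1 ascending partitioning described above. Each rank passes
    // its own locally_owned_size; the result holds one contiguous IndexSet
    // per rank, in rank order.
    std::vector<dealii::IndexSet> example_partitioning(const MPI_Comm comm)
    {
      const std::vector<dealii::IndexSet> partitioning =
        dealii::Utilities::MPI::create_ascending_partitioning(comm, 100);
      // The set owned by the calling process:
      const dealii::IndexSet locally_owned =
        partitioning[dealii::Utilities::MPI::this_mpi_process(comm)];
      (void)locally_owned;
      return partitioning;
    }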
index 107b9a2e79581b4ca09cce9ebecb933338b5eb58..a9f50291b548e8734fae54232e5c4c1686647441 100644 (file)
@@ -950,7 +950,7 @@ namespace LinearAlgebra
        * the C++ standard library by returning iterators to the start and end
        * of the <i>locally owned</i> elements of this vector.
        *
-       * It holds that end() - begin() == local_size().
+       * It holds that end() - begin() == locally_owned_size().
        *
        * @note For the CUDA memory space, the iterator points to memory on the
        * device.
@@ -1032,8 +1032,8 @@ namespace LinearAlgebra
       /**
        * Read access to the data field specified by @p local_index. Locally
        * owned indices can be accessed with indices
-       * <code>[0,local_size)</code>, and ghost indices with indices
-       * <code>[local_size,local_size+ n_ghost_entries]</code>.
+       * <code>[0,locally_owned_size)</code>, and ghost indices with indices
+       * <code>[locally_owned_size,locally_owned_size+n_ghost_entries]</code>.
        *
        * Performance: Direct array access (fast).
        */
@@ -1043,8 +1043,8 @@ namespace LinearAlgebra
       /**
        * Read and write access to the data field specified by @p local_index.
        * Locally owned indices can be accessed with indices
-       * <code>[0,local_size)</code>, and ghost indices with indices
-       * <code>[local_size,local_size+n_ghosts]</code>.
+       * <code>[0,locally_owned_size())</code>, and ghost indices with indices
+       * <code>[locally_owned_size(), locally_owned_size()+n_ghosts]</code>.
        *
        * Performance: Direct array access (fast).
        */
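To illustrate the index ranges referred to above, a brief sketch, assuming v is an already-initialized (possibly ghosted) distributed vector:

    #include <deal.II/lac/la_parallel_vector.h>

    // Locally owned entries sit at local indices [0, locally_owned_size());
    // ghost entries, if any, follow starting at locally_owned_size().
    void scale_owned_entries(dealii::LinearAlgebra::distributed::Vector<double> &v)
    {
      for (unsigned int i = 0; i < v.locally_owned_size(); ++i)
        v.local_element(i) *= 2.0;
      // The same range is what begin()/end() iterate over, so
      // end() - begin() == locally_owned_size().
    }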
index 9689d093c005f808e56431a71330777745bebe08..b92f5c3e5dba8104ad32645b96f1a0bf99a170c7 100644 (file)
@@ -90,13 +90,13 @@ namespace PETScWrappers
       /**
        * Constructor. Generate a block vector with @p n_blocks blocks, each of
        * which is a parallel vector across @p communicator with @p block_size
-       * elements of which @p local_size elements are stored on the present
-       * process.
+       * elements of which @p locally_owned_size elements are stored on the
+       * present process.
        */
       explicit BlockVector(const unsigned int n_blocks,
                            const MPI_Comm &   communicator,
                            const size_type    block_size,
-                           const size_type    local_size);
+                           const size_type    locally_owned_size);
 
       /**
        * Copy constructor. Set all the properties of the parallel vector to
@@ -151,9 +151,9 @@ namespace PETScWrappers
 
       /**
        * Reinitialize the BlockVector to contain @p n_blocks of size @p
-       * block_size, each of which stores @p local_size elements locally. The
-       * @p communicator argument denotes which MPI channel each of these
-       * blocks shall communicate.
+       * block_size, each of which stores @p locally_owned_size elements
+       * locally. The @p communicator argument denotes the MPI channel over
+       * which each of these blocks shall communicate.
        *
        * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
        * zeros.
@@ -162,14 +162,14 @@ namespace PETScWrappers
       reinit(const unsigned int n_blocks,
              const MPI_Comm &   communicator,
              const size_type    block_size,
-             const size_type    local_size,
+             const size_type    locally_owned_size,
              const bool         omit_zeroing_entries = false);
 
       /**
        * Reinitialize the BlockVector such that it contains
        * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
        * dimension <tt>block_sizes[i]</tt>. Each of them stores
-       * <tt>local_sizes[i]</tt> elements on the present process.
+       * <tt>locally_owned_sizes[i]</tt> elements on the present process.
        *
        * If the number of blocks is the same as before this function was
        * called, all vectors remain the same and reinit() is called for each
@@ -188,7 +188,7 @@ namespace PETScWrappers
       void
       reinit(const std::vector<size_type> &block_sizes,
              const MPI_Comm &              communicator,
-             const std::vector<size_type> &local_sizes,
+             const std::vector<size_type> &locally_owned_sizes,
              const bool                    omit_zeroing_entries = false);
 
       /**
@@ -292,9 +292,9 @@ namespace PETScWrappers
     inline BlockVector::BlockVector(const unsigned int n_blocks,
                                     const MPI_Comm &   communicator,
                                     const size_type    block_size,
-                                    const size_type    local_size)
+                                    const size_type    locally_owned_size)
     {
-      reinit(n_blocks, communicator, block_size, local_size);
+      reinit(n_blocks, communicator, block_size, locally_owned_size);
     }
 
 
@@ -365,12 +365,12 @@ namespace PETScWrappers
     BlockVector::reinit(const unsigned int n_blocks,
                         const MPI_Comm &   communicator,
                         const size_type    block_size,
-                        const size_type    local_size,
+                        const size_type    locally_owned_size,
                         const bool         omit_zeroing_entries)
     {
       reinit(std::vector<size_type>(n_blocks, block_size),
              communicator,
-             std::vector<size_type>(n_blocks, local_size),
+             std::vector<size_type>(n_blocks, locally_owned_size),
              omit_zeroing_entries);
     }
 
@@ -379,7 +379,7 @@ namespace PETScWrappers
     inline void
     BlockVector::reinit(const std::vector<size_type> &block_sizes,
                         const MPI_Comm &              communicator,
-                        const std::vector<size_type> &local_sizes,
+                        const std::vector<size_type> &locally_owned_sizes,
                         const bool                    omit_zeroing_entries)
     {
       this->block_indices.reinit(block_sizes);
@@ -389,7 +389,7 @@ namespace PETScWrappers
       for (unsigned int i = 0; i < this->n_blocks(); ++i)
         this->components[i].reinit(communicator,
                                    block_sizes[i],
-                                   local_sizes[i],
+                                   locally_owned_sizes[i],
                                    omit_zeroing_entries);
     }
 
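A short sketch of the constructor and reinit() overloads touched here; the sizes are hypothetical (two blocks of global size 1000, of which this rank owns 500, i.e. assuming two ranks) and require deal.II built with PETSc:

    #include <deal.II/lac/petsc_block_vector.h>

    #include <vector>

    void make_block_vector(const MPI_Comm communicator)
    {
      using BlockVector = dealii::PETScWrappers::MPI::BlockVector;
      using size_type   = BlockVector::size_type;

      // Two blocks, each of global size 1000, with 500 rows owned here.
      BlockVector bv(2, communicator, 1000, 500);

      // The same layout via the vector-of-sizes overload:
      const std::vector<size_type> block_sizes(2, 1000);
      const std::vector<size_type> locally_owned_sizes(2, 500);
      bv.reinit(block_sizes, communicator, locally_owned_sizes);
    }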
index 0b85363bf92c4eff06827003636c916a38e1f004..25ccf1a8faf145e4ebd16a919a81ab2d54dee9ae 100644 (file)
@@ -170,8 +170,8 @@ namespace PETScWrappers
        * Constructor. Set dimension to @p n and initialize all elements with
        * zero.
        *
-       * @arg local_size denotes the size of the chunk that shall be stored on
-       * the present process.
+       * @arg locally_owned_size denotes the size of the chunk that shall be
+       * stored on the present process.
        *
        * @arg communicator denotes the MPI communicator over which the
        * different parts of the vector shall communicate
@@ -184,15 +184,14 @@ namespace PETScWrappers
        */
       explicit Vector(const MPI_Comm &communicator,
                       const size_type n,
-                      const size_type local_size);
-
+                      const size_type locally_owned_size);
 
       /**
        * Copy-constructor from deal.II vectors. Sets the dimension to that of
        * the given vector, and copies all elements.
        *
-       * @arg local_size denotes the size of the chunk that shall be stored on
-       * the present process.
+       * @arg locally_owned_size denotes the size of the chunk that shall be
+       * stored on the present process.
        *
        * @arg communicator denotes the MPI communicator over which the
        * different parts of the vector shall communicate
@@ -200,7 +199,7 @@ namespace PETScWrappers
       template <typename Number>
       explicit Vector(const MPI_Comm &              communicator,
                       const dealii::Vector<Number> &v,
-                      const size_type               local_size);
+                      const size_type               locally_owned_size);
 
 
       /**
@@ -308,8 +307,8 @@ namespace PETScWrappers
        * actually also reduces memory consumption, or if for efficiency the
       * same amount of memory is used for less data.
        *
-       * @p local_size denotes how many of the @p N values shall be stored
-       * locally on the present process. for less data.
+       * @p locally_owned_size denotes how many of the @p N values shall be
+       * stored locally on the present process.
        *
        * @p communicator denotes the MPI communicator henceforth to be used
        * for this vector.
@@ -320,7 +319,7 @@ namespace PETScWrappers
       void
       reinit(const MPI_Comm &communicator,
              const size_type N,
-             const size_type local_size,
+             const size_type locally_owned_size,
              const bool      omit_zeroing_entries = false);
 
       /**
@@ -329,7 +328,7 @@ namespace PETScWrappers
        * The same applies as for the other @p reinit function.
        *
        * The elements of @p v are not copied, i.e. this function is the same
-       * as calling <tt>reinit(v.size(), v.local_size(),
+       * as calling <tt>reinit(v.size(), v.locally_owned_size(),
        * omit_zeroing_entries)</tt>.
        */
       void
@@ -394,22 +393,22 @@ namespace PETScWrappers
       /**
        * Create a vector of length @p n. For this class, we create a parallel
        * vector. @p n denotes the total size of the vector to be created. @p
-       * local_size denotes how many of these elements shall be stored
+       * locally_owned_size denotes how many of these elements shall be stored
        * locally.
        */
       virtual void
-      create_vector(const size_type n, const size_type local_size);
+      create_vector(const size_type n, const size_type locally_owned_size);
 
 
 
       /**
-       * Create a vector of global length @p n, local size @p local_size and
-       * with the specified ghost indices. Note that you need to call
-       * update_ghost_values() before accessing those.
+       * Create a vector of global length @p n, local size @p
+       * locally_owned_size and with the specified ghost indices. Note that
+       * you need to call update_ghost_values() before accessing those.
        */
       virtual void
       create_vector(const size_type n,
-                    const size_type local_size,
+                    const size_type locally_owned_size,
                     const IndexSet &ghostnodes);
 
 
@@ -443,10 +442,10 @@ namespace PETScWrappers
     template <typename number>
     Vector::Vector(const MPI_Comm &              communicator,
                    const dealii::Vector<number> &v,
-                   const size_type               local_size)
+                   const size_type               locally_owned_size)
       : communicator(communicator)
     {
-      Vector::create_vector(v.size(), local_size);
+      Vector::create_vector(v.size(), locally_owned_size);
 
       *this = v;
     }
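Similarly, a sketch for the non-block parallel vector, again with hypothetical sizes (global size 1000, of which this rank owns 500):

    #include <deal.II/lac/petsc_vector.h>

    void make_vector(const MPI_Comm communicator)
    {
      // Global size 1000, of which (hypothetically) 500 entries live on this
      // process; the locally owned ranges are contiguous and ascend with rank.
      dealii::PETScWrappers::MPI::Vector v(communicator, 1000, 500);

      // Later re-partitioning uses reinit() with the same argument meaning:
      v.reinit(communicator, 1000, 500, /*omit_zeroing_entries=*/false);
    }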
index 5ec8af78887f14397bbdec08b602d62752ccde0d..6b7b6b12a164a80ff9d6cb9bd2f42b20c1487445 100644 (file)
@@ -381,7 +381,7 @@ namespace PETScWrappers
      * stored, the second the index of the one past the last one that is
      * stored locally. If this is a sequential vector, then the result will be
      * the pair (0,N), otherwise it will be a pair (i,i+n), where
-     * <tt>n=local_size()</tt>.
+     * <tt>n=locally_owned_size()</tt>.
      */
     std::pair<size_type, size_type>
     local_range() const;
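The interval returned by local_range() can be used like this (a sketch; v is assumed to be an already-created parallel PETSc vector):

    #include <deal.II/lac/petsc_vector.h>

    void fill_owned_rows(dealii::PETScWrappers::MPI::Vector &v)
    {
      // local_range() is the half-open interval [first, second) of global row
      // indices stored on this process; second - first == locally_owned_size().
      const auto range = v.local_range();
      for (auto i = range.first; i < range.second; ++i)
        v(i) = 1.0;
      v.compress(dealii::VectorOperation::insert);
    }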
index cebff5da61e848691f5b3a9fc25da3dbe507a362..d620a1e259bef26eca2d93b96f4ce2e7d1c69424 100644 (file)
@@ -2123,12 +2123,12 @@ namespace internal
     template <typename Number>
     __global__ void
     set_initial_guess_kernel(const types::global_dof_index offset,
-                             const unsigned int            local_size,
+                             const unsigned int            locally_owned_size,
                              Number *                      values)
 
     {
       const unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
-      if (index < local_size)
+      if (index < locally_owned_size)
         values[index] = (index + offset) % 11;
     }
 
index 03e9d552a6c30a7ddf4ae3d4982d96d33f13d279..800fb89ad2255fc290ce6f4c7a5f025fad749c85 100644 (file)
@@ -260,11 +260,11 @@ namespace Utilities
 
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm &          comm,
-                                  const IndexSet::size_type local_size)
+                                  const IndexSet::size_type locally_owned_size)
     {
       const unsigned int                     n_proc = n_mpi_processes(comm);
       const std::vector<IndexSet::size_type> sizes =
-        all_gather(comm, local_size);
+        all_gather(comm, locally_owned_size);
       const auto total_size =
         std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
 
@@ -716,9 +716,9 @@ namespace Utilities
 
     std::vector<IndexSet>
     create_ascending_partitioning(const MPI_Comm & /*comm*/,
-                                  const IndexSet::size_type local_size)
+                                  const IndexSet::size_type locally_owned_size)
     {
-      return std::vector<IndexSet>(1, complete_index_set(local_size));
+      return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
     }
 
     IndexSet
index 9ea1a95e33b1e04e558584f4251d0ed6bfb0dde1..f572a09eee643429949ac37d2feff5e008d40d70 100644 (file)
@@ -1382,9 +1382,9 @@ namespace DoFRenumbering
             &dof_handler.get_triangulation()))
       {
 #ifdef DEAL_II_WITH_MPI
-        types::global_dof_index local_size =
+        types::global_dof_index locally_owned_size =
           dof_handler.locally_owned_dofs().n_elements();
-        MPI_Exscan(&local_size,
+        MPI_Exscan(&locally_owned_size,
                    &my_starting_index,
                    1,
                    DEAL_II_DOF_INDEX_MPI_TYPE,
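For context, the MPI_Exscan call above computes an exclusive prefix sum of the locally owned counts; a stand-alone sketch with a hypothetical per-rank count of 100:

    #include <deal.II/base/mpi.h>

    void compute_starting_index(const MPI_Comm comm)
    {
      dealii::types::global_dof_index locally_owned_size = 100;
      dealii::types::global_dof_index my_starting_index  = 0;

      // After the call, my_starting_index on rank p equals the sum of
      // locally_owned_size over ranks 0..p-1; rank 0 conventionally keeps
      // the initial 0.
      MPI_Exscan(&locally_owned_size,
                 &my_starting_index,
                 1,
                 DEAL_II_DOF_INDEX_MPI_TYPE,
                 MPI_SUM,
                 comm);
    }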
index 0651a33839ee0cf07f39df34ecbe639e2221d6a1..31539c03da5644a34fa22a2c0fada849838f5a40 100644 (file)
@@ -41,17 +41,17 @@ namespace PETScWrappers
 
     Vector::Vector(const MPI_Comm &communicator,
                    const size_type n,
-                   const size_type local_size)
+                   const size_type locally_owned_size)
       : communicator(communicator)
     {
-      Vector::create_vector(n, local_size);
+      Vector::create_vector(n, locally_owned_size);
     }
 
 
 
     Vector::Vector(const MPI_Comm &  communicator,
                    const VectorBase &v,
-                   const size_type   local_size)
+                   const size_type   locally_owned_size)
       : VectorBase(v)
       , communicator(communicator)
     {
@@ -63,8 +63,8 @@ namespace PETScWrappers
       //
       // For the sake of backwards compatibility, preserve the behavior of the
       // copy, but correct the ownership bug. Note that in both this (and the
-      // original) implementation local_size is ultimately unused.
-      (void)local_size;
+      // original) implementation locally_owned_size is ultimately unused.
+      (void)locally_owned_size;
     }
 
 
@@ -251,14 +251,14 @@ namespace PETScWrappers
 
 
     void
-    Vector::create_vector(const size_type n, const size_type local_size)
+    Vector::create_vector(const size_type n, const size_type locally_owned_size)
     {
       (void)n;
-      AssertIndexRange(local_size, n + 1);
+      AssertIndexRange(locally_owned_size, n + 1);
       ghosted = false;
 
       const PetscErrorCode ierr =
-        VecCreateMPI(communicator, local_size, PETSC_DETERMINE, &vector);
+        VecCreateMPI(communicator, locally_owned_size, PETSC_DETERMINE, &vector);
       AssertThrow(ierr == 0, ExcPETScError(ierr));
 
       Assert(size() == n, ExcDimensionMismatch(size(), n));
@@ -268,11 +268,11 @@ namespace PETScWrappers
 
     void
     Vector::create_vector(const size_type n,
-                          const size_type local_size,
+                          const size_type locally_owned_size,
                           const IndexSet &ghostnodes)
     {
       (void)n;
-      AssertIndexRange(local_size, n + 1);
+      AssertIndexRange(locally_owned_size, n + 1);
       ghosted       = true;
       ghost_indices = ghostnodes;
 
@@ -285,7 +285,7 @@ namespace PETScWrappers
            nullptr);
 
       PetscErrorCode ierr = VecCreateGhost(communicator,
-                                           local_size,
+                                           locally_owned_size,
                                            PETSC_DETERMINE,
                                            ghostindices.size(),
                                            ptr,
@@ -302,7 +302,7 @@ namespace PETScWrappers
         ierr = VecGetOwnershipRange(vector, &begin, &end);
         AssertThrow(ierr == 0, ExcPETScError(ierr));
 
-        AssertDimension(local_size, static_cast<size_type>(end - begin));
+        AssertDimension(locally_owned_size, static_cast<size_type>(end - begin));
 
         Vec l;
         ierr = VecGhostGetLocalForm(vector, &l);
@@ -329,7 +329,7 @@ namespace PETScWrappers
       // see https://code.google.com/p/dealii/issues/detail?id=233
 #  if DEAL_II_PETSC_VERSION_LT(3, 6, 0)
       PETScWrappers::MPI::Vector zero;
-      zero.reinit(communicator, this->size(), local_size);
+      zero.reinit(communicator, this->size(), locally_owned_size);
       *this = zero;
 #  endif
     }
