std::size_t
size() const;
+ /**
+ * Return the local dimension of the vector, i.e., the number of values
+ * stored on the current processor. This is the sum of the local
+ * dimensions of all components.
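+ *
+ * For example, for a block vector <code>v</code> with two blocks this is
+ * the value of <code>v.block(0).local_size() + v.block(1).local_size()</code>.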
+ */
+ std::size_t
+ local_size() const;
+
/**
* Return an index set that describes which elements of this vector are
* owned by the current processor. Note that this index set does not include
+template <class VectorType>
+inline std::size_t
+BlockVectorBase<VectorType>::local_size() const
+{
+ std::size_t local_size = 0;
+ for (unsigned int b = 0; b < n_blocks(); ++b)
+ local_size += block(b).local_size();
+ return local_size;
+}
+
+
+
template <class VectorType>
inline IndexSet
BlockVectorBase<VectorType>::locally_owned_elements() const
inline typename Vector<Number, MemorySpace>::size_type
Vector<Number, MemorySpace>::local_size() const
{
- return partitioner->local_size();
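+ // Report the number of locally owned elements; ghost entries are not
+ // counted here.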
+ return locally_owned_size();
}
* This function returns the number of elements stored. It is smaller or
* equal to the dimension of the vector space that is modeled by an object
* of this kind. This dimension is returned by size().
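+ *
+ * For example, for a vector initialized as
+ * @code
+ *   IndexSet set(10);
+ *   set.add_range(2, 5);
+ *   LinearAlgebra::ReadWriteVector<double> v(set);
+ * @endcode
+ * n_elements() (and, equivalently, local_size()) returns 3, whereas
+ * size() returns 10.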
+ *
+ * @deprecated Use local_size() instead.
*/
+ DEAL_II_DEPRECATED
size_type
n_elements() const;
+ /**
+ * Return the local size of the vector, i.e., the number of indices
+ * owned locally.
+ */
+ size_type
+ local_size() const;
+
+
/**
* Return the IndexSet that represents the indices of the elements stored.
*/
+ template <typename Number>
+ inline typename ReadWriteVector<Number>::size_type
+ ReadWriteVector<Number>::local_size() const
+ {
+ return stored_elements.n_elements();
+ }
+
+
+
template <typename Number>
inline const IndexSet &
ReadWriteVector<Number>::get_stored_elements() const
virtual size_type
size() const override;
+ /**
+ * Return the local size of the vector, i.e., the number of indices
+ * owned locally.
+ */
+ size_type
+ local_size() const;
+
/**
* Return the MPI communicator object in use with this object.
*/
virtual size_type
size() const override;
+ /**
+ * Return the local size of the vector, i.e., the number of indices
+ * owned locally.
+ */
+ size_type
+ local_size() const;
+
/**
* Return the MPI communicator object in use with this object.
*/
+ template <typename Number>
+ typename Vector<Number>::size_type
+ Vector<Number>::local_size() const
+ {
+ return vector->getLocalLength();
+ }
+
+
+
template <typename Number>
MPI_Comm
Vector<Number>::get_mpi_communicator() const
size_type
size() const;
+ /**
+ * Return the local dimension of the vector. Since this vector does not
+ * support distributed data, this is always equal to the value returned by
+ * size().
+ *
+ * @note This function exists for compatibility with
+ * LinearAlgebra::ReadWriteVector.
+ */
+ size_type
+ local_size() const;
+
/**
* Return whether the vector contains only elements with value zero. This
* function is mainly for internal consistency checks and should seldom be
}
+
+template <typename Number>
+inline typename Vector<Number>::size_type
+Vector<Number>::local_size() const
+{
+ return values.size();
+}
+
+
+
template <typename Number>
inline bool
Vector<Number>::in_local_range(const size_type) const
+ Vector::size_type
+ Vector::local_size() const
+ {
+ return vector->MyLength();
+ }
+
+
+
MPI_Comm
Vector::get_mpi_communicator() const
{
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// check Vector::local_size() for all supported vector types
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_vector.h>
+#include <deal.II/lac/petsc_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/trilinos_epetra_vector.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include "../tests.h"
+
+
+template <typename VEC>
+void
+check_serial()
+{
+ const auto dofs_per_proc = 4;
+ VEC vec(dofs_per_proc);
+ deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
+ deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "size: " << vec.size() << std::endl;
+}
+
+
+
+template <typename VEC>
+void
+check_unghosted_parallel()
+{
+ const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ const auto dofs_per_proc = 4;
+ const auto n_dofs = dofs_per_proc*n_procs;
+
+ IndexSet local_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc*my_rank;
+ const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ local_indices.add_range(my_dofs_begin, my_dofs_end);
+ local_indices.compress();
+
+ VEC vec(local_indices, MPI_COMM_WORLD);
+ deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
+ deallog << "index set size: " << local_indices.n_elements() << std::endl;
+ deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "size: " << vec.size() << std::endl;
+}
+
+
+
+template <typename VEC>
+void
+check_ghosted_parallel()
+{
+ const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ const auto dofs_per_proc = 4;
+ const auto n_dofs = dofs_per_proc*n_procs;
+
+ IndexSet local_indices(n_dofs);
+ IndexSet ghost_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc*my_rank;
+ const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ local_indices.add_range(my_dofs_begin, my_dofs_end);
+ local_indices.compress();
+ if (my_rank == 0)
+ {
+ ghost_indices.add_index(my_dofs_end);
+ }
+ else
+ {
+ ghost_indices.add_index(my_dofs_begin - 1);
+ ghost_indices.add_index(my_dofs_end % n_dofs);
+ }
+
+ VEC vec(local_indices, ghost_indices, MPI_COMM_WORLD);
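+ // Note: whether ghost entries are counted by local_size() depends on the
+ // vector type. In the expected output, the Trilinos vector reports its
+ // owned plus ghost elements here, whereas the distributed and PETSc
+ // vectors report only the locally owned ones.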
+ deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
+ deallog << "index set size: " << local_indices.n_elements() << std::endl;
+ deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "size: " << vec.size() << std::endl;
+}
+
+
+
+template <typename VEC>
+void
+check_ghosted_parallel_block()
+{
+ const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const auto my_rank = dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ const auto dofs_per_proc = 4;
+ const auto n_dofs = dofs_per_proc*n_procs;
+
+ IndexSet local_indices(n_dofs);
+ IndexSet ghost_indices(n_dofs);
+ const auto my_dofs_begin = dofs_per_proc*my_rank;
+ const auto my_dofs_end = dofs_per_proc*(my_rank + 1);
+ local_indices.add_range(my_dofs_begin, my_dofs_end);
+ local_indices.compress();
+ if (my_rank == 0)
+ {
+ ghost_indices.add_index(my_dofs_end);
+ }
+ else
+ {
+ ghost_indices.add_index(my_dofs_begin - 1);
+ ghost_indices.add_index(my_dofs_end % n_dofs);
+ }
+
+ std::vector<IndexSet> local_blocks {local_indices, local_indices};
+ // for variety do not ghost the second component
+ std::vector<IndexSet> ghost_blocks {ghost_indices, IndexSet()};
+
+ VEC vec(local_blocks, ghost_blocks, MPI_COMM_WORLD);
+ deallog << "type: " << Utilities::type_to_string(vec) << std::endl;
+ deallog << "local size: " << vec.local_size() << std::endl;
+ deallog << "size: " << vec.size() << std::endl;
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ // non-block vectors:
+ check_serial<Vector<double>>();
+ check_serial<LinearAlgebra::Vector<double>>();
+
+ check_unghosted_parallel<LinearAlgebra::EpetraWrappers::Vector>();
+ check_unghosted_parallel<LinearAlgebra::TpetraWrappers::Vector<double>>();
+
+ check_ghosted_parallel<LinearAlgebra::distributed::Vector<double>>();
+ check_ghosted_parallel<PETScWrappers::MPI::Vector>();
+ check_ghosted_parallel<TrilinosWrappers::MPI::Vector>();
+
+ // block vectors:
+ check_ghosted_parallel_block<LinearAlgebra::distributed::BlockVector<double>>();
+ check_ghosted_parallel_block<PETScWrappers::MPI::BlockVector>();
+ check_ghosted_parallel_block<TrilinosWrappers::MPI::BlockVector>();
+}
--- /dev/null
+
+DEAL:0::type: dealii::Vector<double>
+DEAL:0::local size: 4
+DEAL:0::size: 4
+DEAL:0::type: dealii::LinearAlgebra::Vector<double>
+DEAL:0::local size: 4
+DEAL:0::size: 4
+DEAL:0::type: dealii::LinearAlgebra::EpetraWrappers::Vector
+DEAL:0::index set size: 4
+DEAL:0::local size: 4
+DEAL:0::size: 16
+DEAL:0::type: dealii::LinearAlgebra::TpetraWrappers::Vector<double>
+DEAL:0::index set size: 4
+DEAL:0::local size: 4
+DEAL:0::size: 16
+DEAL:0::type: dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::Host>
+DEAL:0::index set size: 4
+DEAL:0::local size: 4
+DEAL:0::size: 16
+DEAL:0::type: dealii::PETScWrappers::MPI::Vector
+DEAL:0::index set size: 4
+DEAL:0::local size: 4
+DEAL:0::size: 16
+DEAL:0::type: dealii::TrilinosWrappers::MPI::Vector
+DEAL:0::index set size: 4
+DEAL:0::local size: 5
+DEAL:0::size: 16
+DEAL:0::type: dealii::LinearAlgebra::distributed::BlockVector<double>
+DEAL:0::local size: 8
+DEAL:0::size: 32
+DEAL:0::type: dealii::PETScWrappers::MPI::BlockVector
+DEAL:0::local size: 8
+DEAL:0::size: 32
+DEAL:0::type: dealii::TrilinosWrappers::MPI::BlockVector
+DEAL:0::local size: 9
+DEAL:0::size: 32
+
+DEAL:1::type: dealii::Vector<double>
+DEAL:1::local size: 4
+DEAL:1::size: 4
+DEAL:1::type: dealii::LinearAlgebra::Vector<double>
+DEAL:1::local size: 4
+DEAL:1::size: 4
+DEAL:1::type: dealii::LinearAlgebra::EpetraWrappers::Vector
+DEAL:1::index set size: 4
+DEAL:1::local size: 4
+DEAL:1::size: 16
+DEAL:1::type: dealii::LinearAlgebra::TpetraWrappers::Vector<double>
+DEAL:1::index set size: 4
+DEAL:1::local size: 4
+DEAL:1::size: 16
+DEAL:1::type: dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::Host>
+DEAL:1::index set size: 4
+DEAL:1::local size: 4
+DEAL:1::size: 16
+DEAL:1::type: dealii::PETScWrappers::MPI::Vector
+DEAL:1::index set size: 4
+DEAL:1::local size: 4
+DEAL:1::size: 16
+DEAL:1::type: dealii::TrilinosWrappers::MPI::Vector
+DEAL:1::index set size: 4
+DEAL:1::local size: 6
+DEAL:1::size: 16
+DEAL:1::type: dealii::LinearAlgebra::distributed::BlockVector<double>
+DEAL:1::local size: 8
+DEAL:1::size: 32
+DEAL:1::type: dealii::PETScWrappers::MPI::BlockVector
+DEAL:1::local size: 8
+DEAL:1::size: 32
+DEAL:1::type: dealii::TrilinosWrappers::MPI::BlockVector
+DEAL:1::local size: 10
+DEAL:1::size: 32
+
+
+DEAL:2::type: dealii::Vector<double>
+DEAL:2::local size: 4
+DEAL:2::size: 4
+DEAL:2::type: dealii::LinearAlgebra::Vector<double>
+DEAL:2::local size: 4
+DEAL:2::size: 4
+DEAL:2::type: dealii::LinearAlgebra::EpetraWrappers::Vector
+DEAL:2::index set size: 4
+DEAL:2::local size: 4
+DEAL:2::size: 16
+DEAL:2::type: dealii::LinearAlgebra::TpetraWrappers::Vector<double>
+DEAL:2::index set size: 4
+DEAL:2::local size: 4
+DEAL:2::size: 16
+DEAL:2::type: dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::Host>
+DEAL:2::index set size: 4
+DEAL:2::local size: 4
+DEAL:2::size: 16
+DEAL:2::type: dealii::PETScWrappers::MPI::Vector
+DEAL:2::index set size: 4
+DEAL:2::local size: 4
+DEAL:2::size: 16
+DEAL:2::type: dealii::TrilinosWrappers::MPI::Vector
+DEAL:2::index set size: 4
+DEAL:2::local size: 6
+DEAL:2::size: 16
+DEAL:2::type: dealii::LinearAlgebra::distributed::BlockVector<double>
+DEAL:2::local size: 8
+DEAL:2::size: 32
+DEAL:2::type: dealii::PETScWrappers::MPI::BlockVector
+DEAL:2::local size: 8
+DEAL:2::size: 32
+DEAL:2::type: dealii::TrilinosWrappers::MPI::BlockVector
+DEAL:2::local size: 10
+DEAL:2::size: 32
+
+
+DEAL:3::type: dealii::Vector<double>
+DEAL:3::local size: 4
+DEAL:3::size: 4
+DEAL:3::type: dealii::LinearAlgebra::Vector<double>
+DEAL:3::local size: 4
+DEAL:3::size: 4
+DEAL:3::type: dealii::LinearAlgebra::EpetraWrappers::Vector
+DEAL:3::index set size: 4
+DEAL:3::local size: 4
+DEAL:3::size: 16
+DEAL:3::type: dealii::LinearAlgebra::TpetraWrappers::Vector<double>
+DEAL:3::index set size: 4
+DEAL:3::local size: 4
+DEAL:3::size: 16
+DEAL:3::type: dealii::LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::Host>
+DEAL:3::index set size: 4
+DEAL:3::local size: 4
+DEAL:3::size: 16
+DEAL:3::type: dealii::PETScWrappers::MPI::Vector
+DEAL:3::index set size: 4
+DEAL:3::local size: 4
+DEAL:3::size: 16
+DEAL:3::type: dealii::TrilinosWrappers::MPI::Vector
+DEAL:3::index set size: 4
+DEAL:3::local size: 6
+DEAL:3::size: 16
+DEAL:3::type: dealii::LinearAlgebra::distributed::BlockVector<double>
+DEAL:3::local size: 8
+DEAL:3::size: 32
+DEAL:3::type: dealii::PETScWrappers::MPI::BlockVector
+DEAL:3::local size: 8
+DEAL:3::size: 32
+DEAL:3::type: dealii::TrilinosWrappers::MPI::BlockVector
+DEAL:3::local size: 10
+DEAL:3::size: 32
+