using MapType = Tpetra::Map<int, dealii::types::signed_global_dof_index>;
using VectorType =
Tpetra::Vector<Number, int, dealii::types::signed_global_dof_index>;
+ using ExportType =
+ Tpetra::Export<int, dealii::types::signed_global_dof_index>;
+ using ImportType =
+ Tpetra::Import<int, dealii::types::signed_global_dof_index>;
/**
* @name 1: Basic Object-handling
reference
operator()(const size_type index);
+ /**
+ * Provide read-only access to an element.
+ *
+ * When using a vector distributed with MPI, this operation only makes
+ * sense for elements that are actually present on the calling processor.
+ * Otherwise, an exception is thrown.
+ */
+ Number
+ operator()(const size_type index) const;
+
+ /**
+ * Provide access to a given element, both read and write.
+ *
+ * Exactly the same as operator().
+ */
+ reference
+ operator[](const size_type index);
+
+ /**
+ * Provide read-only access to an element.
+ *
+ * Exactly the same as operator().
+ */
+ Number
+ operator[](const size_type index) const;
+
/** @} */
/** @{ */
/**
- * This function always returns false and is present only for backward
- * compatibility.
+ * Return whether the vector has ghost elements or not.
*/
bool
has_ghost_elements() const;
size_type
locally_owned_size() const;
+ /**
+ * Return the state of the vector, i.e., whether the vector is compressed
+ * and compress() does not need to be called before further operations.
+ * A call to compress() is needed after operations that require data
+ * exchange, such as set() or add().
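+ *
+ * For example, a compress() call may be guarded like this (sketch):
+ * @code
+ * if (!v.is_compressed())
+ *   v.compress(VectorOperation::add);
+ * @endcode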
+ */
+ bool
+ is_compressed() const;
+
/**
* Return the underlying MPI communicator.
*/
* necessary after writing into a vector element-by-element and before
* anything else can be done on it.
*
+ * @param operation The compress mode (<code>Add</code> or <code>Insert</code>),
+ * i.e., whether contributions that were written into ghost entries should
+ * be added to or should overwrite the value stored on the owning process.
+ *
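+ * A minimal usage sketch, assuming @p locally_owned_dofs and
+ * @p locally_relevant_dofs are IndexSet objects describing the
+ * application's own partitioning and @p mpi_communicator is the
+ * associated communicator:
+ * @code
+ * LinearAlgebra::TpetraWrappers::Vector<double> v;
+ * v.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
+ * v(some_locally_active_index) += local_contribution; // element-wise write
+ * v.compress(VectorOperation::add); // exchange the added contributions
+ * @endcode
+ *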
* See
* @ref GlossCompress "Compressing distributed objects"
* for more information.
*/
DeclException0(ExcVectorTypeNotCompatible);
+ /**
+ * Access to an element that is not (locally) owned.
+ *
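+ * This kind of access can usually be avoided by initializing the vector
+ * with both an owned and a ghost IndexSet, e.g. (sketch; the index sets
+ * and the communicator are placeholders):
+ * @code
+ * v.reinit(locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
+ * @endcode
+ *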
+ * @ingroup Exceptions
+ */
+ DeclException4(
+ ExcAccessToNonLocalElement,
+ size_type,
+ size_type,
+ size_type,
+ size_type,
+ << "You are trying to access element " << arg1
+ << " of a distributed vector, but this element is not stored "
+ << "on the current processor. Note: There are " << arg2
+ << " elements stored "
+ << "on the current processor from within the range [" << arg3 << ','
+ << arg4 << "] but Trilinos vectors need not store contiguous "
+ << "ranges on each processor, and not every element in "
+ << "this range may in fact be stored locally."
+ << "\n\n"
+ << "A common source for this kind of problem is that you "
+ << "are passing a 'fully distributed' vector into a function "
+ << "that needs read access to vector elements that correspond "
+ << "to degrees of freedom on ghost cells (or at least to "
+ << "'locally active' degrees of freedom that are not also "
+ << "'locally owned'). You need to pass a vector that has these "
+ << "elements as ghost entries.");
+
+ /**
+ * Missing index set.
+ *
+ * @ingroup Exceptions
+ */
+ DeclExceptionMsg(ExcMissingIndexSet,
+ "To compress a vector, a locally_relevant_dofs "
+ "index set, and a locally_owned_dofs index set "
+ "must be provided. These index sets must be "
+ "provided either when the vector is initialized "
+ "or when compress is called. See the documentation "
+ "of compress() for more information.");
+
/**
* Exception thrown by an error in Trilinos.
*
create_tpetra_comm_pattern(const IndexSet &source_index_set,
const MPI_Comm mpi_comm);
+ /**
+ * Store whether the vector has ghost elements or not.
+ *
+ * If the vector has no ghost elements, only entries contained in the
+ * locally owned index set can be accessed and modified. If the vector
+ * does have ghost elements, entries contained in the locally relevant
+ * index set can be accessed and modified as well.
+ */
+ bool has_ghost;
+
/**
* Teuchos::RCP to the actual Tpetra vector object.
*/
Teuchos::RCP<VectorType> vector;
+ /**
+ * Pointer to the Tpetra::Map storing the locally owned elements of
+ * each rank. This map is one-to-one, i.e., every element is owned by
+ * exactly one rank.
+ */
+ Teuchos::RCP<MapType> locally_owned_map;
+
+ /**
+ * Pointer to the Tpetra::Map storing the locally relevant elements of
+ * each rank. Some entries of this map may belong to multiple ranks,
+ * i.e., the map is in general not one-to-one.
+ */
+ Teuchos::RCP<MapType> locally_relevant_map;
+
/**
* IndexSet of the elements of the last imported vector.
*/
- ::dealii::IndexSet source_stored_elements;
+ dealii::IndexSet source_stored_elements;
/**
* CommunicationPattern for the communication between the
inline bool
Vector<Number>::has_ghost_elements() const
{
- return false;
+ return has_ghost;
+ }
+
+
+
+ template <typename Number>
+ inline bool
+ Vector<Number>::is_compressed() const
+ {
+ return !has_ghost;
}
for (size_type i = 0; i < n_elements; ++i)
{
- const size_type row = indices[i];
- const TrilinosWrappers::types::int_type local_row =
+ const size_type row = indices[i];
+ TrilinosWrappers::types::int_type local_row =
vector->getMap()->getLocalElement(row);
+# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getLocalNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# else
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getNodeNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# endif
+
if (local_row != Teuchos::OrdinalTraits<int>::invalid())
vector_1d(local_row) += values[i];
}
for (size_type i = 0; i < n_elements; ++i)
{
- const size_type row = indices[i];
- const TrilinosWrappers::types::int_type local_row =
+ const size_type row = indices[i];
+ TrilinosWrappers::types::int_type local_row =
vector->getMap()->getLocalElement(row);
+# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getLocalNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# else
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getNodeNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# endif
+
if (local_row != Teuchos::OrdinalTraits<int>::invalid())
vector_1d(local_row) = values[i];
}
return internal::VectorReference(*this, index);
}
+ template <typename Number>
+ inline internal::VectorReference<Number>
+ Vector<Number>::operator[](const size_type index)
+ {
+ return operator()(index);
+ }
+
+ template <typename Number>
+ inline Number
+ Vector<Number>::operator[](const size_type index) const
+ {
+ return operator()(index);
+ }
+
# ifndef DOXYGEN
// VectorReference
template <typename Number>
Vector<Number>::Vector()
: Subscriptor()
+ , has_ghost(false)
, vector(Utilities::Trilinos::internal::make_rcp<VectorType>(
Utilities::Trilinos::internal::make_rcp<MapType>(
0,
template <typename Number>
Vector<Number>::Vector(const Vector<Number> &V)
: Subscriptor()
+ , has_ghost(V.has_ghost)
, vector(Utilities::Trilinos::internal::make_rcp<VectorType>(
V.trilinos_vector(),
Teuchos::Copy))
+ , locally_owned_map(V.locally_owned_map)
+ , locally_relevant_map(V.locally_relevant_map)
{}
template <typename Number>
Vector<Number>::Vector(const Teuchos::RCP<VectorType> V)
: Subscriptor()
+ , has_ghost(false)
, vector(V)
- {}
+ {
+ if (vector->getMap()->isOneToOne())
+ {
+ locally_owned_map =
+ Teuchos::rcp_const_cast<MapType>(vector->getMap());
+ }
+ else
+ {
+ locally_relevant_map =
+ Teuchos::rcp_const_cast<MapType>(vector->getMap());
+ }
+ }
Vector<Number>::Vector(const IndexSet ¶llel_partitioner,
const MPI_Comm communicator)
: Subscriptor()
- , vector(Utilities::Trilinos::internal::make_rcp<VectorType>(
- parallel_partitioner.make_tpetra_map_rcp(communicator, false)))
- {}
+ , has_ghost(false)
+ , locally_owned_map(
+ parallel_partitioner.make_tpetra_map_rcp(communicator, false))
+ {
+ vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+ locally_owned_map);
+ }
const MPI_Comm communicator,
const bool omit_zeroing_entries)
{
- Teuchos::RCP<MapType> input_map =
+ // release memory before reallocation
+ vector.reset();
+ locally_owned_map.reset();
+ locally_relevant_map.reset();
+ source_stored_elements.clear();
+
+ // Here we assume that the index set is non-overlapping. If it is
+ // overlapping, make_tpetra_map_rcp() will trigger an assertion.
+ has_ghost = false;
+ locally_owned_map =
parallel_partitioner.make_tpetra_map_rcp(communicator, false);
- if (vector->getMap()->isSameAs(*input_map) == false)
- vector = Utilities::Trilinos::internal::make_rcp<VectorType>(input_map);
- else if (omit_zeroing_entries == false)
+ // vector was released above, so it has to be allocated anew with the
+ // new map in any case
+ vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+ locally_owned_map);
+ if (!omit_zeroing_entries)
{
vector->putScalar(0.);
}
const IndexSet &ghost_entries,
const MPI_Comm communicator)
{
+ // release memory before reallocation
+ vector.reset();
+ locally_owned_map.reset();
+ locally_relevant_map.reset();
+
+ locally_owned_map =
+ locally_owned_entries.make_tpetra_map_rcp(communicator, false);
+
IndexSet parallel_partitioner = locally_owned_entries;
parallel_partitioner.add_indices(ghost_entries);
-
- Teuchos::RCP<MapType> input_map =
+ locally_relevant_map =
parallel_partitioner.make_tpetra_map_rcp(communicator, true);
- vector = Utilities::Trilinos::internal::make_rcp<VectorType>(input_map);
+ vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+ locally_relevant_map);
+
+ has_ghost = false;
}
ArrayView<Number> &elements) const
{
AssertDimension(indices.size(), elements.size());
- const auto &vector = trilinos_vector();
- const auto &map = vector.getMap();
# if DEAL_II_TRILINOS_VERSION_GTE(13, 2, 0)
- auto vector_2d = vector.template getLocalView<Kokkos::HostSpace>(
+ auto vector_2d = vector->template getLocalView<Kokkos::HostSpace>(
Tpetra::Access::ReadOnly);
# else
/*
* Let us choose to simply ignore this problem for such an old
* Trilinos version.
*/
- auto vector_2d = vector.template getLocalView<Kokkos::HostSpace>();
+ auto vector_2d = vector->template getLocalView<Kokkos::HostSpace>();
# endif
auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
for (unsigned int i = 0; i < indices.size(); ++i)
{
AssertIndexRange(indices[i], size());
- const auto trilinos_i = map->getLocalElement(
- static_cast<TrilinosWrappers::types::int_type>(indices[i]));
- elements[i] = vector_1d(trilinos_i);
+ const size_type row = indices[i];
+ TrilinosWrappers::types::int_type local_row =
+ vector->getMap()->getLocalElement(row);
+
+ // Preprocessor directives must not appear inside macro arguments, so
+ // the Assert is duplicated for the two Trilinos versions.
+# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getLocalNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# else
+ Assert(
+ local_row != Teuchos::OrdinalTraits<int>::invalid(),
+ ExcAccessToNonLocalElement(row,
+ vector->getMap()->getNodeNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# endif
+
+ if (local_row != Teuchos::OrdinalTraits<int>::invalid())
+ elements[i] = vector_1d(local_row);
}
}
// - First case: both vectors have the same layout.
// - Second case: both vectors have the same size but different layout.
// - Third case: the vectors have different size.
- if (vector->getMap()->isSameAs(*(V.trilinos_vector().getMap())))
- *vector = V.trilinos_vector();
+ if (vector->getMap()->isSameAs(*V.vector->getMap()))
+ {
+ *vector = *V.vector;
+ }
+ else if (size() == V.size())
+ {
+ // We expect the source vector to have a one-to-one map; otherwise
+ // we cannot call doImport().
+ Assert(V.vector->getMap()->isOneToOne(),
+ ExcMessage(
+ "You are trying to map one vector distributed "
+ "between processors, where some elements belong "
+ "to multiple processors, onto another distribution "
+ "pattern, where some elements belong to multiple "
+ "processors. It is unclear how to deal with elements "
+ "in the vector belonging to multiple processors. "
+ "Therefore, compress() must be called on this "
+ "vector first."));
+
+ Teuchos::RCP<const ImportType> importer =
+ Tpetra::createImport(V.vector->getMap(), vector->getMap());
+
+ // Since we are importing from a one-to-one map, we can always use
+ // Tpetra::INSERT (i.e., VectorOperation::insert) here.
+ vector->doImport(*V.vector, *importer, Tpetra::INSERT);
+ }
else
{
- if (size() == V.size())
- {
- Tpetra::Import<int, types::signed_global_dof_index> data_exchange(
- vector->getMap(), V.trilinos_vector().getMap());
-
- vector->doImport(V.trilinos_vector(),
- data_exchange,
- Tpetra::REPLACE);
- }
- else
- vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
- V.trilinos_vector());
+ vector.reset();
+ vector = Utilities::Trilinos::internal::make_rcp<VectorType>(
+ V.vector->getMap());
+ Tpetra::deep_copy(*vector, *V.vector);
+
+ has_ghost = V.has_ghost;
+ locally_owned_map = V.locally_owned_map;
+ locally_relevant_map = V.locally_relevant_map;
+ source_stored_elements = V.source_stored_elements;
+ tpetra_comm_pattern = V.tpetra_comm_pattern;
}
return *this;
"LinearAlgebra::TpetraWrappers::CommunicationPattern."));
}
- Teuchos::RCP<const Tpetra::Export<int, types::signed_global_dof_index>>
- tpetra_export = tpetra_comm_pattern->get_tpetra_export_rcp();
+ Teuchos::RCP<const ExportType> tpetra_export =
+ tpetra_comm_pattern->get_tpetra_export_rcp();
VectorType source_vector(tpetra_export->getSourceMap());
device_type::memory_space>();
# endif
}
+ Tpetra::CombineMode tpetra_operation = Tpetra::ZERO;
if (operation == VectorOperation::insert)
- vector->doExport(source_vector, *tpetra_export, Tpetra::REPLACE);
+ tpetra_operation = Tpetra::INSERT;
else if (operation == VectorOperation::add)
- vector->doExport(source_vector, *tpetra_export, Tpetra::ADD);
+ tpetra_operation = Tpetra::ADD;
else
- AssertThrow(false, ExcNotImplemented());
+ Assert(false, ExcNotImplemented());
+
+ vector->doExport(source_vector, *tpetra_export, tpetra_operation);
}
}
+
template <typename Number>
void
Vector<Number>::import_elements(const ReadWriteVector<Number> &V,
// elements, maybe there is a better workaround.
Tpetra::Vector<Number, int, types::signed_global_dof_index> dummy(
vector->getMap(), false);
- Tpetra::Import<int, types::signed_global_dof_index> data_exchange(
- V.trilinos_vector().getMap(), dummy.getMap());
-
+ ImportType data_exchange(V.trilinos_vector().getMap(),
+ dummy.getMap());
dummy.doImport(V.trilinos_vector(), data_exchange, Tpetra::INSERT);
vector->update(1.0, dummy, 1.0);
+ template <typename Number>
+ Number
+ Vector<Number>::operator()(const size_type index) const
+ {
+ // Get the local index
+ const TrilinosWrappers::types::int_type local_index =
+ vector->getMap()->getLocalElement(
+ static_cast<TrilinosWrappers::types::int_type>(index));
+
+ Number value = 0.0;
+
+ // If the element is not present on the current processor, we can't
+ // continue. This is the main difference to the el() function.
+ if (local_index == Teuchos::OrdinalTraits<int>::invalid())
+ {
+# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
+ Assert(
+ false,
+ ExcAccessToNonLocalElement(index,
+ vector->getMap()->getLocalNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# else
+ Assert(
+ false,
+ ExcAccessToNonLocalElement(index,
+ vector->getMap()->getNodeNumElements(),
+ vector->getMap()->getMinLocalIndex(),
+ vector->getMap()->getMaxLocalIndex()));
+# endif
+ }
+ else
+ value = vector->getData()[local_index];
+
+ return value;
+ }
+
+
+
template <typename Number>
void
Vector<Number>::add(const Number a)
template <typename Number>
void
- Vector<Number>::compress(const VectorOperation::values /*operation*/)
- {}
+ Vector<Number>::compress(const VectorOperation::values operation)
+ {
+ Assert(has_ghost == false,
+ ExcMessage("Calling compress() is only useful if a vector "
+ "has been written into, but this is a vector with ghost "
+ "elements and consequently is read-only. It does "
+ "not make sense to call compress() for such "
+ "vectors."));
+
+ if (!vector->getMap()->isOneToOne())
+ {
+ Tpetra::CombineMode tpetra_operation = Tpetra::ZERO;
+ if (operation == VectorOperation::insert)
+ tpetra_operation = Tpetra::INSERT;
+ else if (operation == VectorOperation::add)
+ tpetra_operation = Tpetra::ADD;
+ else
+ Assert(false, ExcNotImplemented());
+
+ if (locally_relevant_map.is_null())
+ locally_relevant_map =
+ Teuchos::rcp_const_cast<MapType>(vector->getMap());
+
+ Assert(!locally_relevant_map.is_null(), ExcMissingIndexSet());
+ Assert(!locally_owned_map.is_null(), ExcMissingIndexSet());
+
+ VectorType dummy = VectorType(locally_owned_map.getConst());
+
+ Teuchos::RCP<const ExportType> exporter =
+ Tpetra::createExport(locally_relevant_map.getConst(),
+ locally_owned_map.getConst());
+ dummy.doExport(*vector, *exporter, tpetra_operation);
+
+ *vector = dummy;
+ }
+ }
+
template <typename Number>
auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
const size_t local_length = vector->getLocalLength();
- if (across)
- for (unsigned int i = 0; i < local_length; ++i)
- out << vector_1d(i) << ' ';
+ if (size() != local_length)
+ {
+ out << "size:" << size() << " locally_owned_size:" << local_length
+ << " :" << std::endl;
+ for (size_type i = 0; i < local_length; ++i)
+ {
+ const TrilinosWrappers::types::int_type global_row =
+ vector->getMap()->getGlobalElement(i);
+ out << "[" << global_row << "]: " << vector_1d(i) << std::endl;
+ }
+ }
else
- for (unsigned int i = 0; i < local_length; ++i)
- out << vector_1d(i) << std::endl;
- out << std::endl;
+ {
+ if (across)
+ for (unsigned int i = 0; i < local_length; ++i)
+ out << vector_1d(i) << ' ';
+ else
+ for (unsigned int i = 0; i < local_length; ++i)
+ out << vector_1d(i) << std::endl;
+ out << std::endl;
+ }
// restore the representation
// of the vector
const MPI_Comm mpi_comm)
{
source_stored_elements = source_index_set;
- tpetra_comm_pattern = Utilities::Trilinos::internal::make_rcp<
- TpetraWrappers::CommunicationPattern>(locally_owned_elements(),
- source_index_set,
- mpi_comm);
+
+ tpetra_comm_pattern =
+ Teuchos::rcp(new TpetraWrappers::CommunicationPattern(
+ locally_owned_elements(), source_index_set, mpi_comm));
}
} // namespace TpetraWrappers
} // namespace LinearAlgebra