@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_TPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_TPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR_REAL@;
@DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_TPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR_REAL@;
}
EXTERNAL_PARALLEL_VECTORS := { @DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_TPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_BLOCKVECTOR@
}
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
+ @DEAL_II_EXPAND_TPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
}
)
FOREACH(_module
- Amesos Epetra Ifpack AztecOO Teuchos ML MueLu
+ Amesos Epetra Ifpack AztecOO Teuchos Tpetra ML MueLu
)
ITEM_MATCHES(_module_found ${_module} ${Trilinos_PACKAGE_LIST})
IF(_module_found)
SET(DEAL_II_EXPAND_TRILINOS_MPI_VECTOR "TrilinosWrappers::MPI::Vector")
IF (TRILINOS_WITH_MPI)
SET(DEAL_II_EXPAND_EPETRA_VECTOR "LinearAlgebra::EpetraWrappers::Vector")
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR "LinearAlgebra::TpetraWrappers::Vector")
ENDIF()
IF(${DEAL_II_TRILINOS_WITH_SACADO})
# Note: Only CMake 3.0 and greater support line continuation with the "\" character
<li> ROL (optional),
<li> Sacado (optional),
<li> Teuchos,
+ <li> Tpetra,
<li> Zoltan (optional).
</ul>
-DTrilinos_ENABLE_MueLu=ON \
-DTrilinos_ENABLE_ML=ON \
-DTrilinos_ENABLE_ROL=ON \
+ -DTrilinos_ENABLE_Tpetra=ON \
-DTrilinos_ENABLE_Zoltan=ON \
-DTrilinos_VERBOSE_CONFIGURE=OFF \
-DTPL_ENABLE_MPI=ON \
#ifdef DEAL_II_WITH_TRILINOS
# include <Epetra_Map.h>
+# include <Tpetra_Map.hpp>
#endif
#if defined(DEAL_II_WITH_MPI) || defined(DEAL_II_WITH_PETSC)
Epetra_Map
make_trilinos_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
const bool overlapping = false) const;
+
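+  /**
+   * Same as make_trilinos_map(), but create a Tpetra::Map that describes how
+   * the elements of this index set are distributed among the processors of
+   * the given communicator. If @p overlapping is false, the elements must
+   * form a partition, i.e., each index has to be owned by exactly one
+   * processor.
+   */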
+ Tpetra::Map<>
+ make_tpetra_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
+ const bool overlapping = false) const;
#endif
const Epetra_Comm &
comm_self();
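+  /**
+   * Return a Teuchos communicator object that comprises only the current
+   * processor, i.e., a wrapper around MPI_COMM_SELF. This is the Tpetra
+   * counterpart of comm_self().
+   */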
+ const Teuchos::RCP<const Teuchos::Comm<int>> &
+ tpetra_comm_self();
+
/**
* Given a communicator, duplicate it. If the given communicator is
* serial, that means to just return a copy of itself. On the other hand,
+ template <typename ForwardIterator>
+ static void
+ extract_subvector_to(const LinearAlgebra::TpetraWrappers::Vector &values,
+ const types::global_dof_index *cache_begin,
+ const types::global_dof_index *cache_end,
+ ForwardIterator local_values_begin)
+ {
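+    // Follow the same strategy as the Epetra overload: determine the sorted
+    // order of the requested global indices, gather the corresponding entries
+    // of the distributed vector through a ReadWriteVector import, and finally
+    // copy them into the caller's output range.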
+ std::vector<unsigned int> sorted_indices_pos =
+ sort_indices(cache_begin, cache_end);
+ const unsigned int cache_size = cache_end - cache_begin;
+ std::vector<types::global_dof_index> cache_indices(cache_size);
+ for (unsigned int i = 0; i < cache_size; ++i)
+ cache_indices[i] = *(cache_begin + sorted_indices_pos[i]);
+
+ IndexSet index_set(cache_indices.back() + 1);
+ index_set.add_indices(cache_indices.begin(), cache_indices.end());
+ index_set.compress();
+ LinearAlgebra::ReadWriteVector<double> read_write_vector(index_set);
+ read_write_vector.import(values, VectorOperation::insert);
+
+ // Copy the elements from read_write_vector and reorder them.
+ for (unsigned int i = 0; i < cache_size; ++i, ++local_values_begin)
+ *local_values_begin = read_write_vector[sorted_indices_pos[i]];
+ }
+
+
+
template <typename ForwardIterator>
static void
extract_subvector_to(const LinearAlgebra::EpetraWrappers::Vector &values,
# ifdef DEAL_II_WITH_MPI
+ template <int dim, int spacedim>
+ void
+ reinit_distributed(const DoFHandler<dim, spacedim> & dh,
+ LinearAlgebra::TpetraWrappers::Vector &vector)
+ {
+ const parallel::distributed::Triangulation<dim, spacedim> *parallel_tria =
+ dynamic_cast<
+ const parallel::distributed::Triangulation<dim, spacedim> *>(
+ &dh.get_triangulation());
+ Assert(parallel_tria != nullptr, ExcNotImplemented());
+
+ const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
+ vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
+ }
+
template <int dim, int spacedim>
void
reinit_distributed(const DoFHandler<dim, spacedim> & dh,
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
{
AssertThrow(false, ExcNotImplemented());
}
+
+ template <int dim, int spacedim>
+ void
+ back_interpolate(
+ const DoFHandler<dim, spacedim> &,
+ const AffineConstraints<
+ typename LinearAlgebra::TpetraWrappers::Vector::value_type> &,
+ const LinearAlgebra::TpetraWrappers::Vector &,
+ const DoFHandler<dim, spacedim> &,
+ const AffineConstraints<
+ typename LinearAlgebra::TpetraWrappers::Vector::value_type> &,
+ LinearAlgebra::TpetraWrappers::Vector &)
+ {
+ AssertThrow(false, ExcNotImplemented());
+ }
#endif
#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_epetra_communication_pattern.h>
# include <deal.II/lac/trilinos_epetra_vector.h>
+# include <deal.II/lac/trilinos_tpetra_vector.h>
# include <Epetra_MultiVector.h>
std::shared_ptr<const CommunicationPatternBase>());
# ifdef DEAL_II_WITH_MPI
+ /**
+ * Imports all the elements present in the vector's IndexSet from the input
+ * vector @p tpetra_vec. VectorOperation::values @p operation is used to
+ * decide if the elements in @p tpetra_vec should be added to the current vector or
+ * replace the current elements. The last parameter can be used if the same
+ * communication pattern is used multiple times. This can be used to improve
+ * performance.
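+ *
+ * A minimal usage sketch (the names @p owned_elements, @p needed_elements,
+ * and @p mpi_comm are purely illustrative):
+ * @code
+ * LinearAlgebra::TpetraWrappers::Vector parallel_vec(owned_elements,
+ *                                                    mpi_comm);
+ * // ... fill parallel_vec ...
+ * LinearAlgebra::ReadWriteVector<double> local_values(needed_elements);
+ * local_values.import(parallel_vec, VectorOperation::insert);
+ * @endcode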
+ */
+ void
+ import(const TpetraWrappers::Vector &tpetra_vec,
+ VectorOperation::values operation,
+ const std::shared_ptr<const CommunicationPatternBase>
+ &communication_pattern =
+ std::shared_ptr<const CommunicationPatternBase>());
+
/**
* Imports all the elements present in the vector's IndexSet from the input
* vector @p epetra_vec. VectorOperation::values @p operation is used to
protected:
#ifdef DEAL_II_WITH_TRILINOS
+ /**
+ * Import all the elements present in the vector's IndexSet from the input
+ * vector @p tpetra_vector. This is a helper function and it should not be
+ * used directly.
+ */
+ void
+ import(const Tpetra::Vector<> &tpetra_vector,
+ const IndexSet & locally_owned_elements,
+ VectorOperation::values operation,
+ const MPI_Comm & mpi_comm,
+ const std::shared_ptr<const CommunicationPatternBase>
+ &communication_pattern);
+
/**
* Import all the elements present in the vector's IndexSet from the input
* vector @p multivector. This is an helper function and it should not be
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
/**
- * Return a EpetraWrappers::Communication pattern and store it for future
+ * Return a TpetraWrappers::CommunicationPattern and store it for future
+ * use.
+ */
+ TpetraWrappers::CommunicationPattern
+ create_tpetra_comm_pattern(const IndexSet &source_index_set,
+ const MPI_Comm &mpi_comm);
+
+ /**
+ * Return an EpetraWrappers::CommunicationPattern and store it for future
* use.
*/
EpetraWrappers::CommunicationPattern
#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/lac/trilinos_epetra_communication_pattern.h>
-# include <deal.II/lac/trilinos_epetra_vector.h>
# include <deal.II/lac/trilinos_vector.h>
# include <Epetra_Import.h>
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+ template <typename Number>
+ void
+ ReadWriteVector<Number>::import(
+ const Tpetra::Vector<> &vector,
+ const IndexSet & source_elements,
+ VectorOperation::values operation,
+ const MPI_Comm & mpi_comm,
+ const std::shared_ptr<const CommunicationPatternBase>
+ &communication_pattern)
+ {
+ std::shared_ptr<const TpetraWrappers::CommunicationPattern>
+ tpetra_comm_pattern;
+
+ // If no communication pattern is given, create one. Otherwise, use the one
+ // given.
+ if (communication_pattern == nullptr)
+ {
+ // The first time import is called, we create a communication pattern.
+ // Check if the communication pattern already exists and if it can be
+ // reused.
+ if ((source_elements.size() == source_stored_elements.size()) &&
+ (source_elements == source_stored_elements))
+ {
+ tpetra_comm_pattern = std::dynamic_pointer_cast<
+ const TpetraWrappers::CommunicationPattern>(comm_pattern);
+ if (tpetra_comm_pattern == nullptr)
+ tpetra_comm_pattern =
+ std::make_shared<const TpetraWrappers::CommunicationPattern>(
+ create_tpetra_comm_pattern(source_elements, mpi_comm));
+ }
+ else
+ tpetra_comm_pattern =
+ std::make_shared<const TpetraWrappers::CommunicationPattern>(
+ create_tpetra_comm_pattern(source_elements, mpi_comm));
+ }
+ else
+ {
+ tpetra_comm_pattern =
+ std::dynamic_pointer_cast<const TpetraWrappers::CommunicationPattern>(
+ communication_pattern);
+ AssertThrow(tpetra_comm_pattern != nullptr,
+ ExcMessage(
+ std::string("The communication pattern is not of type ") +
+ "LinearAlgebra::TpetraWrappers::CommunicationPattern."));
+ }
+
+ Tpetra::Export<> tpetra_export(tpetra_comm_pattern->get_tpetra_export());
+
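+    // The export's source map describes the layout of this ReadWriteVector.
+    // Gather the requested elements of the distributed vector into a temporary
+    // vector with that layout; the values are copied into the local storage
+    // below according to the requested operation.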
+ Tpetra::Vector<> target_vector(tpetra_export.getSourceMap());
+ target_vector.doImport(vector, tpetra_export, Tpetra::REPLACE);
+
+ const auto *new_values = target_vector.getData().get();
+ const auto size = target_vector.getLocalLength();
+
+ using size_type = std::decay<decltype(size)>::type;
+
+ Assert(size == 0 || values != nullptr, ExcInternalError("Export failed."));
+ AssertDimension(size, stored_elements.n_elements());
+
+ if (operation == VectorOperation::insert)
+ {
+ for (size_type i = 0; i < size; ++i)
+ values[i] = new_values[i];
+ }
+ else if (operation == VectorOperation::add)
+ {
+ for (size_type i = 0; i < size; ++i)
+ values[i] += new_values[i];
+ }
+ else if (operation == VectorOperation::min)
+ {
+ for (size_type i = 0; i < size; ++i)
+ if (std::real(new_values[i]) - std::real(values[i]) < 0.0)
+ values[i] = new_values[i];
+ }
+ else if (operation == VectorOperation::max)
+ {
+ for (size_type i = 0; i < size; ++i)
+ if (std::real(new_values[i]) - std::real(values[i]) > 0.0)
+ values[i] = new_values[i];
+ }
+ else
+ AssertThrow(false, ExcNotImplemented());
+ }
+
+
+
template <typename Number>
void
ReadWriteVector<Number>::import(
+ template <typename Number>
+ void
+ ReadWriteVector<Number>::import(
+ const LinearAlgebra::TpetraWrappers::Vector &trilinos_vec,
+ VectorOperation::values operation,
+ const std::shared_ptr<const CommunicationPatternBase>
+ &communication_pattern)
+ {
+ import(trilinos_vec.trilinos_vector(),
+ trilinos_vec.locally_owned_elements(),
+ operation,
+ trilinos_vec.get_mpi_communicator(),
+ communication_pattern);
+ }
+
+
+
template <typename Number>
void
ReadWriteVector<Number>::import(
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+ template <typename Number>
+ TpetraWrappers::CommunicationPattern
+ ReadWriteVector<Number>::create_tpetra_comm_pattern(
+ const IndexSet &source_index_set,
+ const MPI_Comm &mpi_comm)
+ {
+ source_stored_elements = source_index_set;
+    TpetraWrappers::CommunicationPattern tpetra_comm_pattern(
+      source_stored_elements, stored_elements, mpi_comm);
+    comm_pattern = std::make_shared<TpetraWrappers::CommunicationPattern>(
+      source_stored_elements, stored_elements, mpi_comm);
+
+    return tpetra_comm_pattern;
+ }
+
+
template <typename Number>
EpetraWrappers::CommunicationPattern
ReadWriteVector<Number>::create_epetra_comm_pattern(
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/full_matrix.h>
# include <deal.II/lac/trilinos_epetra_vector.h>
+# include <deal.II/lac/trilinos_tpetra_vector.h>
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/vector_memory.h>
# include <deal.II/lac/vector_operation.h>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_trilinos_tpetra_communication_pattern_h
+#define dealii_trilinos_tpetra_communication_pattern_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+# ifdef DEAL_II_WITH_MPI
+
+# include <deal.II/lac/communication_pattern_base.h>
+
+# include <Tpetra_Import.hpp>
+
+# include <memory>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ /**
+ * This class implements a wrapper to Tpetra::Import and Tpetra::Export.
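+     * It stores the communication plan needed to move data between the layout
+     * of a VectorSpaceVector (uniquely owned elements) and the layout of a
+     * ReadWriteVector (possibly overlapping elements), so that the plan can be
+     * set up once and reused for repeated imports.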
+ */
+ class CommunicationPattern : public CommunicationPatternBase
+ {
+ public:
+ /**
+ * Reinitialize the communication pattern. The first argument @p
+ * vector_space_vector_index_set is the index set associated to a
+ * VectorSpaceVector object. The second argument @p
+ * read_write_vector_index_set is the index set associated to a
+ * ReadWriteVector object.
+ */
+ CommunicationPattern(const IndexSet &vector_space_vector_index_set,
+ const IndexSet &read_write_vector_index_set,
+ const MPI_Comm &communicator);
+
+ /**
+ * Reinitialize the object.
+ */
+ virtual void
+ reinit(const IndexSet &vector_space_vector_index_set,
+ const IndexSet &read_write_vector_index_set,
+ const MPI_Comm &communicator) override;
+
+ /**
+ * Return the underlying MPI communicator.
+ */
+ virtual const MPI_Comm &
+ get_mpi_communicator() const override;
+
+ /**
+ * Return the underlying Tpetra::Import object.
+ */
+ const Tpetra::Import<> &
+ get_tpetra_import() const;
+
+ /**
+ * Return the underlying Tpetra::Export object.
+ */
+ const Tpetra::Export<> &
+ get_tpetra_export() const;
+
+ private:
+ /**
+ * Shared pointer to the MPI communicator used.
+ */
+ std::shared_ptr<const MPI_Comm> comm;
+
+ /**
+       * Unique pointer to the Tpetra::Import object used.
+ */
+ std::unique_ptr<Tpetra::Import<>> tpetra_import;
+
+ /**
+       * Unique pointer to the Tpetra::Export object used.
+ */
+ std::unique_ptr<Tpetra::Export<>> tpetra_export;
+ };
+ } // end of namespace TpetraWrappers
+} // end of namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+# endif
+
+#endif
+
+#endif
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_trilinos_tpetra_vector_h
+#define dealii_trilinos_tpetra_vector_h
+
+
+#include <deal.II/base/config.h>
+
+#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+
+# include <deal.II/base/index_set.h>
+# include <deal.II/base/subscriptor.h>
+
+# include <deal.II/lac/trilinos_tpetra_communication_pattern.h>
+# include <deal.II/lac/vector_operation.h>
+# include <deal.II/lac/vector_space_vector.h>
+# include <deal.II/lac/vector_type_traits.h>
+
+# include <Teuchos_Comm.hpp>
+# include <Teuchos_OrdinalTraits.hpp>
+# include <Tpetra_Core.hpp>
+# include <Tpetra_Vector.hpp>
+# include <Tpetra_Version.hpp>
+# include <mpi.h>
+
+# include <memory>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+ // Forward declaration
+ template <typename Number>
+ class ReadWriteVector;
+
+ namespace TpetraWrappers
+ {
+ /**
+ * This class implements a wrapper to the Trilinos distributed vector
+ * class Tpetra::Vector. This class is derived from the
+     * LinearAlgebra::VectorSpaceVector class. Note, however, that this
+     * wrapper class currently only supports Number = double.
+ *
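+     * A minimal usage sketch (kept on a single process, MPI_COMM_SELF, only
+     * to keep the example short):
+     * @code
+     * IndexSet locally_owned = complete_index_set(10);
+     * LinearAlgebra::TpetraWrappers::Vector vec(locally_owned, MPI_COMM_SELF);
+     * vec = 0.;
+     * vec.add(1.);                        // every entry is now 1
+     * const double sum = vec.l1_norm();   // equals 10
+     * @endcode
+     *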
+ * @ingroup TrilinosWrappers
+ * @ingroup Vectors
+ * @author Daniel Arndt, 2018
+ */
+ class Vector : public VectorSpaceVector<double>, public Subscriptor
+ {
+ public:
+ /**
+ * Constructor. Create a vector of dimension zero.
+ */
+ Vector();
+
+ /**
+ * Copy constructor. Sets the dimension and the partitioning to that of
+ * the given vector and copies all elements.
+ */
+ Vector(const Vector &V);
+
+ /**
+ * This constructor takes an IndexSet that defines how to distribute the
+ * individual components among the MPI processors. Since it also
+ * includes information about the size of the vector, this is all we
+ * need to generate a %parallel vector.
+ */
+ explicit Vector(const IndexSet ¶llel_partitioner,
+ const MPI_Comm &communicator);
+
+ /**
+ * Reinit functionality. This function destroys the old vector content
+ * and generates a new one based on the input partitioning. The flag
+ * <tt>omit_zeroing_entries</tt> determines whether the vector should be
+ * filled with zero (false) or left untouched (true).
+ */
+ void
+ reinit(const IndexSet ¶llel_partitioner,
+ const MPI_Comm &communicator,
+ const bool omit_zeroing_entries = false);
+
+ /**
+ * Change the dimension to that of the vector V. The elements of V are not
+ * copied.
+ */
+ virtual void
+ reinit(const VectorSpaceVector<double> &V,
+ const bool omit_zeroing_entries = false) override;
+
+ /**
+ * Copy function. This function takes a Vector and copies all the
+ * elements. The Vector will have the same parallel distribution as @p
+ * V.
+ */
+ Vector &
+ operator=(const Vector &V);
+
+ /**
+ * Sets all elements of the vector to the scalar @p s. This operation is
+ * only allowed if @p s is equal to zero.
+ */
+ virtual Vector &
+ operator=(const double s) override;
+
+ /**
+ * Imports all the elements present in the vector's IndexSet from the
+ * input
+ * vector @p V. VectorOperation::values @p operation is used to decide if
+ * the elements in @p V should be added to the current vector or replace the
+ * current elements. The last parameter can be used if the same
+ * communication pattern is used multiple times. This can be used to
+ * improve performance.
+ */
+ virtual void
+ import(
+ const ReadWriteVector<double> & V,
+ VectorOperation::values operation,
+ std::shared_ptr<const CommunicationPatternBase> communication_pattern =
+ std::shared_ptr<const CommunicationPatternBase>()) override;
+
+ /**
+ * Multiply the entire vector by a fixed factor.
+ */
+ virtual Vector &
+ operator*=(const double factor) override;
+
+ /**
+ * Divide the entire vector by a fixed factor.
+ */
+ virtual Vector &
+ operator/=(const double factor) override;
+
+ /**
+ * Add the vector @p V to the present one.
+ */
+ virtual Vector &
+ operator+=(const VectorSpaceVector<double> &V) override;
+
+ /**
+ * Subtract the vector @p V from the present one.
+ */
+ virtual Vector &
+ operator-=(const VectorSpaceVector<double> &V) override;
+
+ /**
+ * Return the scalar product of two vectors. The vectors need to have the
+ * same layout.
+ */
+ virtual double
+ operator*(const VectorSpaceVector<double> &V) const override;
+
+ /**
+       * Add @p a to all components. Note that @p a is a scalar not a vector.
+ */
+ virtual void
+ add(const double a) override;
+
+ /**
+ * Simple addition of a multiple of a vector, i.e. <tt>*this +=
+ * a*V</tt>. The vectors need to have the same layout.
+ */
+ virtual void
+ add(const double a, const VectorSpaceVector<double> &V) override;
+
+ /**
+       * Multiple addition of scaled vectors, i.e. <tt>*this +=
+       * a*V+b*W</tt>. The vectors need to have the same layout.
+ */
+ virtual void
+ add(const double a,
+ const VectorSpaceVector<double> &V,
+ const double b,
+ const VectorSpaceVector<double> &W) override;
+
+ /**
+ * Scaling and simple addition of a multiple of a vector, i.e. <tt>*this
+ * = s*(*this)+a*V</tt>.
+ */
+ virtual void
+ sadd(const double s,
+ const double a,
+ const VectorSpaceVector<double> &V) override;
+
+ /**
+ * Scale each element of this vector by the corresponding element in the
+ * argument. This function is mostly meant to simulate multiplication
+ * (and immediate re-assignment) by a diagonal scaling matrix. The
+ * vectors need to have the same layout.
+ */
+ virtual void
+ scale(const VectorSpaceVector<double> &scaling_factors) override;
+
+ /**
+ * Assignment <tt>*this = a*V</tt>.
+ */
+ virtual void
+ equ(const double a, const VectorSpaceVector<double> &V) override;
+
+ /**
+ * Return whether the vector contains only elements with value zero.
+ */
+ virtual bool
+ all_zero() const override;
+
+ /**
+       * Return the mean value of the elements of this vector.
+ */
+ virtual double
+ mean_value() const override;
+
+ /**
+ * Return the l<sub>1</sub> norm of the vector (i.e., the sum of the
+ * absolute values of all entries among all processors).
+ */
+ virtual double
+ l1_norm() const override;
+
+ /**
+ * Return the l<sub>2</sub> norm of the vector (i.e., the square root of
+ * the sum of the square of all entries among all processors).
+ */
+ virtual double
+ l2_norm() const override;
+
+ /**
+ * Return the maximum norm of the vector (i.e., the maximum absolute value
+ * among all entries and among all processors).
+ */
+ virtual double
+ linfty_norm() const override;
+
+ /**
+ * Performs a combined operation of a vector addition and a subsequent
+ * inner product, returning the value of the inner product. In other
+ * words, the result of this function is the same as if the user called
+ * @code
+ * this->add(a, V);
+ * return_value = *this * W;
+ * @endcode
+ *
+ * The reason this function exists is that this operation involves less
+ * memory transfer than calling the two functions separately. This
+ * method only needs to load three vectors, @p this, @p V, @p W, whereas
+ * calling separate methods means to load the calling vector @p this
+ * twice. Since most vector operations are memory transfer limited, this
+ * reduces the time by 25\% (or 50\% if @p W equals @p this).
+ *
+ * The vectors need to have the same layout.
+ *
+ * For complex-valued vectors, the scalar product in the second step is
+ * implemented as
+ * $\left<v,w\right>=\sum_i v_i \bar{w_i}$.
+ */
+ virtual double
+ add_and_dot(const double a,
+ const VectorSpaceVector<double> &V,
+ const VectorSpaceVector<double> &W) override;
+ /**
+ * This function always returns false and is present only for backward
+ * compatibility.
+ */
+ bool
+ has_ghost_elements() const;
+
+ /**
+ * Return the global size of the vector, equal to the sum of the number of
+ * locally owned indices among all processors.
+ */
+ virtual size_type
+ size() const override;
+
+ /**
+ * Return the MPI communicator object in use with this object.
+ */
+ MPI_Comm
+ get_mpi_communicator() const;
+
+ /**
+ * Return an index set that describes which elements of this vector are
+ * owned by the current processor. As a consequence, the index sets
+ * returned on different processors if this is a distributed vector will
+ * form disjoint sets that add up to the complete index set. Obviously, if
+ * a vector is created on only one processor, then the result would
+ * satisfy
+ * @code
+ * vec.locally_owned_elements() == complete_index_set(vec.size())
+ * @endcode
+ */
+ virtual ::dealii::IndexSet
+ locally_owned_elements() const override;
+
+ /**
+ * Return a const reference to the underlying Trilinos
+ * Tpetra::Vector class.
+ */
+ const Tpetra::Vector<> &
+ trilinos_vector() const;
+
+ /**
+       * Return a (modifiable) reference to the underlying Trilinos
+ * Tpetra::Vector class.
+ */
+ Tpetra::Vector<> &
+ trilinos_vector();
+
+ /**
+ * Prints the vector to the output stream @p out.
+ */
+ virtual void
+ print(std::ostream & out,
+ const unsigned int precision = 3,
+ const bool scientific = true,
+ const bool across = true) const override;
+
+ /**
+ * Return the memory consumption of this class in bytes.
+ */
+ virtual std::size_t
+ memory_consumption() const override;
+
+ /**
+       * The vectors have different partitioning, i.e., they use different
+       * IndexSets.
+ */
+ DeclException0(ExcDifferentParallelPartitioning);
+
+ /**
+ * Attempt to perform an operation between two incompatible vector types.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException0(ExcVectorTypeNotCompatible);
+
+ /**
+ * Exception thrown by an error in Trilinos.
+ *
+ * @ingroup Exceptions
+ */
+ DeclException1(ExcTrilinosError,
+ int,
+ << "An error with error number " << arg1
+ << " occurred while calling a Trilinos function");
+
+ private:
+ /**
+ * Create the CommunicationPattern for the communication between the
+ * IndexSet @p source_index_set and the current vector based
+ * on the communicator @p mpi_comm.
+ */
+ void
+ create_tpetra_comm_pattern(const IndexSet &source_index_set,
+ const MPI_Comm &mpi_comm);
+
+ /**
+ * Pointer to the actual Tpetra vector object.
+ */
+ std::unique_ptr<Tpetra::Vector<>> vector;
+
+ /**
+ * IndexSet of the elements of the last imported vector.
+ */
+ ::dealii::IndexSet source_stored_elements;
+
+ /**
+ * CommunicationPattern for the communication between the
+ * source_stored_elements IndexSet and the current vector.
+ */
+ std::shared_ptr<const TpetraWrappers::CommunicationPattern>
+ tpetra_comm_pattern;
+ };
+
+
+ inline bool
+ Vector::has_ghost_elements() const
+ {
+ return false;
+ }
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+
+
+/**
+ * Declare dealii::LinearAlgebra::TpetraWrappers::Vector as a distributed vector.
+ */
+template <>
+struct is_serial_vector<LinearAlgebra::TpetraWrappers::Vector> : std::false_type
+{};
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
#include <deal.II/lac/trilinos_epetra_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
DEAL_II_NAMESPACE_OPEN
vector[0][trilinos_i] = value;
}
-
template <>
inline double
ElementAccess<LinearAlgebra::EpetraWrappers::Vector>::get(
return vector[0][trilinos_i];
}
+
+
+
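+  // The Tpetra element accessors below follow the Kokkos dual-view protocol
+  // used by Tpetra::Vector: sync<Kokkos::HostSpace>() brings the host copy of
+  // the data up to date, getLocalView<Kokkos::HostSpace>() exposes it as a
+  // rank-2 view (of which we take the first column), modify<Kokkos::HostSpace>()
+  // marks the host copy as changed, and the final sync to the device memory
+  // space pushes the change back to the device.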
+ template <>
+ inline void
+ ElementAccess<LinearAlgebra::TpetraWrappers::Vector>::add(
+ const double value,
+ const types::global_dof_index i,
+ LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ // Extract local indices in the vector.
+ Tpetra::Vector<> vector = V.trilinos_vector();
+ TrilinosWrappers::types::int_type trilinos_i =
+ vector.getMap()->getLocalElement(
+ static_cast<TrilinosWrappers::types::int_type>(i));
+
+ vector.sync<Kokkos::HostSpace>();
+ auto vector_2d = vector.getLocalView<Kokkos::HostSpace>();
+ auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
+ // We're going to modify the data on host.
+ vector.modify<Kokkos::HostSpace>();
+ vector_1d(trilinos_i) += value;
+ vector.sync<Tpetra::Vector<>::device_type::memory_space>();
+ }
+
+
+
+ template <>
+ inline void
+ ElementAccess<LinearAlgebra::TpetraWrappers::Vector>::set(
+ const double value,
+ const types::global_dof_index i,
+ LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ // Extract local indices in the vector.
+ Tpetra::Vector<> vector = V.trilinos_vector();
+ TrilinosWrappers::types::int_type trilinos_i =
+ vector.getMap()->getLocalElement(
+ static_cast<TrilinosWrappers::types::int_type>(i));
+
+ vector.sync<Kokkos::HostSpace>();
+ auto vector_2d = vector.getLocalView<Kokkos::HostSpace>();
+ auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
+ // We're going to modify the data on host.
+ vector.modify<Kokkos::HostSpace>();
+ vector_1d(trilinos_i) = value;
+ vector.sync<Tpetra::Vector<>::device_type::memory_space>();
+ }
+
+
+ template <>
+ inline double
+ ElementAccess<LinearAlgebra::TpetraWrappers::Vector>::get(
+ const LinearAlgebra::TpetraWrappers::Vector &V,
+ const types::global_dof_index i)
+ {
+ // Extract local indices in the vector.
+ Tpetra::Vector<> vector = V.trilinos_vector();
+ TrilinosWrappers::types::int_type trilinos_i =
+ vector.getMap()->getLocalElement(
+ static_cast<TrilinosWrappers::types::int_type>(i));
+
+ vector.sync<Kokkos::HostSpace>();
+ auto vector_2d = vector.getLocalView<Kokkos::HostSpace>();
+ auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
+    // We only read the data on the host, so there is no need to mark it as
+    // modified or to sync it back to the device.
+ return vector_1d(trilinos_i);
+ }
#endif
} // namespace internal
};
# ifdef DEAL_II_WITH_MPI
+ template <>
+ struct MatrixSelector<dealii::LinearAlgebra::TpetraWrappers::Vector>
+ {
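+    // Note: the level matrices are still the Epetra-based
+    // TrilinosWrappers::SparseMatrix; only the vector type differs from the
+    // Epetra specialization that follows, since no Tpetra-based sparse matrix
+    // wrapper is available here.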
+ using Sparsity = ::dealii::TrilinosWrappers::SparsityPattern;
+ using Matrix = ::dealii::TrilinosWrappers::SparseMatrix;
+
+ static const bool requires_distributed_sparsity_pattern = false;
+
+ template <typename SparsityPatternType, typename DoFHandlerType>
+ static void
+ reinit(Matrix &matrix,
+ Sparsity &,
+ int level,
+ const SparsityPatternType &sp,
+ DoFHandlerType & dh)
+ {
+ const parallel::Triangulation<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<
+ const parallel::Triangulation<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(
+ &(dh.get_triangulation()));
+ MPI_Comm communicator =
+ dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
+ matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
+ dh.locally_owned_mg_dofs(level),
+ sp,
+ communicator,
+ true);
+ }
+ };
+
template <>
struct MatrixSelector<dealii::LinearAlgebra::EpetraWrappers::Vector>
{
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_memory.h>
#ifdef DEAL_II_WITH_TRILINOS
+Tpetra::Map<>
+IndexSet::make_tpetra_map(const MPI_Comm &communicator,
+ const bool overlapping) const
+{
+ compress();
+ (void)communicator;
+
+# ifdef DEBUG
+ if (!overlapping)
+ {
+ const size_type n_global_elements =
+ Utilities::MPI::sum(n_elements(), communicator);
+ Assert(n_global_elements == size(),
+ ExcMessage("You are trying to create an Tpetra::Map object "
+ "that partitions elements of an index set "
+ "between processors. However, the union of the "
+ "index sets on different processors does not "
+ "contain all indices exactly once: the sum of "
+ "the number of entries the various processors "
+ "want to store locally is " +
+ Utilities::to_string(n_global_elements) +
+ " whereas the total size of the object to be "
+ "allocated is " +
+ Utilities::to_string(size()) +
+ ". In other words, there are "
+ "either indices that are not spoken for "
+ "by any processor, or there are indices that are "
+ "claimed by multiple processors."));
+ }
+# endif
+
+ // Find out if the IndexSet is ascending and 1:1. This corresponds to a
+ // linear Tpetra::Map. Overlapping IndexSets are never 1:1.
+ const bool linear =
+ overlapping ? false : is_ascending_and_one_to_one(communicator);
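+  // A "linear" (contiguous and 1:1) index set can be described to Tpetra by
+  // the global size, the local size, and the index base alone; otherwise we
+  // have to pass the explicit list of globally owned indices below.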
+ if (linear)
+ return Tpetra::Map<>(size(),
+ n_elements(),
+ 0,
+# ifdef DEAL_II_WITH_MPI
+ Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
+# else
+                         Teuchos::rcp(new Teuchos::SerialComm<int>())
+# endif
+ );
+ else
+ {
+ std::vector<size_type> indices;
+ fill_index_vector(indices);
+ std::vector<int> int_indices(indices.size());
+ std::copy(indices.begin(), indices.end(), int_indices.begin());
+ return Tpetra::Map<>(size(),
+ (n_elements() > 0 ? int_indices.data() : nullptr),
+ n_elements(),
+ 0,
+# ifdef DEAL_II_WITH_MPI
+ Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
+# else
+                         Teuchos::rcp(new Teuchos::SerialComm<int>())
+# endif
+ );
+ }
+}
+
+
+
Epetra_Map
IndexSet::make_trilinos_map(const MPI_Comm &communicator,
const bool overlapping) const
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
+ const Teuchos::RCP<const Teuchos::Comm<int>> &
+ tpetra_comm_self()
+ {
+# ifdef DEAL_II_WITH_MPI
+ static auto communicator = Teuchos::RCP<const Teuchos::Comm<int>>(
+ new Teuchos::MpiComm<int>(MPI_COMM_SELF));
+# else
+ static auto communicator =
+      Teuchos::RCP<const Teuchos::Comm<int>>(new Teuchos::SerialComm<int>());
+# endif
+
+ return communicator;
+ }
+
const Epetra_Comm &
comm_self()
{
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_element_access.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
trilinos_solver.cc
trilinos_sparse_matrix.cc
trilinos_sparsity_pattern.cc
+ trilinos_tpetra_communication_pattern.cc
+ trilinos_tpetra_vector.cc
trilinos_vector.cc
)
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/solver.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_memory.h>
{
return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
}
+
+ template <>
+ double *
+ begin(LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ return V.trilinos_vector().getDataNonConst().get();
+ }
+
+ template <>
+ const double *
+ begin(const LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ return V.trilinos_vector().getData().get();
+ }
+
+ template <>
+ double *
+ end(LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ return V.trilinos_vector().getDataNonConst().get() +
+ V.trilinos_vector().getLocalLength();
+ }
+
+ template <>
+ const double *
+ end(const LinearAlgebra::TpetraWrappers::Vector &V)
+ {
+ return V.trilinos_vector().getData().get() +
+ V.trilinos_vector().getLocalLength();
+ }
# endif
} // namespace internal
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+ template void
+ SparseMatrix::vmult(
+ dealii::LinearAlgebra::TpetraWrappers::Vector &,
+ const dealii::LinearAlgebra::TpetraWrappers::Vector &) const;
+
template void
SparseMatrix::vmult(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+ template void
+ SparseMatrix::Tvmult(
+ dealii::LinearAlgebra::TpetraWrappers::Vector &,
+ const dealii::LinearAlgebra::TpetraWrappers::Vector &) const;
+
template void
SparseMatrix::Tvmult(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+ template void
+ SparseMatrix::vmult_add(
+ dealii::LinearAlgebra::TpetraWrappers::Vector &,
+ const dealii::LinearAlgebra::TpetraWrappers::Vector &) const;
+
template void
SparseMatrix::vmult_add(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+ template void
+ SparseMatrix::Tvmult_add(
+ dealii::LinearAlgebra::TpetraWrappers::Vector &,
+ const dealii::LinearAlgebra::TpetraWrappers::Vector &) const;
+
template void
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/std_cxx14/memory.h>
+
+#include <deal.II/lac/trilinos_tpetra_communication_pattern.h>
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+# ifdef DEAL_II_WITH_MPI
+
+# include <deal.II/base/index_set.h>
+
+# include <Tpetra_Map.hpp>
+
+# include <memory>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ CommunicationPattern::CommunicationPattern(
+ const IndexSet &vector_space_vector_index_set,
+ const IndexSet &read_write_vector_index_set,
+ const MPI_Comm &communicator)
+ {
+ // virtual functions called in constructors and destructors never use the
+ // override in a derived class
+ // for clarity be explicit on which function is called
+ CommunicationPattern::reinit(vector_space_vector_index_set,
+ read_write_vector_index_set,
+ communicator);
+ }
+
+
+
+ void
+ CommunicationPattern::reinit(const IndexSet &vector_space_vector_index_set,
+ const IndexSet &read_write_vector_index_set,
+ const MPI_Comm &communicator)
+ {
+ comm = std::make_shared<const MPI_Comm>(communicator);
+
+ auto vector_space_vector_map = Teuchos::rcp(new Tpetra::Map<>(
+ vector_space_vector_index_set.make_tpetra_map(*comm, false)));
+ auto read_write_vector_map = Teuchos::rcp(new Tpetra::Map<>(
+ read_write_vector_index_set.make_tpetra_map(*comm, true)));
+
+ // Target map is read_write_vector_map
+ // Source map is vector_space_vector_map. This map must have uniquely
+ // owned GID.
+ tpetra_import =
+ std_cxx14::make_unique<Tpetra::Import<>>(read_write_vector_map,
+ vector_space_vector_map);
+ tpetra_export =
+ std_cxx14::make_unique<Tpetra::Export<>>(read_write_vector_map,
+ vector_space_vector_map);
+ }
+
+
+
+ const MPI_Comm &
+ CommunicationPattern::get_mpi_communicator() const
+ {
+ return *comm;
+ }
+
+
+
+ const Tpetra::Import<> &
+ CommunicationPattern::get_tpetra_import() const
+ {
+ return *tpetra_import;
+ }
+
+
+
+ const Tpetra::Export<> &
+ CommunicationPattern::get_tpetra_export() const
+ {
+ return *tpetra_export;
+ }
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+# endif
+
+#endif
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/std_cxx14/memory.h>
+
+#include <deal.II/lac/trilinos_tpetra_vector.h>
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+# ifdef DEAL_II_WITH_MPI
+
+# include <deal.II/base/index_set.h>
+
+# include <deal.II/lac/read_write_vector.h>
+
+# include <boost/io/ios_state.hpp>
+
+# include <Teuchos_DefaultMpiComm.hpp>
+# include <Tpetra_Import_decl.hpp>
+# include <Tpetra_Map_decl.hpp>
+
+# include <memory>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+ namespace TpetraWrappers
+ {
+ Vector::Vector()
+ : vector(new Tpetra::Vector<>(Teuchos::RCP<Tpetra::Map<>>(
+ new Tpetra::Map<>(0, 0, Utilities::Trilinos::tpetra_comm_self()))))
+ {}
+
+
+
+ Vector::Vector(const Vector &V)
+ : Subscriptor()
+ , vector(new Tpetra::Vector<>(V.trilinos_vector(), Teuchos::Copy))
+ {}
+
+
+
+ Vector::Vector(const IndexSet ¶llel_partitioner,
+ const MPI_Comm &communicator)
+ : vector(new Tpetra::Vector<>(Teuchos::rcp(new Tpetra::Map<>(
+ parallel_partitioner.make_tpetra_map(communicator, false)))))
+ {}
+
+
+
+ void
+ Vector::reinit(const IndexSet ¶llel_partitioner,
+ const MPI_Comm &communicator,
+ const bool omit_zeroing_entries)
+ {
+ Tpetra::Map<> input_map =
+ parallel_partitioner.make_tpetra_map(communicator, false);
+ if (vector->getMap()->isSameAs(input_map) == false)
+ vector = std_cxx14::make_unique<Tpetra::Vector<>>(
+ Teuchos::rcp(new Tpetra::Map<>(input_map)));
+ else if (omit_zeroing_entries == false)
+ {
+ vector->putScalar(0.);
+ }
+ }
+
+
+
+ void
+ Vector::reinit(const VectorSpaceVector<double> &V,
+ const bool omit_zeroing_entries)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+
+ reinit(down_V.locally_owned_elements(),
+ down_V.get_mpi_communicator(),
+ omit_zeroing_entries);
+ }
+
+
+
+ Vector &
+ Vector::operator=(const Vector &V)
+ {
+ // Distinguish three cases:
+ // - First case: both vectors have the same layout.
+ // - Second case: both vectors have the same size but different layout.
+ // - Third case: the vectors have different size.
+ if (vector->getMap()->isSameAs(*(V.trilinos_vector().getMap())))
+ *vector = V.trilinos_vector();
+ else
+ {
+ if (size() == V.size())
+ {
+ Tpetra::Import<> data_exchange(vector->getMap(),
+ V.trilinos_vector().getMap());
+
+ vector->doImport(V.trilinos_vector(),
+ data_exchange,
+ Tpetra::REPLACE);
+ }
+ else
+ vector =
+ std_cxx14::make_unique<Tpetra::Vector<>>(V.trilinos_vector());
+ }
+
+ return *this;
+ }
+
+
+
+ Vector &
+ Vector::operator=(const double s)
+ {
+ Assert(s == 0., ExcMessage("Only 0 can be assigned to a vector."));
+
+ vector->putScalar(s);
+
+ return *this;
+ }
+
+
+
+ void
+ Vector::import(
+ const ReadWriteVector<double> & V,
+ VectorOperation::values operation,
+ std::shared_ptr<const CommunicationPatternBase> communication_pattern)
+ {
+      // If no communication pattern is given, create one. Otherwise, use the
+ // one given.
+ if (communication_pattern == nullptr)
+ {
+ // The first time import is called, a communication pattern is
+ // created. Check if the communication pattern already exists and if
+ // it can be reused.
+ if ((source_stored_elements.size() !=
+ V.get_stored_elements().size()) ||
+ (source_stored_elements != V.get_stored_elements()))
+ {
+ const Teuchos::MpiComm<int> *mpi_comm =
+ dynamic_cast<const Teuchos::MpiComm<int> *>(
+ vector->getMap()->getComm().get());
+ Assert(mpi_comm != nullptr, ExcInternalError());
+ create_tpetra_comm_pattern(V.get_stored_elements(),
+ *(mpi_comm->getRawMpiComm())());
+ }
+ }
+ else
+ {
+ tpetra_comm_pattern = std::dynamic_pointer_cast<
+ const TpetraWrappers::CommunicationPattern>(communication_pattern);
+ AssertThrow(
+ tpetra_comm_pattern != nullptr,
+ ExcMessage(
+ std::string("The communication pattern is not of type ") +
+ "LinearAlgebra::TpetraWrappers::CommunicationPattern."));
+ }
+
+ Tpetra::Export<> tpetra_export(tpetra_comm_pattern->get_tpetra_export());
+ Tpetra::Vector<> source_vector(tpetra_export.getSourceMap());
+
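+      // source_vector is laid out like the ReadWriteVector V (the export's
+      // source map). Fill it on the host with the values of V, then export
+      // into the distributed layout of this vector, either replacing or
+      // adding to the current entries depending on the requested operation.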
+ source_vector.sync<Kokkos::HostSpace>();
+ auto x_2d = source_vector.getLocalView<Kokkos::HostSpace>();
+ auto x_1d = Kokkos::subview(x_2d, Kokkos::ALL(), 0);
+ source_vector.modify<Kokkos::HostSpace>();
+ const size_t localLength = source_vector.getLocalLength();
+ auto values_it = V.begin();
+ for (size_t k = 0; k < localLength; ++k)
+ x_1d(k) = *values_it++;
+ source_vector.sync<Tpetra::Vector<double>::device_type::memory_space>();
+ if (operation == VectorOperation::insert)
+ vector->doExport(source_vector, tpetra_export, Tpetra::REPLACE);
+ else if (operation == VectorOperation::add)
+ vector->doExport(source_vector, tpetra_export, Tpetra::ADD);
+ else
+ AssertThrow(false, ExcNotImplemented());
+ }
+
+
+
+ Vector &
+ Vector::operator*=(const double factor)
+ {
+ AssertIsFinite(factor);
+ vector->scale(factor);
+
+ return *this;
+ }
+
+
+
+ Vector &
+ Vector::operator/=(const double factor)
+ {
+ AssertIsFinite(factor);
+ Assert(factor != 0., ExcZero());
+ *this *= 1. / factor;
+
+ return *this;
+ }
+
+
+
+ Vector &
+ Vector::operator+=(const VectorSpaceVector<double> &V)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ // If the maps are the same we can Update right away.
+ if (vector->getMap()->isSameAs(*(down_V.trilinos_vector().getMap())))
+ {
+ vector->update(1., down_V.trilinos_vector(), 1.);
+ }
+ else
+ {
+ Assert(this->size() == down_V.size(),
+ ExcDimensionMismatch(this->size(), down_V.size()));
+
+ // TODO: The code doesn't work as expected so we use a workaround.
+ /*Tpetra::Export<> data_exchange(vector->getMap(),
+ down_V.trilinos_vector().getMap());
+ vector->doExport(down_V.trilinos_vector(),
+ data_exchange,
+ Tpetra::ADD);*/
+
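+          // Workaround: gather V into a temporary vector that uses our own
+          // map and then add that temporary entry-wise through update().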
+ Tpetra::Vector<> dummy(vector->getMap(), false);
+ Tpetra::Import<> data_exchange(dummy.getMap(),
+ down_V.trilinos_vector().getMap());
+
+ dummy.doExport(down_V.trilinos_vector(),
+ data_exchange,
+ Tpetra::REPLACE);
+
+ vector->update(1.0, dummy, 1.0);
+ }
+
+ return *this;
+ }
+
+
+
+ Vector &
+ Vector::operator-=(const VectorSpaceVector<double> &V)
+ {
+ this->add(-1., V);
+
+ return *this;
+ }
+
+
+
+ double Vector::operator*(const VectorSpaceVector<double> &V) const
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ Assert(this->size() == down_V.size(),
+ ExcDimensionMismatch(this->size(), down_V.size()));
+ Assert(vector->getMap()->isSameAs(*down_V.trilinos_vector().getMap()),
+ ExcDifferentParallelPartitioning());
+
+ return vector->dot(down_V.trilinos_vector());
+ }
+
+
+
+ void
+ Vector::add(const double a)
+ {
+ AssertIsFinite(a);
+
+ vector->sync<Kokkos::HostSpace>();
+ auto vector_2d = vector->getLocalView<Kokkos::HostSpace>();
+ auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
+ vector->modify<Kokkos::HostSpace>();
+ const size_t localLength = vector->getLocalLength();
+ for (size_t k = 0; k < localLength; ++k)
+ {
+ vector_1d(k) += a;
+ }
+ vector->sync<Tpetra::Vector<double>::device_type::memory_space>();
+ }
+
+
+
+ void
+ Vector::add(const double a, const VectorSpaceVector<double> &V)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ AssertIsFinite(a);
+ Assert(vector->getMap()->isSameAs(*(down_V.trilinos_vector().getMap())),
+ ExcDifferentParallelPartitioning());
+
+ vector->update(a, down_V.trilinos_vector(), 1.);
+ }
+
+
+
+ void
+ Vector::add(const double a,
+ const VectorSpaceVector<double> &V,
+ const double b,
+ const VectorSpaceVector<double> &W)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&W) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ // Downcast W. If fails, throws an exception.
+ const Vector &down_W = dynamic_cast<const Vector &>(W);
+ Assert(vector->getMap()->isSameAs(*(down_V.trilinos_vector().getMap())),
+ ExcDifferentParallelPartitioning());
+ Assert(vector->getMap()->isSameAs(*(down_W.trilinos_vector().getMap())),
+ ExcDifferentParallelPartitioning());
+ AssertIsFinite(a);
+ AssertIsFinite(b);
+
+ vector->update(
+ a, down_V.trilinos_vector(), b, down_W.trilinos_vector(), 1.);
+ }
+
+
+
+ void
+ Vector::sadd(const double s,
+ const double a,
+ const VectorSpaceVector<double> &V)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ *this *= s;
+      // Downcast V. If it fails, an exception is thrown.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ Vector tmp(down_V);
+ tmp *= a;
+ *this += tmp;
+ }
+
+
+
+ void
+ Vector::scale(const VectorSpaceVector<double> &scaling_factors)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&scaling_factors) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast scaling_factors. If fails, throws an exception.
+ const Vector &down_scaling_factors =
+ dynamic_cast<const Vector &>(scaling_factors);
+ Assert(vector->getMap()->isSameAs(
+ *(down_scaling_factors.trilinos_vector().getMap())),
+ ExcDifferentParallelPartitioning());
+
+ vector->elementWiseMultiply(1.,
+ *down_scaling_factors.vector,
+ *vector,
+ 0.);
+ }
+
+
+
+ void
+ Vector::equ(const double a, const VectorSpaceVector<double> &V)
+ {
+ // Check that casting will work.
+ Assert(dynamic_cast<const Vector *>(&V) != nullptr,
+ ExcVectorTypeNotCompatible());
+
+ // Downcast V. If fails, throws an exception.
+ const Vector &down_V = dynamic_cast<const Vector &>(V);
+ // If we don't have the same map, copy.
+ if (vector->getMap()->isSameAs(*down_V.trilinos_vector().getMap()) ==
+ false)
+ this->sadd(0., a, V);
+ else
+ {
+ // Otherwise, just update
+ vector->update(a, down_V.trilinos_vector(), 0.);
+ }
+ }
+
+
+
+ bool
+ Vector::all_zero() const
+ {
+ // get a representation of the vector and
+ // loop over all the elements
+ double * start_ptr = vector->getDataNonConst().get();
+ const double *ptr = start_ptr,
+ *eptr = start_ptr + vector->getLocalLength();
+ unsigned int flag = 0;
+ while (ptr != eptr)
+ {
+ if (*ptr != 0)
+ {
+ flag = 1;
+ break;
+ }
+ ++ptr;
+ }
+
+ // Check that the vector is zero on _all_ processors.
+ const Teuchos::MpiComm<int> *mpi_comm =
+ dynamic_cast<const Teuchos::MpiComm<int> *>(
+ vector->getMap()->getComm().get());
+ Assert(mpi_comm != nullptr, ExcInternalError());
+ unsigned int num_nonzero =
+ Utilities::MPI::sum(flag, *(mpi_comm->getRawMpiComm())());
+
+ return num_nonzero == 0;
+ }
+
+
+
+ double
+ Vector::mean_value() const
+ {
+ return vector->meanValue();
+ }
+
+
+
+ double
+ Vector::l1_norm() const
+ {
+ return vector->norm1();
+ }
+
+
+
+ double
+ Vector::l2_norm() const
+ {
+ return vector->norm2();
+ }
+
+
+
+ double
+ Vector::linfty_norm() const
+ {
+ return vector->normInf();
+ }
+
+
+
+ double
+ Vector::add_and_dot(const double a,
+ const VectorSpaceVector<double> &V,
+ const VectorSpaceVector<double> &W)
+ {
+ this->add(a, V);
+
+ return *this * W;
+ }
+
+
+
+ Vector::size_type
+ Vector::size() const
+ {
+ return vector->getGlobalLength();
+ }
+
+
+
+ MPI_Comm
+ Vector::get_mpi_communicator() const
+ {
+ const auto tpetra_comm = dynamic_cast<const Teuchos::MpiComm<int> *>(
+ vector->getMap()->getComm().get());
+ Assert(tpetra_comm != nullptr, ExcInternalError());
+ return *(tpetra_comm->getRawMpiComm())();
+ }
+
+
+
+ ::dealii::IndexSet
+ Vector::locally_owned_elements() const
+ {
+ IndexSet is(size());
+
+ // easy case: local range is contiguous
+ if (vector->getMap()->isContiguous())
+ {
+          is.add_range(vector->getMap()->getMinGlobalIndex(),
+                       vector->getMap()->getMaxGlobalIndex() + 1);
+ }
+ else if (vector->getLocalLength() > 0)
+ {
+ const size_type n_indices = vector->getLocalLength();
+ auto vector_indices = vector->getMap()->getMyGlobalIndices();
+ is.add_indices((unsigned int *)&vector_indices[0],
+ (unsigned int *)&vector_indices[0] + n_indices);
+ }
+ is.compress();
+
+ return is;
+ }
+
+
+
+ const Tpetra::Vector<> &
+ Vector::trilinos_vector() const
+ {
+ return *vector;
+ }
+
+
+
+ Tpetra::Vector<> &
+ Vector::trilinos_vector()
+ {
+ return *vector;
+ }
+
+
+
+ void
+ Vector::print(std::ostream & out,
+ const unsigned int precision,
+ const bool scientific,
+ const bool across) const
+ {
+ AssertThrow(out, ExcIO());
+ boost::io::ios_flags_saver restore_flags(out);
+
+ // Get a representation of the vector and loop over all
+ // the elements
+ const auto val = vector->get1dView();
+
+ out.precision(precision);
+ if (scientific)
+ out.setf(std::ios::scientific, std::ios::floatfield);
+ else
+ out.setf(std::ios::fixed, std::ios::floatfield);
+
+ vector->sync<Kokkos::HostSpace>();
+ auto vector_2d = vector->getLocalView<Kokkos::HostSpace>();
+ auto vector_1d = Kokkos::subview(vector_2d, Kokkos::ALL(), 0);
+ const size_t local_length = vector->getLocalLength();
+
+ if (across)
+ for (unsigned int i = 0; i < local_length; ++i)
+ out << i << "->" << vector->getMap()->getGlobalElement(i) << ": "
+ << vector_1d(i) << std::endl;
+ else
+ for (unsigned int i = 0; i < local_length; ++i)
+ out << val[i] << std::endl;
+ out << std::endl;
+
+ // restore the representation
+ // of the vector
+ AssertThrow(out, ExcIO());
+ }
+
+
+
+ std::size_t
+ Vector::memory_consumption() const
+ {
+ return sizeof(*this) +
+ vector->getLocalLength() *
+ (sizeof(double) + sizeof(TrilinosWrappers::types::int_type));
+ }
+
+
+
+ void
+ Vector::create_tpetra_comm_pattern(const IndexSet &source_index_set,
+ const MPI_Comm &mpi_comm)
+ {
+ source_stored_elements = source_index_set;
+ tpetra_comm_pattern =
+ std::make_shared<TpetraWrappers::CommunicationPattern>(
+ locally_owned_elements(), source_index_set, mpi_comm);
+ }
+ } // namespace TpetraWrappers
+} // namespace LinearAlgebra
+
+DEAL_II_NAMESPACE_CLOSE
+
+# endif
+
+#endif
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_memory.templates.h>
#include <deal.II/lac/la_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/vector_element_access.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+// Check LinearAlgebra::TpetraWrappers::Vector assignment and import
+
+
+void
+test()
+{
+ IndexSet parallel_partitioner_1(10);
+ IndexSet parallel_partitioner_2(10);
+ unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ if (rank == 0)
+ {
+ parallel_partitioner_1.add_range(0, 5);
+ parallel_partitioner_2.add_range(0, 3);
+ }
+ else
+ {
+ parallel_partitioner_1.add_range(5, 10);
+ parallel_partitioner_2.add_range(3, 10);
+ }
+ parallel_partitioner_1.compress();
+ parallel_partitioner_2.compress();
+ LinearAlgebra::TpetraWrappers::Vector a;
+ LinearAlgebra::TpetraWrappers::Vector b(parallel_partitioner_1,
+ MPI_COMM_WORLD);
+ LinearAlgebra::TpetraWrappers::Vector c(b);
+
+ AssertThrow(a.size() == 0, ExcMessage("Vector has the wrong size."));
+ AssertThrow(b.size() == 10, ExcMessage("Vector has the wrong size."));
+ AssertThrow(c.size() == 10, ExcMessage("Vector has the wrong size."));
+
+ a.reinit(parallel_partitioner_2, MPI_COMM_WORLD);
+ AssertThrow(a.size() == 10, ExcMessage("Vector has the wrong size."));
+
+ AssertThrow(parallel_partitioner_1 == b.locally_owned_elements(),
+ ExcMessage("IndexSet has been modified."));
+ AssertThrow(parallel_partitioner_2 == a.locally_owned_elements(),
+ ExcMessage("IndexSet has been modified."));
+
+ IndexSet read_write_index_set(10);
+ if (rank == 0)
+ read_write_index_set.add_range(0, 6);
+ else
+ read_write_index_set.add_range(4, 10);
+ read_write_index_set.compress();
+
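+ // The two read/write index sets overlap on indices 4 and 5; both
+ // processes write the same values there, so the imports below are
+ // well defined.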
+ LinearAlgebra::ReadWriteVector<double> read_write_1(read_write_index_set);
+ LinearAlgebra::ReadWriteVector<double> read_write_2(read_write_index_set);
+ LinearAlgebra::ReadWriteVector<double> read_write_3(read_write_index_set);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ {
+ read_write_1[i] = i;
+ read_write_2[i] = 5. + i;
+ }
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ {
+ read_write_1[i] = i;
+ read_write_2[i] = 5. + i;
+ }
+ }
+
+ a.import(read_write_2, VectorOperation::insert);
+ AssertThrow(a.size() == 10, ExcMessage("Vector has the wrong size."));
+
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ {
+ AssertThrow(read_write_2[i] == read_write_3[i],
+ ExcMessage("Vector a has been modified."));
+ }
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(read_write_2[i] == read_write_3[i],
+ ExcMessage("Vector a has been modified."));
+ }
+
+ b.import(read_write_1, VectorOperation::insert);
+ AssertThrow(b.size() == 10, ExcMessage("Vector has the wrong size."));
+
+ read_write_3.import(b, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(read_write_1[i] == read_write_3[i],
+ ExcMessage("Vector b has been modified."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(read_write_1[i] == read_write_3[i],
+ ExcMessage("Vector b has been modified."));
+ }
+
+ c.import(read_write_2, VectorOperation::insert);
+ AssertThrow(c.size() == 10, ExcMessage("Vector has the wrong size."));
+
+ read_write_3.import(c, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(read_write_2[i] == read_write_3[i],
+ ExcMessage("Vector c has been modified."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(read_write_2[i] == read_write_3[i],
+ ExcMessage("Vector c has been modified."));
+ }
+
+ a *= 2;
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(2. * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in operator *=."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(2. * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in operator *=."));
+ }
+
+ c /= 2.;
+ read_write_3.import(c, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(0.5 * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in operator /=."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(0.5 * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in operator /=."));
+ }
+
+ b += a;
+ read_write_3.import(b, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(2. * read_write_2[i] + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in operator +=."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(2. * read_write_2[i] + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in operator +=."));
+ }
+
+ b -= c;
+ read_write_3.import(b, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 6; ++i)
+ AssertThrow(1.5 * read_write_2[i] + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in operator -=."));
+ }
+ else
+ {
+ for (unsigned int i = 4; i < 10; ++i)
+ AssertThrow(1.5 * read_write_2[i] + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in operator -=."));
+ }
+
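+ // Fill b and c with the values 0,...,9 and check the dot product:
+ // the sum of i^2 for i=0,...,9 equals 285.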
+ b.import(read_write_1, VectorOperation::insert);
+ c.import(read_write_1, VectorOperation::insert);
+ const double val = b * c;
+ AssertThrow(val == 285., ExcMessage("Problem in operator *."));
+}
+
+
+int
+main(int argc, char **argv)
+{
+ initlog();
+ deallog.depth_console(0);
+
+ Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
+
+ test();
+
+ deallog << "OK" << std::endl;
+
+ return 0;
+}
--- /dev/null
+
+DEAL::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+// Check LinearAlgebra::TpetraWrappers::Vector add and sadd.
+
+void
+test()
+{
+ IndexSet parallel_partitioner_1(10);
+ IndexSet parallel_partitioner_2(10);
+ unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ if (rank == 0)
+ {
+ parallel_partitioner_1.add_range(0, 5);
+ parallel_partitioner_2.add_range(0, 3);
+ }
+ else
+ {
+ parallel_partitioner_1.add_range(5, 10);
+ parallel_partitioner_2.add_range(3, 10);
+ }
+ parallel_partitioner_1.compress();
+ parallel_partitioner_2.compress();
+ LinearAlgebra::TpetraWrappers::Vector a(parallel_partitioner_1,
+ MPI_COMM_WORLD);
+ LinearAlgebra::TpetraWrappers::Vector b(parallel_partitioner_1,
+ MPI_COMM_WORLD);
+ LinearAlgebra::TpetraWrappers::Vector c(parallel_partitioner_2,
+ MPI_COMM_WORLD);
+
+ IndexSet read_write_index_set(10);
+ if (rank == 0)
+ read_write_index_set.add_range(0, 5);
+ else
+ read_write_index_set.add_range(5, 10);
+ read_write_index_set.compress();
+
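+ // In this test the read/write index set coincides with
+ // parallel_partitioner_1, so each process only touches its own entries.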
+ LinearAlgebra::ReadWriteVector<double> read_write_1(read_write_index_set);
+ LinearAlgebra::ReadWriteVector<double> read_write_2(read_write_index_set);
+ LinearAlgebra::ReadWriteVector<double> read_write_3(read_write_index_set);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ {
+ read_write_1[i] = i;
+ read_write_2[i] = 5. + i;
+ }
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ {
+ read_write_1[i] = i;
+ read_write_2[i] = 5. + i;
+ }
+ }
+
+ a.import(read_write_1, VectorOperation::insert);
+ b.import(read_write_2, VectorOperation::insert);
+ c.import(read_write_2, VectorOperation::insert);
+
+ a.add(1.);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(1. + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in add(scalar)."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(1. + read_write_1[i] == read_write_3[i],
+ ExcMessage("Problem in add(scalar)."));
+ }
+
+ a.add(2., b);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(1. + read_write_1[i] + 2. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in add(scalar,Vector)."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(1. + read_write_1[i] + 2. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in add(scalar,Vector)."));
+ }
+
+
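+ // d is a copy of a = 1 + read_write_1 + 2 * read_write_2, so after
+ // a.add(2., b, 3., d) we expect a = 4 + 4 * read_write_1 + 10 * read_write_2.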
+ LinearAlgebra::TpetraWrappers::Vector d(a);
+ a.add(2., b, 3., d);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(4. + 4. * read_write_1[i] + 10. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in add(scalar,Vector,scalar,Vector)."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(4. + 4. * read_write_1[i] + 10. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in add(scalar,Vector,scalar,Vector)."));
+ }
+
+
+ a.import(read_write_1, VectorOperation::insert);
+ a.sadd(3., 2., c);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(3. * read_write_1[i] + 2. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in sadd(scalar,scalar,Vector)."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(3. * read_write_1[i] + 2. * read_write_2[i] ==
+ read_write_3[i],
+ ExcMessage("Problem in sadd(scalar,scalar,Vector)."));
+ }
+
+
+ a.import(read_write_1, VectorOperation::insert);
+ a.scale(b);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(read_write_1[i] * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in scale."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(read_write_1[i] * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in scale."));
+ }
+
+
+ a.equ(2., c);
+ read_write_3.import(a, VectorOperation::insert);
+ if (rank == 0)
+ {
+ for (unsigned int i = 0; i < 5; ++i)
+ AssertThrow(2. * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in scale."));
+ }
+ else
+ {
+ for (unsigned int i = 5; i < 10; ++i)
+ AssertThrow(2. * read_write_2[i] == read_write_3[i],
+ ExcMessage("Problem in equ."));
+ }
+
+
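+ // b holds the values 5,...,14: the l1 norm is 95, the l2 norm is
+ // sqrt(985) ~= 31.3847, and the linfty norm is 14.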
+ AssertThrow(b.l1_norm() == 95., ExcMessage("Problem in l1_norm."));
+
+ const double eps = 1e-6;
+ AssertThrow(std::fabs(b.l2_norm() - 31.3847096) < eps,
+ ExcMessage("Problem in l2_norm"));
+
+ AssertThrow(b.linfty_norm() == 14., ExcMessage("Problem in linfty_norm."));
+
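+ // a is reset to read_write_1; add_and_dot then updates
+ // a += 2 * a = 3 * read_write_1 and returns the dot product with b:
+ // 3 * sum of i*(5+i) for i=0,...,9, which is 1530.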
+ a.import(read_write_1, VectorOperation::insert);
+ const double val = a.add_and_dot(2., a, b);
+ AssertThrow(val == 1530., ExcMessage("Problem in add_and_dot"));
+}
+
+
+int
+main(int argc, char **argv)
+{
+ initlog();
+ deallog.depth_console(0);
+
+ Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
+
+ test();
+
+ deallog << "OK" << std::endl;
+
+ return 0;
+}
--- /dev/null
+
+DEAL::OK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Check LinearAlgebra::TpetraWrappers::Vector::import
+// for VectorOperation::add and check LinearAlgebra::ReadWriteVector
+// for VectorOperation::add/min/max.
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/lac/trilinos_tpetra_vector.h>
+
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+void
+test()
+{
+ unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
+ IndexSet locally_owned(n_procs * 2);
+ locally_owned.add_range(my_id * 2, my_id * 2 + 2);
+ locally_owned.compress();
+
+ LinearAlgebra::TpetraWrappers::Vector v(locally_owned, MPI_COMM_WORLD);
+
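+ // Each process writes to the first element owned by the next process
+ // (wrapping around), to its own second element, and to the globally
+ // shared element 0, so the import with VectorOperation::add has to
+ // combine contributions from several processes.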
+ IndexSet workaround_set(n_procs * 2);
+ unsigned int my_first_index = (my_id * 2 + 2) % (n_procs * 2);
+ unsigned int my_second_index = my_id * 2 + 1;
+ workaround_set.add_index(my_first_index);
+ workaround_set.add_index(my_second_index);
+ workaround_set.add_index(0);
+ workaround_set.compress();
+ LinearAlgebra::ReadWriteVector<double> rw_vector(workaround_set);
+
+ rw_vector(my_first_index) = my_id + 10;
+ rw_vector(my_second_index) = my_id + 100;
+ rw_vector(0) = 1.;
+ // rw_vector(2) = 1.;
+ v.import(rw_vector, VectorOperation::add);
+ deallog << "Tpetra first import add:" << std::endl;
+ v.print(deallog.get_file_stream());
+ rw_vector.print(deallog.get_file_stream());
+
+ rw_vector(my_first_index) = my_id + 20;
+ rw_vector(my_second_index) = my_id + 200;
+ rw_vector(0) = 2.;
+ // rw_vector(2) = 3.;
+ v.import(rw_vector, VectorOperation::add);
+ deallog << "Tpetra second import add:" << std::endl;
+ v.print(deallog.get_file_stream());
+ rw_vector.print(deallog.get_file_stream());
+
+ rw_vector.import(v, VectorOperation::add);
+ deallog << "ReadWrite import add:" << std::endl;
+ rw_vector.print(deallog.get_file_stream());
+
+ rw_vector(my_first_index) = my_id + 100;
+ rw_vector(my_second_index) = 1;
+ rw_vector(0) = 4.;
+ // rw_vector(2) = 3.;
+ rw_vector.import(v, VectorOperation::min);
+ deallog << "ReadWrite import min:" << std::endl;
+ rw_vector.print(deallog.get_file_stream());
+
+ rw_vector(my_first_index) = my_id + 100;
+ rw_vector(my_second_index) = 1;
+ rw_vector(0) = 4.;
+ // rw_vector(2) = 3.;
+ rw_vector.import(v, VectorOperation::max);
+ deallog << "ReadWrite import max:" << std::endl;
+ rw_vector.print(deallog.get_file_stream());
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(
+ argc, argv, testing_max_num_threads());
+
+ MPILogInitAll log;
+ test();
+}
--- /dev/null
+
+DEAL:0::Tpetra first import add:
+0->0: 2.000e+00
+1->1: 1.000e+02
+
+IndexSet: {[0,2]}
+
+[0]: 1.000e+00
+[1]: 1.000e+02
+[2]: 1.000e+01
+DEAL:0::Tpetra second import add:
+0->0: 4.000e+00
+1->1: 2.000e+02
+
+IndexSet: {[0,2]}
+
+[0]: 2.000e+00
+[1]: 2.000e+02
+[2]: 2.000e+01
+DEAL:0::ReadWrite import add:
+IndexSet: {[0,2]}
+
+[0]: 6.000e+00
+[1]: 4.000e+02
+[2]: 5.000e+01
+DEAL:0::ReadWrite import min:
+IndexSet: {[0,2]}
+
+[0]: 4.000e+00
+[1]: 1.000e+00
+[2]: 3.000e+01
+DEAL:0::ReadWrite import max:
+IndexSet: {[0,2]}
+
+[0]: 4.000e+00
+[1]: 2.000e+02
+[2]: 1.000e+02
+
+DEAL:1::Tpetra first import add:
+0->2: 1.000e+01
+1->3: 1.010e+02
+
+IndexSet: {0, 3}
+
+[0]: 1.000e+00
+[3]: 1.010e+02
+DEAL:1::Tpetra second import add:
+0->2: 3.000e+01
+1->3: 2.010e+02
+
+IndexSet: {0, 3}
+
+[0]: 2.000e+00
+[3]: 2.010e+02
+DEAL:1::ReadWrite import add:
+IndexSet: {0, 3}
+
+[0]: 6.000e+00
+[3]: 4.020e+02
+DEAL:1::ReadWrite import min:
+IndexSet: {0, 3}
+
+[0]: 4.000e+00
+[3]: 1.000e+00
+DEAL:1::ReadWrite import max:
+IndexSet: {0, 3}
+
+[0]: 4.000e+00
+[3]: 2.010e+02
+
--- /dev/null
+
+DEAL:0::Tpetra first import add:
+0->0: 4.000e+00
+1->1: 1.000e+02
+
+IndexSet: {[0,2]}
+
+[0]: 1.000e+00
+[1]: 1.000e+02
+[2]: 1.000e+01
+DEAL:0::Tpetra second import add:
+0->0: 8.000e+00
+1->1: 2.000e+02
+
+IndexSet: {[0,2]}
+
+[0]: 2.000e+00
+[1]: 2.000e+02
+[2]: 2.000e+01
+DEAL:0::ReadWrite import add:
+IndexSet: {[0,2]}
+
+[0]: 1.000e+01
+[1]: 4.000e+02
+[2]: 5.000e+01
+DEAL:0::ReadWrite import min:
+IndexSet: {[0,2]}
+
+[0]: 4.000e+00
+[1]: 1.000e+00
+[2]: 3.000e+01
+DEAL:0::ReadWrite import max:
+IndexSet: {[0,2]}
+
+[0]: 8.000e+00
+[1]: 2.000e+02
+[2]: 1.000e+02
+
+DEAL:1::Tpetra first import add:
+0->2: 1.000e+01
+1->3: 1.010e+02
+
+IndexSet: {0, [3,4]}
+
+[0]: 1.000e+00
+[3]: 1.010e+02
+[4]: 1.100e+01
+DEAL:1::Tpetra second import add:
+0->2: 3.000e+01
+1->3: 2.010e+02
+
+IndexSet: {0, [3,4]}
+
+[0]: 2.000e+00
+[3]: 2.010e+02
+[4]: 2.100e+01
+DEAL:1::ReadWrite import add:
+IndexSet: {0, [3,4]}
+
+[0]: 1.000e+01
+[3]: 4.020e+02
+[4]: 5.300e+01
+DEAL:1::ReadWrite import min:
+IndexSet: {0, [3,4]}
+
+[0]: 4.000e+00
+[3]: 1.000e+00
+[4]: 3.200e+01
+DEAL:1::ReadWrite import max:
+IndexSet: {0, [3,4]}
+
+[0]: 8.000e+00
+[3]: 2.010e+02
+[4]: 1.010e+02
+
+
+DEAL:2::Tpetra first import add:
+0->4: 1.100e+01
+1->5: 1.020e+02
+
+IndexSet: {0, [5,6]}
+
+[0]: 1.000e+00
+[5]: 1.020e+02
+[6]: 1.200e+01
+DEAL:2::Tpetra second import add:
+0->4: 3.200e+01
+1->5: 2.020e+02
+
+IndexSet: {0, [5,6]}
+
+[0]: 2.000e+00
+[5]: 2.020e+02
+[6]: 2.200e+01
+DEAL:2::ReadWrite import add:
+IndexSet: {0, [5,6]}
+
+[0]: 1.000e+01
+[5]: 4.040e+02
+[6]: 5.600e+01
+DEAL:2::ReadWrite import min:
+IndexSet: {0, [5,6]}
+
+[0]: 4.000e+00
+[5]: 1.000e+00
+[6]: 3.400e+01
+DEAL:2::ReadWrite import max:
+IndexSet: {0, [5,6]}
+
+[0]: 8.000e+00
+[5]: 2.020e+02
+[6]: 1.020e+02
+
+
+DEAL:3::Tpetra first import add:
+0->6: 1.200e+01
+1->7: 1.030e+02
+
+IndexSet: {0, 7}
+
+[0]: 1.000e+00
+[7]: 1.030e+02
+DEAL:3::Tpetra second import add:
+0->6: 3.400e+01
+1->7: 2.030e+02
+
+IndexSet: {0, 7}
+
+[0]: 2.000e+00
+[7]: 2.030e+02
+DEAL:3::ReadWrite import add:
+IndexSet: {0, 7}
+
+[0]: 1.000e+01
+[7]: 4.060e+02
+DEAL:3::ReadWrite import min:
+IndexSet: {0, 7}
+
+[0]: 4.000e+00
+[7]: 1.000e+00
+DEAL:3::ReadWrite import max:
+IndexSet: {0, 7}
+
+[0]: 8.000e+00
+[7]: 2.030e+02
+