)
FOREACH(_module
- Amesos Epetra Ifpack AztecOO Teuchos Tpetra ML MueLu
+ Amesos Epetra Ifpack AztecOO Teuchos ML MueLu
)
ITEM_MATCHES(_module_found ${_module} ${Trilinos_PACKAGE_LIST})
IF(_module_found)
CHECK_MPI_INTERFACE(TRILINOS ${var})
IF (${var})
- FOREACH(_optional_module EpetraExt ROL Sacado Zoltan)
+ FOREACH(_optional_module EpetraExt ROL Sacado Tpetra Zoltan)
ITEM_MATCHES(_module_found ${_optional_module} ${Trilinos_PACKAGE_LIST})
IF(_module_found)
MESSAGE(STATUS "Found ${_optional_module}")
SET(DEAL_II_EXPAND_TRILINOS_MPI_VECTOR "TrilinosWrappers::MPI::Vector")
IF (TRILINOS_WITH_MPI)
SET(DEAL_II_EXPAND_EPETRA_VECTOR "LinearAlgebra::EpetraWrappers::Vector")
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<double>")
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_FLOAT "LinearAlgebra::TpetraWrappers::Vector<float>")
+ IF (${DEAL_II_TRILINOS_WITH_TPETRA})
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<double>")
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_FLOAT "LinearAlgebra::TpetraWrappers::Vector<float>")
+ ENDIF()
ENDIF()
IF(${DEAL_II_TRILINOS_WITH_SACADO})
# Note: Only CMake 3.0 and greater support line continuation with the "\" character
#cmakedefine DEAL_II_TRILINOS_WITH_EPETRAEXT
#cmakedefine DEAL_II_TRILINOS_WITH_ROL
#cmakedefine DEAL_II_TRILINOS_WITH_SACADO
+#cmakedefine DEAL_II_TRILINOS_WITH_TPETRA
#cmakedefine DEAL_II_TRILINOS_WITH_ZOLTAN
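The new #cmakedefine makes the optional Tpetra support visible to downstream code at compile time. As a minimal sketch of how user code might test for it (the helper function below is purely illustrative and not part of deal.II):

#include <deal.II/base/config.h>

#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_TRILINOS_WITH_TPETRA)
#  include <deal.II/lac/trilinos_tpetra_vector.h>
#endif

// Illustrative helper: report whether this deal.II build provides the
// Tpetra wrappers.
constexpr bool have_tpetra_wrappers()
{
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
  return true;
#else
  return false;
#endif
}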
#ifdef DEAL_II_WITH_TRILINOS
# include <Epetra_Map.h>
-# include <Tpetra_Map.hpp>
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# include <Tpetra_Map.hpp>
+# endif
#endif
#if defined(DEAL_II_WITH_MPI) || defined(DEAL_II_WITH_PETSC)
make_trilinos_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
const bool overlapping = false) const;
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
Tpetra::Map<int, types::global_dof_index>
make_tpetra_map(const MPI_Comm &communicator = MPI_COMM_WORLD,
const bool overlapping = false) const;
+# endif
#endif
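A usage sketch for the guarded declaration above, assuming a Trilinos build with both MPI and Tpetra enabled (the index layout is made up for illustration):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#ifdef DEAL_II_TRILINOS_WITH_TPETRA
void build_tpetra_map(const MPI_Comm &communicator)
{
  const unsigned int n_ranks =
    dealii::Utilities::MPI::n_mpi_processes(communicator);
  const unsigned int rank =
    dealii::Utilities::MPI::this_mpi_process(communicator);

  // Contiguous, non-overlapping ownership: ten indices per rank
  // (illustrative only).
  dealii::IndexSet owned(10 * n_ranks);
  owned.add_range(10 * rank, 10 * (rank + 1));

  // A non-overlapping IndexSet yields a one-to-one Tpetra::Map.
  const Tpetra::Map<int, dealii::types::global_dof_index> map =
    owned.make_tpetra_map(communicator, /*overlapping=*/false);
  (void)map;
}
#endif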
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename ForwardIterator, typename Number>
static void
extract_subvector_to(
for (unsigned int i = 0; i < cache_size; ++i, ++local_values_begin)
*local_values_begin = read_write_vector[sorted_indices_pos[i]];
}
+# endif
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <int dim, int spacedim, typename Number>
void
reinit_distributed(const DoFHandler<dim, spacedim> & dh,
const IndexSet &locally_owned_dofs = dh.locally_owned_dofs();
vector.reinit(locally_owned_dofs, parallel_tria->get_communicator());
}
+# endif
template <int dim, int spacedim>
void
AssertThrow(false, ExcNotImplemented());
}
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <int dim, int spacedim, typename Number>
void
back_interpolate(
{
AssertThrow(false, ExcNotImplemented());
}
+# endif
#endif
std::shared_ptr<const CommunicationPatternBase>());
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
/**
* Imports all the elements present in the vector's IndexSet from the input
* vector @p tpetra_vec. VectorOperation::values @p operation is used to
const std::shared_ptr<const CommunicationPatternBase>
&communication_pattern =
std::shared_ptr<const CommunicationPatternBase>());
+# endif
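A minimal sketch of the import path declared above (MPI and Tpetra enabled; the partitioning is illustrative):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/read_write_vector.h>
#include <deal.II/lac/trilinos_tpetra_vector.h>

#ifdef DEAL_II_TRILINOS_WITH_TPETRA
void copy_from_tpetra(const dealii::IndexSet &locally_owned,
                      const MPI_Comm &        communicator)
{
  // A distributed, Tpetra-backed vector (zero-initialized by reinit).
  dealii::LinearAlgebra::TpetraWrappers::Vector<double> tpetra_vec;
  tpetra_vec.reinit(locally_owned, communicator);

  // Pull the owned entries into a ReadWriteVector; VectorOperation::insert
  // overwrites the local elements, VectorOperation::add would accumulate.
  dealii::LinearAlgebra::ReadWriteVector<double> rw_vector(locally_owned);
  rw_vector.import(tpetra_vec, dealii::VectorOperation::insert);
}
#endif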
/**
* Imports all the elements present in the vector's IndexSet from the input
protected:
#ifdef DEAL_II_WITH_TRILINOS
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
/**
* Import all the elements present in the vector's IndexSet from the input
* vector @p tpetra_vector. This is a helper function and it should not be
const MPI_Comm & mpi_comm,
const std::shared_ptr<const CommunicationPatternBase>
&communication_pattern);
+# endif
/**
* Import all the elements present in the vector's IndexSet from the input
resize_val(const size_type new_allocated_size);
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
/**
* Return a TpetraWrappers::CommunicationPattern and store it for future
* use.
TpetraWrappers::CommunicationPattern
create_tpetra_comm_pattern(const IndexSet &source_index_set,
const MPI_Comm &mpi_comm);
+# endif
/**
* Return an EpetraWrappers::CommunicationPattern and store it for future
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
void
ReadWriteVector<Number>::import(
AssertThrow(false, ExcNotImplemented());
}
}
+# endif
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
void
ReadWriteVector<Number>::import(
trilinos_vec.get_mpi_communicator(),
communication_pattern);
}
+# endif
#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
TpetraWrappers::CommunicationPattern
ReadWriteVector<Number>::create_tpetra_comm_pattern(
return epetra_comm_pattern;
}
+# endif
+
template <typename Number>
#include <deal.II/base/config.h>
-#ifdef DEAL_II_WITH_TRILINOS
+#if defined(DEAL_II_TRILINOS_WITH_TPETRA) && defined(DEAL_II_WITH_MPI)
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/lac/communication_pattern_base.h>
-# include <deal.II/lac/communication_pattern_base.h>
+# include <Tpetra_Export.hpp>
+# include <Tpetra_Import.hpp>
-# include <Tpetra_Export.hpp>
-# include <Tpetra_Import.hpp>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
#endif
#include <deal.II/base/config.h>
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_TRILINOS_WITH_TPETRA) && defined(DEAL_II_WITH_MPI)
# include <deal.II/base/index_set.h>
# include <deal.II/base/subscriptor.h>
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <>
inline void
ElementAccess<LinearAlgebra::TpetraWrappers::Vector<double>>::add(
// We're going to modify the data on host.
return vector_1d(trilinos_i);
}
+# endif
#endif
} // namespace internal
};
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
struct MatrixSelector<dealii::LinearAlgebra::TpetraWrappers::Vector<Number>>
{
true);
}
};
+# endif
template <>
struct MatrixSelector<dealii::LinearAlgebra::EpetraWrappers::Vector>
#ifdef DEAL_II_WITH_TRILINOS
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
Tpetra::Map<int, types::global_dof_index>
IndexSet::make_tpetra_map(const MPI_Comm &communicator,
compress();
(void)communicator;
-# ifdef DEBUG
+# ifdef DEBUG
if (!overlapping)
{
const size_type n_global_elements =
"by any processor, or there are indices that are "
"claimed by multiple processors."));
}
-# endif
+# endif
// Find out if the IndexSet is ascending and 1:1. This corresponds to a
// linear Tpetra::Map. Overlapping IndexSets are never 1:1.
size(),
n_elements(),
0,
-# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_WITH_MPI
Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
-# else
+# else
Teuchos::rcp(new Teuchos::Comm<int>())
-# endif
+# endif
);
else
{
size(),
arr_view,
0,
-# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_WITH_MPI
Teuchos::rcp(new Teuchos::MpiComm<int>(communicator))
-# else
+# else
Teuchos::rcp(new Teuchos::Comm<int>())
-# endif
+# endif
);
}
}
+# endif
#endif
-
bool
IndexSet::is_ascending_and_one_to_one(const MPI_Comm &communicator) const
{
return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
}
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
Number *
begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
return V.trilinos_vector().getData().get() +
V.trilinos_vector().getLocalLength();
}
+# endif
# endif
} // namespace internal
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::vmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::vmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
+# endif
template void
SparseMatrix::vmult(
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::Tvmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::Tvmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
+# endif
template void
SparseMatrix::Tvmult(
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::vmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::vmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
+# endif
template void
SparseMatrix::vmult_add(
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
# ifdef DEAL_II_WITH_MPI
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
+# endif
template void
SparseMatrix::Tvmult_add(
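A short sketch exercising the new Tpetra instantiations above; the matrix is assumed to be assembled elsewhere and the vectors partitioned compatibly:

#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_tpetra_vector.h>

#ifdef DEAL_II_TRILINOS_WITH_TPETRA
void apply_matrix(
  const dealii::TrilinosWrappers::SparseMatrix &               A,
  const dealii::LinearAlgebra::TpetraWrappers::Vector<double> &x,
  dealii::LinearAlgebra::TpetraWrappers::Vector<double> &      y)
{
  A.vmult(y, x);     // y  = A x
  A.vmult_add(y, x); // y += A x
}
#endif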
#include <deal.II/lac/trilinos_tpetra_communication_pattern.h>
-#ifdef DEAL_II_WITH_TRILINOS
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
# ifdef DEAL_II_WITH_MPI
#include <deal.II/lac/trilinos_tpetra_vector.h>
-#ifdef DEAL_II_WITH_TRILINOS
+#ifdef DEAL_II_TRILINOS_WITH_TPETRA
# ifdef DEAL_II_WITH_MPI