# Configuration for the Trilinos library:
#
+SET(FEATURE_TRILINOS_DEPENDS MPI)
MACRO(FEATURE_TRILINOS_FIND_EXTERNAL var)
FIND_PACKAGE(TRILINOS)
- # Trilinos has to be configured with the same MPI configuration as
- # deal.II.
+ # Trilinos has to be configured with MPI support.
#
- IF( (TRILINOS_WITH_MPI AND NOT DEAL_II_WITH_MPI)
- OR
- (NOT TRILINOS_WITH_MPI AND DEAL_II_WITH_MPI))
+ IF(NOT TRILINOS_WITH_MPI)
MESSAGE(STATUS "Could not find a sufficient Trilinos installation: "
- "Trilinos has to be configured with the same MPI configuration as deal.II."
+ "Trilinos has to have MPI support enabled."
)
SET(TRILINOS_ADDITIONAL_ERROR_STRING
${TRILINOS_ADDITIONAL_ERROR_STRING}
"The Trilinos installation (found at \"${TRILINOS_DIR}\")\n"
- "has to be configured with the same MPI configuration as deal.II, but found:\n"
- " DEAL_II_WITH_MPI = ${DEAL_II_WITH_MPI}\n"
+ "has to be configured with MPI support, but found:\n"
" TRILINOS_WITH_MPI = ${TRILINOS_WITH_MPI}\n"
)
SET(${var} FALSE)
ENDIF()
- #
- # deal.II has to be configured with MPI if both Trilinos and PETSc are
- # enabled.
- #
- IF(DEAL_II_WITH_TRILINOS AND DEAL_II_WITH_PETSC AND NOT DEAL_II_WITH_MPI)
- MESSAGE(STATUS "Incompatible configuration settings: "
- "MPI must be enabled to use both Trilinos and PETSc, as both libraries "
- "provide mutually incompatible MPI stubs."
- )
- SET(TRILINOS_ADDITIONAL_ERROR_STRING
- ${TRILINOS_ADDITIONAL_ERROR_STRING}
- "Incompatible Trilinos and PETSc libraries found. Both libraries were "
- "configured without MPI support and cannot be used at the same time due "
- "to incompatible MPI stub files. Either reconfigure deal.II, Trilinos, "
- "and PETSc with MPI support, or disable one of the libraries.\n"
- )
- SET(${var} FALSE)
- ENDIF()
-
#
# Trilinos has to be configured with 32bit indices if deal.II uses
# unsigned int.
"TrilinosWrappers::BlockSparseMatrix")
SET(DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR "TrilinosWrappers::MPI::BlockVector")
SET(DEAL_II_EXPAND_TRILINOS_MPI_VECTOR "TrilinosWrappers::MPI::Vector")
- IF (TRILINOS_WITH_MPI)
- SET(DEAL_II_EXPAND_EPETRA_VECTOR "LinearAlgebra::EpetraWrappers::Vector")
- IF (${DEAL_II_TRILINOS_WITH_TPETRA})
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<double>")
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_FLOAT "LinearAlgebra::TpetraWrappers::Vector<float>")
- IF (${DEAL_II_WITH_COMPLEX_NUMBERS})
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_COMPLEX_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<std::complex<double>>")
- SET(DEAL_II_EXPAND_TPETRA_VECTOR_COMPLEX_FLOAT "LinearAlgebra::TpetraWrappers::Vector<std::complex<float>>")
- ENDIF()
+ SET(DEAL_II_EXPAND_EPETRA_VECTOR "LinearAlgebra::EpetraWrappers::Vector")
+ IF (${DEAL_II_TRILINOS_WITH_TPETRA})
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<double>")
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_FLOAT "LinearAlgebra::TpetraWrappers::Vector<float>")
+ IF (${DEAL_II_WITH_COMPLEX_NUMBERS})
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_COMPLEX_DOUBLE "LinearAlgebra::TpetraWrappers::Vector<std::complex<double>>")
+ SET(DEAL_II_EXPAND_TPETRA_VECTOR_COMPLEX_FLOAT "LinearAlgebra::TpetraWrappers::Vector<std::complex<float>>")
ENDIF()
ENDIF()
IF(${DEAL_II_TRILINOS_WITH_SACADO})
--- /dev/null
+Changed: Trilinos support in deal.II requires both deal.II and Trilinos to be
+configured with MPI support.
+<br>
+(Daniel Arndt, 2021/06/02)
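
For downstream code, a minimal sketch of the practical consequence (the
function name and the scaling operation below are illustrative only): since
DEAL_II_WITH_TRILINOS now implies DEAL_II_WITH_MPI, Trilinos-specific code no
longer needs a separate MPI guard.

#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_TRILINOS
// A nested "#ifdef DEAL_II_WITH_MPI" used to be required here; it is now
// redundant because Trilinos support implies MPI support.
#  include <deal.II/lac/trilinos_vector.h>

void scale_trilinos_vector(dealii::TrilinosWrappers::MPI::Vector &v)
{
  v *= 2.0; // any Trilinos-specific operation
}
#endif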
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
static std::vector<unsigned int>
sort_indices(const types::global_dof_index *v_begin,
const types::global_dof_index *v_end)
#ifdef DEAL_II_WITH_TRILINOS
-# ifdef DEAL_II_WITH_MPI
/**
* Initialize this ReadWriteVector by supplying access to all locally
* available entries in the given ghosted or non-ghosted vector.
*/
void
reinit(const TrilinosWrappers::MPI::Vector &trilinos_vec);
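//
// A minimal usage sketch (variable names are illustrative): make the locally
// available entries of a distributed Trilinos vector accessible through a
// ReadWriteVector.
//
//   TrilinosWrappers::MPI::Vector distributed_vector = ...;
//   LinearAlgebra::ReadWriteVector<double> rw_vector;
//   rw_vector.reinit(distributed_vector);
//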
-# endif
#endif
/**
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern = {});
-# ifdef DEAL_II_WITH_MPI
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
/**
* Imports all the elements present in the vector's IndexSet from the input
* vector @p tpetra_vec. VectorOperation::values @p operation is used to
VectorOperation::values operation,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern = {});
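//
// A minimal usage sketch, assuming this declaration is the import() overload
// taking a TpetraWrappers vector (names below are illustrative):
//
//   LinearAlgebra::TpetraWrappers::Vector<double> tpetra_vec = ...;
//   LinearAlgebra::ReadWriteVector<double>        rw_vector;
//   rw_vector.import(tpetra_vec, VectorOperation::insert);
//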
-# endif
+# endif
/**
* Imports all the elements present in the vector's IndexSet from the input
VectorOperation::values operation,
const std::shared_ptr<const Utilities::MPI::CommunicationPatternBase>
&communication_pattern = {});
-# endif
#endif
#ifdef DEAL_II_WITH_CUDA
void
resize_val(const size_type new_allocated_size);
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
/**
* Return a TpetraWrappers::CommunicationPattern and store it for future
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
template <typename Number>
void
ReadWriteVector<Number>::reinit(
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
void
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
TpetraWrappers::CommunicationPattern
#ifdef DEAL_II_WITH_TRILINOS
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/base/communication_pattern_base.h>
-# include <deal.II/base/communication_pattern_base.h>
+# include <Epetra_Import.h>
-# include <Epetra_Import.h>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
#endif
#include <deal.II/base/config.h>
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
# include <deal.II/base/index_set.h>
# include <deal.II/base/subscriptor.h>
# include <deal.II/lac/la_parallel_vector.h>
# include <deal.II/lac/trilinos_vector.h>
-# include <memory>
-
-# ifdef DEAL_II_WITH_MPI
-# include <Epetra_MpiComm.h>
-# else
-# include <Epetra_SerialComm.h>
-# endif
# include <Epetra_Map.h>
+# include <Epetra_MpiComm.h>
# include <Epetra_MultiVector.h>
# include <Epetra_RowMatrix.h>
# include <Epetra_Vector.h>
# include <Teuchos_ParameterList.hpp>
+# include <memory>
+
// forward declarations
# ifndef DOXYGEN
class Ifpack_Preconditioner;
* Internal communication pattern in case the matrix needs to be copied
* from deal.II format.
*/
-# ifdef DEAL_II_WITH_MPI
Epetra_MpiComm communicator;
-# else
- Epetra_SerialComm communicator;
-# endif
/**
* Internal Trilinos map in case the matrix needs to be copied from
# include <Epetra_Export.h>
# include <Epetra_FECrsMatrix.h>
# include <Epetra_Map.h>
+# include <Epetra_MpiComm.h>
# include <Epetra_MultiVector.h>
# include <Epetra_Operator.h>
+# include <mpi.h>
# include <cmath>
# include <memory>
# include <type_traits>
# include <vector>
-# ifdef DEAL_II_WITH_MPI
-# include <Epetra_MpiComm.h>
-# include <mpi.h>
-# else
-# include <Epetra_SerialComm.h>
-# endif
DEAL_II_NAMESPACE_OPEN
* Internal communication pattern in case the matrix needs to be copied
* from deal.II format.
*/
-# ifdef DEAL_II_WITH_MPI
Epetra_MpiComm communicator;
-# else
- Epetra_SerialComm communicator;
-# endif
/**
* Epetra_Map that sets the partitioning of the domain space of
# include <Epetra_FECrsGraph.h>
# include <Epetra_Map.h>
+# include <Epetra_MpiComm.h>
+# include <mpi.h>
# include <cmath>
# include <memory>
# include <vector>
-# ifdef DEAL_II_WITH_MPI
-# include <Epetra_MpiComm.h>
-# include <mpi.h>
-# else
-# include <Epetra_SerialComm.h>
-# endif
DEAL_II_NAMESPACE_OPEN
#include <deal.II/base/config.h>
-#if defined(DEAL_II_TRILINOS_WITH_TPETRA) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_TRILINOS_WITH_TPETRA)
# include <deal.II/base/communication_pattern_base.h>
#include <deal.II/base/config.h>
-#if defined(DEAL_II_TRILINOS_WITH_TPETRA) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_TRILINOS_WITH_TPETRA)
# include <deal.II/base/index_set.h>
# include <deal.II/base/subscriptor.h>
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/base/index_set.h>
-# include <deal.II/base/index_set.h>
+# include <deal.II/lac/read_write_vector.h>
-# include <deal.II/lac/read_write_vector.h>
+# include <boost/io/ios_state.hpp>
-# include <boost/io/ios_state.hpp>
+# include <Teuchos_DefaultMpiComm.hpp>
+# include <Tpetra_Import_def.hpp>
+# include <Tpetra_Map_def.hpp>
-# include <Teuchos_DefaultMpiComm.hpp>
-# include <Tpetra_Import_def.hpp>
-# include <Tpetra_Map_def.hpp>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
#endif
# include <deal.II/lac/vector_type_traits.h>
# include <Epetra_ConfigDefs.h>
+# include <Epetra_FEVector.h>
+# include <Epetra_LocalMap.h>
+# include <Epetra_Map.h>
+# include <Epetra_MpiComm.h>
+# include <mpi.h>
# include <memory>
# include <utility>
# include <vector>
-# ifdef DEAL_II_WITH_MPI // only if MPI is installed
-# include <Epetra_MpiComm.h>
-# include <mpi.h>
-# else
-# include <Epetra_SerialComm.h>
-# endif
-# include <Epetra_FEVector.h>
-# include <Epetra_LocalMap.h>
-# include <Epetra_Map.h>
DEAL_II_NAMESPACE_OPEN
{
static MPI_Comm comm;
-# ifdef DEAL_II_WITH_MPI
-
const Epetra_MpiComm *mpi_comm =
dynamic_cast<const Epetra_MpiComm *>(&vector->Map().Comm());
comm = mpi_comm->Comm();
-# else
-
- comm = MPI_COMM_SELF;
-
-# endif
-
return comm;
}
-#if defined(DEAL_II_WITH_TRILINOS) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_WITH_TRILINOS)
template <>
inline void
ElementAccess<LinearAlgebra::EpetraWrappers::Vector>::add(
-#if defined(DEAL_II_TRILINOS_WITH_TPETRA) && defined(DEAL_II_WITH_MPI)
+#if defined(DEAL_II_TRILINOS_WITH_TPETRA)
template <>
inline void
VectorHelper<LinearAlgebra::TpetraWrappers::Vector<double>>::extract(
#ifdef DEAL_II_WITH_TRILINOS
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/base/index_set.h>
-# include <deal.II/base/index_set.h>
+# include <Epetra_Map.h>
-# include <Epetra_Map.h>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
#ifdef DEAL_II_WITH_TRILINOS
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/base/index_set.h>
-# include <deal.II/base/index_set.h>
+# include <deal.II/lac/read_write_vector.h>
-# include <deal.II/lac/read_write_vector.h>
+# include <boost/io/ios_state.hpp>
-# include <boost/io/ios_state.hpp>
+# include <Epetra_Import.h>
+# include <Epetra_Map.h>
+# include <Epetra_MpiComm.h>
-# include <Epetra_Import.h>
-# include <Epetra_Map.h>
-# include <Epetra_MpiComm.h>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
Assert(this->size() == down_V.size(),
ExcDimensionMismatch(this->size(), down_V.size()));
-# if DEAL_II_TRILINOS_VERSION_GTE(11, 11, 0)
+# if DEAL_II_TRILINOS_VERSION_GTE(11, 11, 0)
Epetra_Import data_exchange(vector->Map(),
down_V.trilinos_vector().Map());
const int ierr = vector->Import(down_V.trilinos_vector(),
Epetra_AddLocalAlso);
Assert(ierr == 0, ExcTrilinosError(ierr));
(void)ierr;
-# else
+# else
// In versions older than 11.11 the Import function is broken for
// adding. Hence, we provide a workaround in this case.
ierr = vector->Update(1.0, dummy, 1.0);
Assert(ierr == 0, ExcTrilinosError(ierr));
(void)ierr;
-# endif
+# endif
}
return *this;
Vector::size_type
Vector::size() const
{
-# ifndef DEAL_II_WITH_64BIT_INDICES
+# ifndef DEAL_II_WITH_64BIT_INDICES
return vector->GlobalLength();
-# else
+# else
return vector->GlobalLength64();
-# endif
+# endif
}
// easy case: local range is contiguous
if (vector->Map().LinearMap())
{
-# ifndef DEAL_II_WITH_64BIT_INDICES
+# ifndef DEAL_II_WITH_64BIT_INDICES
is.add_range(vector->Map().MinMyGID(), vector->Map().MaxMyGID() + 1);
-# else
+# else
is.add_range(vector->Map().MinMyGID64(),
vector->Map().MaxMyGID64() + 1);
-# endif
+# endif
}
else if (vector->Map().NumMyElements() > 0)
{
const size_type n_indices = vector->Map().NumMyElements();
-# ifndef DEAL_II_WITH_64BIT_INDICES
+# ifndef DEAL_II_WITH_64BIT_INDICES
unsigned int *vector_indices =
reinterpret_cast<unsigned int *>(vector->Map().MyGlobalElements());
-# else
+# else
size_type *vector_indices =
reinterpret_cast<size_type *>(vector->Map().MyGlobalElements64());
-# endif
+# endif
is.add_indices(vector_indices, vector_indices + n_indices);
}
is.compress();
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
namespace TrilinosWrappers
{
PreconditionBase::PreconditionBase()
-# ifdef DEAL_II_WITH_MPI
: communicator(MPI_COMM_SELF)
-# endif
{}
PreconditionBase::PreconditionBase(const PreconditionBase &base)
: Subscriptor()
, preconditioner(base.preconditioner)
- ,
-# ifdef DEAL_II_WITH_MPI
- communicator(base.communicator)
- ,
-# endif
- vector_distributor(new Epetra_Map(*base.vector_distributor))
+ , communicator(base.communicator)
+ , vector_distributor(new Epetra_Map(*base.vector_distributor))
{}
PreconditionBase::clear()
{
preconditioner.reset();
-# ifdef DEAL_II_WITH_MPI
communicator = MPI_COMM_SELF;
-# endif
vector_distributor.reset();
}
MPI_Comm
PreconditionBase::get_mpi_communicator() const
{
-# ifdef DEAL_II_WITH_MPI
return communicator.Comm();
-# else
- return MPI_COMM_SELF;
-# endif
}
return V.end();
}
-# ifdef DEAL_II_WITH_MPI
template <>
double *
begin(LinearAlgebra::EpetraWrappers::Vector &V)
return V.trilinos_vector()[0] + V.trilinos_vector().MyLength();
}
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template <typename Number>
Number *
begin(LinearAlgebra::TpetraWrappers::Vector<Number> &V)
return V.trilinos_vector().getData().get() +
V.trilinos_vector().getLocalLength();
}
-# endif
# endif
} // namespace internal
MPI_Comm
SparseMatrix::get_mpi_communicator() const
{
-# ifdef DEAL_II_WITH_MPI
-
const Epetra_MpiComm *mpi_comm =
dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
Assert(mpi_comm != nullptr, ExcInternalError());
return mpi_comm->Comm();
-# else
-
- return MPI_COMM_SELF;
-
-# endif
}
} // namespace TrilinosWrappers
{
namespace internal
{
- namespace
- {
-# ifndef DEAL_II_WITH_MPI
- Epetra_Map
- make_serial_Epetra_map(const IndexSet &serial_partitioning)
- {
- // See IndexSet::make_trilinos_map
- return Epetra_Map(
- TrilinosWrappers::types::int_type(serial_partitioning.size()),
- TrilinosWrappers::types::int_type(serial_partitioning.n_elements()),
- 0,
- Epetra_SerialComm());
- }
-# endif
- } // namespace
-
namespace LinearOperatorImplementation
{
TrilinosPayload::TrilinosPayload()
: use_transpose(false)
- ,
-# ifdef DEAL_II_WITH_MPI
- communicator(MPI_COMM_SELF)
+ , communicator(MPI_COMM_SELF)
, domain_map(IndexSet().make_trilinos_map(communicator.Comm()))
, range_map(IndexSet().make_trilinos_map(communicator.Comm()))
-# else
- domain_map(internal::make_serial_Epetra_map(IndexSet()))
- , range_map(internal::make_serial_Epetra_map(IndexSet()))
-# endif
{
vmult = [](Range &, const Domain &) {
Assert(false,
const TrilinosWrappers::SparseMatrix &matrix_exemplar,
const TrilinosWrappers::SparseMatrix &matrix)
: use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
- ,
-# ifdef DEAL_II_WITH_MPI
- communicator(matrix_exemplar.get_mpi_communicator())
+ , communicator(matrix_exemplar.get_mpi_communicator())
, domain_map(
matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
communicator.Comm()))
, range_map(
matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
communicator.Comm()))
-# else
- domain_map(internal::make_serial_Epetra_map(
- matrix_exemplar.locally_owned_domain_indices()))
- , range_map(internal::make_serial_Epetra_map(
- matrix_exemplar.locally_owned_range_indices()))
-# endif
{
vmult = [&matrix_exemplar, &matrix](Range & tril_dst,
const Domain &tril_src) {
const TrilinosWrappers::SparseMatrix & matrix_exemplar,
const TrilinosWrappers::PreconditionBase &preconditioner)
: use_transpose(matrix_exemplar.trilinos_matrix().UseTranspose())
- ,
-# ifdef DEAL_II_WITH_MPI
- communicator(matrix_exemplar.get_mpi_communicator())
+ , communicator(matrix_exemplar.get_mpi_communicator())
, domain_map(
matrix_exemplar.locally_owned_domain_indices().make_trilinos_map(
communicator.Comm()))
, range_map(
matrix_exemplar.locally_owned_range_indices().make_trilinos_map(
communicator.Comm()))
-# else
- domain_map(internal::make_serial_Epetra_map(
- matrix_exemplar.locally_owned_domain_indices()))
- , range_map(internal::make_serial_Epetra_map(
- matrix_exemplar.locally_owned_range_indices()))
-# endif
{
vmult = [&matrix_exemplar, &preconditioner](Range & tril_dst,
const Domain &tril_src) {
const TrilinosWrappers::PreconditionBase &preconditioner)
: use_transpose(
preconditioner_exemplar.trilinos_operator().UseTranspose())
- ,
-# ifdef DEAL_II_WITH_MPI
- communicator(preconditioner_exemplar.get_mpi_communicator())
+ , communicator(preconditioner_exemplar.get_mpi_communicator())
, domain_map(preconditioner_exemplar.locally_owned_domain_indices()
.make_trilinos_map(communicator.Comm()))
, range_map(preconditioner_exemplar.locally_owned_range_indices()
.make_trilinos_map(communicator.Comm()))
-# else
- domain_map(internal::make_serial_Epetra_map(
- preconditioner_exemplar.locally_owned_domain_indices()))
- , range_map(internal::make_serial_Epetra_map(
- preconditioner_exemplar.locally_owned_range_indices()))
-# endif
{
vmult = [&preconditioner_exemplar,
&preconditioner](Range &tril_dst, const Domain &tril_src) {
MPI_Comm
TrilinosPayload::get_mpi_communicator() const
{
-# ifdef DEAL_II_WITH_MPI
return communicator.Comm();
-# else
- return MPI_COMM_SELF;
-# endif
}
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
-# ifdef DEAL_II_WITH_MPI
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::vmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::vmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
-# endif
+# endif
template void
SparseMatrix::vmult(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
-# endif
template void
SparseMatrix::Tvmult(MPI::Vector &, const MPI::Vector &) const;
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
-# ifdef DEAL_II_WITH_MPI
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::Tvmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::Tvmult(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
-# endif
+# endif
template void
SparseMatrix::Tvmult(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
-# endif
template void
SparseMatrix::vmult_add(MPI::Vector &, const MPI::Vector &) const;
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
-# ifdef DEAL_II_WITH_MPI
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::vmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::vmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
-# endif
+# endif
template void
SparseMatrix::vmult_add(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
-# endif
template void
SparseMatrix::Tvmult_add(MPI::Vector &, const MPI::Vector &) const;
dealii::LinearAlgebra::distributed::Vector<double> &,
const dealii::LinearAlgebra::distributed::Vector<double> &) const;
-# ifdef DEAL_II_WITH_MPI
-# ifdef DEAL_II_TRILINOS_WITH_TPETRA
+# ifdef DEAL_II_TRILINOS_WITH_TPETRA
template void
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<double> &,
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::TpetraWrappers::Vector<float> &,
const dealii::LinearAlgebra::TpetraWrappers::Vector<float> &) const;
-# endif
+# endif
template void
SparseMatrix::Tvmult_add(
dealii::LinearAlgebra::EpetraWrappers::Vector &,
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
-# endif
} // namespace TrilinosWrappers
# endif // DOXYGEN
MPI_Comm
SparsityPattern::get_mpi_communicator() const
{
-# ifdef DEAL_II_WITH_MPI
-
const Epetra_MpiComm *mpi_comm =
dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
Assert(mpi_comm != nullptr, ExcInternalError());
return mpi_comm->Comm();
-# else
-
- return MPI_COMM_SELF;
-
-# endif
}
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
-# ifdef DEAL_II_WITH_MPI
+# include <deal.II/base/index_set.h>
-# include <deal.II/base/index_set.h>
+# include <Tpetra_Map.hpp>
-# include <Tpetra_Map.hpp>
-
-# include <memory>
+# include <memory>
DEAL_II_NAMESPACE_OPEN
DEAL_II_NAMESPACE_CLOSE
-# endif
-
#endif
#include <deal.II/lac/trilinos_tpetra_vector.templates.h>
#ifdef DEAL_II_TRILINOS_WITH_TPETRA
-# ifdef DEAL_II_WITH_MPI
DEAL_II_NAMESPACE_OPEN
{
template class Vector<float>;
template class Vector<double>;
-# ifdef DEAL_II_WITH_COMPLEX_VALUES
+# ifdef DEAL_II_WITH_COMPLEX_VALUES
template class Vector<std::complex<float>>;
template class Vector<std::complex<double>>;
-# endif
+# endif
} // namespace TpetraWrappers
} // namespace LinearAlgebra
DEAL_II_NAMESPACE_CLOSE
-# endif
#endif
{
// When we clear the vector, reset the pointer and generate an empty
// vector.
-# ifdef DEAL_II_WITH_MPI
Epetra_Map map(0, 0, Epetra_MpiComm(MPI_COMM_SELF));
-# else
- Epetra_Map map(0, 0, Epetra_SerialComm());
-# endif
has_ghosts = false;
vector = std::make_unique<Epetra_FEVector>(map);
// version in case the underlying Epetra_MpiComm object is the same,
// otherwise we might access an MPI_Comm object that has been
// deleted
-# ifdef DEAL_II_WITH_MPI
const Epetra_MpiComm *my_comm =
dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
const Epetra_MpiComm *v_comm =
const bool same_communicators =
my_comm != nullptr && v_comm != nullptr &&
my_comm->DataPtr() == v_comm->DataPtr();
-# else
- const bool same_communicators = true;
-# endif
if (!same_communicators ||
vector->Map().SameAs(v.vector->Map()) == false)
{
last_action = Insert;
}
-# if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
+# if defined(DEBUG)
const Epetra_MpiComm *comm_ptr =
dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
Assert(comm_ptr != nullptr, ExcInternalError());
}
else
vector = std::move(actual_vec);
-# if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
+# if defined(DEBUG)
const Epetra_MpiComm *comm_ptr =
dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
Assert(comm_ptr != nullptr, ExcInternalError());
// check equality for MPI communicators to avoid accessing a possibly
// invalid MPI_Comm object
-# ifdef DEAL_II_WITH_MPI
const Epetra_MpiComm *my_comm =
dynamic_cast<const Epetra_MpiComm *>(&vector->Comm());
const Epetra_MpiComm *v_comm =
// else
// same_communicators = true;
// }
-# else
- const bool same_communicators = true;
-# endif
// distinguish three cases. First case: both vectors have the same
// layout (just need to copy the local data, not reset the memory and
# ifdef DEBUG
-# ifdef DEAL_II_WITH_MPI
// check that every process has decided to use the same mode. This will
// otherwise result in undefined behavior in the call to
// GlobalAssemble().
"this vector was an addition or a set operation. This will "
"prevent the compress() operation from succeeding."));
-# endif
# endif
// Now pass over the information about what we did last to the vector.
++ptr;
}
-# ifdef DEAL_II_WITH_MPI
// in parallel, check that the vector
// is zero on _all_ processors.
const Epetra_MpiComm *mpi_comm =
Assert(mpi_comm != nullptr, ExcInternalError());
unsigned int num_nonzero = Utilities::MPI::sum(flag, mpi_comm->Comm());
return num_nonzero == 0;
-# else
- return flag == 0;
-# endif
}
bool
Vector::is_non_negative() const
{
-# ifdef DEAL_II_WITH_MPI
// if this vector is a parallel one, then
// we need to communicate to determine
// the answer to the current
// function. This still has to be
// implemented.
AssertThrow(local_size() == size(), ExcNotImplemented());
-# endif
// get a representation of the vector and
// loop over all the elements
TrilinosScalar *start_ptr;