LinearAlgebra::distributed::BlockVector<double>;
LinearAlgebra::distributed::BlockVector<float> ;
- @DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR@;
LinearAlgebra::distributed::BlockVector<double>;
LinearAlgebra::distributed::BlockVector<float> ;
- @DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR_REAL@;
- @DEAL_II_EXPAND_TRILINOS_BLOCKVECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_BLOCKVECTOR_REAL@;
}
LinearAlgebra::distributed::Vector<double>;
LinearAlgebra::distributed::Vector<float> ;
- @DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
@DEAL_II_EXPAND_PETSC_MPI_VECTOR_REAL@;
LinearAlgebra::distributed::Vector<double>;
- @DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
@DEAL_II_EXPAND_EPETRA_VECTOR@;
}
MACRO(FEATURE_TRILINOS_CONFIGURE_EXTERNAL)
- SET(DEAL_II_EXPAND_TRILINOS_VECTOR "TrilinosWrappers::VectorBase")
SET(DEAL_II_EXPAND_TRILINOS_SPARSITY_PATTERN "TrilinosWrappers::SparsityPattern")
SET(DEAL_II_EXPAND_TRILINOS_BLOCK_SPARSITY_PATTERN "TrilinosWrappers::BlockSparsityPattern")
SET(DEAL_II_EXPAND_TRILINOS_MPI_BLOCKVECTOR "TrilinosWrappers::MPI::BlockVector")
if (value == PetscScalar())
{
// we have to check after using Insert/Add in any case to be
- // consistent with the MPI communication model (see the comments in
- // the documentation of TrilinosWrappers::VectorBase), but we can save
+ // consistent with the MPI communication model, but we can save
// some work if the addend is zero. However, these actions are done
// in case we pass on to the other function.
prepare_action(VectorOperation::add);
*
* This function is only applicable if the matrix has exactly one block.
*/
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
+ TrilinosScalar residual (MPI::Vector &dst,
+ const MPI::Vector &x,
+ const MPI::Vector &b) const;
/**
* Make the clear() function in the base class visible, though it is
{
// forward declarations
class SparseMatrix;
- class VectorBase;
class PreconditionBase;
*/
void
solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const PreconditionBase &preconditioner);
/**
*/
void
solve (const Epetra_Operator &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const PreconditionBase &preconditioner);
/**
*/
void
solve (const Epetra_Operator &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const Epetra_Operator &preconditioner);
/**
* package set in initialize(). Note the matrix is not refactorized during
* this call.
*/
- void solve (VectorBase &x, const VectorBase &b);
+ void solve (MPI::Vector &x, const MPI::Vector &b);
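A minimal usage sketch of this interface, assuming a matrix A and vectors x, b, b2 that have already been set up with compatible maps (the SolverControl limits are not used by the direct solver):

    SolverControl                  control (1, 0.0);
    TrilinosWrappers::SolverDirect direct (control);  // default AdditionalData
    direct.initialize (A);    // factorize A once
    direct.solve (x, b);      // solve A x = b, reusing the factorization
    direct.solve (x, b2);     // further right-hand sides do not refactorize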
/**
* Solve the linear system <tt>Ax=b</tt>. Creates a factorization of the
*/
void
solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b);
+ MPI::Vector &x,
+ const MPI::Vector &b);
/**
* Solve the linear system <tt>Ax=b</tt>. This class works with Trilinos
* Source and destination must not be the same vector.
*
* This function can be called with several different vector objects,
- * namely TrilinosWrappers::VectorBase, TrilinosWrappers::MPI::Vector as well
- * as deal.II's own vector classes Vector<double> and
- * LinearAlgebra::distributed::Vector<double>.
+ * namely TrilinosWrappers::MPI::Vector as well as deal.II's own vector
+ * classes Vector<double> and LinearAlgebra::distributed::Vector<double>.
*
- * Note that both vectors have to be distributed vectors generated using
- * the same Map as was used for the matrix in case you work on a
- * distributed memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class (or one of the two derived classes
- * Vector and MPI::Vector).
+ * When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
+ * have to be distributed vectors generated using the same Map as was used
+ * for the matrix rows and columns in case you work on a distributed
+ * memory architecture, using the interface in the
+ * TrilinosWrappers::MPI::Vector class.
*
* In case of a localized Vector, this function will only work when
* running on one processor, since the matrix object is inherently
* Source and destination must not be the same vector.
*
* This function can be called with several different vector objects,
- * namely TrilinosWrappers::VectorBase, TrilinosWrappers::MPI::Vector as well
- * as deal.II's own vector classes Vector<double> and
- * LinearAlgebra::distributed::Vector<double>.
+ * namely TrilinosWrappers::MPI::Vector as well as deal.II's own vector
+ * classes Vector<double> and LinearAlgebra::distributed::Vector<double>.
*
- * Note that both vectors have to be distributed vectors generated using
- * the same Map as was used for the matrix in case you work on a
- * distributed memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class (or one of the two derived classes
- * Vector and MPI::Vector).
+ * When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
+ * have to be distributed vectors generated using the same Map as was used
+ * for the matrix rows and columns in case you work on a distributed
+ * memory architecture, using the interface in the
+ * TrilinosWrappers::MPI::Vector class.
*
* In case of a localized Vector, this function will only work when
* running on one processor, since the matrix object is inherently
* Source and destination must not be the same vector.
*
* This function can be called with several different vector objects,
- * namely TrilinosWrappers::VectorBase, TrilinosWrappers::MPI::Vector as well
- * as deal.II's own vector classes Vector<double> and
- * LinearAlgebra::distributed::Vector<double>.
+ * namely TrilinosWrappers::MPI::Vector as well as deal.II's own vector
+ * classes Vector<double> and LinearAlgebra::distributed::Vector<double>.
*
* When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
* have to be distributed vectors generated using the same Map as was used
* for the matrix rows and columns in case you work on a distributed
* memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class.
+ * TrilinosWrappers::MPI::Vector class.
*
- * In case of a localized Vector (i.e., TrilinosWrappers::VectorBase or
- * Vector<double>), this function will only work when running on one
- * processor, since the matrix object is inherently distributed.
- * Otherwise, and exception will be thrown.
+ * In case of a localized Vector, this function will only work when
+ * running on one processor, since the matrix object is inherently
+ * distributed. Otherwise, an exception will be thrown.
*
*/
template<typename VectorType>
* Source and destination must not be the same vector.
*
* This function can be called with several different vector objects,
- * namely TrilinosWrappers::VectorBase, TrilinosWrappers::MPI::Vector as well
- * as deal.II's own vector classes Vector<double> and
- * LinearAlgebra::distributed::Vector<double>.
+ * namely TrilinosWrappers::MPI::Vector as well as deal.II's own vector
+ * classes Vector<double> and LinearAlgebra::distributed::Vector<double>.
*
* When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
* have to be distributed vectors generated using the same Map as was used
* for the matrix rows and columns in case you work on a distributed
* memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class.
+ * TrilinosWrappers::MPI::Vector class.
*
- * In case of a localized Vector (i.e., TrilinosWrappers::VectorBase or
- * Vector<double>), this function will only work when running on one
- * processor, since the matrix object is inherently distributed.
- * Otherwise, and exception will be thrown.
+ * In case of a localized Vector, this function will only work when
+ * running on one processor, since the matrix object is inherently
+ * distributed. Otherwise, an exception will be thrown.
*/
template <typename VectorType>
void Tvmult_add (VectorType &dst,
* the Trilinos wrapper class) since Trilinos doesn't support this
* operation and needs a temporary vector.
*
- * Note that both vectors have to be distributed vectors generated using
- * the same Map as was used for the matrix in case you work on a
- * distributed memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class (or one of the two derived classes
- * Vector and MPI::Vector).
+ * When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
+ * have to be distributed vectors generated using the same Map as was used
+ * for the matrix rows and columns in case you work on a distributed
+ * memory architecture, using the interface in the
+ * TrilinosWrappers::MPI::Vector class.
*
* In case of a localized Vector, this function will only work when
* running on one processor, since the matrix object is inherently
* distributed. Otherwise, an exception will be thrown.
*/
- TrilinosScalar matrix_norm_square (const VectorBase &v) const;
+ TrilinosScalar matrix_norm_square (const MPI::Vector &v) const;
/**
* Compute the matrix scalar product $\left(u,Mv\right)$.
* the Trilinos wrapper class) since Trilinos doesn't support this
* operation and needs a temporary vector.
*
- * Note that both vectors have to be distributed vectors generated using
- * the same Map as was used for the matrix in case you work on a
- * distributed memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class (or one of the two derived classes
- * Vector and MPI::Vector).
+ * When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
+ * have to be distributed vectors generated using the same Map as was used
+ * for the matrix rows and columns in case you work on a distributed
+ * memory architecture, using the interface in the
+ * TrilinosWrappers::MPI::Vector class.
*
* In case of a localized Vector, this function will only work when
* running on one processor, since the matrix object is inherently
* distributed. Otherwise, an exception will be thrown.
*/
- TrilinosScalar matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const;
+ TrilinosScalar matrix_scalar_product (const MPI::Vector &u,
+ const MPI::Vector &v) const;
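A hedged sketch of the partitioning requirement described above; A is an assumed, already assembled quadratic matrix and mpi_communicator the communicator used to build it:

    // Both vectors use the matrix' domain partitioning, as required.
    TrilinosWrappers::MPI::Vector u (A.locally_owned_domain_indices(), mpi_communicator);
    TrilinosWrappers::MPI::Vector v (A.locally_owned_domain_indices(), mpi_communicator);
    // ... fill u and v ...
    const TrilinosScalar uAv = A.matrix_scalar_product (u, v);  // (u, A v)
    const TrilinosScalar vAv = A.matrix_norm_square (v);        // (v, A v)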
/**
* Compute the residual of an equation <i>Mx=b</i>, where the residual is
*
* Source <i>x</i> and destination <i>dst</i> must not be the same vector.
*
- * Note that both vectors have to be distributed vectors generated using
- * the same Map as was used for the matrix in case you work on a
- * distributed memory architecture, using the interface in the
- * TrilinosWrappers::VectorBase class (or one of the two derived classes
- * Vector and MPI::Vector).
+ * When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
+ * have to be distributed vectors generated using the same Map as was used
+ * for the matrix rows and columns in case you work on a distributed
+ * memory architecture, using the interface in the
+ * TrilinosWrappers::MPI::Vector class.
*
* In case of a localized Vector, this function will only work when
* running on one processor, since the matrix object is inherently
* distributed. Otherwise, an exception will be thrown.
*/
- TrilinosScalar residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const;
+ TrilinosScalar residual (MPI::Vector &dst,
+ const MPI::Vector &x,
+ const MPI::Vector &b) const;
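For illustration, a short sketch of the updated residual call, again with assumed objects A, x, b and a destination built from the matrix' range partitioning:

    TrilinosWrappers::MPI::Vector r (A.locally_owned_range_indices(), mpi_communicator);
    const TrilinosScalar res_norm = A.residual (r, x, b);  // r = b - A x, returns its l2 norm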
/**
* Perform the matrix-matrix multiplication <tt>C = A * B</tt>, or, if an
*/
void mmult (SparseMatrix &C,
const SparseMatrix &B,
- const VectorBase &V = VectorBase()) const;
+ const MPI::Vector &V = MPI::Vector()) const;
/**
*/
void Tmmult (SparseMatrix &C,
const SparseMatrix &B,
- const VectorBase &V = VectorBase()) const;
+ const MPI::Vector &V = MPI::Vector()) const;
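Illustrative calls with assumed matrices A, B and scaling vector V; the default-constructed vector argument yields the plain product:

    TrilinosWrappers::SparseMatrix C;
    A.mmult (C, B);      // C = A * B
    A.mmult (C, B, V);   // C = A * diag(V) * B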
//@}
/**
if (value == 0)
{
// we have to check after Insert/Add in any case to be consistent
- // with the MPI communication model (see the comments in the
- // documentation of TrilinosWrappers::VectorBase), but we can save some
+ // with the MPI communication model, but we can save some
// work if the addend is zero. However, these actions are done in case
// we pass on to the other function.
{
class Vector;
}
- class VectorBase;
}
#endif
* result in a copy of the vector on all processors.
*/
explicit Vector (const TrilinosWrappers::MPI::Vector &v);
-
- /**
- * Another copy constructor: copy the values from a localized Trilinos
- * wrapper vector. This copy constructor is only available if Trilinos was
- * detected during configuration time.
- */
- explicit Vector (const TrilinosWrappers::VectorBase &v);
#endif
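A brief sketch of the remaining conversion path (locally_owned_dofs and mpi_communicator are assumed names; every processor receives a complete copy, so this is only sensible for small vectors):

    TrilinosWrappers::MPI::Vector parallel_v (locally_owned_dofs, mpi_communicator);
    // ... fill parallel_v ...
    Vector<double> serial_v (parallel_v);   // gathers a full copy on each processor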
/**
*/
Vector<Number> &
operator= (const TrilinosWrappers::MPI::Vector &v);
-
- /**
- * Another copy operator: copy the values from a sequential Trilinos wrapper
- * vector class. This operator is only available if Trilinos was detected
- * during configuration time.
- */
- Vector<Number> &
- operator= (const TrilinosWrappers::VectorBase &v);
#endif
/**
}
}
-
-
-template <typename Number>
-Vector<Number>::Vector (const TrilinosWrappers::VectorBase &v)
- :
- Subscriptor(),
- vec_size(v.size()),
- max_vec_size(v.size()),
- val(nullptr)
-{
- if (vec_size != 0)
- {
- allocate();
-
- // get a representation of the vector
- // and copy it
- TrilinosScalar **start_ptr;
-
- int ierr = v.trilinos_vector().ExtractView (&start_ptr);
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
-
- std::copy (start_ptr[0], start_ptr[0]+vec_size, begin());
- }
-}
-
#endif
TrilinosWrappers::MPI::Vector localized_vector;
localized_vector.reinit(complete_index_set(v.size()));
localized_vector.reinit(v, false, true);
- *this = static_cast<TrilinosWrappers::VectorBase>(localized_vector);
- return *this;
-}
-
-
-template <typename Number>
-Vector<Number> &
-Vector<Number>::operator= (const TrilinosWrappers::VectorBase &v)
-{
if (v.size() != vec_size)
reinit (v.size(), true);
if (vec_size != 0)
};
#endif
- template <>
- struct MatrixSelector<dealii::TrilinosWrappers::VectorBase>
- {
- typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
- typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
-
- template <typename SparsityPatternType, typename DoFHandlerType>
- static void reinit(Matrix &, Sparsity &, int /*level*/, const SparsityPatternType &, DoFHandlerType &)
- {
- }
- };
#else
// ! DEAL_II_WITH_TRILINOS
template <typename Number>
{
class SparseMatrix;
class BlockSparseMatrix;
- class VectorBase;
namespace MPI
{
class Vector;
* argument is actually not implemented; that argument has <code>true</code>
* as its default value to stay consistent with the other functions of the
* same name in this namespace.)
- */
- void
- apply_boundary_values (const std::map<types::global_dof_index,TrilinosScalar> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
- TrilinosWrappers::VectorBase &solution,
- TrilinosWrappers::VectorBase &right_hand_side,
- const bool eliminate_columns = true);
-
- /**
- * Same as above, but for parallel matrices and vectors.
*
* @note If the matrix is stored in parallel across multiple processors
* using MPI, this function only touches rows that are locally stored and
#endif
#ifdef DEAL_II_WITH_TRILINOS
- template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::VectorBase, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
-
template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::MPI::Vector, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::MPI::BlockVector, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
-for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS; VEC : REAL_SERIAL_VECTORS)
+for (deal_II_dimension : DIMENSIONS; deal_II_space_dimension : SPACE_DIMENSIONS; VEC : SERIAL_VECTORS)
{
namespace FETools
\{
#ifdef DEAL_II_WITH_TRILINOS
inline
TrilinosScalar
- max_element (const TrilinosWrappers::VectorBase &criteria)
+ max_element (const TrilinosWrappers::MPI::Vector &criteria)
{
TrilinosScalar m = 0;
criteria.trilinos_vector().MaxValue(&m);
inline
TrilinosScalar
- min_element (const TrilinosWrappers::VectorBase &criteria)
+ min_element (const TrilinosWrappers::MPI::Vector &criteria)
{
TrilinosScalar m = 0;
criteria.trilinos_vector().MinValue(&m);
#ifdef DEAL_II_WITH_TRILINOS
MATRIX_FUNCTIONS(TrilinosWrappers::SparseMatrix);
BLOCK_MATRIX_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix);
-MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::VectorBase);
MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::SparseMatrix, TrilinosWrappers::MPI::Vector);
BLOCK_MATRIX_VECTOR_FUNCTIONS(TrilinosWrappers::BlockSparseMatrix, TrilinosWrappers::MPI::BlockVector);
#endif
TrilinosScalar
- BlockSparseMatrix::residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const
+ BlockSparseMatrix::residual (MPI::Vector &dst,
+ const MPI::Vector &x,
+ const MPI::Vector &b) const
{
vmult (dst, x);
dst -= b;
void
SolverBase::solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const PreconditionBase &preconditioner)
{
linear_problem.reset();
// can be used by the inverse_operator of LinearOperator
void
SolverBase::solve (const Epetra_Operator &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const PreconditionBase &preconditioner)
{
linear_problem.reset();
// can be used by the inverse_operator of LinearOperator
void
SolverBase::solve (const Epetra_Operator &A,
- VectorBase &x,
- const VectorBase &b,
+ MPI::Vector &x,
+ const MPI::Vector &b,
const Epetra_Operator &preconditioner)
{
linear_problem.reset();
}
- void SolverDirect::solve (VectorBase &x, const VectorBase &b)
+ void SolverDirect::solve (MPI::Vector &x, const MPI::Vector &b)
{
// Assign the empty LHS vector to the Epetra_LinearProblem object
linear_problem->SetLHS(&x.trilinos_vector());
void
SolverDirect::solve (const SparseMatrix &A,
- VectorBase &x,
- const VectorBase &b)
+ MPI::Vector &x,
+ const MPI::Vector &b)
{
// We need an Epetra_LinearProblem object to let the Amesos solver know
// about the matrix and vectors.
Assert (&src != &dst, ExcSourceEqualsDestination());
// Reinit a temporary vector with fast argument set, which does not
- // overwrite the content (to save time). However, the
- // TrilinosWrappers::VectorBase classes do not support this, so create a
- // deal.II local vector that has this fast setting. It will be accepted in
- // vmult because it only checks the local size.
- dealii::Vector<TrilinosScalar> temp_vector;
- temp_vector.reinit(internal::end(dst)-internal::begin(dst), true);
- dealii::VectorView<TrilinosScalar> src_view(internal::end(src)-internal::begin(src),
- internal::begin(src));
- dealii::VectorView<TrilinosScalar> dst_view(internal::end(dst)-internal::begin(dst),
- internal::begin(dst));
- vmult (temp_vector, static_cast<const dealii::Vector<TrilinosScalar>&>(src_view));
- if (dst_view.size() > 0)
- dst_view += temp_vector;
+ // overwrite the content (to save time).
+ VectorType tmp_vector;
+ tmp_vector.reinit(dst, true);
+ vmult (tmp_vector, src);
+ dst += tmp_vector;
}
Assert (&src != &dst, ExcSourceEqualsDestination());
// Reinit a temporary vector with fast argument set, which does not
- // overwrite the content (to save time). However, the
- // TrilinosWrappers::VectorBase classes do not support this, so create a
- // deal.II local vector that has this fast setting. It will be accepted in
- // vmult because it only checks the local size.
- dealii::Vector<TrilinosScalar> temp_vector;
- temp_vector.reinit(internal::end(dst)-internal::begin(dst), true);
- dealii::VectorView<TrilinosScalar> src_view(internal::end(src)-internal::begin(src),
- internal::begin(src));
- dealii::VectorView<TrilinosScalar> dst_view(internal::end(dst)-internal::begin(dst),
- internal::begin(dst));
- Tvmult (temp_vector, static_cast<const dealii::Vector<TrilinosScalar>&>(src_view));
- if (dst_view.size() > 0)
- dst_view += temp_vector;
+ // overwrite the content (to save time).
+ VectorType tmp_vector;
+ tmp_vector.reinit(dst, true);
+ Tvmult (tmp_vector, src);
+ dst += tmp_vector;
}
TrilinosScalar
- SparseMatrix::matrix_norm_square (const VectorBase &v) const
+ SparseMatrix::matrix_norm_square (const MPI::Vector &v) const
{
Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
ExcNotQuadratic());
- VectorBase temp_vector;
+ MPI::Vector temp_vector;
temp_vector.reinit(v, true);
vmult (temp_vector, v);
TrilinosScalar
- SparseMatrix::matrix_scalar_product (const VectorBase &u,
- const VectorBase &v) const
+ SparseMatrix::matrix_scalar_product (const MPI::Vector &u,
+ const MPI::Vector &v) const
{
Assert (matrix->RowMap().SameAs(matrix->DomainMap()),
ExcNotQuadratic());
- VectorBase temp_vector;
+ MPI::Vector temp_vector;
temp_vector.reinit(v, true);
vmult (temp_vector, v);
TrilinosScalar
- SparseMatrix::residual (VectorBase &dst,
- const VectorBase &x,
- const VectorBase &b) const
+ SparseMatrix::residual (MPI::Vector &dst,
+ const MPI::Vector &x,
+ const MPI::Vector &b) const
{
vmult (dst, x);
dst -= b;
void perform_mmult (const SparseMatrix &inputleft,
const SparseMatrix &inputright,
SparseMatrix &result,
- const VectorBase &V,
+ const MPI::Vector &V,
const bool transpose_left)
{
#ifdef DEAL_II_WITH_64BIT_INDICES
void
SparseMatrix::mmult (SparseMatrix &C,
const SparseMatrix &B,
- const VectorBase &V) const
+ const MPI::Vector &V) const
{
#ifdef DEAL_II_WITH_64BIT_INDICES
Assert(false,ExcNotImplemented())
void
SparseMatrix::Tmmult (SparseMatrix &C,
const SparseMatrix &B,
- const VectorBase &V) const
+ const MPI::Vector &V) const
{
#ifdef DEAL_II_WITH_64BIT_INDICES
Assert(false,ExcNotImplemented())
const dealii::SparsityPattern &,
const MPI_Comm &,
const bool);
+
template void
SparseMatrix::reinit (const IndexSet &,
const IndexSet &,
const bool);
template void
- SparseMatrix::vmult (VectorBase &,
- const VectorBase &) const;
- template void
SparseMatrix::vmult (MPI::Vector &,
const MPI::Vector &) const;
template void
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
#endif
template void
- SparseMatrix::Tvmult (VectorBase &,
- const VectorBase &) const;
- template void
SparseMatrix::Tvmult (MPI::Vector &,
const MPI::Vector &) const;
template void
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
#endif
template void
- SparseMatrix::vmult_add (VectorBase &,
- const VectorBase &) const;
- template void
SparseMatrix::vmult_add (MPI::Vector &,
const MPI::Vector &) const;
template void
const dealii::LinearAlgebra::EpetraWrappers::Vector &) const;
#endif
template void
- SparseMatrix::Tvmult_add (VectorBase &,
- const VectorBase &) const;
- template void
SparseMatrix::Tvmult_add (MPI::Vector &,
const MPI::Vector &) const;
template void
-
- void
- apply_boundary_values (const std::map<types::global_dof_index,TrilinosScalar> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
- TrilinosWrappers::VectorBase &solution,
- TrilinosWrappers::VectorBase &right_hand_side,
- const bool eliminate_columns)
- {
- // simply redirect to the generic function
- // used for both trilinos matrix types
- internal::TrilinosWrappers::apply_boundary_values (boundary_values, matrix, solution,
- right_hand_side, eliminate_columns);
- }
-
-
-
void
apply_boundary_values (const std::map<types::global_dof_index,TrilinosScalar> &boundary_values,
TrilinosWrappers::SparseMatrix &matrix,
{
norm = v_tmp.linfty_norm();
}
- catch (TrilinosWrappers::VectorBase::ExcTrilinosError e)
+ catch (TrilinosWrappers::MPI::Vector::ExcTrilinosError e)
{
deallog << e.get_exc_name() << std::endl;
exc = true;
#include <deal.II/lac/trilinos_parallel_block_vector.h>
-// Check that the base class vector and also the block vector class support
-// update_ghost_values. This method doesn't do anything but is needed for
-// genericity.
+// Check that the block vector class supports update_ghost_values. This method
+// doesn't do anything but is needed for genericity.
int main(int argc, char **argv)
{
Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
initlog();
- TrilinosWrappers::VectorBase v;
- v.update_ghost_values();
TrilinosWrappers::MPI::BlockVector bv;
bv.update_ghost_values();
deallog << "OK" << std::endl;
-// check TrilinosWrappers::MPI::Vector::operator==(TrilinosWrappers::Vector) for vectors that are not
-// equal
+// check TrilinosWrappers::MPI::Vector::operator==(TrilinosWrappers::MPI::Vector)
+// for vectors that are not equal
#include "../tests.h"
#include <deal.II/base/utilities.h>
-// check TrilinosWrappers::MPI::Vector::operator==(TrilinosWrappers::Vector) for vectors that are
-// equal
+// check TrilinosWrappers::MPI::Vector::operator==(TrilinosWrappers::MPI::Vector)
+// for vectors that are equal
#include "../tests.h"
#include <deal.II/base/utilities.h>
-// check TrilinosWrappers::MPI::Vector::operator!=(TrilinosWrappers::Vector) for vectors that are not
-// equal
+// check TrilinosWrappers::MPI::Vector::operator!=(TrilinosWrappers::MPI::Vector)
+// for vectors that are not equal
#include "../tests.h"
#include <deal.II/base/utilities.h>
-// check TrilinosWrappers::MPI::Vector::operator!=(TrilinosWrappers::Vector) for vectors that are
-// equal
+// check TrilinosWrappers::MPI::Vector::operator!=(TrilinosWrappers::MPI::Vector)
+// for vectors that are equal
#include "../tests.h"
#include <deal.II/base/utilities.h>