// the argument's lifetime needs to be longer than the view's. If we do
// this, we need to think about whether the view should be read/write.
- stored_elements = IndexSet(trilinos_vec.vector_partitioner());
+ stored_elements = IndexSet(trilinos_vec.trilinos_partitioner());
resize_val(stored_elements.n_elements());
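// A minimal sketch of the calling pattern for this reinit overload
// (assuming a locally owned IndexSet 'owned'; names are hypothetical):
//
//   TrilinosWrappers::MPI::Vector trilinos_vec(owned, MPI_COMM_WORLD);
//   LinearAlgebra::ReadWriteVector<double> rw_vector;
//   rw_vector.reinit(trilinos_vec); // layout copied via trilinos_partitioner()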
inline void
PreconditionBase::vmult(MPI::Vector &dst, const MPI::Vector &src) const
{
- Assert(dst.vector_partitioner().SameAs(preconditioner->OperatorRangeMap()),
+ Assert(dst.trilinos_partitioner().SameAs(
+ preconditioner->OperatorRangeMap()),
ExcNonMatchingMaps("dst"));
- Assert(src.vector_partitioner().SameAs(preconditioner->OperatorDomainMap()),
+ Assert(src.trilinos_partitioner().SameAs(
+ preconditioner->OperatorDomainMap()),
ExcNonMatchingMaps("src"));
const int ierr = preconditioner->ApplyInverse(src.trilinos_vector(),
inline void
PreconditionBase::Tvmult(MPI::Vector &dst, const MPI::Vector &src) const
{
- Assert(dst.vector_partitioner().SameAs(preconditioner->OperatorRangeMap()),
+ Assert(dst.trilinos_partitioner().SameAs(
+ preconditioner->OperatorRangeMap()),
ExcNonMatchingMaps("dst"));
- Assert(src.vector_partitioner().SameAs(preconditioner->OperatorDomainMap()),
+ Assert(src.trilinos_partitioner().SameAs(
+ preconditioner->OperatorDomainMap()),
ExcNonMatchingMaps("src"));
preconditioner->SetUseTranspose(true);
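// A caller-side sketch of the contract these assertions enforce (names
// are hypothetical; 'prec' is assumed to be an initialized
// PreconditionBase-derived object, 'owned' the locally owned IndexSet):
//
//   TrilinosWrappers::MPI::Vector src(owned, MPI_COMM_WORLD);
//   TrilinosWrappers::MPI::Vector dst(owned, MPI_COMM_WORLD);
//   prec.vmult(dst, src);  // asserts fire unless dst/src maps match the
//   prec.Tvmult(dst, src); // operator's range/domain maps, respectively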
/**
* Return a const reference to the underlying Trilinos Epetra_Map that
* sets the parallel partitioning of the vector.
+ *
+ * @deprecated Use trilinos_partitioner() instead.
*/
+ DEAL_II_DEPRECATED
const Epetra_Map &
vector_partitioner() const;
+ /**
+ * Return a const reference to the underlying Trilinos Epetra_BlockMap
+ * that sets the parallel partitioning of the vector.
+ */
+ const Epetra_BlockMap &
+ trilinos_partitioner() const;
+
/**
* Print to a stream. @p precision denotes the desired precision with
* which values shall be printed, @p scientific whether scientific
+ inline const Epetra_BlockMap &
+ Vector::trilinos_partitioner() const
+ {
+ return vector->Map();
+ }
+
+
+
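+ // A short usage sketch for the new accessor (hypothetical vectors 'v'
+ // and 'w' built on the same locally owned IndexSet 'owned'):
+ //
+ //   TrilinosWrappers::MPI::Vector v(owned, MPI_COMM_WORLD);
+ //   TrilinosWrappers::MPI::Vector w(owned, MPI_COMM_WORLD);
+ //   IndexSet from_map(v.trilinos_partitioner()); // rebuild the IndexSet
+ //   AssertThrow(v.trilinos_partitioner().SameAs(w.trilinos_partitioner()),
+ //               ExcInternalError());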
inline const MPI_Comm &
Vector::get_mpi_communicator() const
{
const TrilinosWrappers::MPI::Vector &in,
const TrilinosWrappers::MPI::Vector &out)
{
- Assert(in.vector_partitioner().SameAs(m.DomainMap()) == true,
+ Assert(in.trilinos_partitioner().SameAs(m.DomainMap()) == true,
ExcMessage(
"Column map of matrix does not fit with vector map!"));
- Assert(out.vector_partitioner().SameAs(m.RangeMap()) == true,
+ Assert(out.trilinos_partitioner().SameAs(m.RangeMap()) == true,
ExcMessage("Row map of matrix does not fit with vector map!"));
(void)m;
(void)in;
{
TrilinosWrappers::types::int_type *glob_elements =
TrilinosWrappers::my_global_elements(
- v.block(block).vector_partitioner());
+ v.block(block).trilinos_partitioner());
for (size_type i = 0; i < v.block(block).local_size(); ++i)
global_ids[added_elements++] = glob_elements[i] + block_offset;
owned_elements.add_indices(v.block(block).owned_elements,
n_elements,
global_ids.data(),
0,
- v.block(0).vector_partitioner().Comm());
+ v.block(0).trilinos_partitioner().Comm());
auto actual_vec = std_cxx14::make_unique<Epetra_FEVector>(new_map);
// GlobalAssemble().
double double_mode = mode;
const Epetra_MpiComm *comm_ptr =
- dynamic_cast<const Epetra_MpiComm *>(&(vector_partitioner().Comm()));
+ dynamic_cast<const Epetra_MpiComm *>(&(trilinos_partitioner().Comm()));
Assert(comm_ptr != nullptr, ExcInternalError());
Utilities::MPI::MinMaxAvg result =
Utilities::MPI::min_max_avg(double_mode, comm_ptr->GetMpiComm());
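// The same pattern can be used wherever the raw MPI communicator is
// needed (a sketch for a hypothetical vector 'v'):
//
//   const Epetra_MpiComm *mpi_comm = dynamic_cast<const Epetra_MpiComm *>(
//     &v.trilinos_partitioner().Comm());
//   Assert(mpi_comm != nullptr, ExcInternalError());
//   const MPI_Comm raw_comm = mpi_comm->GetMpiComm();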
test1.compress(VectorOperation::add);
- // TrilinosWrappers::MPI::Vector test(test1.vector_partitioner()); // works
+ // TrilinosWrappers::MPI::Vector test(test1.trilinos_partitioner()); // works
// TrilinosWrappers::MPI::Vector test(locally_owned); // works
TrilinosWrappers::MPI::Vector test(test1); // fails
TrilinosWrappers::MPI::Vector test1, test2;
- AssertThrow(test1.vector_partitioner().SameAs(test2.vector_partitioner()),
+ AssertThrow(test1.trilinos_partitioner().SameAs(
+ test2.trilinos_partitioner()),
ExcInternalError());
// first processor owns 2 indices, second
// reinit Trilinos vector from other vector
test2.reinit(test1, true);
- AssertThrow(test1.vector_partitioner().SameAs(test2.vector_partitioner()),
+ AssertThrow(test1.trilinos_partitioner().SameAs(
+ test2.trilinos_partitioner()),
ExcInternalError());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
TrilinosWrappers::MPI::Vector v;
v.reinit(set_my, set_ghost, MPI_COMM_WORLD);
- IndexSet from_partitioner(v.vector_partitioner());
+ IndexSet from_partitioner(v.trilinos_partitioner());
deallog << "vec size: " << v.size()
<< " from_partitioner: " << from_partitioner.size() << std::endl;
tril_vector_ghosted.print(deallog.get_file_stream());
- IndexSet readwrite_is(tril_vector_ghosted.vector_partitioner());
+ IndexSet readwrite_is(tril_vector_ghosted.trilinos_partitioner());
deallog << "ghosted IS: ";
readwrite_is.print(deallog);
tril_vector_ghosted.print(deallog.get_file_stream());
- IndexSet readwrite_is(tril_vector_ghosted.vector_partitioner());
+ IndexSet readwrite_is(tril_vector_ghosted.trilinos_partitioner());
deallog << "ghosted IS: ";
readwrite_is.print(deallog);
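// The IndexSet recovered above can back a ReadWriteVector for element-wise
// access (a sketch, assuming the usual deal.II import interface):
//
//   LinearAlgebra::ReadWriteVector<double> rw_vector(readwrite_is);
//   rw_vector.import(tril_vector_ghosted, VectorOperation::insert);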