const AdditionalData &data)
:
Solver<VectorType>(cn,mem),
+ Vr(NULL),
+ Vp(NULL),
+ Vz(NULL),
additional_data(data)
{}
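
The three initializers added above make the solver's temporary vector pointers start out as null instead of indeterminate. A generic C++ illustration (not deal.II code, names invented) of the failure mode this prevents:

  #include <cstddef>

  struct Holder
  {
    double *buffer;

    // Without the initializer, 'buffer' holds an indeterminate value and any
    // later null check or delete on it is undefined behaviour.
    Holder () : buffer (NULL) {}

    ~Holder ()
    {
      delete[] buffer;   // deleting a null pointer is a harmless no-op
    }
  };
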
{
AssertIsFinite(s);
- const int ierr = vector->PutScalar(s);
-
+ int ierr = vector->PutScalar(s);
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
if (nonlocal_vector.get() != 0)
- nonlocal_vector->PutScalar(0.);
+ {
+ ierr = nonlocal_vector->PutScalar(0.);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
return *this;
}
Assert (!has_ghost_elements(), ExcGhostsPresent());
if (last_action == Add)
- vector->GlobalAssemble(Add);
+ {
+ const int ierr = vector->GlobalAssemble(Add);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
if (last_action != Insert)
last_action = Insert;
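
For reference, a standalone sketch of the idiom this hunk enforces on a bare Epetra_FEVector (the helper name and the had_additions flag are invented for illustration): the collective GlobalAssemble call that flushes pending off-process additions returns an Epetra error code, and that code should be checked like any other Trilinos return value.

  #include <Epetra_FEVector.h>
  #include <Epetra_CombineMode.h>
  #include <stdexcept>

  void finish_pending_additions (Epetra_FEVector &v, const bool had_additions)
  {
    if (had_additions)
      {
        // collective over the vector's communicator
        const int ierr = v.GlobalAssemble (Add);
        if (ierr != 0)
          throw std::runtime_error ("Epetra_FEVector::GlobalAssemble failed");
      }
  }
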
switch (rate_mode)
{
- case none:
- break;
+ // case none: already considered above
case reduction_rate:
rate_key += "red.rate";
no_rate_entries = columns[rate_key].entries.size();
{
return find(filename, *suffix, open_mode);
}
- catch (ExcFileNotFound)
+ catch (ExcFileNotFound &)
{
continue;
}
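
The catch clause now takes the exception by reference, which avoids copying the exception object and, for polymorphic exception hierarchies, avoids slicing derived types down to the base class. A generic C++ illustration, independent of the file-search code above:

  #include <iostream>
  #include <stdexcept>

  void demo ()
  {
    try
      {
        throw std::runtime_error ("file not found");
      }
    // Catching by (const) reference keeps the original exception object;
    // catching by value would copy it and slice any derived type.
    catch (const std::exception &exc)
      {
        std::cerr << exc.what() << std::endl;
      }
  }
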
namespace MPI
{
SparseMatrix::SparseMatrix ()
+ : communicator(MPI_COMM_SELF)
{
// just like for vectors: since we
// create an empty matrix, we can as
{
Vector::Vector ()
+ : communicator (MPI_COMM_SELF)
{
// this is an invalid empty vector, so we can just as well create a
// sequential one to avoid all the overhead incurred by parallelism
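
Both default constructors now fall back to MPI_COMM_SELF, the predefined communicator that contains only the calling process, which is why an empty matrix or vector built on it behaves like a sequential object. A minimal illustration of that property:

  #include <mpi.h>

  int processes_in_self ()
  {
    int size = 0;
    MPI_Comm_size (MPI_COMM_SELF, &size);
    return size;   // always 1: MPI_COMM_SELF contains only the calling process
  }
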
const Vector &down_V = dynamic_cast<const Vector &>(V);
// If the maps are the same we can Update right away.
if (vector->Map().SameAs(down_V.trilinos_vector().Map()))
- vector->Update(1., down_V.trilinos_vector(), 1.);
+ {
+ const int ierr = vector->Update(1., down_V.trilinos_vector(), 1.);
+ Assert(ierr==0, ExcTrilinosError(ierr));
+ (void) ierr;
+ }
else
{
Assert(this->size()==down_V.size(),
#if DEAL_II_TRILINOS_VERSION_GTE(11,11,0)
Epetra_Import data_exchange (vector->Map(), down_V.trilinos_vector().Map());
- int ierr = vector->Import(down_V.trilinos_vector(), data_exchange, Epetra_AddLocalAlso);
+ const int ierr = vector->Import(down_V.trilinos_vector(),
+ data_exchange, Epetra_AddLocalAlso);
Assert(ierr==0, ExcTrilinosError(ierr));
(void) ierr;
#else
int ierr = dummy.Import(down_V.trilinos_vector(), data_exchange, Insert);
Assert(ierr==0, ExcTrilinosError(ierr));
- (void) ierr;
ierr = vector->Update(1.0, dummy, 1.0);
Assert(ierr==0, ExcTrilinosError(ierr));
{
const Epetra_MpiComm *epetra_comm
= dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
+ Assert (epetra_comm != 0, ExcInternalError());
return epetra_comm->GetMpiComm();
}
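
The same checked-cast pattern recurs throughout this patch, so here it is in isolation (get_mpi_communicator is a hypothetical free function, not deal.II API): the Epetra_Comm attached to a Trilinos object is an Epetra_MpiComm only when Trilinos runs on top of MPI, so the result of the dynamic_cast is verified before it is dereferenced.

  #include <mpi.h>
  #include <Epetra_Comm.h>
  #include <Epetra_MpiComm.h>
  #include <cassert>

  MPI_Comm get_mpi_communicator (const Epetra_Comm &comm)
  {
    // the cast yields a null pointer if 'comm' is, e.g., an Epetra_SerialComm
    const Epetra_MpiComm *mpi_comm = dynamic_cast<const Epetra_MpiComm *>(&comm);
    assert (mpi_comm != 0);
    return mpi_comm->Comm();
  }
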
PreconditionAMG::size_type
PreconditionAMG::memory_consumption() const
{
- unsigned int memory = sizeof(this);
+ unsigned int memory = sizeof(*this);
// todo: find a way to read out ML's data
// sizes
PreconditionAMGMueLu::size_type
PreconditionAMGMueLu::memory_consumption() const
{
- unsigned int memory = sizeof(this);
+ unsigned int memory = sizeof(*this);
// todo: find a way to read out MueLu's data
// sizes
SparseMatrix::size_type
SparseMatrix::memory_consumption () const
{
- size_type static_memory = sizeof(this) + sizeof (*matrix)
+ size_type static_memory = sizeof(*this) + sizeof (*matrix)
+ sizeof(*matrix->Graph().DataPtr());
return ((sizeof(TrilinosScalar)+sizeof(TrilinosWrappers::types::int_type))*
matrix->NumMyNonzeros() + sizeof(int)*local_size() + static_memory);
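
The three hunks above fix the same slip: sizeof(this) is the size of a pointer, not of the object it points to, so the old code counted only a pointer's worth of bytes for the object itself. A short illustration:

  #include <cstddef>

  struct Example
  {
    double data[16];

    std::size_t pointer_size () const { return sizeof(this);  }   // size of Example*, typically 8
    std::size_t object_size  () const { return sizeof(*this); }   // size of Example, 128 here
  };
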
const Epetra_MpiComm *mpi_comm
= dynamic_cast<const Epetra_MpiComm *>(&matrix->RangeMap().Comm());
+ Assert(mpi_comm != 0, ExcInternalError());
return mpi_comm->Comm();
#else
const Epetra_MpiComm *mpi_comm
= dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
+ Assert (mpi_comm != 0, ExcInternalError());
return mpi_comm->Comm();
#else
}
}
#if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
- const MPI_Comm mpi_communicator
- = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()))->Comm();
+ const Epetra_MpiComm *comm_ptr
+ = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
+ Assert (comm_ptr != 0, ExcInternalError());
const size_type n_elements_global
- = Utilities::MPI::sum (owned_elements.n_elements(), mpi_communicator);
+ = Utilities::MPI::sum (owned_elements.n_elements(), comm_ptr->Comm());
Assert (has_ghosts || n_elements_global == size(), ExcInternalError());
#endif
last_action = Insert;
}
#if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
- const MPI_Comm mpi_communicator
- = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()))->Comm();
+ const Epetra_MpiComm *comm_ptr
+ = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
+ Assert (comm_ptr != 0, ExcInternalError());
const size_type n_elements_global
- = Utilities::MPI::sum (owned_elements.n_elements(), mpi_communicator);
+ = Utilities::MPI::sum (owned_elements.n_elements(), comm_ptr->Comm());
Assert (has_ghosts || n_elements_global == size(), ExcInternalError());
#endif
last_action = Insert;
}
#if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
- const MPI_Comm mpi_communicator
- = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()))->Comm();
+ const Epetra_MpiComm *comm_ptr
+ = dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
+ Assert (comm_ptr != 0, ExcInternalError());
const size_type n_elements_global
- = Utilities::MPI::sum (owned_elements.n_elements(), mpi_communicator);
+ = Utilities::MPI::sum (owned_elements.n_elements(), comm_ptr->Comm());
Assert (has_ghosts || n_elements_global == size(), ExcInternalError());
#endif
// otherwise result in undefined behaviour in the call to
// GlobalAssemble().
double double_mode = mode;
+ const Epetra_MpiComm *comm_ptr
+ = dynamic_cast<const Epetra_MpiComm *>(&(vector_partitioner().Comm()));
+ Assert (comm_ptr != 0, ExcInternalError());
Utilities::MPI::MinMaxAvg result
- = Utilities::MPI::min_max_avg (double_mode,
- dynamic_cast<const Epetra_MpiComm *>
- (&vector_partitioner().Comm())->GetMpiComm());
+ = Utilities::MPI::min_max_avg (double_mode, comm_ptr->GetMpiComm());
Assert(result.max-result.min<1e-5,
ExcMessage ("Not all processors agree whether the last operation on "
"this vector was an addition or a set operation. This will "
{
Epetra_Export exporter(nonlocal_vector->Map(), vector->Map());
ierr = vector->Export(*nonlocal_vector, exporter, mode);
- nonlocal_vector->PutScalar(0.);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ ierr = nonlocal_vector->PutScalar(0.);
}
AssertThrow (ierr == 0, ExcTrilinosError(ierr));
last_action = Zero;
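
For context, the export-then-clear idiom this hunk instruments, written out standalone (flush_nonlocal and both argument names are invented for the sketch): contributions destined for other processors are staged in a vector built on an overlapping map, shipped to their owners through an Epetra_Export, and the staging vector is then zeroed so a later compress cannot add the same values twice.

  #include <Epetra_Vector.h>
  #include <Epetra_Export.h>
  #include <Epetra_CombineMode.h>

  int flush_nonlocal (Epetra_Vector &owned, Epetra_Vector &nonlocal)
  {
    Epetra_Export exporter (nonlocal.Map(), owned.Map());

    // combine the staged off-process contributions into the owners' entries
    int ierr = owned.Export (nonlocal, exporter, Add);
    if (ierr == 0)
      ierr = nonlocal.PutScalar (0.);   // clear the staging vector
    return ierr;
  }
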
// is zero on _all_ processors.
const Epetra_MpiComm *mpi_comm
= dynamic_cast<const Epetra_MpiComm *>(&vector->Map().Comm());
+ Assert(mpi_comm != 0, ExcInternalError());
unsigned int num_nonzero = Utilities::MPI::sum(flag, mpi_comm->Comm());
return num_nonzero == 0;
#else