chunk_size * chunk_size;
if (N > max_len || max_len == 0)
{
- val.reset (new number[N]);
+ val = std_cxx14::make_unique<number[]>(N);
max_len = N;
}
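// Illustrative sketch, not part of the patch: the array form
// make_unique<T[]>(n) value-initializes all n elements, whereas the
// replaced 'new number[N]' left built-in types uninitialized; in exchange,
// the unique_ptr<T[]> array specialization guarantees delete[] on release.
// A hypothetical stand-alone equivalent of the array overload:
#include <cstddef>
#include <memory>
template <typename T>
std::unique_ptr<T[]> sketch_make_unique_array (const std::size_t n)
{
  return std::unique_ptr<T[]>(new T[n]()); // '()' forces value-initialization
}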
AssertThrow (c == '[', ExcIO());
// reallocate space
- val.reset (new number[max_len]);
+ val = std_cxx14::make_unique<number[]>(max_len);
// then read data
in.read (reinterpret_cast<char *>(val.get()),
#include <deal.II/base/config.h>
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/lac/exceptions.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_operations_internal.h>
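// Illustrative sketch, not part of the patch: std_cxx14::make_unique is
// deal.II's drop-in for std::make_unique on compilers that only provide
// C++11; under C++14 it simply forwards to the standard function. The
// single-object overload is essentially:
#include <memory>
#include <utility>
template <typename T, typename... Args>
std::unique_ptr<T> sketch_make_unique (Args &&... args)
{
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}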
values.reset();
allocated_size = 0;
}
- thread_loop_partitioner.reset(new ::dealii::parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<::dealii::parallel::internal::TBBPartitioner>();
}
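// Illustrative sketch, not part of the patch: compared to
// ptr.reset(new T(...)), std::make_shared<T>(...) allocates the object and
// its reference-count control block in a single allocation and cannot leak
// if an argument expression throws:
#include <memory>
struct S { explicit S (const int j) : i(j) {} int i; };
std::shared_ptr<S> a (new S(1)); // two allocations: S and the control block
auto b = std::make_shared<S>(1); // one combined allocation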
import_data.reset ();
// set partitioner to serial version
- partitioner.reset (new Utilities::MPI::Partitioner (size));
+ partitioner = std::make_shared<Utilities::MPI::Partitioner> (size);
// set entries to zero if so requested
if (omit_zeroing_entries == false)
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
- import_data.reset (new Number[partitioner->n_import_indices()]);
+ import_data = std_cxx14::make_unique<Number[]>(partitioner->n_import_indices());
partitioner->import_from_ghosted_array_start
(operation, counter,
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
- import_data.reset (new Number[partitioner->n_import_indices()]);
+ import_data = std_cxx14::make_unique<Number[]>(partitioner->n_import_indices());
partitioner->export_to_ghosted_array_start
(counter,
ghost_indices.subtract_set(locally_owned_elem);
IndexSet local_indices(V.get_stored_elements());
local_indices.subtract_set(ghost_indices);
- comm_pattern.reset(new Utilities::MPI::Partitioner(local_indices,
- ghost_indices,
- get_mpi_communicator()));
+ comm_pattern = std::make_shared<Utilities::MPI::Partitioner>
+ (local_indices, ghost_indices, get_mpi_communicator());
}
else
{
* The matrix <i>U</i> in the singular value decomposition
* <i>USV<sup>T</sup></i>.
*/
- std::shared_ptr<LAPACKFullMatrix<number> > svd_u;
+ std::unique_ptr<LAPACKFullMatrix<number>> svd_u;
/**
* The matrix <i>V<sup>T</sup></i> in the singular value decomposition
* <i>USV<sup>T</sup></i>.
*/
- std::shared_ptr<LAPACKFullMatrix<number> > svd_vt;
+ std::unique_ptr<LAPACKFullMatrix<number>> svd_vt;
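// Illustrative sketch, not part of the patch: svd_u and svd_vt are owned by
// exactly one matrix object, so unique_ptr documents exclusive ownership and
// drops the reference-counting overhead. The enclosing class must then
// deep-copy on copy construction instead of silently aliasing:
#include <memory>
struct Holder
{
  std::unique_ptr<int> data;
  Holder () = default;
  Holder (const Holder &other) // shared_ptr would have aliased 'data' here
    : data (other.data ? std::make_unique<int>(*other.data) : nullptr)
  {}
};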
/**
* Thread mutex.
* Pointer to an object that stores the solver context. This is recreated
* in the main solver routine if necessary.
*/
- std::shared_ptr<SolverData> solver_data;
+ std::unique_ptr<SolverData> solver_data;
#ifdef DEAL_II_WITH_SLEPC
/**
PC pc;
};
- std::shared_ptr<SolverDataMUMPS> solver_data;
+ std::unique_ptr<SolverDataMUMPS> solver_data;
/**
* Flag that specifies whether the matrix being factorized is symmetric or not. It
preconditioner->m() != matrix.m())
{
if (preconditioner.get() == nullptr)
- preconditioner.reset(new DiagonalMatrix<VectorType>());
+ preconditioner = std::make_shared<DiagonalMatrix<VectorType>>();
Assert(preconditioner->m() == 0,
ExcMessage("Preconditioner appears to be initialized but not sized correctly"));
if (val != nullptr)
free(val);
val = nullptr;
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
}
else
{
Utilities::System::posix_memalign ((void **)&val, 64, sizeof(Number)*new_alloc_size);
if (new_alloc_size >= 4*dealii::internal::Vector::minimum_parallel_grain_size)
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
}
}
std::shared_ptr<const Utilities::MPI::Partitioner> comm_pattern;
if (communication_pattern.get() == nullptr)
{
- comm_pattern.reset(new Utilities::MPI::Partitioner(vec.locally_owned_elements(),
- get_stored_elements(),
- vec.get_mpi_communicator()));
+ comm_pattern = std::make_shared<Utilities::MPI::Partitioner>
+ (vec.locally_owned_elements(),
+ get_stored_elements(),
+ vec.get_mpi_communicator());
}
else
{
source_stored_elements = source_index_set;
EpetraWrappers::CommunicationPattern epetra_comm_pattern(
source_stored_elements, stored_elements, mpi_comm);
- comm_pattern.reset(new EpetraWrappers::CommunicationPattern(
- source_stored_elements, stored_elements, mpi_comm));
+ comm_pattern = std::make_shared<EpetraWrappers::CommunicationPattern>
+ (source_stored_elements, stored_elements, mpi_comm);
return epetra_comm_pattern;
}
const std::size_t N = cols->n_nonzero_elements();
if (N > max_len || max_len == 0)
{
- val.reset (new number[N]);
+ val = std_cxx14::make_unique<number[]>(N);
max_len = N;
}
AssertThrow (c == '[', ExcIO());
// reallocate space
- val.reset (new number[max_len]);
+ val = std_cxx14::make_unique<number[]> (max_len);
// then read data
in.read (reinterpret_cast<char *>(val.get()),
#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/base/subscriptor.h>
// boost::serialization::make_array used to be in array.hpp, but was
#endif
#include <boost/serialization/split_member.hpp>
-#include <memory>
#include <vector>
#include <iostream>
#include <algorithm>
ar &max_dim &rows &cols &max_vec_len &max_row_length &compressed &store_diagonal_first_in_row;
- rowstart.reset (new std::size_t[max_dim + 1]);
- colnums.reset (new size_type[max_vec_len]);
+ rowstart = std_cxx14::make_unique<std::size_t[]>(max_dim + 1);
+ colnums = std_cxx14::make_unique<size_type[]>(max_vec_len);
ar &boost::serialization::make_array(rowstart.get(), max_dim + 1);
ar &boost::serialization::make_array(colnums.get(), max_vec_len);
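// Illustrative sketch, not part of the patch: on loading, the raw buffers
// are reallocated first and boost::serialization::make_array then streams
// the archived values directly into them, e.g. with a text archive:
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/array_wrapper.hpp>
#include <memory>
#include <sstream>
void sketch_load (std::istringstream &in, const std::size_t n)
{
  boost::archive::text_iarchive ar (in);
  auto buffer = std::make_unique<std::size_t[]>(n);
  ar &boost::serialization::make_array (buffer.get(), n);
}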
/**
* Shared pointer to the Epetra_Import object used.
*/
- std::shared_ptr<Epetra_Import> import;
+ std::unique_ptr<Epetra_Import> import;
};
} // end of namespace EpetraWrappers
} // end of namespace LinearAlgebra
/**
* Pointer to the actual Epetra vector object.
*/
- std::shared_ptr<Epetra_FEVector> vector;
+ std::unique_ptr<Epetra_FEVector> vector;
/**
* IndexSet of the elements of the last imported vector.
* side vector and the solution vector, which is passed down to the
* Trilinos solver.
*/
- std::shared_ptr<Epetra_LinearProblem> linear_problem;
+ std::unique_ptr<Epetra_LinearProblem> linear_problem;
/**
* A structure that contains a Trilinos object that can query the linear
* solver and determine whether the convergence criteria have been met.
*/
- std::shared_ptr<AztecOO_StatusTest> status_test;
+ std::unique_ptr<AztecOO_StatusTest> status_test;
/**
* A structure that contains the Trilinos solver and preconditioner
* side vector and the solution vector, which is passed down to the
* Trilinos solver.
*/
- std::shared_ptr<Epetra_LinearProblem> linear_problem;
+ std::unique_ptr<Epetra_LinearProblem> linear_problem;
/**
* A structure that contains the Trilinos solver and preconditioner
* objects.
*/
- std::shared_ptr<Amesos_BaseSolver> solver;
+ std::unique_ptr<Amesos_BaseSolver> solver;
/**
* Store a copy of the flags for this particular solver.
# include <memory>
# include <Epetra_FECrsMatrix.h>
+# include <Epetra_Export.h>
# include <Epetra_Map.h>
# include <Epetra_CrsGraph.h>
# include <Epetra_MultiVector.h>
# include <Epetra_SerialComm.h>
# endif
-class Epetra_Export;
-
DEAL_II_NAMESPACE_OPEN
// forward declarations
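// Illustrative sketch, not part of the patch: the forward declaration is no
// longer enough because std::unique_ptr<Epetra_Export> calls delete in its
// destructor and therefore needs the complete type wherever the holder is
// destroyed, while std::shared_ptr type-erases its deleter at construction
// and tolerated the incomplete type. When the include must be avoided, the
// usual escape is an out-of-line destructor:
#include <memory>
class Widget; // incomplete type
struct Holder
{
  std::unique_ptr<Widget> p;
  Holder ();
  ~Holder (); // defined in the .cc file, where Widget is complete
};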
* Pointer to the user-supplied Epetra Trilinos mapping of the matrix
* columns that assigns parts of the matrix to the individual processes.
*/
- std::shared_ptr<Epetra_Map> column_space_map;
+ std::unique_ptr<Epetra_Map> column_space_map;
/**
* A sparse matrix object in Trilinos to be used for finite element based
* problems which allows for assembling into non-local elements. The
* actual type, a sparse matrix, is set in the constructor.
*/
- std::shared_ptr<Epetra_FECrsMatrix> matrix;
+ std::unique_ptr<Epetra_FECrsMatrix> matrix;
/**
* A sparse matrix object in Trilinos to be used for collecting the non-
* local elements if the matrix was constructed from a Trilinos sparsity
* pattern with the respective option.
*/
- std::shared_ptr<Epetra_CrsMatrix> nonlocal_matrix;
+ std::unique_ptr<Epetra_CrsMatrix> nonlocal_matrix;
/**
* An export object used to communicate the nonlocal matrix.
*/
- std::shared_ptr<Epetra_Export> nonlocal_matrix_exporter;
+ std::unique_ptr<Epetra_Export> nonlocal_matrix_exporter;
/**
* Trilinos doesn't allow mixing additions to matrix entries and
* Pointer to the user-supplied Epetra Trilinos mapping of the matrix
* columns that assigns parts of the matrix to the individual processes.
*/
- std::shared_ptr<Epetra_Map> column_space_map;
+ std::unique_ptr<Epetra_Map> column_space_map;
/**
* A sparsity pattern object in Trilinos to be used for finite element
* based problems which allows for adding non-local elements to the
* pattern.
*/
- std::shared_ptr<Epetra_FECrsGraph> graph;
+ std::unique_ptr<Epetra_FECrsGraph> graph;
/**
* A sparsity pattern object for the non-local part of the sparsity pattern
* when the particular constructor or reinit method with writable_rows
* argument is set
*/
- std::shared_ptr<Epetra_CrsGraph> nonlocal_graph;
+ std::unique_ptr<Epetra_CrsGraph> nonlocal_graph;
friend class TrilinosWrappers::SparseMatrix;
friend class SparsityPatternIterators::Accessor;
* that is in fact distributed among multiple processors. The object
* requires an existing Epetra_Map for storing data when setting it up.
*/
- std::shared_ptr<Epetra_FEVector> vector;
+ std::unique_ptr<Epetra_FEVector> vector;
/**
* A vector object in Trilinos to be used for collecting the non-local
* elements if the vector was constructed with an additional IndexSet
* describing ghost elements.
*/
- std::shared_ptr<Epetra_MultiVector> nonlocal_vector;
+ std::unique_ptr<Epetra_MultiVector> nonlocal_vector;
/**
* An IndexSet storing the indices this vector owns exclusively.
/**
* Return dimension of the vector.
*/
- std::size_t size () const;
+ size_type size () const;
/**
* Return whether the vector contains only elements with value zero. This
template <typename Number>
inline
-std::size_t Vector<Number>::size () const
+typename Vector<Number>::size_type
+Vector<Number>::size () const
{
return vec_size;
}
{
values.reset();
max_vec_size = vec_size = 0;
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
return;
}
// only reset the partitioner if we actually expect a significant vector
// size
if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size)
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
}
if (omit_zeroing_entries == false)
{
values.reset();
max_vec_size = vec_size = 0;
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
return;
}
// only reset the partitioner if we actually expect a significant vector
// size
if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size)
- thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ thread_loop_partitioner = std::make_shared<parallel::internal::TBBPartitioner>();
}
// pad with zeroes
std::fill(wr.begin(), wr.end(), 0.);
ipiv.resize(8*mm);
- svd_u.reset (new LAPACKFullMatrix<number>(mm,mm));
- svd_vt.reset (new LAPACKFullMatrix<number>(nn,nn));
+ svd_u = std_cxx14::make_unique<LAPACKFullMatrix<number>>(mm,mm);
+ svd_vt = std_cxx14::make_unique<LAPACKFullMatrix<number>>(nn,nn);
number *const mu = &svd_u->values[0];
number *const mvt = &svd_vt->values[0];
types::blas_int info = 0;
// iterator for an empty line (what
// would it point to?)
Assert (ncols != 0, ExcInternalError());
- colnum_cache.reset (new std::vector<size_type> (colnums, colnums+ncols));
- value_cache.reset (new std::vector<PetscScalar> (values, values+ncols));
+ colnum_cache = std::make_shared<std::vector<size_type>> (colnums, colnums+ncols);
+ value_cache = std::make_shared<std::vector<PetscScalar>> (values, values+ncols);
// and finally restore the matrix
ierr = MatRestoreRow(*matrix, this->a_row, &ncols, &colnums, &values);
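// Illustrative sketch, not part of the patch: unlike the owning members
// converted to unique_ptr elsewhere, these row caches stay shared_ptr,
// presumably because iterator/accessor objects are copied freely and all
// copies must observe the same cached data:
#include <memory>
#include <vector>
struct SketchAccessor
{
  std::shared_ptr<std::vector<double>> value_cache;
};
// Copying the accessor copies only the pointer, not the vector:
//   SketchAccessor b = a; // b.value_cache.get() == a.value_cache.get()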
#include <deal.II/base/logstream.h>
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/lac/petsc_solver.h>
#ifdef DEAL_II_WITH_PETSC
// is necessary
if (solver_data.get() == nullptr)
{
- solver_data.reset (new SolverData());
+ solver_data = std_cxx14::make_unique<SolverData>();
PetscErrorCode ierr = KSPCreate (mpi_communicator, &solver_data->ksp);
AssertThrow (ierr == 0, ExcPETScError(ierr));
{
PetscErrorCode ierr;
- solver_data.reset (new SolverData());
+ solver_data = std_cxx14::make_unique<SolverData>();
ierr = KSPCreate (mpi_communicator, &solver_data->ksp);
AssertThrow (ierr == 0, ExcPETScError(ierr));
*/
if (solver_data.get() == 0)
{
- solver_data.reset (new SolverDataMUMPS ());
+ solver_data = std_cxx14::make_unique<SolverDataMUMPS>();
/**
* creates the default KSP context and puts it in the location
* Create a 1x1 column grid which will be used to initialize
* an effectively serial ScaLAPACK matrix to gather the contents from the current object
*/
- std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
+ const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
const int MB=n_rows, NB=n_columns;
ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
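// Illustrative sketch, not part of the patch: the spelled-out shared_ptr
// type on the left-hand side repeated what make_shared already says, so
// 'const auto' deduces std::shared_ptr<Utilities::MPI::ProcessGrid> exactly:
#include <memory>
const auto sketch_ptr = std::make_shared<int>(1); // std::shared_ptr<int>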
*
* Create a 1xn_processes column grid
*/
- std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
+ const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
const int MB=n_rows, NB=std::ceil(n_columns/n_mpi_processes);
ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
* Therefore, one process has all the data and can write it to a file
*/
//create a 1xP column grid with P being the number of MPI processes
- std::shared_ptr<Utilities::MPI::ProcessGrid> one_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
+ const auto one_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
const int MB=n_rows, NB=n_columns;
ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,one_grid,MB,NB);
* Therefore, the processes hold contiguous chunks of the matrix, which they can write to the file
*/
//create a 1xP column grid with P being the number of MPI processes
- std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
+ const auto column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
const int MB=n_rows, NB=std::ceil(n_columns/n_mpi_processes);
ScaLAPACKMatrix<NumberType> tmp(n_rows,n_columns,column_grid,MB,NB);
// ---------------------------------------------------------------------
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/base/vector_slice.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/sparsity_pattern.h>
{
vec_len = 1;
max_vec_len = vec_len;
- colnums.reset (new size_type[max_vec_len]);
+ colnums = std_cxx14::make_unique<size_type[]> (max_vec_len);
}
max_row_length = (row_lengths.size() == 0 ?
if (rows > max_dim)
{
max_dim = rows;
- rowstart.reset (new std::size_t[max_dim+1]);
+ rowstart = std_cxx14::make_unique<std::size_t[]> (max_dim+1);
}
// allocate memory for the column numbers if necessary
if (vec_len > max_vec_len)
{
max_vec_len = vec_len;
- colnums.reset (new size_type[max_vec_len]);
+ colnums = std_cxx14::make_unique<size_type[]> (max_vec_len);
}
// set the rowstart array
AssertThrow (c == '[', ExcIO());
// reallocate space
- rowstart.reset (new std::size_t[max_dim+1]);
- colnums.reset (new size_type[max_vec_len]);
+ rowstart = std_cxx14::make_unique<std::size_t[]> (max_dim+1);
+ colnums = std_cxx14::make_unique<size_type[]> (max_vec_len);
// then read data
in.read (reinterpret_cast<char *>(rowstart.get()),
//
// ---------------------------------------------------------------------
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/lac/trilinos_epetra_communication_pattern.h>
#ifdef DEAL_II_WITH_TRILINOS
// Target map is read_write_vector_map
// Source map is vector_space_vector_map. This map must have uniquely
// owned GID.
- import.reset(new Epetra_Import(read_write_vector_map, vector_space_vector_map));
+ import = std_cxx14::make_unique<Epetra_Import>(read_write_vector_map, vector_space_vector_map);
}
//
// ---------------------------------------------------------------------
+#include <deal.II/base/std_cxx14/memory.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#ifdef DEAL_II_WITH_TRILINOS
#include <deal.II/base/index_set.h>
#include <boost/io/ios_state.hpp>
+#include <memory>
#include <deal.II/lac/read_write_vector.h>
{
Epetra_Map input_map = parallel_partitioner.make_trilinos_map(communicator,false);
if (vector->Map().SameAs(input_map)==false)
- vector.reset(new Epetra_FEVector(input_map));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(input_map);
else if (omit_zeroing_entries==false)
{
const int ierr = vector->PutScalar(0.);
(void) ierr;
}
else
- vector.reset(new Epetra_FEVector(V.trilinos_vector()));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(V.trilinos_vector());
}
return *this;
const MPI_Comm &mpi_comm)
{
source_stored_elements = source_index_set;
- epetra_comm_pattern.reset(new CommunicationPattern(locally_owned_elements(),
- source_index_set, mpi_comm));
+ epetra_comm_pattern = std::make_shared<CommunicationPattern>
+ (locally_owned_elements(), source_index_set, mpi_comm);
}
}
}
const AdditionalData &additional_data)
{
preconditioner.reset ();
- preconditioner.reset (new Ifpack_Chebyshev (&matrix.trilinos_matrix()));
+ preconditioner = std::make_shared<Ifpack_Chebyshev> (&matrix.trilinos_matrix());
Ifpack_Chebyshev *ifpack = static_cast<Ifpack_Chebyshev *>
(preconditioner.get());
const Teuchos::ParameterList &ml_parameters)
{
preconditioner.reset ();
- preconditioner.reset (new ML_Epetra::MultiLevelPreconditioner
- (matrix, ml_parameters));
+ preconditioner = std::make_shared<ML_Epetra::MultiLevelPreconditioner>
+ (matrix, ml_parameters);
}
// equidistributed map; avoid
// storing the nonzero
// elements.
- vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
- 0, communicator));
+ vector_distributor = std::make_shared<Epetra_Map>
+ (static_cast<TrilinosWrappers::types::int_type>(n_rows),
+ 0, communicator);
if (trilinos_matrix.get() == nullptr)
- trilinos_matrix.reset (new SparseMatrix());
+ trilinos_matrix = std::make_shared<SparseMatrix>();
trilinos_matrix->reinit (*vector_distributor, *vector_distributor,
deal_ii_sparse_matrix, drop_tolerance, true,
// MueLu::EpetraOperator is just a wrapper around a "standard"
// Epetra_Operator.
- preconditioner.reset(new MueLu::EpetraOperator(hierarchy));
+ preconditioner = std::make_shared<MueLu::EpetraOperator>(hierarchy);
}
// equidistributed map; avoid
// storing the nonzero
// elements.
- vector_distributor.reset (new Epetra_Map(static_cast<TrilinosWrappers::types::int_type>(n_rows),
- 0, communicator));
+ vector_distributor = std::make_shared<Epetra_Map>
+ (static_cast<TrilinosWrappers::types::int_type>(n_rows),
+ 0, communicator);
if (trilinos_matrix.get() == nullptr)
- trilinos_matrix.reset (new SparseMatrix());
+ trilinos_matrix = std::make_shared<SparseMatrix>();
trilinos_matrix->reinit (*vector_distributor, *vector_distributor,
deal_ii_sparse_matrix, drop_tolerance, true,
#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/conditional_ostream.h>
+# include <deal.II/base/std_cxx14/memory.h>
# include <deal.II/lac/trilinos_sparse_matrix.h>
# include <deal.II/lac/trilinos_vector.h>
# include <deal.II/lac/trilinos_precondition.h>
const MPI::Vector &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &x.trilinos_vector(),
- const_cast<Epetra_MultiVector *>(&b.trilinos_vector())));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &x.trilinos_vector(),
+ const_cast<Epetra_MultiVector *>(&b.trilinos_vector()));
do_solve(preconditioner);
}
const MPI::Vector &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_Operator *>(&A),
- &x.trilinos_vector(),
- const_cast<Epetra_MultiVector *>(&b.trilinos_vector())));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_Operator *>(&A),
+ &x.trilinos_vector(),
+ const_cast<Epetra_MultiVector *>(&b.trilinos_vector()));
do_solve(preconditioner);
}
const MPI::Vector &b,
const Epetra_Operator &preconditioner)
{
- linear_problem.reset();
-
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_Operator *>(&A),
- &x.trilinos_vector(),
- const_cast<Epetra_MultiVector *>(&b.trilinos_vector())));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_Operator *>(&A),
+ &x.trilinos_vector(),
+ const_cast<Epetra_MultiVector *>(&b.trilinos_vector()));
do_solve(preconditioner);
}
const Epetra_MultiVector &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_Operator *>(&A),
- &x,
- const_cast<Epetra_MultiVector *>(&b)));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_Operator *>(&A),
+ &x,
+ const_cast<Epetra_MultiVector *>(&b));
do_solve(preconditioner);
}
const Epetra_MultiVector &b,
const Epetra_Operator &preconditioner)
{
- linear_problem.reset();
-
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_Operator *>(&A),
- &x,
- const_cast<Epetra_MultiVector *>(&b)));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_Operator *>(&A),
+ &x,
+ const_cast<Epetra_MultiVector *>(&b));
do_solve(preconditioner);
}
const dealii::Vector<double> &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
// In case we call the solver with deal.II vectors, we create views of the
// vectors in Epetra format.
Assert (x.size() == A.n(),
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem
- (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &ep_x, &ep_b);
do_solve(preconditioner);
}
const dealii::Vector<double> &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
Epetra_Vector ep_x (View, A.OperatorDomainMap(), x.begin());
Epetra_Vector ep_b (View, A.OperatorRangeMap(), const_cast<double *>(b.begin()));
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem(&A,&ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>(&A,&ep_x, &ep_b);
do_solve(preconditioner);
}
const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
// In case we call the solver with deal.II vectors, we create views of the
// vectors in Epetra format.
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(x.local_size()),
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem
- (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &ep_x, &ep_b);
do_solve(preconditioner);
}
const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner)
{
- linear_problem.reset();
-
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(x.local_size()),
A.OperatorDomainMap().NumMyElements());
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(b.local_size()),
// We need an Epetra_LinearProblem object to let the AztecOO solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem(&A,&ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>(&A,&ep_x, &ep_b);
do_solve(preconditioner);
}
class TrilinosReductionControl : public AztecOO_StatusTest
{
public:
- TrilinosReductionControl (const double &max_steps,
+ TrilinosReductionControl (const int &max_steps,
const double &tolerance,
const double &reduction,
const Epetra_LinearProblem &linear_problem);
private:
double initial_residual;
double current_residual;
- std::shared_ptr<AztecOO_StatusTestCombo> status_test_collection;
- std::shared_ptr<AztecOO_StatusTestMaxIters> status_test_max_steps;
- std::shared_ptr<AztecOO_StatusTestResNorm> status_test_abs_tol;
- std::shared_ptr<AztecOO_StatusTestResNorm> status_test_rel_tol;
+ std::unique_ptr<AztecOO_StatusTestCombo> status_test_collection;
+ std::unique_ptr<AztecOO_StatusTestMaxIters> status_test_max_steps;
+ std::unique_ptr<AztecOO_StatusTestResNorm> status_test_abs_tol;
+ std::unique_ptr<AztecOO_StatusTestResNorm> status_test_rel_tol;
};
TrilinosReductionControl::TrilinosReductionControl(
- const double &max_steps,
+ const int &max_steps,
const double &tolerance,
const double &reduction,
const Epetra_LinearProblem &linear_problem )
{
// Consider linear problem converged if any of the collection
// of criteria is met
- status_test_collection.reset(
- new AztecOO_StatusTestCombo (AztecOO_StatusTestCombo::OR) );
+ status_test_collection = std_cxx14::make_unique<AztecOO_StatusTestCombo>
+ (AztecOO_StatusTestCombo::OR);
// Maximum number of iterations
- status_test_max_steps.reset(
- new AztecOO_StatusTestMaxIters(max_steps) );
+ Assert (max_steps >= 0, ExcInternalError());
+ status_test_max_steps = std_cxx14::make_unique<AztecOO_StatusTestMaxIters>(max_steps);
status_test_collection->AddStatusTest(*status_test_max_steps);
Assert(linear_problem.GetRHS()->NumVectors() == 1,
ExcMessage("RHS multivector holds more than one vector"));
// Residual norm is below some absolute value
- status_test_abs_tol.reset(
- new AztecOO_StatusTestResNorm(*linear_problem.GetOperator(),
- *(linear_problem.GetLHS()->operator()(0)),
- *(linear_problem.GetRHS()->operator()(0)),
- tolerance) );
+ status_test_abs_tol = std_cxx14::make_unique<AztecOO_StatusTestResNorm>
+ (*linear_problem.GetOperator(),
+ *(linear_problem.GetLHS()->operator()(0)),
+ *(linear_problem.GetRHS()->operator()(0)),
+ tolerance);
status_test_abs_tol->DefineResForm(AztecOO_StatusTestResNorm::Explicit,
AztecOO_StatusTestResNorm::TwoNorm);
status_test_abs_tol->DefineScaleForm(AztecOO_StatusTestResNorm::None,
status_test_collection->AddStatusTest(*status_test_abs_tol);
// Residual norm, scaled by some initial value, is below some threshold
- status_test_rel_tol.reset(
- new AztecOO_StatusTestResNorm(*linear_problem.GetOperator(),
- *(linear_problem.GetLHS()->operator()(0)),
- *(linear_problem.GetRHS()->operator()(0)),
- reduction) );
+ status_test_rel_tol = std_cxx14::make_unique<AztecOO_StatusTestResNorm>
+ (*linear_problem.GetOperator(),
+ *(linear_problem.GetLHS()->operator()(0)),
+ *(linear_problem.GetRHS()->operator()(0)),
+ reduction);
status_test_rel_tol->DefineResForm(AztecOO_StatusTestResNorm::Explicit,
AztecOO_StatusTestResNorm::TwoNorm);
status_test_rel_tol->DefineScaleForm(AztecOO_StatusTestResNorm::NormOfInitRes,
if (const ReductionControl* const reduction_control
= dynamic_cast<const ReductionControl *const>(&solver_control))
{
- status_test.reset(new internal::TrilinosReductionControl(
- reduction_control->max_steps(),
- reduction_control->tolerance(),
- reduction_control->reduction(),
- *linear_problem) );
+ status_test = std_cxx14::make_unique<internal::TrilinosReductionControl>
+ (reduction_control->max_steps(),
+ reduction_control->tolerance(),
+ reduction_control->reduction(),
+ *linear_problem);
solver.SetStatusTest(status_test.get());
}
}
{
// We need an Epetra_LinearProblem object to let the Amesos solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem ());
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>();
// Assign the matrix operator to the Epetra_LinearProblem object
linear_problem->SetOperator(const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()));
ConditionalOStream verbose_cout (std::cout,
additional_data.output_solver_details);
- solver.reset();
-
// Next allocate the Amesos solver, this is done in two steps, first we
// create a solver Factory and generate with that the concrete Amesos
// solver, if possible.
ConditionalOStream verbose_cout (std::cout,
additional_data.output_solver_details);
- solver.reset ();
-
// Next allocate the Amesos solver, this is done in two steps, first we
// create a solver Factory and generate with that the concrete Amesos
// solver, if possible.
{
// We need an Epetra_LinearProblem object to let the Amesos solver know
// about the matrix and vectors.
- linear_problem.reset
- (new Epetra_LinearProblem(const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &x.trilinos_vector(),
- const_cast<Epetra_MultiVector *>(&b.trilinos_vector())));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &x.trilinos_vector(),
+ const_cast<Epetra_MultiVector *>(&b.trilinos_vector()));
do_solve();
}
// We need an Epetra_LinearProblem object to let the Amesos solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem
- (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &ep_x, &ep_b);
do_solve();
}
// We need an Epetra_LinearProblem object to let the Amesos solver know
// about the matrix and vectors.
- linear_problem.reset (new Epetra_LinearProblem
- (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
- &ep_x, &ep_b));
+ linear_problem = std_cxx14::make_unique<Epetra_LinearProblem>
+ (const_cast<Epetra_CrsMatrix *>(&A.trilinos_matrix()),
+ &ep_x, &ep_b);
do_solve();
}
# include <Teuchos_RCP.hpp>
#include <boost/container/small_vector.hpp>
+#include <memory>
DEAL_II_NAMESPACE_OPEN
TrilinosWrappers::types::int_type colnums = matrix->n();
if (value_cache.get() == nullptr)
{
- value_cache.reset (new std::vector<TrilinosScalar> (matrix->n()));
- colnum_cache.reset (new std::vector<size_type> (matrix->n()));
+ value_cache = std::make_shared<std::vector<TrilinosScalar>>(matrix->n());
+ colnum_cache = std::make_shared<std::vector<size_type>>(matrix->n());
}
else
{
if (needs_deep_copy)
{
- column_space_map.reset (new Epetra_Map (rhs.domain_partitioner()));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(rhs.domain_partitioner());
// release memory before reallocation
- matrix.reset ();
- matrix.reset (new Epetra_FECrsMatrix(*rhs.matrix));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(*rhs.matrix);
matrix->FillComplete(*column_space_map, matrix->RowMap());
}
if (rhs.nonlocal_matrix.get() != nullptr)
- nonlocal_matrix.reset(new Epetra_CrsMatrix(Copy, rhs.nonlocal_matrix->Graph()));
+ nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, rhs.nonlocal_matrix->Graph());
}
const Epetra_Map &input_col_map,
const SparsityPatternType &sparsity_pattern,
const bool exchange_data,
- std::shared_ptr<Epetra_Map> &column_space_map,
- std::shared_ptr<Epetra_FECrsMatrix> &matrix,
- std::shared_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
- std::shared_ptr<Epetra_Export> &nonlocal_matrix_exporter)
+ std::unique_ptr<Epetra_Map> &column_space_map,
+ std::unique_ptr<Epetra_FECrsMatrix> &matrix,
+ std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
+ std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
{
// release memory before reallocation
matrix.reset();
static_cast<size_type>(TrilinosWrappers::n_global_elements(input_col_map)));
}
- column_space_map.reset (new Epetra_Map (input_col_map));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
// if we want to exchange data, build a usual Trilinos sparsity pattern
// and let that handle the exchange. otherwise, manually create a
SparsityPattern trilinos_sparsity;
trilinos_sparsity.reinit (input_row_map, input_col_map,
sparsity_pattern, exchange_data);
- matrix.reset (new Epetra_FECrsMatrix
- (Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
+ (Copy, trilinos_sparsity.trilinos_sparsity_pattern(), false);
return;
}
// col_map that tells how the domain dofs of the matrix will be
// distributed). for only one processor, we can directly assign the
// columns as well. Compare this with bug # 4123 in the Sandia Bugzilla.
- std::shared_ptr<Epetra_CrsGraph> graph;
+ std::unique_ptr<Epetra_CrsGraph> graph;
if (input_row_map.Comm().NumProc() > 1)
- graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
- n_entries_per_row.data(), true));
+ graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map,
+ n_entries_per_row.data(), true);
else
- graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
- n_entries_per_row.data(), true));
+ graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map, input_col_map,
+ n_entries_per_row.data(), true);
// This function assumes that the sparsity pattern sits on all
// processors (completely). The parallel version uses an Epetra graph
(void)n_global_cols;
// And now finally generate the matrix.
- matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
}
const Epetra_Map &input_col_map,
const DynamicSparsityPattern &sparsity_pattern,
const bool exchange_data,
- std::shared_ptr<Epetra_Map> &column_space_map,
- std::shared_ptr<Epetra_FECrsMatrix> &matrix,
- std::shared_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
- std::shared_ptr<Epetra_Export> &nonlocal_matrix_exporter)
+ std::unique_ptr<Epetra_Map> &column_space_map,
+ std::unique_ptr<Epetra_FECrsMatrix> &matrix,
+ std::unique_ptr<Epetra_CrsMatrix> &nonlocal_matrix,
+ std::unique_ptr<Epetra_Export> &nonlocal_matrix_exporter)
{
matrix.reset();
nonlocal_matrix.reset();
AssertDimension (sparsity_pattern.n_cols(),
static_cast<size_type>(TrilinosWrappers::n_global_elements(input_col_map)));
- column_space_map.reset (new Epetra_Map (input_col_map));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(input_col_map);
IndexSet relevant_rows (sparsity_pattern.row_index_set());
// serial case
(ghost_rows.size()>0)?(ghost_rows.data()):nullptr,
0, input_row_map.Comm());
- std::shared_ptr<Epetra_CrsGraph> graph;
- std::shared_ptr<Epetra_CrsGraphMod> nonlocal_graph;
+ std::unique_ptr<Epetra_CrsGraph> graph;
+ std::unique_ptr<Epetra_CrsGraphMod> nonlocal_graph;
if (input_row_map.Comm().NumProc() > 1)
{
- graph.reset (new Epetra_CrsGraph (Copy, input_row_map,
- (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
- exchange_data ? false : true));
+ graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map,
+ (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
+ exchange_data ? false : true);
if (have_ghost_rows == true)
- nonlocal_graph.reset (new Epetra_CrsGraphMod (off_processor_map,
- n_entries_per_ghost_row.data()));
+ nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraphMod>(off_processor_map,
+ n_entries_per_ghost_row.data());
}
else
- graph.reset (new Epetra_CrsGraph (Copy, input_row_map, input_col_map,
- (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
- true));
+ graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, input_row_map, input_col_map,
+ (n_entries_per_row.size()>0)?(n_entries_per_row.data()):nullptr,
+ true);
// now insert the indices, selecting the right matrix
std::vector<TrilinosWrappers::types::int_type> row_indices;
Assert (ierr==0, ExcTrilinosError(ierr));
}
- nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *nonlocal_graph));
+ nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *nonlocal_graph);
}
graph->FillComplete(input_col_map, input_row_map);
AssertDimension (sparsity_pattern.n_cols(),static_cast<size_type>(
TrilinosWrappers::n_global_cols(*graph)));
- matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
}
}
nonlocal_matrix_exporter.reset();
// reinit with a (parallel) Trilinos sparsity pattern.
- column_space_map.reset (new Epetra_Map
- (sparsity_pattern.domain_partitioner()));
- matrix.reset (new Epetra_FECrsMatrix
- (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>
+ (sparsity_pattern.domain_partitioner());
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
+ (Copy, sparsity_pattern.trilinos_sparsity_pattern(), false);
if (sparsity_pattern.nonlocal_graph.get() != nullptr)
- nonlocal_matrix.reset (new Epetra_CrsMatrix(Copy, *sparsity_pattern.nonlocal_graph));
+ nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>(Copy, *sparsity_pattern.nonlocal_graph);
else
nonlocal_matrix.reset ();
if (this == &sparse_matrix)
return;
- column_space_map.reset (new Epetra_Map (sparse_matrix.domain_partitioner()));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(sparse_matrix.domain_partitioner());
matrix.reset ();
nonlocal_matrix_exporter.reset();
- matrix.reset (new Epetra_FECrsMatrix
- (Copy, sparse_matrix.trilinos_sparsity_pattern(), false));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>
+ (Copy, sparse_matrix.trilinos_sparsity_pattern(), false);
if (sparse_matrix.nonlocal_matrix != nullptr)
- nonlocal_matrix.reset (new Epetra_CrsMatrix
- (Copy, sparse_matrix.nonlocal_matrix->Graph()));
+ nonlocal_matrix = std_cxx14::make_unique<Epetra_CrsMatrix>
+ (Copy, sparse_matrix.nonlocal_matrix->Graph());
else
nonlocal_matrix.reset();
Assert (input_matrix.Filled()==true,
ExcMessage("Input CrsMatrix has not called FillComplete()!"));
- column_space_map.reset (new Epetra_Map (input_matrix.DomainMap()));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(input_matrix.DomainMap());
const Epetra_CrsGraph *graph = &input_matrix.Graph();
nonlocal_matrix.reset();
nonlocal_matrix_exporter.reset();
matrix.reset ();
- matrix.reset (new Epetra_FECrsMatrix(Copy, *graph, false));
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(Copy, *graph, false);
matrix->FillComplete (*column_space_map, input_matrix.RangeMap(), true);
// processor must have set the correct entry
nonlocal_matrix->FillComplete(*column_space_map, matrix->RowMap());
if (nonlocal_matrix_exporter.get() == nullptr)
- nonlocal_matrix_exporter.reset
- (new Epetra_Export(nonlocal_matrix->RowMap(), matrix->RowMap()));
+ nonlocal_matrix_exporter = std_cxx14::make_unique<Epetra_Export>
+ (nonlocal_matrix->RowMap(), matrix->RowMap());
ierr = matrix->Export(*nonlocal_matrix, *nonlocal_matrix_exporter, mode);
AssertThrow(ierr == 0, ExcTrilinosError(ierr));
ierr = matrix->FillComplete(*column_space_map, matrix->RowMap());
// When we clear the matrix, reset
// the pointer and generate an
// empty matrix.
- column_space_map.reset (new Epetra_Map (0, 0,
- Utilities::Trilinos::comm_self()));
- matrix.reset (new Epetra_FECrsMatrix(View, *column_space_map, 0));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>
+ (0, 0, Utilities::Trilinos::comm_self());
+ matrix = std_cxx14::make_unique<Epetra_FECrsMatrix>(View, *column_space_map, 0);
nonlocal_matrix.reset();
nonlocal_matrix_exporter.reset();
// otherwise first flush Trilinos caches
sparsity_pattern->compress ();
- colnum_cache.reset (new std::vector<size_type> (sparsity_pattern->row_length(this->a_row)));
-
+ colnum_cache = std::make_shared<std::vector<size_type>>(sparsity_pattern->row_length(this->a_row));
if (colnum_cache->size() > 0)
{
// interface.
SparsityPattern::SparsityPattern ()
{
- column_space_map.reset(new Epetra_Map (TrilinosWrappers::types::int_type(0),
- TrilinosWrappers::types::int_type(0),
- Utilities::Trilinos::comm_self()));
- graph.reset (new Epetra_FECrsGraph(View,
- *column_space_map,
- *column_space_map,
- 0));
+ column_space_map = std_cxx14::make_unique<Epetra_Map> (TrilinosWrappers::types::int_type(0),
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::comm_self());
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>(View,
+ *column_space_map,
+ *column_space_map,
+ 0);
graph->FillComplete();
}
reinit_sp (const Epetra_Map &row_map,
const Epetra_Map &col_map,
const size_type n_entries_per_row,
- std::shared_ptr<Epetra_Map> &column_space_map,
- std::shared_ptr<Epetra_FECrsGraph> &graph,
- std::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ std::unique_ptr<Epetra_Map> &column_space_map,
+ std::unique_ptr<Epetra_FECrsGraph> &graph,
+ std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
{
Assert(row_map.IsOneToOne(),
ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
nonlocal_graph.reset();
graph.reset ();
- column_space_map.reset (new Epetra_Map (col_map));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
// for more than one processor, need to specify only row map first and
// let the matrix entries decide about the column map (which says which
// require building a non-local graph which gives us thread-safe
// initialization.
if (row_map.Comm().NumProc() > 1)
- graph.reset (new Epetra_FECrsGraph(Copy, row_map,
- n_entries_per_row, false
- // TODO: Check which new Trilinos
- // version supports this... Remember
- // to change tests/trilinos/assemble_matrix_parallel_07
- // too.
- //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
- //, true
- //#endif
- ));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>
+ (Copy, row_map, n_entries_per_row, false
+ // TODO: Check which new Trilinos version supports this...
+ // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
+//#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+// , true
+//#endif
+ );
else
- graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
- n_entries_per_row, false));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>
+ (Copy, row_map, col_map, n_entries_per_row, false);
}
reinit_sp (const Epetra_Map &row_map,
const Epetra_Map &col_map,
const std::vector<size_type> &n_entries_per_row,
- std::shared_ptr<Epetra_Map> &column_space_map,
- std::shared_ptr<Epetra_FECrsGraph> &graph,
- std::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ std::unique_ptr<Epetra_Map> &column_space_map,
+ std::unique_ptr<Epetra_FECrsGraph> &graph,
+ std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
{
Assert(row_map.IsOneToOne(),
ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
AssertDimension (n_entries_per_row.size(),
static_cast<size_type>(TrilinosWrappers::n_global_elements(row_map)));
- column_space_map.reset (new Epetra_Map (col_map));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
std::vector<int> local_entries_per_row(TrilinosWrappers::max_my_gid(row_map)-
TrilinosWrappers::min_my_gid(row_map));
for (unsigned int i=0; i<local_entries_per_row.size(); ++i)
local_entries_per_row[i] = n_entries_per_row[TrilinosWrappers::min_my_gid(row_map)+i];
if (row_map.Comm().NumProc() > 1)
- graph.reset(new Epetra_FECrsGraph(Copy, row_map,
- local_entries_per_row.data(),
- false
- // TODO: Check which new Trilinos
- // version supports this... Remember
- // to change tests/trilinos/assemble_matrix_parallel_07
- // too.
- //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
- //, true
- //#endif
- ));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>
+ (Copy, row_map, local_entries_per_row.data(), false
+ // TODO: Check which new Trilinos version supports this...
+ // Remember to change tests/trilinos/assemble_matrix_parallel_07, too.
+//#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+// , true
+//#endif
+ );
else
- graph.reset(new Epetra_FECrsGraph(Copy, row_map, col_map,
- local_entries_per_row.data(),
- false));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>
+ (Copy, row_map, col_map, local_entries_per_row.data(), false);
}
const Epetra_Map &col_map,
const SparsityPatternType &sp,
const bool exchange_data,
- std::shared_ptr<Epetra_Map> &column_space_map,
- std::shared_ptr<Epetra_FECrsGraph> &graph,
- std::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ std::unique_ptr<Epetra_Map> &column_space_map,
+ std::unique_ptr<Epetra_FECrsGraph> &graph,
+ std::unique_ptr<Epetra_CrsGraph> &nonlocal_graph)
{
nonlocal_graph.reset ();
graph.reset ();
AssertDimension (sp.n_cols(),
static_cast<size_type>(TrilinosWrappers::n_global_elements(col_map)));
- column_space_map.reset (new Epetra_Map (col_map));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(col_map);
Assert (row_map.LinearMap() == true,
ExcMessage ("This function only works if the row map is contiguous."));
n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
if (row_map.Comm().NumProc() > 1)
- graph.reset(new Epetra_FECrsGraph(Copy, row_map,
- n_entries_per_row.data(),
- false));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>(Copy, row_map,
+ n_entries_per_row.data(),
+ false);
else
- graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
- n_entries_per_row.data(),
- false));
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>(Copy, row_map, col_map,
+ n_entries_per_row.data(),
+ false);
AssertDimension (sp.n_rows(),
static_cast<size_type>(n_global_rows(*graph)));
{
Epetra_Map nonlocal_map =
nonlocal_partitioner.make_trilinos_map(communicator, true);
- nonlocal_graph.reset(new Epetra_CrsGraph(Copy, nonlocal_map, 0));
+ nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraph>(Copy, nonlocal_map, 0);
}
else
Assert(nonlocal_partitioner.n_elements() == 0, ExcInternalError());
void
SparsityPattern::copy_from (const SparsityPattern &sp)
{
- column_space_map.reset (new Epetra_Map (*sp.column_space_map));
- graph.reset (new Epetra_FECrsGraph(*sp.graph));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(*sp.column_space_map);
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>(*sp.graph);
if (sp.nonlocal_graph.get()!=nullptr)
- nonlocal_graph.reset(new Epetra_CrsGraph(*sp.nonlocal_graph));
+ nonlocal_graph = std_cxx14::make_unique<Epetra_CrsGraph>(*sp.nonlocal_graph);
else
nonlocal_graph.reset();
}
// When we clear the matrix, reset
// the pointer and generate an
// empty sparsity pattern.
- column_space_map.reset (new Epetra_Map (TrilinosWrappers::types::int_type(0),
- TrilinosWrappers::types::int_type(0),
- Utilities::Trilinos::comm_self()));
- graph.reset (new Epetra_FECrsGraph(View, *column_space_map,
- *column_space_map, 0));
+ column_space_map = std_cxx14::make_unique<Epetra_Map>(TrilinosWrappers::types::int_type(0),
+ TrilinosWrappers::types::int_type(0),
+ Utilities::Trilinos::comm_self());
+ graph = std_cxx14::make_unique<Epetra_FECrsGraph>(View, *column_space_map,
+ *column_space_map, 0);
graph->FillComplete();
nonlocal_graph.reset();
#ifdef DEAL_II_WITH_TRILINOS
# include <deal.II/base/mpi.h>
+# include <deal.II/base/std_cxx14/memory.h>
# include <deal.II/lac/trilinos_sparse_matrix.h>
# include <deal.II/lac/trilinos_parallel_block_vector.h>
# include <deal.II/lac/trilinos_index_access.h>
Vector()
{
has_ghosts = v.has_ghosts;
- vector.reset(new Epetra_FEVector(*v.vector));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(*v.vector);
owned_elements = v.owned_elements;
}
ExcDimensionMismatch (parallel_partitioner.size(),
TrilinosWrappers::n_global_elements(v.vector->Map())));
- vector.reset (new Epetra_FEVector
- (parallel_partitioner.make_trilinos_map(communicator,
- true)));
+ vector = std_cxx14::make_unique<Epetra_FEVector>
+ (parallel_partitioner.make_trilinos_map(communicator, true));
reinit (v, false, true);
}
#endif
has_ghosts = false;
- vector.reset (new Epetra_FEVector(map));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(map);
last_action = Zero;
}
Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator,
true);
- vector.reset (new Epetra_FEVector(map));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(map);
has_ghosts = vector->Map().UniqueGIDs()==false;
#endif
if (!same_communicators || vector->Map().SameAs(v.vector->Map()) == false)
{
- vector.reset (new Epetra_FEVector(v.vector->Map()));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(v.vector->Map());
has_ghosts = v.has_ghosts;
last_action = Zero;
owned_elements = v.owned_elements;
Epetra_Map new_map (v.size(), n_elements, global_ids.data(), 0,
v.block(0).vector_partitioner().Comm());
- std::shared_ptr<Epetra_FEVector> actual_vec;
- if ( import_data == true )
- actual_vec.reset (new Epetra_FEVector (new_map));
- else
- {
- vector.reset (new Epetra_FEVector (new_map));
- actual_vec = vector;
- }
+ auto actual_vec = std_cxx14::make_unique<Epetra_FEVector>(new_map);
TrilinosScalar *entries = (*actual_vec)[0];
for (size_type block=0; block<v.n_blocks(); ++block)
last_action = Insert;
}
+ else
+ vector = std::move(actual_vec);
#if defined(DEBUG) && defined(DEAL_II_WITH_MPI)
const Epetra_MpiComm *comm_ptr
= dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
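// Illustrative sketch, not part of the patch: unique_ptr is move-only, so
// handing the freshly built vector over to the class member requires
// std::move, after which the local pointer is null:
#include <memory>
#include <utility>
void sketch_transfer ()
{
  auto tmp    = std::make_unique<int>(42);
  auto member = std::move(tmp); // ownership transferred; tmp is now nullptr
}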
parallel_partitioner.add_indices(ghost_entries);
Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator,
true);
- vector.reset (new Epetra_FEVector(map));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(map);
}
else
{
"its parallel partitioning"));
if (vector->Map().SameAs(map)==false)
- vector.reset (new Epetra_FEVector(map));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(map);
else
{
const int ierr = vector->PutScalar(0.);
{
Epetra_Map nonlocal_map =
nonlocal_entries.make_trilinos_map(communicator, true);
- nonlocal_vector.reset(new Epetra_MultiVector(nonlocal_map, 1));
+ nonlocal_vector = std_cxx14::make_unique<Epetra_MultiVector>(nonlocal_map, 1);
}
}
{
*vector = *v.vector;
if (v.nonlocal_vector.get() != nullptr)
- nonlocal_vector.reset(new Epetra_MultiVector(v.nonlocal_vector->Map(), 1));
+ nonlocal_vector = std_cxx14::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
last_action = Zero;
}
// Second case: vectors have the same global
// size.
else
{
- vector.reset (new Epetra_FEVector(*v.vector));
+ vector = std_cxx14::make_unique<Epetra_FEVector>(*v.vector);
last_action = Zero;
has_ghosts = v.has_ghosts;
owned_elements = v.owned_elements;
}
if (v.nonlocal_vector.get() != nullptr)
- nonlocal_vector.reset(new Epetra_MultiVector(v.nonlocal_vector->Map(), 1));
+ nonlocal_vector = std_cxx14::make_unique<Epetra_MultiVector>(v.nonlocal_vector->Map(), 1);
return *this;
}
"which is not allowed."));
if (vector->Map().SameAs(m.trilinos_matrix().ColMap()) == false)
- {
- vector.reset (new Epetra_FEVector(
- m.trilinos_matrix().ColMap()
- ));
- }
+ vector = std_cxx14::make_unique<Epetra_FEVector>(m.trilinos_matrix().ColMap());
Epetra_Import data_exchange (vector->Map(), v.vector->Map());
const int ierr = vector->Import(*v.vector, data_exchange, Insert);