LinearAlgebra::distributed::Vector<double>;
LinearAlgebra::distributed::Vector<float> ;
- parallel::distributed::BlockVector<double>;
- parallel::distributed::BlockVector<float> ;
+ LinearAlgebra::distributed::BlockVector<double>;
+ LinearAlgebra::distributed::BlockVector<float> ;
@DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
BlockVector<double>;
BlockVector<float>;
- parallel::distributed::Vector<double>;
- parallel::distributed::Vector<float> ;
+ LinearAlgebra::distributed::Vector<double>;
+ LinearAlgebra::distributed::Vector<float> ;
- parallel::distributed::BlockVector<double>;
- parallel::distributed::BlockVector<float> ;
+ LinearAlgebra::distributed::BlockVector<double>;
+ LinearAlgebra::distributed::BlockVector<float> ;
@DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
BlockVector<double>;
BlockVector<float>;
- parallel::distributed::Vector<double>;
+ LinearAlgebra::distributed::Vector<double>;
@DEAL_II_EXPAND_TRILINOS_VECTOR@;
@DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
// This includes the data structures for the efficient implementation of
// matrix-free methods.
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
SineGordonOperation(const MatrixFree<dim,double> &data_in,
const double time_step);
- void apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const;
+ void apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const;
private:
const MatrixFree<dim,double> &data;
const VectorizedArray<double> delta_t_sqr;
- parallel::distributed::Vector<double> inv_mass_matrix;
+ LinearAlgebra::distributed::Vector<double> inv_mass_matrix;
void local_apply (const MatrixFree<dim,double> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const;
};
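// Illustrative sketch (not part of the original patch): the calling
// convention of the operator above with the renamed vector type. The helper
// name `advance_one_step` and its arguments are made up for the example; the
// operator and vectors are assumed to be initialized elsewhere.
template <int dim, int fe_degree>
void advance_one_step (const SineGordonOperation<dim,fe_degree>   &op,
                       LinearAlgebra::distributed::Vector<double> &solution,
                       LinearAlgebra::distributed::Vector<double> &old_solution,
                       LinearAlgebra::distributed::Vector<double> &old_old_solution)
{
  std::vector<LinearAlgebra::distributed::Vector<double>*> previous_solutions;
  previous_solutions.push_back (&old_solution);
  previous_solutions.push_back (&old_old_solution);
  op.apply (solution, previous_solutions);   // writes the updated solution into `solution`
}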
// one destination vector, even if we happen to use many vectors like the two
// in this case. Note that the cell loop accepts any valid class for input and
// output, which does not only include vectors but general data types.
- // However, only in case it encounters a parallel::distributed::Vector<Number>
+ // However, only in case it encounters a LinearAlgebra::distributed::Vector<Number>
// or a <tt>std::vector</tt> collecting these vectors, it calls functions that
// exchange data at the beginning and the end of the loop. In the loop over
// the cells, we first have to read in the values in the vectors related to
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
local_apply (const MatrixFree<dim> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
AssertDimension (src.size(), 2);
// provide a function with the same signature that is not part of a class.
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
- apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const
+ apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const
{
dst = 0;
data.cell_loop (&SineGordonOperation<dim,fe_degree>::local_apply,
MatrixFree<dim,double> matrix_free_data;
- parallel::distributed::Vector<double> solution, old_solution, old_old_solution;
+ LinearAlgebra::distributed::Vector<double> solution, old_solution, old_old_solution;
const unsigned int n_global_refinements;
double time, time_step;
// the integrate_difference function as well as in DataOut. We only need to
// make sure that we tell the vector to update its ghost values before we
// read them. This is a feature present only in the
- // parallel::distributed::Vector class. Distributed vectors with PETSc and
+ // LinearAlgebra::distributed::Vector class. Distributed vectors with PETSc and
// Trilinos, on the other hand, need to be copied to special vectors
// including ghost values (see the relevant section in step-40). If we
// wanted to access all degrees of freedom on ghost cells, too (e.g. when
old_solution);
output_results (0);
- std::vector<parallel::distributed::Vector<double>*> previous_solutions;
+ std::vector<LinearAlgebra::distributed::Vector<double>*> previous_solutions;
previous_solutions.push_back(&old_solution);
previous_solutions.push_back(&old_old_solution);
* The reason this function exists is that this operation involves less
* memory transfer than calling the two functions separately on deal.II's
* vector classes (Vector<Number> and
- * parallel::distributed::Vector<double>). This method only needs to load
+ * LinearAlgebra::distributed::Vector<double>). This method only needs to load
* three vectors, @p this, @p V, @p W, whereas calling separate methods
* means to load the calling vector @p this twice. Since most vector
* operations are memory transfer limited, this reduces the time by 25\% (or
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/block_sparse_matrix.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_vector.h>
}
template<typename Number>
- void set_zero_parallel(const std::vector<size_type> &cm, parallel::distributed::Vector<Number> &vec, size_type shift = 0)
+ void set_zero_parallel(const std::vector<size_type> &cm, LinearAlgebra::distributed::Vector<Number> &vec, size_type shift = 0)
{
for (typename std::vector<size_type>::const_iterator it = cm.begin();
it != cm.end(); ++it)
template <typename number>
void
- import_vector_with_ghost_elements (const parallel::distributed::Vector<number> &vec,
+ import_vector_with_ghost_elements (const LinearAlgebra::distributed::Vector<number> &vec,
const IndexSet &locally_owned_elements,
const IndexSet &needed_elements,
- parallel::distributed::Vector<number> &output,
+ LinearAlgebra::distributed::Vector<number> &output,
const internal::bool2type<false> /*is_block_vector*/)
{
// TODO: the in vector might already have all elements. need to find a
// way to efficiently avoid the copy then
- const_cast<parallel::distributed::Vector<number>&>(vec).zero_out_ghosts();
+ const_cast<LinearAlgebra::distributed::Vector<number>&>(vec).zero_out_ghosts();
output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
output = vec;
output.update_ghost_values();
# include <deal.II/base/std_cxx11/shared_ptr.h>
# include <deal.II/lac/trilinos_vector_base.h>
-# include <deal.II/lac/parallel_vector.h>
+# include <deal.II/lac/la_parallel_vector.h>
DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# ifdef DEAL_II_WITH_MPI
* Apply the preconditioner on deal.II parallel data structures instead of
* the ones provided in the Trilinos wrapper class.
*/
- virtual void vmult (dealii::parallel::distributed::Vector<double> &dst,
- const dealii::parallel::distributed::Vector<double> &src) const;
+ virtual void vmult (dealii::LinearAlgebra::distributed::Vector<double> &dst,
+ const dealii::LinearAlgebra::distributed::Vector<double> &src) const;
/**
* Apply the transpose preconditioner on deal.II parallel data structures
* instead of the ones provided in the Trilinos wrapper class.
*/
- virtual void Tvmult (dealii::parallel::distributed::Vector<double> &dst,
- const dealii::parallel::distributed::Vector<double> &src) const;
+ virtual void Tvmult (dealii::LinearAlgebra::distributed::Vector<double> &dst,
+ const dealii::LinearAlgebra::distributed::Vector<double> &src) const;
/**
* Return a reference to the underlying Trilinos Epetra_Operator. So you
* Apply the preconditioner on deal.II parallel data structures instead of
* the ones provided in the Trilinos wrapper class, i.e., dst = src.
*/
- void vmult (parallel::distributed::Vector<double> &dst,
- const dealii::parallel::distributed::Vector<double> &src) const;
+ void vmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const dealii::LinearAlgebra::distributed::Vector<double> &src) const;
/**
* Apply the transpose preconditioner on deal.II parallel data structures
* instead of the ones provided in the Trilinos wrapper class, i.e., dst =
* src.
*/
- void Tvmult (parallel::distributed::Vector<double> &dst,
- const dealii::parallel::distributed::Vector<double> &src) const;
+ void Tvmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const dealii::LinearAlgebra::distributed::Vector<double> &src) const;
};
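// Illustrative sketch (not part of the original patch): applying a Trilinos
// preconditioner to the renamed deal.II vector type through the overloads
// declared above. The helper name is made up; the preconditioner is assumed
// to be initialized by the caller.
void apply_preconditioner (const TrilinosWrappers::PreconditionBase         &prec,
                           LinearAlgebra::distributed::Vector<double>       &dst,
                           const LinearAlgebra::distributed::Vector<double> &src)
{
  prec.vmult (dst, src);    // deal.II parallel vectors instead of the Trilinos wrappers
}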
inline
void
- PreconditionBase::vmult (parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ PreconditionBase::vmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(dst.local_size()),
preconditioner->OperatorDomainMap().NumMyElements());
inline
void
- PreconditionBase::Tvmult (parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ PreconditionBase::Tvmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(dst.local_size()),
preconditioner->OperatorDomainMap().NumMyElements());
# include <deal.II/lac/exceptions.h>
# include <deal.II/lac/solver_control.h>
# include <deal.II/lac/vector.h>
-# include <deal.II/lac/parallel_vector.h>
+# include <deal.II/lac/la_parallel_vector.h>
DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# include <Epetra_LinearProblem.h>
*/
void
solve (const SparseMatrix &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b,
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner);
/**
*/
void
solve (Epetra_Operator &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b,
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner);
*/
void
solve (const SparseMatrix &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b);
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b);
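// Illustrative sketch (not part of the original patch): solving a Trilinos
// system with the renamed deal.II vectors via the overloads declared above.
// The helper name and the tolerance are made up for the example.
void solve_with_deal_ii_vectors (const TrilinosWrappers::SparseMatrix             &A,
                                 LinearAlgebra::distributed::Vector<double>       &x,
                                 const LinearAlgebra::distributed::Vector<double> &b,
                                 const TrilinosWrappers::PreconditionBase         &preconditioner)
{
  SolverControl              solver_control (1000, 1e-10 * b.l2_norm());
  TrilinosWrappers::SolverCG solver (solver_control);
  solver.solve (A, x, b, preconditioner);
}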
/**
* Access to object that controls convergence.
* This function can be called with several different vector objects,
* namely TrilinosWrappers::Vector, TrilinosWrappers::MPI::Vector as well
* as deal.II's own vector classes Vector<double> and
- * parallel::distributed::Vector<double>.
+ * LinearAlgebra::distributed::Vector<double>.
*
* Note that both vectors have to be distributed vectors generated using
* the same Map as was used for the matrix in case you work on a
* This function can be called with several different vector objects,
* namely TrilinosWrappers::Vector, TrilinosWrappers::MPI::Vector as well
* as deal.II's own vector classes Vector<double> and
- * parallel::distributed::Vector<double>.
+ * LinearAlgebra::distributed::Vector<double>.
*
* Note that both vectors have to be distributed vectors generated using
* the same Map as was used for the matrix in case you work on a
* This function can be called with several different vector objects,
* namely TrilinosWrappers::Vector, TrilinosWrappers::MPI::Vector as well
* as deal.II's own vector classes Vector<double> and
- * parallel::distributed::Vector<double>.
+ * LinearAlgebra::distributed::Vector<double>.
*
* When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
* have to be distributed vectors generated using the same Map as was used
* This function can be called with several different vector objects,
* namely TrilinosWrappers::Vector, TrilinosWrappers::MPI::Vector as well
* as deal.II's own vector classes Vector<double> and
- * parallel::distributed::Vector<double>.
+ * LinearAlgebra::distributed::Vector<double>.
*
* When using a vector of type TrilinosWrappers::MPI::Vector, both vectors
* have to be distributed vectors generated using the same Map as was used
#include <deal.II/fe/mapping.h>
#include <deal.II/fe/mapping_q1.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/block_vector_base.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/dofs/dof_handler.h>
/**
* Initialize function for a general vector. The length of the vector is
* equal to the total number of degrees of freedom in the DoFHandler. If the vector is
- * of class parallel::distributed::Vector@<Number@>, the ghost entries are
+ * of class LinearAlgebra::distributed::Vector@<Number@>, the ghost entries are
* set accordingly. For vector-valued problems with several DoFHandlers
* underlying this class, the parameter @p vector_component defines which
* component is to be used.
/**
* Initialize function for a distributed vector. The length of the vector is
* equal to the total number of degrees in the DoFHandler. If the vector is
- * of class parallel::distributed::Vector@<Number@>, the ghost entries are
+ * of class LinearAlgebra::distributed::Vector@<Number@>, the ghost entries are
* set accordingly. For vector-valued problems with several DoFHandlers
* underlying this class, the parameter @p vector_component defines which
* component is to be used.
*/
template <typename Number2>
- void initialize_dof_vector(parallel::distributed::Vector<Number2> &vec,
+ void initialize_dof_vector(LinearAlgebra::distributed::Vector<Number2> &vec,
const unsigned int vector_component=0) const;
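// Illustrative sketch (not part of the original patch): the typical way to set
// up a vector with the correct parallel layout from a MatrixFree object, using
// the renamed vector class. The helper name is made up for the example.
template <int dim, typename Number>
LinearAlgebra::distributed::Vector<Number>
make_dof_vector (const MatrixFree<dim,Number> &matrix_free)
{
  LinearAlgebra::distributed::Vector<Number> vec;
  matrix_free.initialize_dof_vector (vec);   // sizes the vector and sets up its ghost entries
  vec.update_ghost_values ();                // make ghost entries readable on this processor
  return vec;
}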
/**
template <typename Number2>
inline
void
-MatrixFree<dim,Number>::initialize_dof_vector(parallel::distributed::Vector<Number2> &vec,
+MatrixFree<dim,Number>::initialize_dof_vector(LinearAlgebra::distributed::Vector<Number2> &vec,
const unsigned int comp) const
{
AssertIndexRange (comp, n_components());
template<typename Number>
inline
- bool update_ghost_values_start (const parallel::distributed::Vector<Number> &vec,
+ bool update_ghost_values_start (const LinearAlgebra::distributed::Vector<Number> &vec,
const unsigned int channel = 0)
{
bool return_value = !vec.has_ghost_elements();
template<typename Number>
inline
- void reset_ghost_values (const parallel::distributed::Vector<Number> &vec,
+ void reset_ghost_values (const LinearAlgebra::distributed::Vector<Number> &vec,
const bool zero_out_ghosts)
{
if (zero_out_ghosts)
- const_cast<parallel::distributed::Vector<Number>&>(vec).zero_out_ghosts();
+ const_cast<LinearAlgebra::distributed::Vector<Number>&>(vec).zero_out_ghosts();
}
template <typename Number>
inline
- void update_ghost_values_finish (const parallel::distributed::Vector<Number> &vec)
+ void update_ghost_values_finish (const LinearAlgebra::distributed::Vector<Number> &vec)
{
vec.update_ghost_values_finish();
}
template <typename Number>
inline
- void compress_start (parallel::distributed::Vector<Number> &vec,
+ void compress_start (LinearAlgebra::distributed::Vector<Number> &vec,
const unsigned int channel = 0)
{
vec.compress_start(channel);
template <typename Number>
inline
- void compress_finish (parallel::distributed::Vector<Number> &vec)
+ void compress_finish (LinearAlgebra::distributed::Vector<Number> &vec)
{
vec.compress_finish(::dealii::VectorOperation::add);
}
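// Illustrative sketch (not part of the original patch): the public member
// functions that the internal helpers above wrap, called on a ghosted
// LinearAlgebra::distributed::Vector. The helper name is made up.
void exchange_ghost_data (LinearAlgebra::distributed::Vector<double> &vec)
{
  vec.compress (VectorOperation::add);   // accumulate entries written to ghost positions on their owners
  vec.update_ghost_values ();            // then mirror the owners' values back into the ghost entries
  vec.zero_out_ghosts ();                // reset ghost entries before the next write phase
}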
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>
#ifdef DEAL_II_WITH_TRILINOS
template <typename Number>
- struct MatrixSelector<parallel::distributed::Vector<Number> >
+ struct MatrixSelector<LinearAlgebra::distributed::Vector<Number> >
{
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
* Implementation of transfer between the global vectors and the multigrid
* levels for use in the derived class MGTransferPrebuilt and other classes.
* This class is a specialization for the case of
- * parallel::distributed::Vector that requires a few different calling
+ * LinearAlgebra::distributed::Vector that requires a few different calling
* routines as compared to the %parallel vectors in the PETScWrappers and
* TrilinosWrappers namespaces.
*
* @date 2016
*/
template <typename Number>
-class MGLevelGlobalTransfer<parallel::distributed::Vector<Number> > : public MGTransferBase<parallel::distributed::Vector<Number> >
+class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> > : public MGTransferBase<LinearAlgebra::distributed::Vector<Number> >
{
public:
template <int dim, typename Number2, int spacedim>
void
copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof,
- MGLevelObject<parallel::distributed::Vector<Number> > &dst,
- const parallel::distributed::Vector<Number2> &src) const;
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src) const;
/**
* Transfer from multi-level vector to normal vector.
template <int dim, typename Number2, int spacedim>
void
copy_from_mg (const DoFHandler<dim,spacedim> &mg_dof,
- parallel::distributed::Vector<Number2> &dst,
- const MGLevelObject<parallel::distributed::Vector<Number> > &src) const;
+ LinearAlgebra::distributed::Vector<Number2> &dst,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const;
/**
* Add a multi-level vector to a normal vector.
template <int dim, typename Number2, int spacedim>
void
copy_from_mg_add (const DoFHandler<dim,spacedim> &mg_dof,
- parallel::distributed::Vector<Number2> &dst,
- const MGLevelObject<parallel::distributed::Vector<Number> > &src) const;
+ LinearAlgebra::distributed::Vector<Number2> &dst,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const;
/**
* If this object operates on BlockVector objects, we need to describe how
/**
* The mg_constrained_dofs of the level systems.
*/
- SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<parallel::distributed::Vector<Number> > > mg_constrained_dofs;
+ SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> > > mg_constrained_dofs;
/**
* In the function copy_to_mg, we need to access ghosted entries of the
* global vector for inserting into the level vectors. This vector is
* populated with those entries.
*/
- mutable parallel::distributed::Vector<Number> ghosted_global_vector;
+ mutable LinearAlgebra::distributed::Vector<Number> ghosted_global_vector;
/**
* In the function copy_from_mg, we access all level vectors with certain
* ghost entries for inserting the result into a global vector.
*/
- mutable MGLevelObject<parallel::distributed::Vector<Number> > ghosted_level_vector;
+ mutable MGLevelObject<LinearAlgebra::distributed::Vector<Number> > ghosted_level_vector;
};
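// Illustrative sketch (not part of the original patch): moving data between a
// global vector and the level vectors through the specialization above. The
// helper name is made up; the transfer object is assumed to be built already.
template <int dim, typename Number>
void copy_round_trip (const MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> > &transfer,
                      const DoFHandler<dim>                       &mg_dof,
                      LinearAlgebra::distributed::Vector<Number>  &global_vector)
{
  MGLevelObject<LinearAlgebra::distributed::Vector<Number> > level_vectors;
  level_vectors.resize (0, mg_dof.get_triangulation().n_global_levels()-1);
  transfer.copy_to_mg   (mg_dof, level_vectors, global_vector);  // global -> level vectors
  transfer.copy_from_mg (mg_dof, global_vector, level_vectors);  // level vectors -> global
}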
void
reinit_vector (const dealii::DoFHandler<dim,spacedim> &mg_dof,
const std::vector<unsigned int> &,
- MGLevelObject<parallel::distributed::Vector<number> > &v)
+ MGLevelObject<LinearAlgebra::distributed::Vector<number> > &v)
{
const parallel::Triangulation<dim,spacedim> *tria =
(dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
-/* --------- MGLevelGlobalTransfer<parallel::distributed::Vector> ------- */
+/* --------- MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector> ------- */
template <typename Number>
template <int dim, typename Number2, int spacedim>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_to_mg
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_to_mg
(const DoFHandler<dim,spacedim> &mg_dof_handler,
- MGLevelObject<parallel::distributed::Vector<Number> > &dst,
- const parallel::distributed::Vector<Number2> &src) const
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src) const
{
AssertIndexRange(dst.max_level(), mg_dof_handler.get_triangulation().n_global_levels());
AssertIndexRange(dst.min_level(), dst.max_level()+1);
--level;
typedef std::vector<std::pair<unsigned int, unsigned int> >::const_iterator dof_pair_iterator;
- parallel::distributed::Vector<Number> &dst_level = dst[level];
+ LinearAlgebra::distributed::Vector<Number> &dst_level = dst[level];
// first copy local unknowns
for (dof_pair_iterator i = copy_indices[level].begin();
template <typename Number>
template <int dim, typename Number2, int spacedim>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_from_mg
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_from_mg
(const DoFHandler<dim,spacedim> &mg_dof_handler,
- parallel::distributed::Vector<Number2> &dst,
- const MGLevelObject<parallel::distributed::Vector<Number> > &src) const
+ LinearAlgebra::distributed::Vector<Number2> &dst,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const
{
(void)mg_dof_handler;
AssertIndexRange(src.max_level(), mg_dof_handler.get_triangulation().n_global_levels());
// the first time around, we copy the source vector to the temporary
// vector that we hold for the purpose of data exchange
- parallel::distributed::Vector<Number> &ghosted_vector =
+ LinearAlgebra::distributed::Vector<Number> &ghosted_vector =
ghosted_level_vector[level];
ghosted_vector = src[level];
ghosted_vector.update_ghost_values();
template <typename Number>
template <int dim, typename Number2, int spacedim>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::copy_from_mg_add
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_from_mg_add
(const DoFHandler<dim,spacedim> &/*mg_dof_handler*/,
- parallel::distributed::Vector<Number2> &dst,
- const MGLevelObject<parallel::distributed::Vector<Number> > &src) const
+ LinearAlgebra::distributed::Vector<Number2> &dst,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &src) const
{
// For non-DG: degrees of freedom in the refinement face may need special
// attention, since they belong to the coarse level, but have fine level
// the first time around, we copy the source vector to the temporary
// vector that we hold for the purpose of data exchange
- parallel::distributed::Vector<Number> &ghosted_vector =
+ LinearAlgebra::distributed::Vector<Number> &ghosted_vector =
ghosted_level_vector[level];
ghosted_vector = src[level];
ghosted_vector.update_ghost_values();
template <typename Number>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::
set_component_to_block_map (const std::vector<unsigned int> &map)
{
component_to_block_map = map;
#include <deal.II/base/config.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/multigrid/mg_base.h>
#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/base/mg_level_object.h>
* @date 2016
*/
template <int dim, typename Number>
-class MGTransferMatrixFree : public MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >
+class MGTransferMatrixFree : public MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >
{
public:
/**
* finer level.
*/
virtual void prolongate (const unsigned int to_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const;
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const;
/**
* Restrict a vector from level <tt>from_level</tt> to level
* coarser level.
*/
virtual void restrict_and_add (const unsigned int from_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const;
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const;
/**
* Finite element does not provide prolongation matrices.
*/
template <int degree>
void do_prolongate_add(const unsigned int to_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const;
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const;
/**
* Performs templated restriction operation
*/
template <int degree>
void do_restrict_add(const unsigned int from_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const;
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const;
};
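// Illustrative sketch (not part of the original patch): one grid transfer step
// with the renamed vector type, matching the virtual interface declared above.
// The helper name is made up; the level vectors are assumed to be sized for
// their respective levels and `fine_level` must be at least 1.
template <int dim, typename Number>
void transfer_between_levels (const MGTransferMatrixFree<dim,Number>     &transfer,
                              const unsigned int                          fine_level,
                              LinearAlgebra::distributed::Vector<Number> &fine,
                              LinearAlgebra::distributed::Vector<Number> &coarse)
{
  transfer.prolongate       (fine_level, fine, coarse);  // coarse level -> fine level
  transfer.restrict_and_add (fine_level, coarse, fine);  // fine level   -> coarse level (added)
}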
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/base/qprojector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/lac/vector_memory.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/base/multithread_info.h>
#include <iostream>
#ifdef DEAL_II_WITH_MPI
// Start with the deal.II MPI vectors (need to do this before finalizing
// PETSc because it finalizes MPI). Delete vectors from the pools:
- GrowingVectorMemory<parallel::distributed::Vector<double> >
+ GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> >
::release_unused_memory ();
- GrowingVectorMemory<parallel::distributed::BlockVector<double> >
+ GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double> >
::release_unused_memory ();
- GrowingVectorMemory<parallel::distributed::Vector<float> >
+ GrowingVectorMemory<LinearAlgebra::distributed::Vector<float> >
::release_unused_memory ();
- GrowingVectorMemory<parallel::distributed::BlockVector<float> >
+ GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float> >
::release_unused_memory ();
// Next with Trilinos:
#include <deal.II/base/time_stepping.templates.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
for (S : REAL_SCALARS; V : DEAL_II_VEC_TEMPLATES)
{
- template class RungeKutta<parallel::distributed::V<S> >;
- template class ExplicitRungeKutta<parallel::distributed::V<S> >;
- template class ImplicitRungeKutta<parallel::distributed::V<S> >;
- template class EmbeddedExplicitRungeKutta<parallel::distributed::V<S> >;
+ template class RungeKutta<LinearAlgebra::distributed::V<S> >;
+ template class ExplicitRungeKutta<LinearAlgebra::distributed::V<S> >;
+ template class ImplicitRungeKutta<LinearAlgebra::distributed::V<S> >;
+ template class EmbeddedExplicitRungeKutta<LinearAlgebra::distributed::V<S> >;
}
for (V : EXTERNAL_SEQUENTIAL_VECTORS)
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#if deal_II_dimension > 1
#if deal_II_dimension <= deal_II_space_dimension
template class SolutionTransfer<deal_II_dimension,::dealii::Vector<double>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
- template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::Vector<double>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
- template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::Vector<float>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
- template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::BlockVector<double>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
- template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::BlockVector<float>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
+ template class SolutionTransfer<deal_II_dimension,::dealii::LinearAlgebra::distributed::Vector<double>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
+ template class SolutionTransfer<deal_II_dimension,::dealii::LinearAlgebra::distributed::Vector<float>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
+ template class SolutionTransfer<deal_II_dimension,::dealii::LinearAlgebra::distributed::BlockVector<double>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
+ template class SolutionTransfer<deal_II_dimension,::dealii::LinearAlgebra::distributed::BlockVector<float>, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
#ifdef DEAL_II_WITH_PETSC
template class SolutionTransfer<deal_II_dimension, TrilinosWrappers::MPI::BlockVector, DoFHandler<deal_II_dimension,deal_II_space_dimension> >;
#endif
-
+
#endif
#endif
\}
\}
}
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/dofs/dof_tools.h>
#ifdef DEAL_II_WITH_MPI
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#endif
#include <algorithm>
unsigned int dofs_per_cell;
std::vector<types::global_dof_index> parameter_dof_indices;
#ifdef DEAL_II_WITH_MPI
- std::vector<dealii::parallel::distributed::Vector<double> > global_parameter_representation;
+ std::vector<dealii::LinearAlgebra::distributed::Vector<double> > global_parameter_representation;
#else
std::vector<dealii::Vector<double> > global_parameter_representation;
#endif
#include <deal.II/base/utilities.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_parallel_block_vector.h>
}
#endif
- // special version for parallel::distributed::Vector
+ // special version for LinearAlgebra::distributed::Vector
template <int dim, int spacedim, typename Number>
void back_interpolate (const DoFHandler<dim,spacedim> &dof1,
const ConstraintMatrix &constraints1,
- const parallel::distributed::Vector<Number> &u1,
+ const LinearAlgebra::distributed::Vector<Number> &u1,
const DoFHandler<dim,spacedim> &dof2,
const ConstraintMatrix &constraints2,
- parallel::distributed::Vector<Number> &u1_interpolated)
+ LinearAlgebra::distributed::Vector<Number> &u1_interpolated)
{
IndexSet dof2_locally_owned_dofs = dof2.locally_owned_dofs();
IndexSet dof2_locally_relevant_dofs;
DoFTools::extract_locally_relevant_dofs (dof2,
dof2_locally_relevant_dofs);
- parallel::distributed::Vector<Number>
+ LinearAlgebra::distributed::Vector<Number>
u2 (dof2_locally_owned_dofs,
dof2_locally_relevant_dofs,
u1.get_mpi_communicator());
#include <deal.II/base/std_cxx11/unique_ptr.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/sparse_matrix_ez.h>
#include <deal.II/lac/chunk_sparse_matrix.h>
#include <deal.II/lac/block_sparse_matrix_ez.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_sparse_matrix.h>
template void ConstraintMatrix::condense<T<S> >(const T<S> &, T<S> &) const;
template void ConstraintMatrix::condense<T<S> >(T<S> &vec) const;
template void ConstraintMatrix::distribute_local_to_global<T<S> > (
- const Vector<S>&, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&) const;
+ const Vector<S>&, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&) const;
template void ConstraintMatrix::distribute_local_to_global<T<S> > (
const Vector<S>&, const std::vector<types::global_dof_index> &, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&, bool) const;
template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
{
- template void ConstraintMatrix::condense<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
- template void ConstraintMatrix::condense<parallel::distributed::T<S> >(parallel::distributed::T<S> &vec) const;
- template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
- const Vector<S>&, const std::vector<types::global_dof_index> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
- template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
- const Vector<S>&, const std::vector<types::global_dof_index> &, const std::vector<types::global_dof_index> &, parallel::distributed::T<S> &, const FullMatrix<S>&, bool) const;
- template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
+ template void ConstraintMatrix::condense<LinearAlgebra::distributed::T<S> >(const LinearAlgebra::distributed::T<S> &, LinearAlgebra::distributed::T<S> &) const;
+ template void ConstraintMatrix::condense<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &vec) const;
+ template void ConstraintMatrix::distribute_local_to_global<LinearAlgebra::distributed::T<S> > (
+ const Vector<S>&, const std::vector<types::global_dof_index> &, LinearAlgebra::distributed::T<S> &, const FullMatrix<S>&) const;
+ template void ConstraintMatrix::distribute_local_to_global<LinearAlgebra::distributed::T<S> > (
+ const Vector<S>&, const std::vector<types::global_dof_index> &, const std::vector<types::global_dof_index> &, LinearAlgebra::distributed::T<S> &, const FullMatrix<S>&, bool) const;
+ template void ConstraintMatrix::set_zero<LinearAlgebra::distributed::T<S> >(LinearAlgebra::distributed::T<S> &) const;
}
template void ConstraintMatrix::condense<S1,Vector<S2> >(SparseMatrix<S1>&, Vector<S2>&) const;
template void ConstraintMatrix::condense<S1,BlockVector<S2> >(BlockSparseMatrix<S1>&, BlockVector<S2>&) const;
}
-
+
for (S1 : COMPLEX_SCALARS)
{
template void ConstraintMatrix::condense<S1,Vector<S1> >(SparseMatrix<S1>&, Vector<S1>&) const;
{
template void ConstraintMatrix::distribute<Vec>(Vec &) const;
}
-
+
for (S: COMPLEX_SCALARS; T : DEAL_II_VEC_TEMPLATES)
{
template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
}
-
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/trilinos_block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
DEAL_II_NAMESPACE_OPEN
#include <deal.II/lac/sparse_matrix.templates.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
DEAL_II_NAMESPACE_OPEN
for (S1 : REAL_SCALARS)
{
template void SparseMatrix<S1>::
- vmult (parallel::distributed::Vector<S1> &, const parallel::distributed::Vector<S1> &) const;
+ vmult (LinearAlgebra::distributed::Vector<S1> &, const LinearAlgebra::distributed::Vector<S1> &) const;
template void SparseMatrix<S1>::
- Tvmult (parallel::distributed::Vector<S1> &, const parallel::distributed::Vector<S1> &) const;
+ Tvmult (LinearAlgebra::distributed::Vector<S1> &, const LinearAlgebra::distributed::Vector<S1> &) const;
template void SparseMatrix<S1>::
- vmult_add (parallel::distributed::Vector<S1> &, const parallel::distributed::Vector<S1> &) const;
+ vmult_add (LinearAlgebra::distributed::Vector<S1> &, const LinearAlgebra::distributed::Vector<S1> &) const;
template void SparseMatrix<S1>::
- Tvmult_add (parallel::distributed::Vector<S1> &, const parallel::distributed::Vector<S1> &) const;
+ Tvmult_add (LinearAlgebra::distributed::Vector<S1> &, const LinearAlgebra::distributed::Vector<S1> &) const;
}
for (S1, S2, S3: REAL_SCALARS)
#include <deal.II/lac/sparse_matrix.templates.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
DEAL_II_NAMESPACE_OPEN
}
void
- PreconditionIdentity::vmult(parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ PreconditionIdentity::vmult(LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
dst = src;
}
void
- PreconditionIdentity::Tvmult(parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ PreconditionIdentity::Tvmult(LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
dst = src;
}
void
SolverBase::solve (const SparseMatrix &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b,
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner)
{
linear_problem.reset();
void
SolverBase::solve (Epetra_Operator &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b,
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b,
const PreconditionBase &preconditioner)
{
linear_problem.reset();
void
SolverDirect::solve (const SparseMatrix &A,
- dealii::parallel::distributed::Vector<double> &x,
- const dealii::parallel::distributed::Vector<double> &b)
+ dealii::LinearAlgebra::distributed::Vector<double> &x,
+ const dealii::LinearAlgebra::distributed::Vector<double> &b)
{
AssertDimension (static_cast<TrilinosWrappers::types::int_type>(x.local_size()),
A.domain_partitioner().NumMyElements());
# include <deal.II/lac/sparsity_pattern.h>
# include <deal.II/lac/dynamic_sparsity_pattern.h>
# include <deal.II/lac/sparsity_tools.h>
-# include <deal.II/lac/parallel_vector.h>
+# include <deal.II/lac/la_parallel_vector.h>
DEAL_II_DISABLE_EXTRA_DIAGNOSTICS
# include <Epetra_Export.h>
SparseMatrix::vmult (dealii::Vector<double> &,
const dealii::Vector<double> &) const;
template void
- SparseMatrix::vmult (dealii::parallel::distributed::Vector<double> &,
- const dealii::parallel::distributed::Vector<double> &) const;
+ SparseMatrix::vmult (dealii::LinearAlgebra::distributed::Vector<double> &,
+ const dealii::LinearAlgebra::distributed::Vector<double> &) const;
template void
SparseMatrix::Tvmult (VectorBase &,
const VectorBase &) const;
SparseMatrix::Tvmult (dealii::Vector<double> &,
const dealii::Vector<double> &) const;
template void
- SparseMatrix::Tvmult (dealii::parallel::distributed::Vector<double> &,
- const dealii::parallel::distributed::Vector<double> &) const;
+ SparseMatrix::Tvmult (dealii::LinearAlgebra::distributed::Vector<double> &,
+ const dealii::LinearAlgebra::distributed::Vector<double> &) const;
template void
SparseMatrix::vmult_add (VectorBase &,
const VectorBase &) const;
SparseMatrix::vmult_add (dealii::Vector<double> &,
const dealii::Vector<double> &) const;
template void
- SparseMatrix::vmult_add (dealii::parallel::distributed::Vector<double> &,
- const dealii::parallel::distributed::Vector<double> &) const;
+ SparseMatrix::vmult_add (dealii::LinearAlgebra::distributed::Vector<double> &,
+ const dealii::LinearAlgebra::distributed::Vector<double> &) const;
template void
SparseMatrix::Tvmult_add (VectorBase &,
const VectorBase &) const;
SparseMatrix::Tvmult_add (dealii::Vector<double> &,
const dealii::Vector<double> &) const;
template void
- SparseMatrix::Tvmult_add (dealii::parallel::distributed::Vector<double> &,
- const dealii::parallel::distributed::Vector<double> &) const;
+ SparseMatrix::Tvmult_add (dealii::LinearAlgebra::distributed::Vector<double> &,
+ const dealii::LinearAlgebra::distributed::Vector<double> &) const;
}
DEAL_II_NAMESPACE_CLOSE
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
template <typename Number>
template <int dim, int spacedim>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::fill_and_communicate_copy_indices
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
// first go to the usual routine...
template <typename Number>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::clear()
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::clear()
{
sizes.resize(0);
copy_indices.clear();
template <typename Number>
void
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::print_indices (std::ostream &os) const
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::print_indices (std::ostream &os) const
{
for (unsigned int level = 0; level<copy_indices.size(); ++level)
{
template <typename Number>
std::size_t
-MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::memory_consumption () const
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::memory_consumption () const
{
std::size_t result = sizeof(*this);
result += MemoryConsumption::memory_consumption(sizes);
// create an additional instantiation currently not supported by the automatic
// template instantiation scheme
-template class MGLevelGlobalTransfer<parallel::distributed::Vector<float> >;
+template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float> >;
DEAL_II_NAMESPACE_CLOSE
for (deal_II_dimension : DIMENSIONS)
{
template
- void MGLevelGlobalTransfer< parallel::distributed::Vector<float> >::fill_and_communicate_copy_indices<deal_II_dimension,deal_II_dimension>(
+ void MGLevelGlobalTransfer< LinearAlgebra::distributed::Vector<float> >::fill_and_communicate_copy_indices<deal_II_dimension,deal_II_dimension>(
const DoFHandler<deal_II_dimension,deal_II_dimension> &mg_dof);
}
for (deal_II_dimension : DIMENSIONS; S1, S2 : REAL_SCALARS)
{
template void
- MGLevelGlobalTransfer<parallel::distributed::Vector<S1> >::copy_to_mg (
- const DoFHandler<deal_II_dimension>&, MGLevelObject<parallel::distributed::Vector<S1> >&, const parallel::distributed::Vector<S2>&) const;
+ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<S1> >::copy_to_mg (
+ const DoFHandler<deal_II_dimension>&, MGLevelObject<LinearAlgebra::distributed::Vector<S1> >&, const LinearAlgebra::distributed::Vector<S2>&) const;
template void
- MGLevelGlobalTransfer<parallel::distributed::Vector<S1> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
- const MGLevelObject<parallel::distributed::Vector<S1> >&) const;
+ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<S1> >::copy_from_mg (const DoFHandler<deal_II_dimension>&, LinearAlgebra::distributed::Vector<S2>&,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<S1> >&) const;
template void
- MGLevelGlobalTransfer<parallel::distributed::Vector<S1> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, parallel::distributed::Vector<S2>&,
- const MGLevelObject<parallel::distributed::Vector<S1> >&) const;
+ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<S1> >::copy_from_mg_add (const DoFHandler<deal_II_dimension>&, LinearAlgebra::distributed::Vector<S2>&,
+ const MGLevelObject<LinearAlgebra::distributed::Vector<S1> >&) const;
}
for(deal_II_dimension : DIMENSIONS)
#include <deal.II/base/logstream.h>
#include <deal.II/base/function.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/dofs/dof_tools.h>
template <int dim, typename Number>
void MGTransferMatrixFree<dim,Number>::clear ()
{
- this->MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::clear();
+ this->MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::clear();
fe_degree = 0;
element_is_continuous = false;
n_components = 0;
reinit_ghosted_vector(const IndexSet &locally_owned,
std::vector<types::global_dof_index> &ghosted_level_dofs,
const MPI_Comm &communicator,
- parallel::distributed::Vector<Number> &ghosted_level_vector,
+ LinearAlgebra::distributed::Vector<Number> &ghosted_level_vector,
std::vector<std::pair<unsigned int,unsigned int> > &copy_indices_global_mine)
{
std::sort(ghosted_level_dofs.begin(), ghosted_level_dofs.end());
template <int dim, typename Number>
void MGTransferMatrixFree<dim,Number>
::prolongate (const unsigned int to_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
Assert ((to_level >= 1) && (to_level<=level_dof_indices.size()),
ExcIndexRange (to_level, 1, level_dof_indices.size()+1));
template <int dim, typename Number>
void MGTransferMatrixFree<dim,Number>
::restrict_and_add (const unsigned int from_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
Assert ((from_level >= 1) && (from_level<=level_dof_indices.size()),
ExcIndexRange (from_level, 1, level_dof_indices.size()+1));
template <int degree>
void MGTransferMatrixFree<dim,Number>
::do_prolongate_add (const unsigned int to_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
const unsigned int vec_size = VectorizedArray<Number>::n_array_elements;
const unsigned int n_child_dofs_1d = 2*(fe_degree+1) - element_is_continuous;
template <int degree>
void MGTransferMatrixFree<dim,Number>
::do_restrict_add (const unsigned int from_level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
const unsigned int vec_size = VectorizedArray<Number>::n_array_elements;
const unsigned int n_child_dofs_1d = 2*(fe_degree+1) - element_is_continuous;
std::size_t
MGTransferMatrixFree<dim,Number>::memory_consumption() const
{
- std::size_t memory = MGLevelGlobalTransfer<parallel::distributed::Vector<Number> >::memory_consumption();
+ std::size_t memory = MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::memory_consumption();
memory += MemoryConsumption::memory_consumption(level_dof_indices);
memory += MemoryConsumption::memory_consumption(parent_child_connect);
memory += MemoryConsumption::memory_consumption(n_owned_level_cells);
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/fe/fe.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/block_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_block_vector.h>
#include <deal.II/lac/trilinos_block_vector.h>
#include <deal.II/numerics/solution_transfer.h>
#include "../tests.h"
#include "fe_tools_common.h"
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/sparsity_pattern.h>
// check
cm1.close ();
cm2.close ();
- parallel::distributed::Vector<double> in (dof1->n_dofs());
+ LinearAlgebra::distributed::Vector<double> in (dof1->n_dofs());
for (unsigned int i=0; i<in.size(); ++i) in(i) = i;
- parallel::distributed::Vector<double> out (dof1->n_dofs());
+ LinearAlgebra::distributed::Vector<double> out (dof1->n_dofs());
FETools::back_interpolate (*dof1, cm1, in, *dof2, cm2, out);
output_vector (out);
#include "../tests.h"
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <fstream>
#include <iostream>
#include <iomanip>
{
deallog.push("deal.II");
- parallel::distributed::Vector<double> w(local, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> w(local, MPI_COMM_WORLD);
set (w);
- parallel::distributed::Vector<double> v(local, dense_local, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local, dense_local, MPI_COMM_WORLD);
v = w; // get copy of vector including ghost elements
test (v);
deallog.pop();
{
deallog.push("deal.II");
- parallel::distributed::BlockVector<double> w(partitioning, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::BlockVector<double> w(partitioning, MPI_COMM_WORLD);
set (w);
- parallel::distributed::BlockVector<double> v(partitioning, dense_partitioning, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::BlockVector<double> v(partitioning, dense_partitioning, MPI_COMM_WORLD);
v = w; // get copy of vector including ghost elements
test (v);
deallog.pop();
// ---------------------------------------------------------------------
-// check that parallel::distributed::Vector::reinit does not carry over any
+// check that LinearAlgebra::distributed::Vector::reinit does not carry over any
// state that can lead to invalid memory access. In this test, the MPI
// communicator is deleted.
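// [Editorial sketch, not part of the patch] Minimal illustration of the
// failure mode this test guards against; the communicator name 'comm' and
// the sizes below are made up for the example:
//
//   MPI_Comm comm;
//   MPI_Comm_dup (MPI_COMM_WORLD, &comm);
//   LinearAlgebra::distributed::Vector<double> v;
//   v.reinit (complete_index_set(10), comm); // vector stores the communicator
//   MPI_Comm_free (&comm);                   // communicator is deleted
//   v.reinit (5);                            // must not touch the stale communicator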
#include "../tests.h"
#include <deal.II/base/mpi.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>
template <typename VectorType>
initlog();
- do_test<parallel::distributed::Vector<double> >();
+ do_test<LinearAlgebra::distributed::Vector<double> >();
}
mf_data.reinit (dof, constraints, quad, data);
}
- MatrixFreeTest<dim,fe_degree,number,parallel::distributed::Vector<number> > mf (mf_data);
- parallel::distributed::Vector<number> in, out, ref;
+ MatrixFreeTest<dim,fe_degree,number,LinearAlgebra::distributed::Vector<number> > mf (mf_data);
+ LinearAlgebra::distributed::Vector<number> in, out, ref;
mf_data.initialize_dof_vector (in);
out.reinit (in);
ref.reinit (in);
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/function.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
mf_data.reinit (dof, constraints, quad, data);
}
- MatrixFreeTest<dim,fe_degree,number,parallel::distributed::Vector<number> > mf (mf_data);
- parallel::distributed::Vector<number> in, out, ref;
+ MatrixFreeTest<dim,fe_degree,number,LinearAlgebra::distributed::Vector<number> > mf (mf_data);
+ LinearAlgebra::distributed::Vector<number> in, out, ref;
mf_data.initialize_dof_vector (in);
out.reinit (in);
ref.reinit (in);
data.tasks_block_size = 3;
mf_data.reinit (dof, constraints, quad, data);
- MatrixFreeTest<dim, fe_degree, number,parallel::distributed::Vector<number> > mf (mf_data);
+ MatrixFreeTest<dim, fe_degree, number,LinearAlgebra::distributed::Vector<number> > mf (mf_data);
deallog << "Norm of difference:";
// run 10 times to make a possible error more
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/function.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
template <int dim, int fe_degree, typename Number>
void
helmholtz_operator (const MatrixFree<dim,Number> &data,
- std::vector<parallel::distributed::Vector<Number> > &dst,
- const std::vector<parallel::distributed::Vector<Number> > &src,
+ std::vector<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<Number> > &src,
const std::pair<unsigned int,unsigned int> &cell_range)
{
FEEvaluation<dim,fe_degree,fe_degree+1,2,Number> fe_eval (data);
data (data_in)
{};
- void vmult (std::vector<parallel::distributed::Vector<Number> > &dst,
- const std::vector<parallel::distributed::Vector<Number> > &src) const
+ void vmult (std::vector<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<Number> > &src) const
{
for (unsigned int i=0; i<dst.size(); ++i)
dst[i] = 0;
const std_cxx11::function<void(const MatrixFree<dim,Number> &,
- std::vector<parallel::distributed::Vector<Number> > &,
- const std::vector<parallel::distributed::Vector<Number> > &,
+ std::vector<LinearAlgebra::distributed::Vector<Number> > &,
+ const std::vector<LinearAlgebra::distributed::Vector<Number> > &,
const std::pair<unsigned int,unsigned int> &)>
wrap = helmholtz_operator<dim,fe_degree,Number>;
data.cell_loop (wrap, dst, src);
}
MatrixFreeTest<dim,fe_degree,number> mf (mf_data);
- parallel::distributed::Vector<number> ref;
- std::vector<parallel::distributed::Vector<number> > in(2), out(2);
+ LinearAlgebra::distributed::Vector<number> ref;
+ std::vector<LinearAlgebra::distributed::Vector<number> > in(2), out(2);
for (unsigned int i=0; i<2; ++i)
{
mf_data.initialize_dof_vector (in[i]);
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/function.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_boundary_lib.h>
template <int dim, int fe_degree, typename Number>
void
helmholtz_operator (const MatrixFree<dim,Number> &data,
- parallel::distributed::BlockVector<Number> &dst,
- const parallel::distributed::BlockVector<Number> &src,
+ LinearAlgebra::distributed::BlockVector<Number> &dst,
+ const LinearAlgebra::distributed::BlockVector<Number> &src,
const std::pair<unsigned int,unsigned int> &cell_range)
{
FEEvaluation<dim,fe_degree,fe_degree+1,2,Number> fe_eval (data);
data (data_in)
{};
- void vmult (parallel::distributed::BlockVector<Number> &dst,
- const parallel::distributed::BlockVector<Number> &src) const
+ void vmult (LinearAlgebra::distributed::BlockVector<Number> &dst,
+ const LinearAlgebra::distributed::BlockVector<Number> &src) const
{
dst = 0;
const std_cxx11::function<void(const MatrixFree<dim,Number> &,
- parallel::distributed::BlockVector<Number> &,
- const parallel::distributed::BlockVector<Number> &,
+ LinearAlgebra::distributed::BlockVector<Number> &,
+ const LinearAlgebra::distributed::BlockVector<Number> &,
const std::pair<unsigned int,unsigned int> &)>
wrap = helmholtz_operator<dim,fe_degree,Number>;
data.cell_loop (wrap, dst, src);
}
MatrixFreeTest<dim,fe_degree,number> mf (mf_data);
- parallel::distributed::Vector<number> ref;
- parallel::distributed::BlockVector<number> in(2), out(2);
+ LinearAlgebra::distributed::Vector<number> ref;
+ LinearAlgebra::distributed::BlockVector<number> in(2), out(2);
for (unsigned int i=0; i<2; ++i)
{
mf_data.initialize_dof_vector (in.block(i));
#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/lac/vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
template <int dim, int fe_degree, typename VectorType>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/grid/tria.h>
compute_inverse_diagonal();
}
- void vmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
vmult_add(dst, src);
}
- void vmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
data.cell_loop (&LaplaceOperator::local_apply,
this, dst, src);
}
void
- initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+ initialize_dof_vector(LinearAlgebra::distributed::Vector<number> &vector) const
{
if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
data.initialize_dof_vector(vector);
ExcInternalError());
}
- const parallel::distributed::Vector<number> &
+ const LinearAlgebra::distributed::Vector<number> &
get_matrix_diagonal_inverse() const
{
Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
private:
void
local_apply (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src,
+ LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
void
local_diagonal_cell (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
+ LinearAlgebra::distributed::Vector<number> &dst,
const unsigned int &,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
}
MatrixFree<dim,number> data;
- parallel::distributed::Vector<number> inverse_diagonal_entries;
+ LinearAlgebra::distributed::Vector<number> inverse_diagonal_entries;
};
template <typename MatrixType>
-class MGTransferPrebuiltMF : public MGTransferPrebuilt<parallel::distributed::Vector<double> >
+class MGTransferPrebuiltMF : public MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
{
public:
MGTransferPrebuiltMF(const MGLevelObject<MatrixType> &laplace)
template <int dim, class InVector, int spacedim>
void
copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof,
- MGLevelObject<parallel::distributed::Vector<double> > &dst,
+ MGLevelObject<LinearAlgebra::distributed::Vector<double> > &dst,
const InVector &src) const
{
for (unsigned int level=dst.min_level();
level<=dst.max_level(); ++level)
laplace_operator[level].initialize_dof_vector(dst[level]);
- MGTransferPrebuilt<parallel::distributed::Vector<double> >::copy_to_mg(mg_dof, dst, src);
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >::copy_to_mg(mg_dof, dst, src);
}
private:
template<typename MatrixType, typename Number>
-class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+class MGCoarseIterative : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number> >
{
public:
MGCoarseIterative() {}
}
virtual void operator() (const unsigned int level,
- parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
ReductionControl solver_control (1e4, 1e-50, 1e-10);
- SolverCG<parallel::distributed::Vector<double> > solver_coarse (solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver_coarse (solver_control);
solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
}
dirichlet_boundaries.insert(0);
fine_matrix.initialize(mapping, dof, dirichlet_boundaries);
- parallel::distributed::Vector<number> in, sol;
+ LinearAlgebra::distributed::Vector<number> in, sol;
fine_matrix.initialize_dof_vector(in);
fine_matrix.initialize_dof_vector(sol);
MGCoarseIterative<LevelMatrixType,number> mg_coarse;
mg_coarse.initialize(mg_matrices[0]);
- typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
- MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+ typedef PreconditionChebyshev<LevelMatrixType,LinearAlgebra::distributed::Vector<number> > SMOOTHER;
+ MGSmootherPrecondition<LevelMatrixType, SMOOTHER, LinearAlgebra::distributed::Vector<number> >
mg_smoother;
MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
}
mg_smoother.initialize(mg_matrices, smoother_data);
- mg::Matrix<parallel::distributed::Vector<double> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> >
mg_matrix(mg_matrices);
- Multigrid<parallel::distributed::Vector<double> > mg(dof,
- mg_matrix,
- mg_coarse,
- mg_transfer,
- mg_smoother,
- mg_smoother);
- PreconditionMG<dim, parallel::distributed::Vector<double>,
+ Multigrid<LinearAlgebra::distributed::Vector<double> > mg(dof,
+ mg_matrix,
+ mg_coarse,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
+ PreconditionMG<dim, LinearAlgebra::distributed::Vector<double>,
MGTransferPrebuiltMF<LevelMatrixType> >
preconditioner(dof, mg, mg_transfer);
{
ReductionControl control(30, 1e-20, 1e-7);
- SolverCG<parallel::distributed::Vector<double> > solver(control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver(control);
solver.solve(fine_matrix, sol, in, preconditioner);
}
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/grid/tria.h>
compute_inverse_diagonal();
}
- void vmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
vmult_add(dst, src);
}
- void vmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
edge_constrained_values[i] =
std::pair<number,number>(src.local_element(edge_constrained_indices[i]),
dst.local_element(edge_constrained_indices[i]));
- const_cast<parallel::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+ const_cast<LinearAlgebra::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = 0.;
}
data.cell_loop (&LaplaceOperator::local_apply,
// destination
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
- const_cast<parallel::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+ const_cast<LinearAlgebra::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
dst.local_element(edge_constrained_indices[i]) = edge_constrained_values[i].second + edge_constrained_values[i].first;
}
}
- void vmult_interface_down(parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ void vmult_interface_down(LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
const double src_val = src.local_element(edge_constrained_indices[i]);
- const_cast<parallel::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+ const_cast<LinearAlgebra::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = 0.;
edge_constrained_values[i] = std::pair<number,number>(src_val,
dst.local_element(edge_constrained_indices[i]));
}
++c;
// reset the src values
- const_cast<parallel::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+ const_cast<LinearAlgebra::distributed::Vector<double>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
}
for ( ; c<dst.local_size(); ++c)
dst.local_element(c) = 0.;
}
- void vmult_interface_up(parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ void vmult_interface_up(LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
if (!have_interface_matrices)
return;
- parallel::distributed::Vector<double> src_cpy (src);
+ LinearAlgebra::distributed::Vector<double> src_cpy (src);
unsigned int c=0;
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
}
void
- initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+ initialize_dof_vector(LinearAlgebra::distributed::Vector<number> &vector) const
{
if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
data.initialize_dof_vector(vector);
ExcInternalError());
}
- const parallel::distributed::Vector<number> &
+ const LinearAlgebra::distributed::Vector<number> &
get_matrix_diagonal_inverse() const
{
Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
private:
void
local_apply (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src,
+ LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
void
local_diagonal_cell (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
+ LinearAlgebra::distributed::Vector<number> &dst,
const unsigned int &,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
}
MatrixFree<dim,number> data;
- parallel::distributed::Vector<number> inverse_diagonal_entries;
+ LinearAlgebra::distributed::Vector<number> inverse_diagonal_entries;
std::vector<unsigned int> edge_constrained_indices;
mutable std::vector<std::pair<number,number> > edge_constrained_values;
bool have_interface_matrices;
this->laplace = &laplace;
}
- void vmult (parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ void vmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
laplace->vmult_interface_down(dst, src);
}
- void Tvmult (parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ void Tvmult (LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
laplace->vmult_interface_up(dst, src);
}
template <typename LAPLACEOPERATOR>
-class MGTransferMF : public MGTransferPrebuilt<parallel::distributed::Vector<double> >
+class MGTransferMF : public MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
{
public:
MGTransferMF(const MGLevelObject<LAPLACEOPERATOR> &laplace,
const ConstraintMatrix &hanging_node_constraints,
const MGConstrainedDoFs &mg_constrained_dofs)
:
- MGTransferPrebuilt<parallel::distributed::Vector<double> >(hanging_node_constraints,
- mg_constrained_dofs),
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >(hanging_node_constraints,
+ mg_constrained_dofs),
laplace_operator (laplace)
{
}
template <int dim, class InVector, int spacedim>
void
copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof_handler,
- MGLevelObject<parallel::distributed::Vector<double> > &dst,
+ MGLevelObject<LinearAlgebra::distributed::Vector<double> > &dst,
const InVector &src) const
{
for (unsigned int level=dst.min_level();
level<=dst.max_level(); ++level)
laplace_operator[level].initialize_dof_vector(dst[level]);
- MGTransferPrebuilt<parallel::distributed::Vector<double> >::
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >::
copy_to_mg(mg_dof_handler, dst, src);
}
template<typename MatrixType, typename Number>
-class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+class MGCoarseIterative : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number> >
{
public:
MGCoarseIterative() {}
}
virtual void operator() (const unsigned int level,
- parallel::distributed::Vector<double> &dst,
- const parallel::distributed::Vector<double> &src) const
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const LinearAlgebra::distributed::Vector<double> &src) const
{
ReductionControl solver_control (1e4, 1e-50, 1e-10);
- SolverCG<parallel::distributed::Vector<double> > solver_coarse (solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver_coarse (solver_control);
solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
}
fine_matrix.initialize(mapping, dof, mg_constrained_dofs, dirichlet_boundary,
numbers::invalid_unsigned_int);
- parallel::distributed::Vector<number> in, sol;
+ LinearAlgebra::distributed::Vector<number> in, sol;
fine_matrix.initialize_dof_vector(in);
fine_matrix.initialize_dof_vector(sol);
MGCoarseIterative<LevelMatrixType,number> mg_coarse;
mg_coarse.initialize(mg_matrices[0]);
- typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
- MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+ typedef PreconditionChebyshev<LevelMatrixType,LinearAlgebra::distributed::Vector<number> > SMOOTHER;
+ MGSmootherPrecondition<LevelMatrixType, SMOOTHER, LinearAlgebra::distributed::Vector<number> >
mg_smoother;
MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
}
mg_smoother.initialize(mg_matrices, smoother_data);
- mg::Matrix<parallel::distributed::Vector<double> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> >
mg_matrix(mg_matrices);
- mg::Matrix<parallel::distributed::Vector<double> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> >
mg_interface(mg_interface_matrices);
- Multigrid<parallel::distributed::Vector<double> > mg(dof,
- mg_matrix,
- mg_coarse,
- mg_transfer,
- mg_smoother,
- mg_smoother);
+ Multigrid<LinearAlgebra::distributed::Vector<double> > mg(dof,
+ mg_matrix,
+ mg_coarse,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
mg.set_edge_matrices(mg_interface, mg_interface);
- PreconditionMG<dim, parallel::distributed::Vector<double>,
+ PreconditionMG<dim, LinearAlgebra::distributed::Vector<double>,
MGTransferMF<LevelMatrixType> >
preconditioner(dof, mg, mg_transfer);
{
ReductionControl control(30, 1e-20, 1e-7);
- SolverCG<parallel::distributed::Vector<double> > solver(control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver(control);
solver.solve(fine_matrix, sol, in, preconditioner);
}
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/grid/tria.h>
compute_inverse_diagonal();
}
- void vmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
vmult_add(dst, src);
}
- void vmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
edge_constrained_values[i] =
std::pair<number,number>(src.local_element(edge_constrained_indices[i]),
dst.local_element(edge_constrained_indices[i]));
- const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+ const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
}
data.cell_loop (&LaplaceOperator::local_apply,
// destination
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
- const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+ const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
dst.local_element(edge_constrained_indices[i]) = edge_constrained_values[i].second + edge_constrained_values[i].first;
}
}
- void vmult_interface_down(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_interface_down(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
const double src_val = src.local_element(edge_constrained_indices[i]);
- const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
+ const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = 0.;
edge_constrained_values[i] = std::pair<number,number>(src_val,
dst.local_element(edge_constrained_indices[i]));
}
++c;
// reset the src values
- const_cast<parallel::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
+ const_cast<LinearAlgebra::distributed::Vector<number>&>(src).local_element(edge_constrained_indices[i]) = edge_constrained_values[i].first;
}
for ( ; c<dst.local_size(); ++c)
dst.local_element(c) = 0.;
}
- void vmult_interface_up(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_interface_up(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
Assert(src.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
Assert(dst.partitioners_are_globally_compatible(*data.get_dof_info(0).vector_partitioner), ExcInternalError());
if (!have_interface_matrices)
return;
- parallel::distributed::Vector<number> src_cpy (src);
+ LinearAlgebra::distributed::Vector<number> src_cpy (src);
unsigned int c=0;
for (unsigned int i=0; i<edge_constrained_indices.size(); ++i)
{
}
void
- initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+ initialize_dof_vector(LinearAlgebra::distributed::Vector<number> &vector) const
{
if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
data.initialize_dof_vector(vector);
ExcInternalError());
}
- const parallel::distributed::Vector<number> &
+ const LinearAlgebra::distributed::Vector<number> &
get_matrix_diagonal_inverse() const
{
Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
private:
void
local_apply (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src,
+ LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
void
local_diagonal_cell (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
+ LinearAlgebra::distributed::Vector<number> &dst,
const unsigned int &,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
}
MatrixFree<dim,number> data;
- parallel::distributed::Vector<number> inverse_diagonal_entries;
+ LinearAlgebra::distributed::Vector<number> inverse_diagonal_entries;
std::vector<unsigned int> edge_constrained_indices;
mutable std::vector<std::pair<number,number> > edge_constrained_values;
bool have_interface_matrices;
this->laplace = &laplace;
}
- void vmult (parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
- const parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+ void vmult (LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+ const LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
{
laplace->vmult_interface_down(dst, src);
}
- void Tvmult (parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
- const parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
+ void Tvmult (LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &dst,
+ const LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> &src) const
{
laplace->vmult_interface_up(dst, src);
}
template <class InVector, int spacedim>
void
copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof_handler,
- MGLevelObject<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> > &dst,
+ MGLevelObject<LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> > &dst,
const InVector &src) const
{
for (unsigned int level=dst.min_level();
level<=dst.max_level(); ++level)
laplace_operator[level].initialize_dof_vector(dst[level]);
- MGLevelGlobalTransfer<parallel::distributed::Vector<typename LAPLACEOPERATOR::value_type> >::
+ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<typename LAPLACEOPERATOR::value_type> >::
copy_to_mg(mg_dof_handler, dst, src);
}
template<typename MatrixType, typename Number>
-class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+class MGCoarseIterative : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number> >
{
public:
MGCoarseIterative() {}
}
virtual void operator() (const unsigned int level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
ReductionControl solver_control (1e4, 1e-50, 1e-10);
- SolverCG<parallel::distributed::Vector<Number> > solver_coarse (solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<Number> > solver_coarse (solver_control);
solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
}
fine_matrix.initialize(mapping, dof, mg_constrained_dofs, dirichlet_boundary,
numbers::invalid_unsigned_int);
- parallel::distributed::Vector<double> in, sol;
+ LinearAlgebra::distributed::Vector<double> in, sol;
fine_matrix.initialize_dof_vector(in);
fine_matrix.initialize_dof_vector(sol);
MGCoarseIterative<LevelMatrixType,number> mg_coarse;
mg_coarse.initialize(mg_matrices[0]);
- typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
- MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+ typedef PreconditionChebyshev<LevelMatrixType,LinearAlgebra::distributed::Vector<number> > SMOOTHER;
+ MGSmootherPrecondition<LevelMatrixType, SMOOTHER, LinearAlgebra::distributed::Vector<number> >
mg_smoother;
MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
deallog.depth_file(0);
mg_smoother.initialize(mg_matrices, smoother_data);
- mg::Matrix<parallel::distributed::Vector<number> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<number> >
mg_matrix(mg_matrices);
- mg::Matrix<parallel::distributed::Vector<number> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<number> >
mg_interface(mg_interface_matrices);
- Multigrid<parallel::distributed::Vector<number> > mg(dof,
- mg_matrix,
- mg_coarse,
- mg_transfer,
- mg_smoother,
- mg_smoother);
+ Multigrid<LinearAlgebra::distributed::Vector<number> > mg(dof,
+ mg_matrix,
+ mg_coarse,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
mg.set_edge_matrices(mg_interface, mg_interface);
- PreconditionMG<dim, parallel::distributed::Vector<number>,
+ PreconditionMG<dim, LinearAlgebra::distributed::Vector<number>,
MGTransferMF<dim,LevelMatrixType> >
preconditioner(dof, mg, mg_transfer);
// avoid output from inner (coarse-level) solver
deallog.depth_file(2);
ReductionControl control(30, 1e-20, 1e-7);
- SolverCG<parallel::distributed::Vector<double> > solver(control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver(control);
solver.solve(fine_matrix, sol, in, preconditioner);
}
}
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/grid/tria.h>
compute_inverse_diagonal();
}
- void vmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
dst = 0;
vmult_add(dst, src);
}
- void Tvmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void Tvmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
vmult_add(dst, src);
}
- void vmult_add(parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src) const
+ void vmult_add(LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src) const
{
data.cell_loop (&LaplaceOperator::local_apply,
this, dst, src);
}
void
- initialize_dof_vector(parallel::distributed::Vector<number> &vector) const
+ initialize_dof_vector(LinearAlgebra::distributed::Vector<number> &vector) const
{
if (!vector.partitioners_are_compatible(*data.get_dof_info(0).vector_partitioner))
data.initialize_dof_vector(vector);
ExcInternalError());
}
- const parallel::distributed::Vector<number> &
+ const LinearAlgebra::distributed::Vector<number> &
get_matrix_diagonal_inverse() const
{
Assert(inverse_diagonal_entries.size() > 0, ExcNotInitialized());
private:
void
local_apply (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
- const parallel::distributed::Vector<number> &src,
+ LinearAlgebra::distributed::Vector<number> &dst,
+ const LinearAlgebra::distributed::Vector<number> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
FEEvaluation<dim,fe_degree,n_q_points_1d,1,number> phi (data);
void
local_diagonal_cell (const MatrixFree<dim,number> &data,
- parallel::distributed::Vector<number> &dst,
+ LinearAlgebra::distributed::Vector<number> &dst,
const unsigned int &,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
}
MatrixFree<dim,number> data;
- parallel::distributed::Vector<number> inverse_diagonal_entries;
+ LinearAlgebra::distributed::Vector<number> inverse_diagonal_entries;
};
template <class InVector, int spacedim>
void
copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof,
- MGLevelObject<parallel::distributed::Vector<typename MatrixType::value_type> > &dst,
+ MGLevelObject<LinearAlgebra::distributed::Vector<typename MatrixType::value_type> > &dst,
const InVector &src) const
{
for (unsigned int level=dst.min_level();
level<=dst.max_level(); ++level)
laplace_operator[level].initialize_dof_vector(dst[level]);
- MGLevelGlobalTransfer<parallel::distributed::Vector<typename MatrixType::value_type> >::copy_to_mg(mg_dof, dst, src);
+ MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<typename MatrixType::value_type> >::copy_to_mg(mg_dof, dst, src);
}
private:
template<typename MatrixType, typename Number>
-class MGCoarseIterative : public MGCoarseGridBase<parallel::distributed::Vector<Number> >
+class MGCoarseIterative : public MGCoarseGridBase<LinearAlgebra::distributed::Vector<Number> >
{
public:
MGCoarseIterative() {}
}
virtual void operator() (const unsigned int level,
- parallel::distributed::Vector<Number> &dst,
- const parallel::distributed::Vector<Number> &src) const
+ LinearAlgebra::distributed::Vector<Number> &dst,
+ const LinearAlgebra::distributed::Vector<Number> &src) const
{
ReductionControl solver_control (1e4, 1e-50, 1e-10);
- SolverCG<parallel::distributed::Vector<Number> > solver_coarse (solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<Number> > solver_coarse (solver_control);
solver_coarse.solve (*coarse_matrix, dst, src, PreconditionIdentity());
}
dirichlet_boundaries.insert(0);
fine_matrix.initialize(mapping, dof, dirichlet_boundaries);
- parallel::distributed::Vector<double> in, sol;
+ LinearAlgebra::distributed::Vector<double> in, sol;
fine_matrix.initialize_dof_vector(in);
fine_matrix.initialize_dof_vector(sol);
MGCoarseIterative<LevelMatrixType,number> mg_coarse;
mg_coarse.initialize(mg_matrices[0]);
- typedef PreconditionChebyshev<LevelMatrixType,parallel::distributed::Vector<number> > SMOOTHER;
- MGSmootherPrecondition<LevelMatrixType, SMOOTHER, parallel::distributed::Vector<number> >
+ typedef PreconditionChebyshev<LevelMatrixType,LinearAlgebra::distributed::Vector<number> > SMOOTHER;
+ MGSmootherPrecondition<LevelMatrixType, SMOOTHER, LinearAlgebra::distributed::Vector<number> >
mg_smoother;
MGLevelObject<typename SMOOTHER::AdditionalData> smoother_data;
deallog.depth_file(0);
mg_smoother.initialize(mg_matrices, smoother_data);
- mg::Matrix<parallel::distributed::Vector<number> >
+ mg::Matrix<LinearAlgebra::distributed::Vector<number> >
mg_matrix(mg_matrices);
- Multigrid<parallel::distributed::Vector<number> > mg(dof,
- mg_matrix,
- mg_coarse,
- mg_transfer,
- mg_smoother,
- mg_smoother);
- PreconditionMG<dim, parallel::distributed::Vector<number>,
+ Multigrid<LinearAlgebra::distributed::Vector<number> > mg(dof,
+ mg_matrix,
+ mg_coarse,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
+ PreconditionMG<dim, LinearAlgebra::distributed::Vector<number>,
MGTransferPrebuiltMF<dim,LevelMatrixType> >
preconditioner(dof, mg, mg_transfer);
// avoid output from inner (coarse-level) solver
deallog.depth_file(2);
ReductionControl control(30, 1e-20, 1e-7);
- SolverCG<parallel::distributed::Vector<double> > solver(control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > solver(control);
solver.solve(fine_matrix, sol, in, preconditioner);
}
}
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
SineGordonOperation(const MatrixFree<dim,double> &data_in,
const double time_step);
- void apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const;
+ void apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const;
private:
const MatrixFree<dim,double> &data;
const VectorizedArray<double> delta_t_sqr;
- parallel::distributed::Vector<double> inv_mass_matrix;
+ LinearAlgebra::distributed::Vector<double> inv_mass_matrix;
void local_apply (const MatrixFree<dim,double> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const;
};
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
local_apply (const MatrixFree<dim> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
AssertDimension (src.size(), 2);
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
- apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const
+ apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const
{
dst = 0;
data.cell_loop (&SineGordonOperation<dim,fe_degree>::local_apply,
MatrixFree<dim,double> matrix_free_data;
- parallel::distributed::Vector<double> solution, old_solution, old_old_solution;
+ LinearAlgebra::distributed::Vector<double> solution, old_solution, old_old_solution;
const unsigned int n_global_refinements;
double time, time_step;
old_solution);
output_results (0);
- std::vector<parallel::distributed::Vector<double>*> previous_solutions;
+ std::vector<LinearAlgebra::distributed::Vector<double>*> previous_solutions;
previous_solutions.push_back(&old_solution);
previous_solutions.push_back(&old_old_solution);
#include <deal.II/fe/fe_values.h>
#include <deal.II/numerics/vector_tools.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
SineGordonOperation(const MatrixFree<dim,double> &data_in,
const double time_step);
- void apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const;
+ void apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const;
private:
const MatrixFree<dim,double> &data;
const VectorizedArray<double> delta_t_sqr;
- parallel::distributed::Vector<double> inv_mass_matrix;
+ LinearAlgebra::distributed::Vector<double> inv_mass_matrix;
void local_apply (const MatrixFree<dim,double> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const;
};
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
local_apply (const MatrixFree<dim> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
AssertDimension (src.size(), 2);
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
- apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const
+ apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const
{
dst = 0;
data.cell_loop (&SineGordonOperation<dim,fe_degree>::local_apply,
MatrixFree<dim,double> matrix_free_data;
- parallel::distributed::Vector<double> solution, old_solution, old_old_solution;
+ LinearAlgebra::distributed::Vector<double> solution, old_solution, old_old_solution;
const unsigned int n_global_refinements;
double time, time_step;
old_solution);
output_norm ();
- std::vector<parallel::distributed::Vector<double>*> previous_solutions;
+ std::vector<LinearAlgebra::distributed::Vector<double>*> previous_solutions;
previous_solutions.push_back(&old_solution);
previous_solutions.push_back(&old_old_solution);
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/distributed/tria.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
SineGordonOperation(const MatrixFree<dim,double> &data_in,
const double time_step);
- void apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const;
+ void apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const;
private:
const MatrixFree<dim,double> &data;
const VectorizedArray<double> delta_t_sqr;
- parallel::distributed::Vector<double> inv_mass_matrix;
+ LinearAlgebra::distributed::Vector<double> inv_mass_matrix;
void local_apply (const MatrixFree<dim,double> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const;
};
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
local_apply (const MatrixFree<dim> &data,
- parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src,
+ LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src,
const std::pair<unsigned int,unsigned int> &cell_range) const
{
AssertDimension (src.size(), 2);
template <int dim, int fe_degree>
void SineGordonOperation<dim, fe_degree>::
- apply (parallel::distributed::Vector<double> &dst,
- const std::vector<parallel::distributed::Vector<double>*> &src) const
+ apply (LinearAlgebra::distributed::Vector<double> &dst,
+ const std::vector<LinearAlgebra::distributed::Vector<double>*> &src) const
{
dst = 0;
data.cell_loop (&SineGordonOperation<dim,fe_degree>::local_apply,
MatrixFree<dim,double> matrix_free_data;
- parallel::distributed::Vector<double> solution, old_solution, old_old_solution;
+ LinearAlgebra::distributed::Vector<double> solution, old_solution, old_old_solution;
const unsigned int n_global_refinements;
double time, time_step;
old_solution);
output_results (0);
- std::vector<parallel::distributed::Vector<double>*> previous_solutions;
+ std::vector<LinearAlgebra::distributed::Vector<double>*> previous_solutions;
previous_solutions.push_back(&old_solution);
previous_solutions.push_back(&old_old_solution);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_tools.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <fstream>
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_tools.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <fstream>
local_active[1].set_size(2*numproc);
local_active[1].add_range(myid*2,myid*2+2);
- parallel::distributed::BlockVector<double> v(2);
+ LinearAlgebra::distributed::BlockVector<double> v(2);
v.block(0).reinit(local_active[0], complete_index_set(numproc), MPI_COMM_WORLD);
v.block(1).reinit(local_active[1], complete_index_set(2*numproc), MPI_COMM_WORLD);
v.collect_sizes();
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant,
- MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant,
+ MPI_COMM_WORLD);
// set local values
if (myid < 8)
AssertThrow (v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
}
- parallel::distributed::BlockVector<double> w(3);
+ LinearAlgebra::distributed::BlockVector<double> w(3);
for (unsigned int i=0; i<3; ++i)
w.block(i) = v;
w.collect_sizes();
const double norm_sqr = w.l2_norm() * w.l2_norm();
AssertThrow (std::fabs(w * w - norm_sqr) < 1e-12,
ExcInternalError());
- parallel::distributed::BlockVector<double> w2;
+ LinearAlgebra::distributed::BlockVector<double> w2;
w2 = w;
AssertThrow (std::fabs(w2 * w - norm_sqr) < 1e-12,
ExcInternalError());
bool allzero = w.all_zero();
if (myid == 0)
deallog << " v==0 ? " << allzero << std::endl;
- parallel::distributed::BlockVector<double> w2;
+ LinearAlgebra::distributed::BlockVector<double> w2;
w2.reinit (w);
allzero = w2.all_zero();
if (myid == 0)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_block_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant,
- MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant,
+ MPI_COMM_WORLD);
// set local values
if (myid < 8)
}
v.compress(VectorOperation::insert);
- parallel::distributed::BlockVector<double> w(3);
+ LinearAlgebra::distributed::BlockVector<double> w(3);
for (unsigned int i=0; i<3; ++i)
{
w.block(i) = v;
// create a vector copy that gets the entries from w. First, it should not
// have updated the ghosts because it is created from an empty state.
- parallel::distributed::BlockVector<double> x(w);
+ LinearAlgebra::distributed::BlockVector<double> x(w);
Assert(x.has_ghost_elements() == false, ExcInternalError());
for (unsigned int i=0; i<3; ++i)
if (myid == 0)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
IndexSet local_owned(numproc*2);
local_owned.add_range(myid*2,myid*2+2);
- parallel::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
// set local values
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
// set local values
if (myid < 8)
const double norm_sqr = v.l2_norm() * v.l2_norm();
AssertThrow (std::fabs(v * v - norm_sqr) < 1e-15,
ExcInternalError());
- parallel::distributed::Vector<double> v2;
+ LinearAlgebra::distributed::Vector<double> v2;
v2 = v;
AssertThrow (std::fabs(v2 * v - norm_sqr) < 1e-15,
ExcInternalError());
bool allzero = v.all_zero();
if (myid == 0)
deallog << " v==0 ? " << allzero << std::endl;
- parallel::distributed::Vector<double> v2;
+ LinearAlgebra::distributed::Vector<double> v2;
v2.reinit (v);
allzero = v2.all_zero();
if (myid == 0)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
};
local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set a few of the local elements
for (unsigned i=0; i<local_size; ++i)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
// v has ghosts, w has none. set some entries
// on w, copy into v and check if they are
// there
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> w(local_owned, local_owned, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> w(local_owned, local_owned, MPI_COMM_WORLD);
// set a few of the local elements
for (unsigned i=0; i<local_size; ++i)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
};
local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// check number of ghosts everywhere (counted above)
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> w(v);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> w(v);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_index (2);
- parallel::distributed::Vector<double> v(local_owned, local_relevant,
- MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant,
+ MPI_COMM_WORLD);
AssertDimension (static_cast<unsigned int>(actual_local_size), v.local_size());
- parallel::distributed::Vector<double> w (v), x(v), y(v);
+ LinearAlgebra::distributed::Vector<double> w (v), x(v), y(v);
// set local elements
for (int i=0; i<actual_local_size; ++i)
if (myid==0) deallog << "OK" << std::endl;
if (myid==0) deallog << "Check equ<float> (factor, vector): ";
- parallel::distributed::Vector<float> z;
+ LinearAlgebra::distributed::Vector<float> z;
z = v;
y = z;
for (int i=0; i<actual_local_size; ++i)
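The float/double round trip in the lines above (z = v; y = z;) relies on the assignment operator of LinearAlgebra::distributed::Vector being templated on the number type. A stand-alone sketch of that conversion, with a layout and values chosen purely for illustration (values exactly representable in single precision):

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  IndexSet local_owned(2 * n_procs);
  local_owned.add_range(2 * myid, 2 * myid + 2);

  LinearAlgebra::distributed::Vector<double> v(local_owned, MPI_COMM_WORLD);
  v(2 * myid)     = 0.25 * myid;
  v(2 * myid + 1) = 0.25 * myid + 0.125;

  // down-cast to float: the assignment also picks up the parallel layout of v
  LinearAlgebra::distributed::Vector<float> z;
  z = v;

  // and back up to double, into a vector with the same layout
  LinearAlgebra::distributed::Vector<double> y(local_owned, MPI_COMM_WORLD);
  y = z;

  if (myid == 0)
    std::cout << "entry 0 after float round trip: " << y(0) << std::endl;
}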
// ---------------------------------------------------------------------
-// check parallel::distributed::Vector::swap
+// check LinearAlgebra::distributed::Vector::swap
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
if (numproc > 2)
local_relevant0.add_index(8);
- parallel::distributed::Vector<double> v0(local_owned0, local_relevant0,
- MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v0(local_owned0, local_relevant0,
+ MPI_COMM_WORLD);
// vector1: local size 4
const unsigned int local_size1 = 4;
local_relevant1.add_index(10);
}
- parallel::distributed::Vector<double> v1(local_owned1, local_relevant1,
- MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v1(local_owned1, local_relevant1,
+ MPI_COMM_WORLD);
v0 = 1;
v1 = 2;
if (myid==0) deallog << "Ghost values after re-set OK" << std::endl;
// swap with an empty vector
- parallel::distributed::Vector<double> v2;
+ LinearAlgebra::distributed::Vector<double> v2;
v2.swap (v0);
AssertDimension (v0.size(), 0);
AssertDimension (v2.size(), global_size1);
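As a quick illustration of the swap semantics the test above exercises, here is a reduced sketch (independent of the test harness; the per-rank sizes 2 and 4 are assumptions made for the example): swap exchanges both the data and the parallel layout, and swapping with a default-constructed vector leaves the other one empty.

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // two vectors with different local sizes (2 and 4 entries per rank)
  IndexSet owned0(2 * n_procs), owned1(4 * n_procs);
  owned0.add_range(2 * myid, 2 * myid + 2);
  owned1.add_range(4 * myid, 4 * myid + 4);

  LinearAlgebra::distributed::Vector<double> v0(owned0, MPI_COMM_WORLD);
  LinearAlgebra::distributed::Vector<double> v1(owned1, MPI_COMM_WORLD);
  v0 = 1.;
  v1 = 2.;

  v0.swap(v1); // v0 now carries the layout and values of the former v1

  // swapping with an empty vector transfers everything and empties v0
  LinearAlgebra::distributed::Vector<double> v2;
  v2.swap(v0);

  if (myid == 0)
    std::cout << "sizes: v0=" << v0.size() << " v1=" << v1.size()
              << " v2=" << v2.size() << std::endl;
}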
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant.add_range(1,2);
local_relevant.add_range(4,5);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values and check them
v(myid*2)=myid*2.0;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
// and once where they have not
for (unsigned int run = 0; run < 2; ++run)
{
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values
if (myid < 2)
v.compress(VectorOperation::insert);
- parallel::distributed::Vector<double> w(v), u(v);
+ LinearAlgebra::distributed::Vector<double> w(v), u(v);
u = 0;
v*=2.0;
}
// copy vector content to non-ghosted vectors, manually created.
- parallel::distributed::Vector<double> v_dist(local_owned, MPI_COMM_WORLD),
- w_dist(v_dist), u_dist(v_dist);
+ LinearAlgebra::distributed::Vector<double> v_dist(local_owned, MPI_COMM_WORLD),
+ w_dist(v_dist), u_dist(v_dist);
v_dist = v;
w_dist = w;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
if (numproc > 1)
local_relevant.add_range(3,4);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
// set local values
if (myid < 2)
if (myid==0)
deallog << "v has ghost elements: " << v.has_ghost_elements() << std::endl;
- parallel::distributed::Vector<double> w, x;
+ LinearAlgebra::distributed::Vector<double> w, x;
w = v;
if (myid==0)
deallog << "w has ghost elements: " << w.has_ghost_elements() << std::endl;
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant.add_range(min_index+38,min_index+40);
local_relevant.add_range(min_index+41,min_index+43);
- parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
deallog << "Local range of proc 0: " << v.local_range().first << " "
<< v.local_range().second << std::endl;
// ---------------------------------------------------------------------
-// check parallel::distributed::Vector::partitioners_are_compatible and
+// check LinearAlgebra::distributed::Vector::partitioners_are_compatible and
// partitioners_are_globally_compatible
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
local_relevant = local_owned;
local_relevant.add_range(1,2);
- parallel::distributed::Vector<double> v1, v2, v3, v4, v5, v6;
+ LinearAlgebra::distributed::Vector<double> v1, v2, v3, v4, v5, v6;
v1.reinit(local_owned, MPI_COMM_WORLD);
v2.reinit(local_owned, local_relevant, MPI_COMM_WORLD);
v3.reinit(local_owned, local_relevant, MPI_COMM_WORLD);
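To make the compatibility query in this test concrete, here is a minimal sketch of how partitioners_are_compatible can be used (my own example, not from the patch): two vectors reinitialized from the same owned/ghost index sets report compatible partitioners, whereas a vector without the ghost entries does not, since the comparison takes the ghost layout into account.

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  IndexSet local_owned(2 * n_procs);
  local_owned.add_range(2 * myid, 2 * myid + 2);
  IndexSet local_relevant = local_owned;
  local_relevant.add_range(1, 2);

  LinearAlgebra::distributed::Vector<double> v1, v2, v3;
  v1.reinit(local_owned, MPI_COMM_WORLD);                  // no ghosts
  v2.reinit(local_owned, local_relevant, MPI_COMM_WORLD);  // with ghosts
  v3.reinit(local_owned, local_relevant, MPI_COMM_WORLD);  // same layout as v2

  if (myid == 0)
    std::cout << "v2/v3 compatible: "
              << v2.partitioners_are_compatible(*v3.get_partitioner())
              << ", v1/v2 compatible: "
              << v1.partitioners_are_compatible(*v2.get_partitioner())
              << std::endl;
}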
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
DoFTools::extract_locally_relevant_dofs (dof2,
locally_relevant_dofs2);
- parallel::distributed::Vector<double>
+ LinearAlgebra::distributed::Vector<double>
v2(dof2.locally_owned_dofs(), locally_relevant_dofs2, MPI_COMM_WORLD),
v2_interpolated(v2);
#include "../tests.h"
#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_handler.h>
DoFTools::extract_locally_relevant_dofs (dof2,
locally_relevant_dofs2);
- parallel::distributed::Vector<double> v1(dof1.locally_owned_dofs(),
- locally_relevant_dofs1,
- MPI_COMM_WORLD),
- v2(dof2.locally_owned_dofs(), locally_relevant_dofs2, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> v1(dof1.locally_owned_dofs(),
+ locally_relevant_dofs1,
+ MPI_COMM_WORLD),
+ v2(dof2.locally_owned_dofs(), locally_relevant_dofs2, MPI_COMM_WORLD);
// set first vector to 1
VectorTools::interpolate(dof1, ConstantFunction<dim>(1.), v1);
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
dof_handler.distribute_dofs (fe);
IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs ();
- parallel::distributed::Vector<double> locally_owned_solution
+ LinearAlgebra::distributed::Vector<double> locally_owned_solution
(locally_owned_dofs, MPI_COMM_WORLD);
locally_owned_solution=1;
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
x.reinit (col_partitioning, MPI_COMM_WORLD);
y.reinit (row_partitioning, MPI_COMM_WORLD);
- parallel::distributed::Vector<double>
+ LinearAlgebra::distributed::Vector<double>
dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
x.reinit (col_partitioning, MPI_COMM_WORLD);
y.reinit (row_partitioning, MPI_COMM_WORLD);
- parallel::distributed::Vector<double>
+ LinearAlgebra::distributed::Vector<double>
dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
x.reinit (col_partitioning, MPI_COMM_WORLD);
y.reinit (row_partitioning, MPI_COMM_WORLD);
- parallel::distributed::Vector<double>
+ LinearAlgebra::distributed::Vector<double>
dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <fstream>
#include <iostream>
#include <vector>
// ---------------------------------------------------------------------
// Similar to step-16-03 (starting the mg hierarchy at level 2 rather than
-// level 0) but for parallel::distributed::Vector that has a different code
+// level 0) but for LinearAlgebra::distributed::Vector that has a different code
// path
#include "../tests.h"
#include <deal.II/base/mpi.h>
#include <deal.II/lac/constraint_matrix.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/solver_cg.h>
ConstraintMatrix hanging_node_constraints;
ConstraintMatrix constraints;
- parallel::distributed::Vector<double> solution;
- parallel::distributed::Vector<double> system_rhs;
+ LinearAlgebra::distributed::Vector<double> solution;
+ LinearAlgebra::distributed::Vector<double> system_rhs;
const unsigned int degree;
const unsigned int min_level;
template <int dim>
void LaplaceProblem<dim>::solve ()
{
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
mg_transfer(hanging_node_constraints, mg_constrained_dofs);
mg_transfer.build_matrices(mg_dof_handler);
SolverControl coarse_solver_control (1000, 1e-10, false, false);
- SolverCG<parallel::distributed::Vector<double> > coarse_solver(coarse_solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > coarse_solver(coarse_solver_control);
PreconditionIdentity id;
- MGCoarseGridLACIteration<SolverCG<parallel::distributed::Vector<double> >,parallel::distributed::Vector<double> >
+ MGCoarseGridLACIteration<SolverCG<LinearAlgebra::distributed::Vector<double> >,LinearAlgebra::distributed::Vector<double> >
coarse_grid_solver(coarse_solver, mg_matrices[min_level], id);
deallog << " Size of coarse grid matrix: " << mg_matrices[min_level].m() << std::endl;
- typedef PreconditionChebyshev<SparseMatrix<double>,parallel::distributed::Vector<double> > Smoother;
- GrowingVectorMemory<parallel::distributed::Vector<double> > vector_memory;
- MGSmootherPrecondition<SparseMatrix<double>, Smoother, parallel::distributed::Vector<double> >
+ typedef PreconditionChebyshev<SparseMatrix<double>,LinearAlgebra::distributed::Vector<double> > Smoother;
+ GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> > vector_memory;
+ MGSmootherPrecondition<SparseMatrix<double>, Smoother, LinearAlgebra::distributed::Vector<double> >
mg_smoother;
typename Smoother::AdditionalData smoother_data;
smoother_data.smoothing_range = 20.;
smoother_data.eig_cg_n_iterations = 20;
mg_smoother.initialize(mg_matrices, smoother_data);
- mg::Matrix<parallel::distributed::Vector<double> > mg_matrix(mg_matrices);
- mg::Matrix<parallel::distributed::Vector<double> > mg_interface_up(mg_interface_matrices);
- mg::Matrix<parallel::distributed::Vector<double> > mg_interface_down(mg_interface_matrices);
-
- Multigrid<parallel::distributed::Vector<double> > mg(min_level,
- triangulation.n_global_levels()-1,
- mg_matrix,
- coarse_grid_solver,
- mg_transfer,
- mg_smoother,
- mg_smoother);
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> > mg_matrix(mg_matrices);
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> > mg_interface_up(mg_interface_matrices);
+ mg::Matrix<LinearAlgebra::distributed::Vector<double> > mg_interface_down(mg_interface_matrices);
+
+ Multigrid<LinearAlgebra::distributed::Vector<double> > mg(min_level,
+ triangulation.n_global_levels()-1,
+ mg_matrix,
+ coarse_grid_solver,
+ mg_transfer,
+ mg_smoother,
+ mg_smoother);
mg.set_edge_matrices(mg_interface_down, mg_interface_up);
- PreconditionMG<dim, parallel::distributed::Vector<double>, MGTransferPrebuilt<parallel::distributed::Vector<double> > >
+ PreconditionMG<dim, LinearAlgebra::distributed::Vector<double>, MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> > >
preconditioner(mg_dof_handler, mg, mg_transfer);
SolverControl solver_control (1000, 1e-12);
- SolverCG<parallel::distributed::Vector<double> > cg (solver_control);
+ SolverCG<LinearAlgebra::distributed::Vector<double> > cg (solver_control);
solution = 0;
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/fe/fe_q.h>
mg_constrained_dofs.initialize(mgdof, dirichlet_boundary);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
transfer_ref(hanging_node_constraints, mg_constrained_dofs);
transfer_ref.build_matrices(mgdof);
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
mg_constrained_dofs.initialize(mgdof, dirichlet_boundary);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
transfer_ref(hanging_node_constraints, mg_constrained_dofs);
transfer_ref.build_matrices(mgdof);
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
mg_constrained_dofs.initialize(mgdof, dirichlet_boundary);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
transfer_ref(hanging_node_constraints, mg_constrained_dofs);
transfer_ref.build_matrices(mgdof);
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/fe/fe_dgq.h>
mgdof.distribute_mg_dofs(fe);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> > transfer_ref;
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> > transfer_ref;
transfer_ref.build_matrices(mgdof);
// build matrix-free transfer
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/fe/fe_dgq.h>
mgdof.distribute_mg_dofs(fe);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> > transfer_ref;
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> > transfer_ref;
transfer_ref.build_matrices(mgdof);
// build matrix-free transfer
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
mg_constrained_dofs.initialize(mgdof, dirichlet_boundary);
// build reference
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
transfer_ref(hanging_node_constraints, mg_constrained_dofs);
transfer_ref.build_matrices(mgdof);
// check prolongation for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
// check restriction for all levels using random vector
for (unsigned int level=1; level<mgdof.get_triangulation().n_global_levels(); ++level)
{
- parallel::distributed::Vector<Number> v1, v2;
- parallel::distributed::Vector<double> v1_cpy, v2_cpy, v3;
+ LinearAlgebra::distributed::Vector<Number> v1, v2;
+ LinearAlgebra::distributed::Vector<double> v1_cpy, v2_cpy, v3;
v1.reinit(mgdof.locally_owned_mg_dofs(level), MPI_COMM_WORLD);
v2.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
v3.reinit(mgdof.locally_owned_mg_dofs(level-1), MPI_COMM_WORLD);
#include "../tests.h"
#include <deal.II/base/logstream.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
grid_out.write_svg (tr, grid_output);
}
- MGTransferPrebuilt<parallel::distributed::Vector<double> >
+ MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double> >
transfer_ref(hanging_node_constraints, mg_constrained_dofs);
transfer_ref.build_matrices(mgdof);
}
-// Test parallel::distributed::Vector::operator=(PETScWrappers::MPI::Vector&)
+// Test LinearAlgebra::distributed::Vector::operator=(PETScWrappers::MPI::Vector&)
#include "../tests.h"
#include <deal.II/lac/petsc_parallel_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/base/index_set.h>
#include <fstream>
#include <iostream>
PETScWrappers::MPI::Vector vb(local_active, MPI_COMM_WORLD);
PETScWrappers::MPI::Vector v(local_active, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> copied(local_active, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> copied(local_active, local_relevant, MPI_COMM_WORLD);
// set local values
vb(myid*2)=myid*2.0;
-// Test parallel::distributed::Vector::operator=(PETScWrappers::MPI::BlockVector&)
+// Test LinearAlgebra::distributed::Vector::operator=(PETScWrappers::MPI::BlockVector&)
#include "../tests.h"
#include <deal.II/lac/petsc_parallel_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/petsc_parallel_block_vector.h>
#include <deal.II/base/index_set.h>
#include <fstream>
PETScWrappers::MPI::Vector vb_one(local_active, MPI_COMM_WORLD);
PETScWrappers::MPI::Vector v_one(local_active, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> copied_one(local_active, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> copied_one(local_active, local_relevant, MPI_COMM_WORLD);
// set local values
vb_one(myid*2)=myid*2.0;
PETScWrappers::MPI::BlockVector vb, v;
vb.reinit(2);
v.reinit(2);
- parallel::distributed::BlockVector<double> copied(2);
+ LinearAlgebra::distributed::BlockVector<double> copied(2);
for (unsigned int bl=0; bl<2; ++bl)
{
vb.block(bl) = vb_one;
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/distributed/grid_refinement.h>
DoFTools::extract_locally_relevant_dofs (dof_handler,locally_relevant_dofs);
PETScWrappers::MPI::Vector vector, vector_locally_relevant;
- parallel::distributed::Vector< double > vector_Re, vector_Re_locally_relevant,
- vector_Im, vector_Im_locally_relevant;
+ LinearAlgebra::distributed::Vector< double > vector_Re, vector_Re_locally_relevant,
+ vector_Im, vector_Im_locally_relevant;
vector.reinit(locally_owned_dofs, mpi_communicator);
vector_locally_relevant.reinit (locally_owned_dofs,
locally_relevant_dofs,mpi_communicator);
-// Test parallel::distributed::Vector::operator=(TrilinosWrappers::MPI::Vector&)
+// Test LinearAlgebra::distributed::Vector::operator=(TrilinosWrappers::MPI::Vector&)
#include "../tests.h"
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/base/index_set.h>
#include <fstream>
#include <iostream>
TrilinosWrappers::MPI::Vector vb(local_active, MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector v(local_active, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> copied(local_active, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> copied(local_active, local_relevant, MPI_COMM_WORLD);
// set local values
vb(myid*2)=myid*2.0;
Assert(copied(myid*2) == myid*4.0, ExcInternalError());
Assert(copied(myid*2+1) == myid*4.0+2.0, ExcInternalError());
- copied = v;
+ copied.update_ghost_values();
// check ghost values
if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
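The hunk above swaps a second assignment from the ghosted Trilinos vector for an explicit update_ghost_values() on the deal.II side; the intent appears to be that the assignment brings over the locally owned range and the ghost entries are fetched afterwards. A minimal sketch of that pattern, assuming a deal.II build configured with Trilinos and an index layout chosen only for illustration:

#include <deal.II/base/mpi.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <iostream>

int main(int argc, char **argv)
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

  // each rank owns two entries; entry 1 is ghosted on all other ranks
  IndexSet local_active(2 * n_procs);
  local_active.add_range(2 * myid, 2 * myid + 2);
  IndexSet local_relevant = local_active;
  local_relevant.add_range(1, 2);

  // non-ghosted Trilinos vector holding the data
  TrilinosWrappers::MPI::Vector vb(local_active, MPI_COMM_WORLD);
  vb(2 * myid)     = 2.0 * myid;
  vb(2 * myid + 1) = 2.0 * myid + 1.0;
  vb.compress(VectorOperation::insert);

  // copy into a ghosted deal.II vector, then fetch the ghost entries explicitly
  LinearAlgebra::distributed::Vector<double> copied(local_active, local_relevant,
                                                    MPI_COMM_WORLD);
  copied = vb;
  copied.update_ghost_values();

  if (myid != 0)
    std::cout << "ghost entry 1 on rank " << myid << ": " << copied(1) << std::endl;
}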
-// Test parallel::distributed::Vector::operator=(TrilinosWrappers::MPI::BlockVector&)
+// Test LinearAlgebra::distributed::Vector::operator=(TrilinosWrappers::MPI::BlockVector&)
#include "../tests.h"
#include <deal.II/lac/trilinos_vector.h>
-#include <deal.II/lac/parallel_vector.h>
-#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/base/index_set.h>
#include <fstream>
TrilinosWrappers::MPI::Vector vb_one(local_active, MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector v_one(local_active, local_relevant, MPI_COMM_WORLD);
- parallel::distributed::Vector<double> copied_one(local_active, local_relevant, MPI_COMM_WORLD);
+ LinearAlgebra::distributed::Vector<double> copied_one(local_active, local_relevant, MPI_COMM_WORLD);
// set local values
vb_one(myid*2)=myid*2.0;
v_one=vb_one;
TrilinosWrappers::MPI::BlockVector vb(2), v(2);
- parallel::distributed::BlockVector<double> copied(2);
+ LinearAlgebra::distributed::BlockVector<double> copied(2);
for (unsigned int bl=0; bl<2; ++bl)
{
vb.block(bl) = vb_one;
#include "../tests.h"
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <fstream>
#include <iostream>
#include <vector>
-void test (parallel::distributed::Vector<double> &v,
- parallel::distributed::Vector<double> &w)
+void test (LinearAlgebra::distributed::Vector<double> &v,
+ LinearAlgebra::distributed::Vector<double> &w)
{
TrilinosWrappers::SparseMatrix m(w.size(),v.size(),v.size());
for (unsigned int i=0; i<m.m(); ++i)
try
{
{
- parallel::distributed::Vector<double> v (100);
- parallel::distributed::Vector<double> w (95);
+ LinearAlgebra::distributed::Vector<double> v (100);
+ LinearAlgebra::distributed::Vector<double> w (95);
test (v,w);
}
}
#include "../tests.h"
#include <deal.II/base/utilities.h>
-#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <fstream>
#include <iostream>
#include <vector>
-void test (parallel::distributed::Vector<double> &v,
- parallel::distributed::Vector<double> &w)
+void test (LinearAlgebra::distributed::Vector<double> &v,
+ LinearAlgebra::distributed::Vector<double> &w)
{
TrilinosWrappers::SparseMatrix m(v.size(),w.size(),w.size());
for (unsigned int i=0; i<m.m(); ++i)
try
{
{
- parallel::distributed::Vector<double> v (95);
- parallel::distributed::Vector<double> w (100);
+ LinearAlgebra::distributed::Vector<double> v (95);
+ LinearAlgebra::distributed::Vector<double> w (100);
test (v,w);
}
}