BlockVector<double>;
BlockVector<float>;
- parallel::distributed::Vector<double>;
- parallel::distributed::Vector<float> ;
+ LinearAlgebra::distributed::Vector<double>;
+ LinearAlgebra::distributed::Vector<float> ;
parallel::distributed::BlockVector<double>;
parallel::distributed::BlockVector<float> ;
template <typename> class ReadWriteVector;
}
+#ifdef DEAL_II_WITH_PETSC
+namespace PETScWrappers
+{
+ namespace MPI
+ {
+ class Vector;
+ }
+}
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+namespace TrilinosWrappers
+{
+ namespace MPI
+ {
+ class Vector;
+ }
+}
+#endif
+
namespace LinearAlgebra
{
- namespace parallel
+ namespace distributed
{
/*! @addtogroup Vectors
*@{
template <typename Number2>
Vector<Number> &
operator = (const Vector<Number2> &in_vector);
+
+#ifdef DEAL_II_WITH_PETSC
+ /**
+ * Copy the content of a PETSc vector into the calling vector. This
+ * function assumes that the layouts of the two vectors have already been
+ * initialized to match.
+ *
+ * This operator is only available if deal.II was configured with PETSc.
+ *
+ * This function is deprecated. Use the interface through
+ * ReadWriteVector instead.
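+ *
+ * A minimal sketch of that interface (the destination vector @p vec and
+ * the index set are placeholders for illustration):
+ * @code
+ * // read the PETSc entries into an intermediate ReadWriteVector over the
+ * // locally owned range, then import them into the distributed vector
+ * IndexSet owned_set = vec.locally_owned_elements();
+ * LinearAlgebra::ReadWriteVector<double> rw_vector(owned_set);
+ * rw_vector.import(petsc_vec, VectorOperation::insert);
+ * vec.import(rw_vector, VectorOperation::insert);
+ * @endcode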
+ */
+ Vector<Number> &
+ operator = (const PETScWrappers::MPI::Vector &petsc_vec) DEAL_II_DEPRECATED;
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+ /**
+ * Copy the content of a Trilinos vector into the calling vector. This
+ * function assumes that the layouts of the two vectors have already been
+ * initialized to match.
+ *
+ * This operator is only available if deal.II was configured with
+ * Trilinos.
+ *
+ * This function is deprecated. Use the interface through
+ * ReadWriteVector instead.
+ */
+ Vector<Number> &
+ operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec) DEAL_II_DEPRECATED;
+#endif
//@}
/**
* Exception
*/
DeclException3 (ExcNonMatchingElements,
- double, double, unsigned int,
+ Number, Number, unsigned int,
<< "Called compress(VectorOperation::insert), but"
<< " the element received from a remote processor, value "
<< std::setprecision(16) << arg1
*/
template <typename Number>
inline
-void swap (LinearAlgebra::parallel::Vector<Number> &u,
- LinearAlgebra::parallel::Vector<Number> &v)
+void swap (LinearAlgebra::distributed::Vector<Number> &u,
+ LinearAlgebra::distributed::Vector<Number> &v)
{
u.swap (v);
}
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_operations_internal.h>
#include <deal.II/lac/read_write_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
DEAL_II_NAMESPACE_OPEN
namespace LinearAlgebra
{
- namespace parallel
+ namespace distributed
{
template <typename Number>
+#ifdef DEAL_II_WITH_PETSC
+
+ template <typename Number>
+ Vector<Number> &
+ Vector<Number>::operator = (const PETScWrappers::MPI::Vector &petsc_vec)
+ {
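+ // Communicate through an intermediate ReadWriteVector that covers the
+ // locally owned and ghost indices of this vector, then import its
+ // contents with insert semantics.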
+ IndexSet combined_set = partitioner->locally_owned_range();
+ combined_set.add_indices(partitioner->ghost_indices());
+ ReadWriteVector<Number> rw_vector(combined_set);
+ rw_vector.import(petsc_vec, VectorOperation::insert);
+ import(rw_vector, VectorOperation::insert);
+
+ if (vector_is_ghosted || petsc_vec.has_ghost_elements())
+ update_ghost_values();
+
+ return *this;
+ }
+
+#endif
+
+
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+ template <typename Number>
+ Vector<Number> &
+ Vector<Number>::operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec)
+ {
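+ // Same strategy as the PETSc variant above: go through an intermediate
+ // ReadWriteVector covering the locally owned and ghost indices.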
+ IndexSet combined_set = partitioner->locally_owned_range();
+ combined_set.add_indices(partitioner->ghost_indices());
+ ReadWriteVector<Number> rw_vector(combined_set);
+ rw_vector.import(trilinos_vec, VectorOperation::insert);
+ import(rw_vector, VectorOperation::insert);
+
+ if (vector_is_ghosted || trilinos_vec.has_ghost_elements())
+ update_ghost_values();
+
+ return *this;
+ }
+
+#endif
+
+
+
template <typename Number>
void
Vector<Number>::compress (::dealii::VectorOperation::values operation)
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
for (unsigned int j=my_imports->first; j<my_imports->second;
j++, read_position++)
- Assert(*read_position == 0. ||
+ Assert(*read_position == Number() ||
std::abs(local_element(j) - *read_position) <=
std::abs(local_element(j)) * 1000. *
- std::numeric_limits<Number>::epsilon(),
+ std::numeric_limits<real_type>::epsilon(),
ExcNonMatchingElements(*read_position, local_element(j),
part.this_mpi_process()));
AssertDimension(read_position-import_data,part.n_import_indices());
comm_pattern =
std_cxx11::dynamic_pointer_cast<const Utilities::MPI::Partitioner> (communication_pattern);
AssertThrow(comm_pattern != NULL,
- ExcMessage(std::string("The communication pattern is not of type ") +
+ ExcMessage("The communication pattern is not of type "
"Utilities::MPI::Partitioner."));
}
- LinearAlgebra::parallel::Vector<Number> tmp_vector(comm_pattern);
+ Vector<Number> tmp_vector(comm_pattern);
// fill entries from ReadWriteVector into the distributed vector,
// including ghost entries. this is not really efficient right now
- // because indices are translated twice, once for
+ // because indices are translated twice, once by nth_index_in_set(i) and
+ // once for operator() of tmp_vector
const IndexSet &v_stored = V.get_stored_elements();
for (size_type i=0; i<v_stored.n_elements(); ++i)
tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i);
Vector<Number> &
Vector<Number>::operator /= (const Number factor)
{
- operator *= (1./factor);
+ operator *= (static_cast<Number>(1.)/factor);
return *this;
}
*
* @author Katharina Kormann, Martin Kronbichler, 2010, 2011
*/
- using LinearAlgebra::parallel::Vector;
+ using LinearAlgebra::distributed::Vector;
}
}
inline void
PreconditionIdentity::vmult_add (VectorType &dst, const VectorType &src) const
{
- dst.add(src);
+ dst += src;
}
inline void
PreconditionIdentity::Tvmult_add (VectorType &dst, const VectorType &src) const
{
- dst.add(src);
+ dst += src;
}
inline PreconditionIdentity::size_type
namespace LinearAlgebra
{
class CommunicationPatternBase;
+ namespace distributed
+ {
+ template <typename> class Vector;
+ }
}
#ifdef DEAL_II_WITH_PETSC
*/
ReadWriteVector<Number> &operator = (const Number s);
+ /**
+ * Imports all the elements present in the vector's IndexSet from the
+ * input vector @p vec. VectorOperation::values @p operation
+ * is used to decide if the elements in @p vec should be added to the
+ * current vector or replace the current elements. The last parameter can
+ * be used if the same communication pattern is used multiple times,
+ * which can improve performance.
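+ *
+ * A minimal usage sketch (the source LinearAlgebra::distributed::Vector
+ * @p vec and the index set @p locally_relevant_dofs are placeholders):
+ * @code
+ * LinearAlgebra::ReadWriteVector<double> rw_vector(locally_relevant_dofs);
+ * rw_vector.import(vec, VectorOperation::insert);
+ * @endcode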
+ */
+ void import(const distributed::Vector<Number> &vec,
+ VectorOperation::values operation,
+ std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+ std_cxx11::shared_ptr<const CommunicationPatternBase> ());
+
#ifdef DEAL_II_WITH_PETSC
/**
* Imports all the elements present in the vector's IndexSet from the input
#include <deal.II/base/config.h>
#include <deal.II/lac/read_write_vector.h>
#include <deal.II/lac/vector_operations_internal.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/lac/la_parallel_vector.h>
#ifdef DEAL_II_WITH_PETSC
# include <deal.II/lac/petsc_parallel_vector.h>
+ template <typename Number>
+ void
+ ReadWriteVector<Number>::import(const distributed::Vector<Number> &vec,
+ VectorOperation::values operation,
+ std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern)
+ {
+ // If no communication pattern is given, create one. Otherwise, use the
+ // given one.
+ std_cxx11::shared_ptr<const Utilities::MPI::Partitioner> comm_pattern;
+ if (communication_pattern.get() == NULL)
+ {
+ comm_pattern.reset(new Utilities::MPI::Partitioner(vec.locally_owned_elements(),
+ get_stored_elements(),
+ vec.get_mpi_communicator()));
+ }
+ else
+ {
+ comm_pattern =
+ std_cxx11::dynamic_pointer_cast<const Utilities::MPI::Partitioner> (communication_pattern);
+ AssertThrow(comm_pattern != NULL,
+ ExcMessage("The communication pattern is not of type "
+ "Utilities::MPI::Partitioner."));
+ }
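+
+ // Create an intermediate ghosted vector whose ghost entries cover the
+ // stored elements of this ReadWriteVector; update_ghost_values() then
+ // makes all entries read below available locally.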
+ distributed::Vector<Number> tmp_vector(comm_pattern);
+
+ std::copy(vec.begin(), vec.end(), tmp_vector.begin());
+ tmp_vector.update_ghost_values();
+
+ const IndexSet &stored = get_stored_elements();
+ if (operation == VectorOperation::add)
+ for (size_type i=0; i<stored.n_elements(); ++i)
+ local_element(i) += tmp_vector(stored.nth_index_in_set(i));
+ else
+ for (size_type i=0; i<stored.n_elements(); ++i)
+ local_element(i) = tmp_vector(stored.nth_index_in_set(i));
+ }
+
+
+
#ifdef DEAL_II_WITH_PETSC
namespace internal
{
// forward declarations
namespace LinearAlgebra
{
- namespace parallel
+ namespace distributed
{
template <typename> class Vector;
}
template <typename Number>
inline
Number &
- vector_access (LinearAlgebra::parallel::Vector<Number> &vec,
- const unsigned int entry)
+ vector_access (LinearAlgebra::distributed::Vector<Number> &vec,
+ const unsigned int entry)
{
return vec.local_element(entry);
}
template <typename Number>
inline
Number
- vector_access (const LinearAlgebra::parallel::Vector<Number> &vec,
- const unsigned int entry)
+ vector_access (const LinearAlgebra::distributed::Vector<Number> &vec,
+ const unsigned int entry)
{
return vec.local_element(entry);
}
// this is to make sure that the parallel partitioning in the
- // parallel::distributed::Vector is really the same as stored in MatrixFree
+ // LinearAlgebra::distributed::Vector is really the same as stored in
+ // MatrixFree
template <typename VectorType>
inline
void check_vector_compatibility (const VectorType &vec,
template <typename Number>
inline
- void check_vector_compatibility (const LinearAlgebra::parallel::Vector<Number> &vec,
- const internal::MatrixFreeFunctions::DoFInfo &dof_info)
+ void check_vector_compatibility (const LinearAlgebra::distributed::Vector<Number> &vec,
+ const internal::MatrixFreeFunctions::DoFInfo &dof_info)
{
Assert (vec.partitioners_are_compatible(*dof_info.vector_partitioner),
ExcMessage("The parallel layout of the given vector is not "
namespace LinearAlgebra
{
- namespace parallel
+ namespace distributed
{
#define TEMPL_COPY_CONSTRUCTOR(S1,S2) \
template Vector<S1>& Vector<S1>::operator=<S2> (const Vector<S2> &)
{
namespace LinearAlgebra
\{
- namespace parallel
+ namespace distributed
\{
template class Vector<SCALAR>;
\}
{
namespace LinearAlgebra
\{
- namespace parallel
+ namespace distributed
+ \{
+ template void Vector<S1>::reinit<S2> (const Vector<S2>&,
+ const bool);
+ \}
+ \}
+}
+
+for (SCALAR : COMPLEX_SCALARS)
+{
+ namespace LinearAlgebra
+ \{
+ namespace distributed
+ \{
+ template class Vector<SCALAR>;
+ \}
+ \}
+}
+
+for (S1, S2 : COMPLEX_SCALARS)
+{
+ namespace LinearAlgebra
+ \{
+ namespace distributed
\{
template void Vector<S1>::reinit<S2> (const Vector<S2>&,
const bool);