From 07dca1392bfede6cfdafc7cbfed5225f0e030ab1 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Sat, 11 Jun 2016 22:24:52 +0200
Subject: [PATCH] Fix according to review

---
 cmake/config/template-arguments.in            |  4 +-
 include/deal.II/lac/la_parallel_vector.h      | 59 ++++++++++++++++--
 .../lac/la_parallel_vector.templates.h        | 61 ++++++++++++++++---
 include/deal.II/lac/parallel_vector.h         |  2 +-
 include/deal.II/lac/precondition.h            |  4 +-
 include/deal.II/lac/read_write_vector.h       | 17 ++++++
 .../deal.II/lac/read_write_vector.templates.h | 41 +++++++++++++
 include/deal.II/matrix_free/fe_evaluation.h   | 17 +++---
 source/lac/la_parallel_vector.cc              |  2 +-
 source/lac/la_parallel_vector.inst.in         | 27 +++++++-
 10 files changed, 207 insertions(+), 27 deletions(-)

diff --git a/cmake/config/template-arguments.in b/cmake/config/template-arguments.in
index adf41b2ab8..ec553ea34d 100644
--- a/cmake/config/template-arguments.in
+++ b/cmake/config/template-arguments.in
@@ -23,8 +23,8 @@ SERIAL_VECTORS := { Vector<double>;
                     BlockVector<double>;
                     BlockVector<float>;

-                    parallel::distributed::Vector<double>;
-                    parallel::distributed::Vector<float> ;
+                    LinearAlgebra::distributed::Vector<double>;
+                    LinearAlgebra::distributed::Vector<float> ;

                     parallel::distributed::BlockVector<double>;
                     parallel::distributed::BlockVector<float> ;

diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index e9bf996890..2cc4e0999d 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -39,9 +39,29 @@ namespace LinearAlgebra
   template <typename Number> class ReadWriteVector;
 }

+#ifdef DEAL_II_WITH_PETSC
+namespace PETScWrappers
+{
+  namespace MPI
+  {
+    class Vector;
+  }
+}
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+namespace TrilinosWrappers
+{
+  namespace MPI
+  {
+    class Vector;
+  }
+}
+#endif
+
 namespace LinearAlgebra
 {
-  namespace parallel
+  namespace distributed
   {
     /*! @addtogroup Vectors
      *@{
@@ -335,6 +355,37 @@ namespace LinearAlgebra
       template <typename Number2>
       Vector<Number> &
       operator = (const Vector<Number2> &in_vector);
+
+#ifdef DEAL_II_WITH_PETSC
+      /**
+       * Copy the content of a PETSc vector into the calling vector. This
+       * function assumes that the vectors' layouts have already been
+       * initialized to match.
+       *
+       * This operator is only available if deal.II was configured with PETSc.
+       *
+       * This function is deprecated. Use the interface through
+       * ReadWriteVector instead.
+       */
+      Vector<Number> &
+      operator = (const PETScWrappers::MPI::Vector &petsc_vec) DEAL_II_DEPRECATED;
+#endif
+
+#ifdef DEAL_II_WITH_TRILINOS
+      /**
+       * Copy the content of a Trilinos vector into the calling vector. This
+       * function assumes that the vectors' layouts have already been
+       * initialized to match.
+       *
+       * This operator is only available if deal.II was configured with
+       * Trilinos.
+       *
+       * This function is deprecated. Use the interface through
+       * ReadWriteVector instead.
+       */
+      Vector<Number> &
+      operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec) DEAL_II_DEPRECATED;
+#endif
       //@}

       /**
@@ -906,7 +957,7 @@
        * Exception
        */
       DeclException3 (ExcNonMatchingElements,
-                      double, double, unsigned int,
+                      Number, Number, unsigned int,
                       << "Called compress(VectorOperation::insert), but"
                       << " the element received from a remote processor, value "
                       << std::setprecision(16) << arg1
@@ -1372,8 +1423,8 @@
  */
 template <typename Number>
 inline
-void swap (LinearAlgebra::parallel::Vector<Number> &u,
-           LinearAlgebra::parallel::Vector<Number> &v)
+void swap (LinearAlgebra::distributed::Vector<Number> &u,
+           LinearAlgebra::distributed::Vector<Number> &v)
 {
   u.swap (v);
 }

diff --git a/include/deal.II/lac/la_parallel_vector.templates.h b/include/deal.II/lac/la_parallel_vector.templates.h
index 2e6b7cf7ea..7c18e5b419 100644
--- a/include/deal.II/lac/la_parallel_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_vector.templates.h
@@ -21,6 +21,8 @@
 #include
 #include
 #include
+#include
+#include


 DEAL_II_NAMESPACE_OPEN
@@ -28,7 +30,7 @@

 namespace LinearAlgebra
 {
-  namespace parallel
+  namespace distributed
   {

     template <typename Number>
@@ -384,6 +386,50 @@ namespace LinearAlgebra



+#ifdef DEAL_II_WITH_PETSC
+
+    template <typename Number>
+    Vector<Number> &
+    Vector<Number>::operator = (const PETScWrappers::MPI::Vector &petsc_vec)
+    {
+      IndexSet combined_set = partitioner->locally_owned_range();
+      combined_set.add_indices(partitioner->ghost_indices());
+      ReadWriteVector<Number> rw_vector(combined_set);
+      rw_vector.import(petsc_vec, VectorOperation::insert);
+      import(rw_vector, VectorOperation::insert);
+
+      if (vector_is_ghosted || petsc_vec.has_ghost_elements())
+        update_ghost_values();
+
+      return *this;
+    }
+
+#endif
+
+
+
+#ifdef DEAL_II_WITH_TRILINOS
+
+    template <typename Number>
+    Vector<Number> &
+    Vector<Number>::operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec)
+    {
+      IndexSet combined_set = partitioner->locally_owned_range();
+      combined_set.add_indices(partitioner->ghost_indices());
+      ReadWriteVector<Number> rw_vector(combined_set);
+      rw_vector.import(trilinos_vec, VectorOperation::insert);
+      import(rw_vector, VectorOperation::insert);
+
+      if (vector_is_ghosted || trilinos_vec.has_ghost_elements())
+        update_ghost_values();
+
+      return *this;
+    }
+
+#endif
+
+
+
     template <typename Number>
     void
     Vector<Number>::compress (::dealii::VectorOperation::values operation)
@@ -576,10 +622,10 @@
           for ( ; my_imports!=part.import_indices().end(); ++my_imports)
             for (unsigned int j=my_imports->first; j<my_imports->second;
                  j++, read_position++)
-              Assert(*read_position == 0. ||
+              Assert(*read_position == Number() ||
                      std::abs(local_element(j) - *read_position) <=
                      std::abs(local_element(j)) * 1000. *
-                     std::numeric_limits<double>::epsilon(),
+                     std::numeric_limits<real_type>::epsilon(),
                      ExcNonMatchingElements(*read_position, local_element(j),
                                             part.this_mpi_process()));
           AssertDimension(read_position-import_data,part.n_import_indices());
@@ -738,14 +784,15 @@
           comm_pattern =
             std_cxx11::dynamic_pointer_cast<const Utilities::MPI::Partitioner> (communication_pattern);
           AssertThrow(comm_pattern != NULL,
-                      ExcMessage(std::string("The communication pattern is not of type ") +
+                      ExcMessage("The communication pattern is not of type "
                                  "Utilities::MPI::Partitioner."));
         }
-      LinearAlgebra::parallel::Vector<Number> tmp_vector(comm_pattern);
+      Vector<Number> tmp_vector(comm_pattern);

       // fill entries from ReadWriteVector into the distributed vector,
       // including ghost entries. this is not really efficient right now
-      // because indices are translated twice, once for
+      // because indices are translated twice, once by nth_index_in_set(i) and
+      // once for operator() of tmp_vector
       const IndexSet &v_stored = V.get_stored_elements();
       for (size_type i=0; i<v_stored.n_elements(); ++i)
         tmp_vector(v_stored.nth_index_in_set(i)) = V.local_element(i);

@@ ... @@
     template <typename Number>
     Vector<Number> &
     Vector<Number>::operator /= (const Number factor)
     {
-      operator *= (1./factor);
+      operator *= (static_cast<Number>(1.)/factor);
       return *this;
     }

diff --git a/include/deal.II/lac/parallel_vector.h b/include/deal.II/lac/parallel_vector.h
index 8cb7113f42..146e35085d 100644
--- a/include/deal.II/lac/parallel_vector.h
+++ b/include/deal.II/lac/parallel_vector.h
@@ -139,7 +139,7 @@ namespace parallel
      *
      * @author Katharina Kormann, Martin Kronbichler, 2010, 2011
      */
-    using LinearAlgebra::parallel::Vector;
+    using LinearAlgebra::distributed::Vector;
   }
 }

diff --git a/include/deal.II/lac/precondition.h b/include/deal.II/lac/precondition.h
index b633050b42..1439a66ae4 100644
--- a/include/deal.II/lac/precondition.h
+++ b/include/deal.II/lac/precondition.h
@@ -1021,7 +1021,7 @@
 template <typename VectorType>
 inline void
 PreconditionIdentity::vmult_add (VectorType &dst, const VectorType &src) const
 {
-  dst.add(src);
+  dst += src;
 }


@@ -1030,7 +1030,7 @@
 template <typename VectorType>
 inline void
 PreconditionIdentity::Tvmult_add (VectorType &dst, const VectorType &src) const
 {
-  dst.add(src);
+  dst += src;
 }

 inline PreconditionIdentity::size_type

diff --git a/include/deal.II/lac/read_write_vector.h b/include/deal.II/lac/read_write_vector.h
index d42be66c2f..51746c395a 100644
--- a/include/deal.II/lac/read_write_vector.h
+++ b/include/deal.II/lac/read_write_vector.h
@@ -39,6 +39,10 @@ DEAL_II_NAMESPACE_OPEN
 namespace LinearAlgebra
 {
   class CommunicationPatternBase;
+  namespace distributed
+  {
+    template <typename Number> class Vector;
+  }
 }

 #ifdef DEAL_II_WITH_PETSC
@@ -246,6 +250,19 @@ namespace LinearAlgebra
      */
     ReadWriteVector<Number> &operator = (const Number s);

+    /**
+     * Imports all the elements present in the vector's IndexSet from the
+     * input vector @p vec. VectorOperation::values @p operation
+     * is used to decide if the elements in @p vec should be added to the
+     * current vector or replace the current elements. The last parameter
+     * can be used if the same communication pattern is used multiple times,
+     * which can improve performance.
+     */
+    void import(const distributed::Vector<Number> &vec,
+                VectorOperation::values operation,
+                std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern =
+                  std_cxx11::shared_ptr<const CommunicationPatternBase> ());
+
 #ifdef DEAL_II_WITH_PETSC
     /**
      * Imports all the elements present in the vector's IndexSet from the input

diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h
index 11d5dfbe55..3a745de513 100644
--- a/include/deal.II/lac/read_write_vector.templates.h
+++ b/include/deal.II/lac/read_write_vector.templates.h
@@ -20,6 +20,8 @@
 #include
 #include
 #include
+#include
+#include

 #ifdef DEAL_II_WITH_PETSC
 #  include
@@ -188,6 +190,45 @@ namespace LinearAlgebra



+  template <typename Number>
+  void
+  ReadWriteVector<Number>::import(const distributed::Vector<Number> &vec,
+                                  VectorOperation::values operation,
+                                  std_cxx11::shared_ptr<const CommunicationPatternBase> communication_pattern)
+  {
+    // If no communication pattern is given, create one. Otherwise, use the
+    // given one.
+    std_cxx11::shared_ptr<const Utilities::MPI::Partitioner> comm_pattern;
+    if (communication_pattern.get() == NULL)
+      {
+        comm_pattern.reset(new Utilities::MPI::Partitioner(vec.locally_owned_elements(),
+                                                           get_stored_elements(),
+                                                           vec.get_mpi_communicator()));
+      }
+    else
+      {
+        comm_pattern =
+          std_cxx11::dynamic_pointer_cast<const Utilities::MPI::Partitioner> (communication_pattern);
+        AssertThrow(comm_pattern != NULL,
+                    ExcMessage("The communication pattern is not of type "
+                               "Utilities::MPI::Partitioner."));
+      }
+    distributed::Vector<Number> tmp_vector(comm_pattern);
+
+    std::copy(vec.begin(), vec.end(), tmp_vector.begin());
+    tmp_vector.update_ghost_values();
+
+    const IndexSet &stored = get_stored_elements();
+    if (operation == VectorOperation::add)
+      for (size_type i=0; i<stored.n_elements(); ++i)
+        local_element(i) += tmp_vector(stored.nth_index_in_set(i));
+    else
+      for (size_type i=0; i<stored.n_elements(); ++i)
+        local_element(i) = tmp_vector(stored.nth_index_in_set(i));
+  }

diff --git a/include/deal.II/matrix_free/fe_evaluation.h b/include/deal.II/matrix_free/fe_evaluation.h
--- a/include/deal.II/matrix_free/fe_evaluation.h
+++ b/include/deal.II/matrix_free/fe_evaluation.h
@@ ... @@
 namespace LinearAlgebra
 {
-  namespace parallel
+  namespace distributed
   {
     template <typename Number> class Vector;
   }
 }
@@ -2117,8 +2117,8 @@
   template <typename Number>
   inline
   Number &
-  vector_access (LinearAlgebra::parallel::Vector<Number> &vec,
-                 const unsigned int entry)
+  vector_access (LinearAlgebra::distributed::Vector<Number> &vec,
+                 const unsigned int entry)
   {
     return vec.local_element(entry);
   }
@@ -2131,8 +2131,8 @@
   template <typename Number>
   inline
   Number
-  vector_access (const LinearAlgebra::parallel::Vector<Number> &vec,
-                 const unsigned int entry)
+  vector_access (const LinearAlgebra::distributed::Vector<Number> &vec,
+                 const unsigned int entry)
   {
     return vec.local_element(entry);
   }
@@ -2140,7 +2140,8 @@


   // this is to make sure that the parallel partitioning in the
-  // parallel::distributed::Vector is really the same as stored in MatrixFree
+  // LinearAlgebra::distributed::Vector is really the same as stored in
+  // MatrixFree
   template <typename VectorType>
   inline
   void check_vector_compatibility (const VectorType &vec,
@@ -2155,8 +2156,8 @@
   template <typename Number>
   inline
-  void check_vector_compatibility (const LinearAlgebra::parallel::Vector<Number> &vec,
-                                   const internal::MatrixFreeFunctions::DoFInfo &dof_info)
+  void check_vector_compatibility (const LinearAlgebra::distributed::Vector<Number> &vec,
+                                   const internal::MatrixFreeFunctions::DoFInfo &dof_info)
   {
     Assert (vec.partitioners_are_compatible(*dof_info.vector_partitioner),
             ExcMessage("The parallel layout of the given vector is not "

diff --git a/source/lac/la_parallel_vector.cc b/source/lac/la_parallel_vector.cc
index f62ac75049..e347761296 100644
--- a/source/lac/la_parallel_vector.cc
+++ b/source/lac/la_parallel_vector.cc
@@ -27,7 +27,7 @@ DEAL_II_NAMESPACE_OPEN

 namespace LinearAlgebra
 {
-  namespace parallel
+  namespace distributed
   {
 #define TEMPL_COPY_CONSTRUCTOR(S1,S2) \
   template Vector<S1>& Vector<S1>::operator= (const Vector<S2> &)

diff --git a/source/lac/la_parallel_vector.inst.in b/source/lac/la_parallel_vector.inst.in
index 99b7760bd4..e06292f4e9 100644
--- a/source/lac/la_parallel_vector.inst.in
+++ b/source/lac/la_parallel_vector.inst.in
@@ -19,7 +19,7 @@ for (SCALAR : REAL_SCALARS)
 {
     namespace LinearAlgebra
     \{
-      namespace parallel
+      namespace distributed
       \{
         template class Vector<SCALAR>;
       \}
@@ -30,7 +30,30 @@ for (S1, S2 : REAL_SCALARS)
 {
     namespace LinearAlgebra
     \{
-      namespace parallel
+      namespace distributed
       \{
         template void Vector<S1>::reinit (const Vector<S2>&,
                                           const bool);
       \}
     \}
 }
+
+for (SCALAR : COMPLEX_SCALARS)
+{
+    namespace LinearAlgebra
+    \{
+      namespace distributed
+      \{
+        template class Vector<SCALAR>;
+      \}
+    \}
+}
+
+for (S1, S2 : COMPLEX_SCALARS)
+{
+    namespace LinearAlgebra
+    \{
+      namespace distributed
+      \{
+        template void Vector<S1>::reinit (const Vector<S2>&,
+                                          const bool);
+      \}
+    \}
+}
-- 
2.39.5
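
Usage sketch (an editorial illustration, not part of the patch): the deprecated operator= added above, next to the ReadWriteVector route its documentation recommends. The function name copy_from_petsc, the choice of double as value type, and the exact include paths are assumptions; the snippet presumes deal.II was configured with PETSc and that vec has already been reinit()ed to the matching parallel layout.

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/read_write_vector.h>

using namespace dealii;

void copy_from_petsc (const PETScWrappers::MPI::Vector           &petsc_vec,
                      LinearAlgebra::distributed::Vector<double> &vec)
{
  // Deprecated shortcut introduced by this patch; layouts must already match.
  vec = petsc_vec;

  // Recommended route: read the PETSc data into a ReadWriteVector on the
  // locally owned range, then import it into the distributed vector.
  LinearAlgebra::ReadWriteVector<double> rw_vector (vec.locally_owned_elements());
  rw_vector.import (petsc_vec, VectorOperation::insert);
  vec.import (rw_vector, VectorOperation::insert);
}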
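
The optional last argument of ReadWriteVector::import exists so that the communication pattern can be built once and reused across repeated imports. A sketch of that use, under the same caveats: gather_repeatedly, wanted_elements, mpi_communicator and the loop count are placeholders, and a Utilities::MPI::Partitioner serves as the concrete communication pattern, which is exactly what the implementation above casts to.

#include <deal.II/base/index_set.h>
#include <deal.II/base/partitioner.h>
#include <deal.II/base/std_cxx11/shared_ptr.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/read_write_vector.h>

using namespace dealii;

void gather_repeatedly (const LinearAlgebra::distributed::Vector<double> &vec,
                        const IndexSet                                    &wanted_elements,
                        const MPI_Comm                                     mpi_communicator)
{
  LinearAlgebra::ReadWriteVector<double> rw_vector (wanted_elements);

  // Set up the owned/requested partitioning once ...
  std_cxx11::shared_ptr<const Utilities::MPI::Partitioner> partitioner
  (new Utilities::MPI::Partitioner (vec.locally_owned_elements(),
                                    wanted_elements,
                                    mpi_communicator));

  // ... and reuse it, so import() does not have to recompute the
  // communication setup in every iteration.
  for (unsigned int step = 0; step < 10; ++step)
    rw_vector.import (vec, VectorOperation::insert, partitioner);
}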
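
The COMPLEX_SCALARS instantiations and the static_cast<Number>(1.) in operator/= belong together: without the cast, 1./factor divides a plain double by the vector's value type, which does not compile for every complex Number. A minimal sketch, assuming the complex instantiations above are part of the build (scale_by_half is a hypothetical name):

#include <complex>

#include <deal.II/lac/la_parallel_vector.h>

using namespace dealii;

// Halve a complex-valued parallel vector; exercises the operator/= changed above.
void scale_by_half (LinearAlgebra::distributed::Vector<std::complex<double> > &v)
{
  v /= std::complex<double> (2., 0.);
}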