From: bangerth
Date: Thu, 30 May 2013 22:29:18 +0000 (+0000)
Subject: Check in a revision of the ConstraintMatrix::distribute patch.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6b3fb62e26a8753131f8b40c5d809a30e1438393;p=dealii-svn.git

Check in a revision of the ConstraintMatrix::distribute patch. I'm not sure
it actually works, but the network here is going to shut down in a few
minutes and I'd like to have it sent to the tester. Will re-evaluate later
today.

git-svn-id: https://svn.dealii.org/trunk@29674 0785d39b-7218-0410-832d-ea1e28bc413d
---
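For context (not part of this commit): in user code, distribute() is the
final fix-up step after a linear solve, setting each constrained degree of
freedom from the values of the DoFs it is constrained against. A minimal
sketch of that call sequence, assuming the linear system is assembled and
solved elsewhere:

    #include <deal.II/lac/constraint_matrix.h>
    #include <deal.II/lac/vector.h>

    void solve_and_distribute (dealii::ConstraintMatrix &constraints,
                               dealii::Vector<double>   &solution)
    {
      // the constraints must be close()d before distribute() may be called
      constraints.close ();

      // ... solve the linear system into 'solution' ...

      // constrained entries are not determined by the solver; overwrite
      // them from their constraining DoFs (plus inhomogeneities)
      constraints.distribute (solution);
    }

The patch below reworks exactly this last call for vectors whose elements
are distributed across processors.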
diff --git a/deal.II/include/deal.II/lac/constraint_matrix.h b/deal.II/include/deal.II/lac/constraint_matrix.h
index 40d824c3d5..ce286f57c6 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.h
@@ -1507,6 +1507,24 @@ public:
                   << " should not be stored by this object, but a constraint "
                   << "is being added.");

+  /**
+   * Exception
+   *
+   * @ingroup Exceptions
+   */
+  DeclException2 (ExcIncorrectConstraint,
+                  int, int,
+                  << "While distributing the constraint for DoF "
+                  << arg1 << ", it turns out that one of the processors "
+                  << "who own the " << arg2
+                  << " degrees of freedom that x_" << arg1
+                  << " is constrained against does not know about "
+                  << "the constraint on x_" << arg1
+                  << ". Did you not initialize the ConstraintMatrix "
+                  << "with the appropriate locally_relevant set so "
+                  << "that every processor who owns a DoF that constrains "
+                  << "another DoF also knows about this constraint?");
+
 private:

   /**
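The question at the end of this new error message corresponds to the usual
set-up pattern for distributed computations. A sketch (not part of this
patch; 'dof_handler' is assumed to be a parallel DoFHandler built elsewhere)
of initializing the ConstraintMatrix with the locally relevant index set so
that every processor also stores constraints on DoFs it does not own:

    #include <deal.II/base/index_set.h>
    #include <deal.II/dofs/dof_tools.h>
    #include <deal.II/lac/constraint_matrix.h>

    IndexSet locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs (dof_handler, locally_relevant_dofs);

    // tell the ConstraintMatrix which constraints it needs to store, namely
    // those on all locally relevant (not just locally owned) DoFs
    ConstraintMatrix constraints (locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints (dof_handler, constraints);
    constraints.close ();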
diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
index af2b713a70..a1c17c9538 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
@@ -24,6 +24,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include

@@ -973,27 +976,205 @@ ConstraintMatrix::distribute (const VectorType &condensed,
 }


+namespace internal
+{
+  namespace
+  {
+    // create an output vector that consists of the input vector's locally
+    // owned elements plus some ghost elements that need to be imported
+    // from elsewhere
+    //
+    // this is an operation that is different for all vector types and so
+    // we need a few overloads
+#ifdef DEAL_II_WITH_TRILINOS
+    void
+    import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec,
+                                       const IndexSet &/*locally_owned_elements*/,
+                                       const IndexSet                      &needed_elements,
+                                       TrilinosWrappers::MPI::Vector       &output)
+    {
+      output.reinit (needed_elements,
+                     dynamic_cast<const Epetra_MpiComm *>(&vec.vector_partitioner().Comm())->GetMpiComm());
+      output = vec;
+    }

-template <class VectorType>
+
+
+    void
+    import_vector_with_ghost_elements (const TrilinosWrappers::Vector &vec,
+                                       const IndexSet &,
+                                       const IndexSet &,
+                                       TrilinosWrappers::Vector       &output)
+    {
+      output.reinit (vec.size());
+      output = vec;
+    }
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+    void
+    import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
+                                       const IndexSet                   &locally_owned_elements,
+                                       const IndexSet                   &needed_elements,
+                                       PETScWrappers::MPI::Vector       &output)
+    {
+      output.reinit (vec.get_mpi_communicator(), locally_owned_elements, needed_elements);
+      output = vec;
+    }
+
+
+
+    void
+    import_vector_with_ghost_elements (const PETScWrappers::Vector &vec,
+                                       const IndexSet &,
+                                       const IndexSet &,
+                                       PETScWrappers::Vector       &output)
+    {
+      output.reinit (vec.size());
+      output = vec;
+    }
+#endif
+
+    template <typename Number>
+    void
+    import_vector_with_ghost_elements (const parallel::distributed::Vector<Number> &vec,
+                                       const IndexSet                              &locally_owned_elements,
+                                       const IndexSet                              &needed_elements,
+                                       parallel::distributed::Vector<Number>       &output)
+    {
+      output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
+      output = vec;
+    }
+
+
+    // this fallback for plain deal.II vectors is never meant to be called;
+    // sequential vectors take the simple branch in distribute() below
+    template <typename Number>
+    void
+    import_vector_with_ghost_elements (const dealii::Vector<Number> &vec,
+                                       const IndexSet               &locally_owned_elements,
+                                       const IndexSet               &needed_elements,
+                                       dealii::Vector<Number>       &output)
+    {
+      Assert (false, ExcMessage ("We shouldn't even get here!"));
+    }
+
+
+    // for block vectors, simply dispatch to the individual blocks
+    template <class VectorType>
+    void
+    import_vector_with_ghost_elements (const BlockVectorBase<VectorType> &vec,
+                                       const IndexSet                    &locally_owned_elements,
+                                       const IndexSet                    &needed_elements,
+                                       BlockVectorBase<VectorType>       &output)
+    {
+      types::global_dof_index block_start = 0;
+      for (unsigned int b=0; b<vec.n_blocks(); ++b)
+        {
+          import_vector_with_ghost_elements
+            (vec.block(b),
+             locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
+             needed_elements.get_view (block_start, block_start+vec.block(b).size()),
+             output.block(b));
+          block_start += vec.block(b).size();
+        }
+    }
+  }
+}
+
+
+template <class VectorType>
 void
 ConstraintMatrix::distribute (VectorType &vec) const
 {
-  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sorted==true, ExcMatrixIsClosed());
+
+  // if the vector type supports parallel storage and if the vector actually
+  // does store only part of the vector, distributing is slightly more
+  // complicated. we can skip the complicated part if the local processor
+  // stores the *entire* vector and pretend that this is a sequential
+  // vector, but we need to pay attention that in that case the other
+  // processors don't actually do anything (in particular that they do not
+  // call compress because the processor that owns everything doesn't do so
+  // either); this is the first if case here, the second is for the
+  // complicated case, the last else is for the simple case (sequential
+  // vector or distributed vector where the current processor stores
+  // everything)
+  if ((vec.supports_distributed_data == true)
+      &&
+      (vec.locally_owned_elements().n_elements() == 0))
+    {
+      // do nothing, in particular don't call compress()
+    }
+  else if ((vec.supports_distributed_data == true)
+           &&
+           (vec.locally_owned_elements() != complete_index_set(vec.size())))
+    {
+      // This processor owns only part of the vector. One may think that
+      // every processor should be able to simply communicate those elements
+      // it owns and for which it knows that they act as sources to
+      // constrained DoFs to the owner of these DoFs. This would lead to a
+      // scheme where all we need to do is to add some local elements to
+      // (possibly non-local) ones and then call compress().
+      //
+      // Alas, this scheme does not work as evidenced by the disaster of
+      // bug #51, see http://code.google.com/p/dealii/issues/detail?id=51
+      // and the reversion of one attempt that implemented this in r29662.
+      // Rather, we need to get a vector that has all the *sources* of the
+      // constraints we own locally, possibly as ghost vector elements,
+      // then read from them, and finally throw away the ghosted vector.
+      // Implement this in the following.
+      const IndexSet vec_owned_elements = vec.locally_owned_elements();
+      IndexSet needed_elements = vec_owned_elements;
+
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->line))
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            needed_elements.add_index (it->entries[i].first);
+
+      VectorType ghosted_vector;
+      internal::import_vector_with_ghost_elements (vec,
+                                                   vec_owned_elements, needed_elements,
+                                                   ghosted_vector);
+
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->line))
+          {
+            typename VectorType::value_type
+            new_value = it->inhomogeneity;
+            for (unsigned int i=0; i<it->entries.size(); ++i)
+              new_value += (static_cast<typename VectorType::value_type>
+                            (ghosted_vector(it->entries[i].first)) *
+                            it->entries[i].second);
+            Assert (numbers::is_finite(new_value), ExcNumberNotFinite());
+            vec(it->line) = new_value;
+          }

-  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
-  for (; next_constraint != lines.end(); ++next_constraint)
+      // now compress to communicate the entries we wrote into elements we
+      // do not own to their owners
+      //
+      // this shouldn't be strictly necessary but it probably doesn't hurt
+      // either
+      vec.compress (VectorOperation::insert);
+    }
+  else
+    // purely sequential vector (either because the type doesn't support
+    // anything else or because it's completely stored locally)
     {
-      // fill entry in line
-      // next_constraint.line by adding the
-      // different contributions
-      typename VectorType::value_type
-      new_value = next_constraint->inhomogeneity;
-      for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
-        new_value += (static_cast<typename VectorType::value_type>
-                      (vec(next_constraint->entries[i].first)) *
-                      next_constraint->entries[i].second);
-      Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
-      vec(next_constraint->line) = new_value;
+      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+      for (; next_constraint != lines.end(); ++next_constraint)
+        {
+          // fill entry in line next_constraint.line by adding the
+          // different contributions
+          typename VectorType::value_type
+          new_value = next_constraint->inhomogeneity;
+          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+            new_value += (static_cast<typename VectorType::value_type>
+                          (vec(next_constraint->entries[i].first)) *
+                          next_constraint->entries[i].second);
+          Assert (numbers::is_finite(new_value), ExcNumberNotFinite());
+          vec(next_constraint->line) = new_value;
+        }
     }
 }
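The heart of the new code path is a three-step pattern: collect the indices
of all constraint sources into an IndexSet, import them as ghost elements,
then read from the ghosted vector. The same pattern written out by hand for
a PETSc vector (a sketch only; 'vec' and its 'locally_owned' IndexSet are
assumed to exist, and DoF index 42 stands in for a remote constraint
source):

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/petsc_parallel_vector.h>

    // locally owned elements plus whatever we need to read from
    IndexSet needed_elements = locally_owned;
    needed_elements.add_index (42);   // hypothetical remote DoF index

    // build a ghosted copy and import the needed elements
    PETScWrappers::MPI::Vector ghosted;
    ghosted.reinit (vec.get_mpi_communicator(), locally_owned, needed_elements);
    ghosted = vec;
    ghosted.update_ghost_values ();

    // element 42 can now be read locally even if another process owns it
    const double source_value = ghosted (42);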
diff --git a/deal.II/source/lac/constraint_matrix.cc b/deal.II/source/lac/constraint_matrix.cc
index 306691df8b..3e7c63c384 100644
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 // $Id$
 //
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 by the deal.II authors
 //
 // This file is subject to QPL and may not be distributed
 // without copyright and license information. Please refer
@@ -1518,268 +1518,6 @@ void ConstraintMatrix::condense (BlockCompressedSimpleSparsityPattern &sparsity)



-#ifdef DEAL_II_WITH_TRILINOS
-
-// this is a specialization for a parallel (non-block) Trilinos vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  //TODO: not implemented yet, we need to fix LocalRange() first to only
-  //include "owned" indices. For this we need to keep track of the owned
-  //indices, because Trilinos doesn't. Use same constructor interface as in
-  //PETSc with two IndexSets!
-  AssertThrow (vec.vector_partitioner().IsOneToOne(),
-               ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-  ConstraintLine index_comparison;
-  index_comparison.line = vec.local_range().first;
-  const constraint_iterator begin_my_constraints =
-    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-  index_comparison.line = vec.local_range().second;
-  const constraint_iterator end_my_constraints
-    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-  // Here we search all the indices that we need to have read-access to -
-  // the local nodes and all the nodes that the constraints indicate.
-  IndexSet my_indices (vec.size());
-  {
-    const std::pair<types::global_dof_index, types::global_dof_index>
-    local_range = vec.local_range();
-
-    my_indices.add_range (local_range.first, local_range.second);
-
-    std::set<types::global_dof_index> individual_indices;
-    for (constraint_iterator it = begin_my_constraints;
-         it != end_my_constraints; ++it)
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        if ((it->entries[i].first < local_range.first)
-            ||
-            (it->entries[i].first >= local_range.second))
-          individual_indices.insert (it->entries[i].first);
-
-    my_indices.add_indices (individual_indices.begin(),
-                            individual_indices.end());
-  }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,false,true);
-
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    {
-      // fill entry in line next_constraint.line by adding the different
-      // contributions
-      double new_value = it->inhomogeneity;
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        new_value += (vec_distribute(it->entries[i].first) *
-                      it->entries[i].second);
-      vec(it->line) = new_value;
-    }
-
-  // some processes might not apply constraints, so we need to explicitly
-  // state, that the others are doing an insert here:
-  vec.compress (::dealii::VectorOperation::insert);
-}
-
-
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  IndexSet my_indices (vec.size());
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      // Here we search all the indices that we need to have read-access to
-      // - the local nodes and all the nodes that the constraints indicate.
-      // No caching done yet. would need some more clever data structures
-      // for doing that.
-      const std::pair<types::global_dof_index, types::global_dof_index>
-      local_range = vec.block(block).local_range();
-
-      my_indices.add_range (local_range.first, local_range.second);
-
-      std::set<types::global_dof_index> individual_indices;
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        for (unsigned int i=0; i<it->entries.size(); ++i)
-          if ((it->entries[i].first < local_range.first)
-              ||
-              (it->entries[i].first >= local_range.second))
-            individual_indices.insert (it->entries[i].first);
-
-      my_indices.add_indices (individual_indices.begin(),
-                              individual_indices.end());
-    }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,true);
-
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        {
-          // fill entry in line next_constraint.line by adding the
-          // different contributions
-          double new_value = it->inhomogeneity;
-          for (unsigned int i=0; i<it->entries.size(); ++i)
-            new_value += (vec_distribute(it->entries[i].first) *
-                          it->entries[i].second);
-          vec(it->line) = new_value;
-        }
-      vec.block(block).compress(::dealii::VectorOperation::insert);
-    }
-}
-
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-
-// this is a specialization for a parallel (non-block) PETSc vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  Assert (vec.has_ghost_elements() == false,
-          ExcMessage ("This operation can only be performed on vectors without ghost elements."));
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-  ConstraintLine index_comparison;
-  index_comparison.line = vec.local_range().first;
-  const constraint_iterator begin_my_constraints =
-    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-  index_comparison.line = vec.local_range().second;
-  const constraint_iterator end_my_constraints
-    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-  // all indices we need to read from
-  IndexSet my_indices (vec.size());
-
-  const std::pair<types::global_dof_index, types::global_dof_index>
-  local_range = vec.local_range();
-
-  my_indices.add_range (local_range.first, local_range.second);
-
-  std::set<types::global_dof_index> individual_indices;
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    for (unsigned int i=0; i<it->entries.size(); ++i)
-      if ((it->entries[i].first < local_range.first)
-          ||
-          (it->entries[i].first >= local_range.second))
-        individual_indices.insert (it->entries[i].first);
-
-  my_indices.add_indices (individual_indices.begin(),
-                          individual_indices.end());
-
-  IndexSet local_range_is (vec.size());
-  local_range_is.add_range(local_range.first, local_range.second);
-
-
-  // create a vector and import those indices
-  PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
-                                        local_range_is,
-                                        my_indices);
-  ghost_vec = vec;
-  ghost_vec.update_ghost_values();
-
-  // finally do the distribution on own constraints
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    {
-      // fill entry in line next_constraint.line by adding the different
-      // contributions
-      PetscScalar new_value = it->inhomogeneity;
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
-                      it->entries[i].second);
-      vec(it->line) = new_value;
-    }
-
-  vec.compress (VectorOperation::insert);
-}
-
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  AssertThrow (false, ExcNotImplemented());
-}
-
-#endif
-
-
-
 bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
 {
   if (is_constrained(index) == false)
@@ -1921,8 +1659,7 @@ ConstraintMatrix::memory_consumption () const
                                     VectorType       &,                    \
                                     const FullMatrix<double> &) const;     \
   template void ConstraintMatrix::distribute(const VectorType &condensed, \
-                                             VectorType &uncondensed) const;\
-  template void ConstraintMatrix::distribute(VectorType &vec) const
+                                             VectorType &uncondensed) const

 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
   template void ConstraintMatrix:: \
   distribute_local_to_global<VectorType > (const Vector<double>                     &, \
                                            const std::vector<types::global_dof_index> &, \
                                            VectorType                               &, \
                                            const FullMatrix<double>                 &) const


-// TODO: Can PETSc really do all the operations required by the above
-// condense/distribute function etc also on distributed vectors? Trilinos
-// can't do that - we have to rewrite those functions by hand if we want to
-// use them. The key is to use local ranges etc., which still needs to be
-// implemented.
 #ifdef DEAL_II_WITH_PETSC
 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
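Everything deleted above did, by hand and once per backend, what the new
import_vector_with_ghost_elements() helper now does generically: build the
set of needed indices, create a ghosted vector, import, read. For
comparison, the Trilinos equivalent of the deleted gather code shrinks to a
few lines under the new scheme (a sketch; 'vec' and the 'needed_elements'
IndexSet are assumed to exist, and an MPI build is assumed so that
MPI_COMM_WORLD is available):

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/trilinos_vector.h>

    // make a copy of 'vec' that also stores the needed ghost elements;
    // the assignment performs the Epetra import
    TrilinosWrappers::MPI::Vector ghosted;
    ghosted.reinit (needed_elements, MPI_COMM_WORLD);
    ghosted = vec;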
diff --git a/deal.II/source/lac/constraint_matrix.inst.in b/deal.II/source/lac/constraint_matrix.inst.in
index 400f44cd4f..1a69fff4a2 100644
--- a/deal.II/source/lac/constraint_matrix.inst.in
+++ b/deal.II/source/lac/constraint_matrix.inst.in
@@ -5,7 +5,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<T<S> > (
       const Vector<S>&, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
-    template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
     template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
   }

@@ -17,7 +16,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
       const Vector<S>&, const std::vector<types::global_dof_index> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
-    template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
     template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
   }

@@ -29,7 +27,6 @@ for (V: EXTERNAL_SEQUENTIAL_VECTORS)
     template void ConstraintMatrix::distribute_local_to_global (
       const Vector<double>&, const std::vector<types::global_dof_index> &, V&, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute(const V&, V&) const;
-    template void ConstraintMatrix::distribute(V&) const;
     template void ConstraintMatrix::set_zero(V&) const;
   }

@@ -57,3 +54,9 @@ for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
     template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
       const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
   }
+
+
+for (Vec : SERIAL_VECTORS)
+  {
+    template void ConstraintMatrix::distribute<Vec>(Vec &) const;
+  }
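The net effect of the instantiation changes: the one-argument distribute()
is no longer instantiated separately in each of the loops above, but once
per vector type through the new SERIAL_VECTORS loop, which (despite the
name) also covers the parallel vector types. The generated instantiations
then look roughly like this (illustrative only; the exact list depends on
the configured SERIAL_VECTORS contents):

    template void ConstraintMatrix::distribute<Vector<double> >(Vector<double> &) const;
    template void ConstraintMatrix::distribute<parallel::distributed::Vector<double> >(parallel::distributed::Vector<double> &) const;
    template void ConstraintMatrix::distribute<TrilinosWrappers::MPI::Vector>(TrilinosWrappers::MPI::Vector &) const;

With that, every vector type funnels through the single generic
implementation in constraint_matrix.templates.h shown earlier, which is why
the hand-written Trilinos and PETSc specializations could be removed.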