From: bangerth
Date: Tue, 28 May 2013 20:00:27 +0000 (+0000)
Subject: Revert changes to ConstraintMatrix::distribute, see bug #51.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fbc01bacfc1a9e8f31695306cfecdabdaa593142;p=dealii-svn.git

Revert changes to ConstraintMatrix::distribute, see bug #51.

git-svn-id: https://svn.dealii.org/trunk@29662 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/constraint_matrix.h b/deal.II/include/deal.II/lac/constraint_matrix.h
index ce286f57c6..40d824c3d5 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.h
@@ -1507,24 +1507,6 @@ public:
                   << " should not be stored by this object, but a constraint "
                   << "is being added.");
 
-  /**
-   * Exception
-   *
-   * @ingroup Exceptions
-   */
-  DeclException2 (ExcIncorrectConstraint,
-                  int, int,
-                  << "While distributing the constraint for DoF "
-                  << arg1 << ", it turns out that one of the processors "
-                  << "who own the " << arg2
-                  << " degrees of freedom that x_" << arg1
-                  << " is constrained against does not know about "
-                  << "the constraint on x_" << arg1
-                  << ". Did you not initialize the ConstraintMatrix "
-                  << "with the appropriate locally_relevant set so "
-                  << "that every processor who owns a DoF that constrains "
-                  << "another DoF also knows about this constraint?");
-
 private:
 
   /**
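The exception removed above was raised by a debug-mode consistency check that this commit also reverts (see the constraint_matrix.templates.h hunk below): every processor that owns an entry x_j of a constraint on x_i adds a one into x_i, and the owner of x_i then compares the accumulated sum with the number of entries it knows about. A minimal serial simulation of that counting idea, illustrative only, with hypothetical names (Line, constraints_consistent), not code from this commit:

#include <cstddef>
#include <utility>
#include <vector>

// Hypothetical stand-in for ConstraintLine: the constraint reads
// x_line = sum_j entries[j].second * x_{entries[j].first}.
struct Line
{
  std::size_t line;
  std::vector<std::pair<std::size_t, double> > entries;
};

// Processor p owns DoF i iff owner[i] == p, and knows only the
// constraints in known[p]. Every processor casts one vote into x_i for
// each entry x_j of a constraint on x_i that it owns; the owner of x_i
// expects exactly entries.size() votes. In the MPI code the votes
// travel via vec.compress(VectorOperation::add).
bool constraints_consistent (const std::vector<std::vector<Line> > &known,
                             const std::vector<int>                &owner,
                             const std::size_t                      n_dofs)
{
  std::vector<std::size_t> votes (n_dofs, 0);
  for (std::size_t p = 0; p < known.size(); ++p)
    for (std::size_t l = 0; l < known[p].size(); ++l)
      for (std::size_t j = 0; j < known[p][l].entries.size(); ++j)
        if (owner[known[p][l].entries[j].first] == static_cast<int>(p))
          ++votes[known[p][l].line];

  for (std::size_t p = 0; p < known.size(); ++p)
    for (std::size_t l = 0; l < known[p].size(); ++l)
      if (owner[known[p][l].line] == static_cast<int>(p)
          && votes[known[p][l].line] != known[p][l].entries.size())
        return false;  // some owner of an x_j never saw this constraint
  return true;
}

A shortfall of votes is exactly the situation the deleted message described: a processor owning one of the x_j never learned about the constraint on x_i because the ConstraintMatrix was not initialized with a sufficiently large locally relevant index set.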
diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
index 0e69fc0847..af2b713a70 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
@@ -978,112 +978,22 @@
 template <class VectorType>
 void
 ConstraintMatrix::distribute (VectorType &vec) const
 {
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  // if the vector type supports parallel storage and if the
-  // vector actually does store only part of the vector, distributing
-  // is slightly more complicated. we can skip the complicated part
-  // if the local processor stores the *entire* vector and pretend
-  // that this is a sequential vector but we need to pay attention that
-  // in that case the other processors don't actually do anything (in
-  // particular that they do not call compress because the processor
-  // that owns everything doesn't do so either); this is the first if
-  // case here, the second is for the complicated case, the last else
-  // is for the simple case (sequential vector or distributed vector
-  // where the current processor stores everything)
-  if ((vec.supports_distributed_data == true)
-      &&
-      (vec.locally_owned_elements().n_elements() == 0))
-    {
-      // do nothing, in particular don't call compress()
-    }
-  else if ((vec.supports_distributed_data == true)
-           &&
-           (vec.locally_owned_elements() != complete_index_set(vec.size())))
-    {
-      const IndexSet vec_owned_elements = vec.locally_owned_elements();
-
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-
-#ifdef DEBUG
-      {
-        // the algorithm below pushes contributions c_ij x_j to a constrained
-        // DoF x_j *from the side of the owner of x_j*, as opposed to pulling
-        // it from the owner of the target side x_i. this relies on the
-        // assumption that both target and source know about the constraint
-        // on x_i.
-        //
-        // the disadvantage is that it is incredibly difficult to find out what
-        // is happening if the assumption is not satisfied. to help debug
-        // problems of this kind, we do the following in a first step if
-        // debugging is enabled:
-        // - every processor who owns an x_j where c_ij!=0 sends a one
-        //   to the owner of x_i to add up
-        // - the owner of x_i knows how many nonzero entries exist, so can
-        //   verify that the correct number of ones has been added
-        // - if the sum is not correct, then apparently one of the owners
-        //   of the x_j's did not know to send its one and, consequently,
-        //   will also not know to send the correct c_ij*x_j later on,
-        //   leading to a bug
-        set_zero (vec);
-        for (constraint_iterator it = lines.begin();
-             it != lines.end(); ++it)
-          for (unsigned int i=0; i<it->entries.size(); ++i)
-            if (vec_owned_elements.is_element (it->entries[i].first))
-              vec(it->line) += 1;
-        vec.compress (VectorOperation::add);
-
-        for (constraint_iterator it = lines.begin();
-             it != lines.end(); ++it)
-          if (vec_owned_elements.is_element(it->line))
-            Assert (vec(it->line) == it->entries.size(),
-                    ExcIncorrectConstraint(it->line, it->entries.size()));
-      }
-#endif
+  Assert (sorted == true, ExcMatrixNotClosed());
 
-      // set the values of all constrained DoFs to zero, then add the
-      // contributions for each constrained DoF of all the ones we
-      // own locally
-      set_zero (vec);
-
-      for (constraint_iterator it = lines.begin();
-           it != lines.end(); ++it)
-        for (unsigned int i=0; i<it->entries.size(); ++i)
-          if (vec_owned_elements.is_element(it->entries[i].first))
-            vec(it->line) += it->entries[i].second *
-                             vec(it->entries[i].first);
-
-      // in a final step, add the inhomogeneities to the elements we
-      // own locally
-      for (constraint_iterator it = lines.begin();
-           it != lines.end(); ++it)
-        if (vec_owned_elements.is_element(it->line))
-          vec(it->line) += it->inhomogeneity;
-
-      // now compress to communicate the entries that we added to
-      // and that weren't to local processors to the owner
-      vec.compress (VectorOperation::add);
-    }
-  else
-    // purely sequential vector (either because the type doesn't
-    // support anything else or because it's completely stored
-    // locally
+  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+  for (; next_constraint != lines.end(); ++next_constraint)
     {
-      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
-      for (; next_constraint != lines.end(); ++next_constraint)
-        {
-          // fill entry in line
-          // next_constraint.line by adding the
-          // different contributions
-          typename VectorType::value_type
-          new_value = next_constraint->inhomogeneity;
-          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
-            new_value += (static_cast<typename VectorType::value_type>
-                          (vec(next_constraint->entries[i].first)) *
-                          next_constraint->entries[i].second);
-          Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
-          vec(next_constraint->line) = new_value;
-        }
+      // fill entry in line
+      // next_constraint.line by adding the
+      // different contributions
+      typename VectorType::value_type
+      new_value = next_constraint->inhomogeneity;
+      for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+        new_value += (static_cast<typename VectorType::value_type>
+                      (vec(next_constraint->entries[i].first)) *
+                      next_constraint->entries[i].second);
+      Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+      vec(next_constraint->line) = new_value;
    }
 }
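After the revert, distribute() is again purely sequential: it overwrites each constrained entry with x_i = inhomogeneity_i + sum_j c_ij * x_j. A self-contained sketch of that update (the Line struct is a hypothetical stand-in for ConstraintLine, not code from this commit):

#include <cassert>
#include <cmath>
#include <cstddef>
#include <utility>
#include <vector>

struct Line
{
  std::size_t line;                                      // constrained DoF i
  std::vector<std::pair<std::size_t, double> > entries;  // (j, c_ij) pairs
  double inhomogeneity;                                  // b_i
};

// What the restored loop above computes for a vector that is stored in
// its entirety on the current processor.
void distribute (const std::vector<Line> &lines, std::vector<double> &x)
{
  for (std::size_t l = 0; l < lines.size(); ++l)
    {
      double new_value = lines[l].inhomogeneity;
      for (std::size_t j = 0; j < lines[l].entries.size(); ++j)
        new_value += lines[l].entries[j].second
                     * x[lines[l].entries[j].first];
      assert (std::isfinite (new_value));   // cf. ExcNumberNotFinite
      x[lines[l].line] = new_value;
    }
}

For a typical hanging-node constraint x_2 = 0.5*x_0 + 0.5*x_1 with zero inhomogeneity, this sets x[2] to the average of x[0] and x[1].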
diff --git a/deal.II/source/lac/constraint_matrix.cc b/deal.II/source/lac/constraint_matrix.cc
index 3e7c63c384..306691df8b 100644
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 by the deal.II authors
+//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -1518,6 +1518,268 @@ void ConstraintMatrix::condense (BlockCompressedSimpleSparsityPattern &sparsity)
 
 
 
+#ifdef DEAL_II_WITH_TRILINOS
+
+// this is a specialization for a parallel (non-block) Trilinos vector. The
+// basic idea is to just work on the local range of the vector. But we need
+// access to values that the local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
+{
+  Assert (sorted==true, ExcMatrixIsClosed());
+
+  //TODO: not implemented yet, we need to fix LocalRange() first to only
+  //include "owned" indices. For this we need to keep track of the owned
+  //indices, because Trilinos doesn't. Use same constructor interface as in
+  //PETSc with two IndexSets!
+  AssertThrow (vec.vector_partitioner().IsOneToOne(),
+               ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
+
+  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+  ConstraintLine index_comparison;
+  index_comparison.line = vec.local_range().first;
+  const constraint_iterator begin_my_constraints =
+    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+  index_comparison.line = vec.local_range().second;
+  const constraint_iterator end_my_constraints
+    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+  // Here we search all the indices that we need to have read-access to -
+  // the local nodes and all the nodes that the constraints indicate.
+  IndexSet my_indices (vec.size());
+  {
+    const std::pair<unsigned int, unsigned int>
+    local_range = vec.local_range();
+
+    my_indices.add_range (local_range.first, local_range.second);
+
+    std::set<unsigned int> individual_indices;
+    for (constraint_iterator it = begin_my_constraints;
+         it != end_my_constraints; ++it)
+      for (unsigned int i=0; i<it->entries.size(); ++i)
+        if ((it->entries[i].first < local_range.first)
+            ||
+            (it->entries[i].first >= local_range.second))
+          individual_indices.insert (it->entries[i].first);
+
+    my_indices.add_indices (individual_indices.begin(),
+                            individual_indices.end());
+  }
+
+#ifdef DEAL_II_WITH_MPI
+  const Epetra_MpiComm *mpi_comm
+    = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
+
+  Assert (mpi_comm != 0, ExcInternalError());
+
+  TrilinosWrappers::MPI::Vector vec_distribute
+  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
+#else
+  TrilinosWrappers::MPI::Vector vec_distribute
+  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
+#endif
+
+  // here we import the data
+  vec_distribute.reinit(vec,false,true);
+
+  for (constraint_iterator it = begin_my_constraints;
+       it != end_my_constraints; ++it)
+    {
+      // fill entry in line next_constraint.line by adding the different
+      // contributions
+      double new_value = it->inhomogeneity;
+      for (unsigned int i=0; i<it->entries.size(); ++i)
+        new_value += (vec_distribute(it->entries[i].first) *
+                      it->entries[i].second);
+      vec(it->line) = new_value;
+    }
+
+  // some processes might not apply constraints, so we need to explicitly
+  // state, that the others are doing an insert here:
+  vec.compress (::dealii::VectorOperation::insert);
+}
+
+
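This specialization, like the block and PETSc variants that follow, first narrows lines down to the constraints whose constrained DoF lies in the vector's locally owned range. Because lines is sorted by constrained index once close() has been called (hence the sorted==true assertion), two binary searches suffice. A sketch of the idea using std::lower_bound in place of deal.II's Utilities::lower_bound, with hypothetical names:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct Line
{
  std::size_t line;   // constrained DoF; the vector is sorted by this
  bool operator< (const Line &other) const { return line < other.line; }
};

// Iterators bounding the constraints whose constrained DoF lies in the
// half-open locally owned range [range.first, range.second), just as
// the two Utilities::lower_bound calls above compute.
std::pair<std::vector<Line>::const_iterator,
          std::vector<Line>::const_iterator>
locally_owned_constraints (const std::vector<Line> &lines,
                           const std::pair<std::size_t,std::size_t> &range)
{
  Line lo, hi;
  lo.line = range.first;
  hi.line = range.second;
  return std::make_pair (std::lower_bound (lines.begin(), lines.end(), lo),
                         std::lower_bound (lines.begin(), lines.end(), hi));
}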
+template<>
+void
+ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
+{
+  Assert (sorted==true, ExcMatrixIsClosed());
+
+  IndexSet my_indices (vec.size());
+  for (unsigned int block=0; block<vec.n_blocks(); ++block)
+    {
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      ConstraintLine index_comparison;
+      index_comparison.line = vec.block(block).local_range().first
+                              +vec.get_block_indices().block_start(block);
+      const constraint_iterator begin_my_constraints =
+        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+      index_comparison.line = vec.block(block).local_range().second
+                              +vec.get_block_indices().block_start(block);
+
+      const constraint_iterator end_my_constraints
+        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+      // Here we search all the indices that we need to have read-access to
+      // - the local nodes and all the nodes that the constraints indicate.
+      // No caching done yet. would need some more clever data structures
+      // for doing that.
+      const std::pair<unsigned int, unsigned int>
+      local_range = vec.block(block).local_range();
+
+      my_indices.add_range (local_range.first, local_range.second);
+
+      std::set<unsigned int> individual_indices;
+      for (constraint_iterator it = begin_my_constraints;
+           it != end_my_constraints; ++it)
+        for (unsigned int i=0; i<it->entries.size(); ++i)
+          if ((it->entries[i].first < local_range.first)
+              ||
+              (it->entries[i].first >= local_range.second))
+            individual_indices.insert (it->entries[i].first);
+
+      my_indices.add_indices (individual_indices.begin(),
+                              individual_indices.end());
+    }
+
+#ifdef DEAL_II_WITH_MPI
+  const Epetra_MpiComm *mpi_comm
+    = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
+
+  Assert (mpi_comm != 0, ExcInternalError());
+
+  TrilinosWrappers::MPI::Vector vec_distribute
+  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
+#else
+  TrilinosWrappers::MPI::Vector vec_distribute
+  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
+#endif
+
+  // here we import the data
+  vec_distribute.reinit(vec,true);
+
+  for (unsigned int block=0; block<vec.n_blocks(); ++block)
+    {
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      ConstraintLine index_comparison;
+      index_comparison.line = vec.block(block).local_range().first
+                              +vec.get_block_indices().block_start(block);
+      const constraint_iterator begin_my_constraints =
+        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+      index_comparison.line = vec.block(block).local_range().second
+                              +vec.get_block_indices().block_start(block);
+
+      const constraint_iterator end_my_constraints
+        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+      for (constraint_iterator it = begin_my_constraints;
+           it != end_my_constraints; ++it)
+        {
+          // fill entry in line next_constraint.line by adding the
+          // different contributions
+          double new_value = it->inhomogeneity;
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            new_value += (vec_distribute(it->entries[i].first) *
+                          it->entries[i].second);
+          vec(it->line) = new_value;
+        }
+      vec.block(block).compress(::dealii::VectorOperation::insert);
+    }
+}
+
+#endif
+
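In the block variant just shown, each block's local range is given in the block's own numbering, so the code adds vec.get_block_indices().block_start(block) to obtain indices in the flat global numbering. A sketch of that arithmetic, with hypothetical names:

#include <cstddef>
#include <utility>
#include <vector>

// A DoF that is element i of block b has global index
// block_start[b] + i in the flat numbering.
std::vector<std::pair<std::size_t, std::size_t> >
global_ranges (const std::vector<std::pair<std::size_t,std::size_t> > &local_ranges,
               const std::vector<std::size_t> &block_start)
{
  std::vector<std::pair<std::size_t, std::size_t> > result;
  for (std::size_t b = 0; b < local_ranges.size(); ++b)
    result.push_back (std::make_pair (block_start[b] + local_ranges[b].first,
                                      block_start[b] + local_ranges[b].second));
  return result;
}

With two blocks of sizes 100 and 50, block_start is {0, 100}; a local range [20,30) in block 1 therefore maps to the global range [120,130).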
+#ifdef DEAL_II_WITH_PETSC
+
+// this is a specialization for a parallel (non-block) PETSc vector. The
+// basic idea is to just work on the local range of the vector. But we need
+// access to values that the local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
+{
+  Assert (sorted==true, ExcMatrixIsClosed());
+  Assert (vec.has_ghost_elements() == false,
+          ExcMessage ("This operation can only be performed on vectors without ghost elements."));
+
+  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+  ConstraintLine index_comparison;
+  index_comparison.line = vec.local_range().first;
+  const constraint_iterator begin_my_constraints =
+    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+  index_comparison.line = vec.local_range().second;
+  const constraint_iterator end_my_constraints
+    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+  // all indices we need to read from
+  IndexSet my_indices (vec.size());
+
+  const std::pair<unsigned int, unsigned int>
+  local_range = vec.local_range();
+
+  my_indices.add_range (local_range.first, local_range.second);
+
+  std::set<unsigned int> individual_indices;
+  for (constraint_iterator it = begin_my_constraints;
+       it != end_my_constraints; ++it)
+    for (unsigned int i=0; i<it->entries.size(); ++i)
+      if ((it->entries[i].first < local_range.first)
+          ||
+          (it->entries[i].first >= local_range.second))
+        individual_indices.insert (it->entries[i].first);
+
+  my_indices.add_indices (individual_indices.begin(),
+                          individual_indices.end());
+
+  IndexSet local_range_is (vec.size());
+  local_range_is.add_range(local_range.first, local_range.second);
+
+
+  // create a vector and import those indices
+  PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
+                                        local_range_is,
+                                        my_indices);
+  ghost_vec = vec;
+  ghost_vec.update_ghost_values();
+
+  // finally do the distribution on own constraints
+  for (constraint_iterator it = begin_my_constraints;
+       it != end_my_constraints; ++it)
+    {
+      // fill entry in line next_constraint.line by adding the different
+      // contributions
+      PetscScalar new_value = it->inhomogeneity;
+      for (unsigned int i=0; i<it->entries.size(); ++i)
+        new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
+                      it->entries[i].second);
+      vec(it->line) = new_value;
+    }
+
+  vec.compress (VectorOperation::insert);
+}
+
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+{
+  Assert (sorted==true, ExcMatrixIsClosed());
+  AssertThrow (false, ExcNotImplemented());
+}
+
+#endif
+
+
+
 bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
 {
   if (is_constrained(index) == false)
@@ -1659,7 +1921,8 @@ ConstraintMatrix::memory_consumption () const
                                     VectorType       &,                     \
                                     const FullMatrix<double> &) const;      \
   template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
-                                                          VectorType       &uncondensed) const
+                                                          VectorType       &uncondensed) const;\
+  template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
 
 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
   template void ConstraintMatrix:: \
   distribute_local_to_global<VectorType > (const Vector<double>            &, \
@@ -1669,6 +1932,11 @@ ConstraintMatrix::memory_consumption () const
                                            const FullMatrix<double>        &) const
 
 
+// TODO: Can PETSc really do all the operations required by the above
+// condense/distribute function etc also on distributed vectors? Trilinos
+// can't do that - we have to rewrite those functions by hand if we want to
+// use them. The key is to use local ranges etc., which still needs to be
+// implemented.
 #ifdef DEAL_II_WITH_PETSC
 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
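Both the Trilinos and PETSc specializations above gather the same read-access index set before doing any work: all locally owned indices plus every off-range index that a locally owned constraint refers to; the ghosted helper vector (vec_distribute resp. ghost_vec) is then built on that set. A sketch of just the index gathering, with hypothetical names, not code from this commit:

#include <cstddef>
#include <set>
#include <utility>
#include <vector>

struct Line
{
  std::size_t line;
  std::vector<std::pair<std::size_t, double> > entries;
};

// All indices this processor must be able to read: its own half-open
// range plus the off-range entries of its constraints (collected in a
// std::set so each ghost index appears once, mirroring the
// IndexSet::add_indices call above).
std::vector<std::size_t>
indices_to_read (const std::vector<Line> &my_lines,
                 const std::pair<std::size_t,std::size_t> &local_range)
{
  std::set<std::size_t> ghosts;
  for (std::size_t l = 0; l < my_lines.size(); ++l)
    for (std::size_t j = 0; j < my_lines[l].entries.size(); ++j)
      {
        const std::size_t idx = my_lines[l].entries[j].first;
        if (idx < local_range.first || idx >= local_range.second)
          ghosts.insert (idx);
      }

  std::vector<std::size_t> result;
  for (std::size_t i = local_range.first; i < local_range.second; ++i)
    result.push_back (i);
  result.insert (result.end(), ghosts.begin(), ghosts.end());
  return result;
}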
diff --git a/deal.II/source/lac/constraint_matrix.inst.in b/deal.II/source/lac/constraint_matrix.inst.in
index 1a69fff4a2..400f44cd4f 100644
--- a/deal.II/source/lac/constraint_matrix.inst.in
+++ b/deal.II/source/lac/constraint_matrix.inst.in
@@ -5,6 +5,7 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<T<S> > (
       const Vector<S>&, const std::vector<unsigned int> &, T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
+    template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
     template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
   }
 
@@ -16,6 +17,7 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
       const Vector<S>&, const std::vector<unsigned int> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
+    template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
     template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
   }
 
@@ -27,6 +29,7 @@ for (V: EXTERNAL_SEQUENTIAL_VECTORS)
     template void ConstraintMatrix::distribute_local_to_global<V> (
       const Vector<double>&, const std::vector<unsigned int> &, V&, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute<V>(const V&, V&) const;
+    template void ConstraintMatrix::distribute<V>(V&) const;
     template void ConstraintMatrix::set_zero<V>(V&) const;
   }
 
@@ -54,9 +57,3 @@ for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
     template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
       const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
   }
-
-
-for (Vec : SERIAL_VECTORS)
-  {
-    template void ConstraintMatrix::distribute<Vec>(Vec &) const;
-  }
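For context, the one-argument distribute() whose per-vector-type instantiations are restored here is the overload that user code calls after solving a linear system, so that constrained degrees of freedom receive the values their constraints prescribe. A minimal usage sketch (assumes a deal.II build of this vintage; the solver set-up is elided):

#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/vector.h>

// After the solve, constrained entries of 'solution' (e.g. hanging
// nodes) still hold whatever the solver left there; distribute()
// overwrites them with the constrained values.
void finish_solution (const dealii::ConstraintMatrix &constraints,
                      dealii::Vector<double>         &solution)
{
  constraints.distribute (solution);
}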