From: Bruno Turcksin
Date: Fri, 31 May 2013 14:16:07 +0000 (+0000)
Subject: Merge from mainline.
X-Git-Tag: v8.0.0~316^2~19
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c80c6ed63f13feeb9a2dd4ebd191906638af3ff8;p=dealii.git

Merge from mainline.

git-svn-id: https://svn.dealii.org/branches/branch_bigger_global_dof_indices_4@29683 0785d39b-7218-0410-832d-ea1e28bc413d
---

c80c6ed63f13feeb9a2dd4ebd191906638af3ff8

diff --cc deal.II/include/deal.II/distributed/tria.h
index ef59d652b1,18a6eed4ca..5578179d98
--- a/deal.II/include/deal.II/distributed/tria.h
+++ b/deal.II/include/deal.II/distributed/tria.h
@@@ -730,8 -730,10 +730,10 @@@ namespace paralle
        struct NumberCache
        {
          std::vector<unsigned int> n_locally_owned_active_cells;
 -        unsigned int              n_global_active_cells;
 +        types::global_dof_index   n_global_active_cells;
          unsigned int              n_global_levels;
+ 
+         NumberCache();
        };
  
        NumberCache number_cache;
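Background on the tria.h hunk above: n_global_active_cells is summed over all
MPI ranks, so on the very large meshes this branch targets it can exceed the
range of a 32-bit unsigned int, while the per-rank counts in
n_locally_owned_active_cells cannot. A minimal standalone sketch of the
overflow being avoided (the counts below are made up; this is illustrative
code, not deal.II's):

    #include <cstdint>
    #include <iostream>

    int main()
    {
      const std::uint64_t cells_per_rank = 70000000ULL; // hypothetical
      const std::uint64_t n_ranks        = 100;         // hypothetical

      std::uint32_t n32 = 0; // old type of n_global_active_cells
      std::uint64_t n64 = 0; // types::global_dof_index in a 64-bit build
      for (std::uint64_t r = 0; r < n_ranks; ++r)
        {
          n32 += static_cast<std::uint32_t>(cells_per_rank);
          n64 += cells_per_rank;
        }
      // 7e9 cells do not fit in 32 bits: the first sum wraps around
      std::cout << "32-bit total: " << n32 << " (wrapped)\n"
                << "64-bit total: " << n64 << "\n";
    }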
diff --cc deal.II/include/deal.II/lac/constraint_matrix.templates.h
index 35338ac6bb,cc2f90ca4a..af6d907509
--- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
@@@ -730,46 -733,39 +733,41 @@@ namespace interna
  {
    namespace
    {
++      typedef types::global_dof_index size_type;
++
-       // TODO: in general we should iterate over the constraints and not over all DoFs
-       // for performance reasons
        template <class VEC>
-       void set_zero_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, size_type shift = 0)
 -      void set_zero_parallel(const std::vector<unsigned int> &cm, VEC &vec, unsigned int shift = 0)
++      void set_zero_parallel(const std::vector<size_type> &cm, VEC &vec, size_type shift = 0)
        {
          Assert(!vec.has_ghost_elements(), ExcInternalError()); //ExcGhostsPresent());
-         const size_type
-           start = vec.local_range().first,
-           end   = vec.local_range().second;
-         for (size_type i=start; i<end; ++i)
-           if (cm.is_constrained (shift + i))
-             vec(i) = 0;
+         const IndexSet locally_owned = vec.locally_owned_elements();
 -        for (typename std::vector<unsigned int>::const_iterator it = cm.begin();
++        for (typename std::vector<size_type>::const_iterator it = cm.begin();
+              it != cm.end(); ++it)
+           if (locally_owned.is_element(*it))
+             vec(*it) = 0.;
        }
  
        template <typename Number>
-       void set_zero_parallel(const dealii::ConstraintMatrix &cm, parallel::distributed::Vector<Number> &vec, size_type shift = 0)
 -      void set_zero_parallel(const std::vector<unsigned int> &cm, parallel::distributed::Vector<Number> &vec, unsigned int shift = 0)
++      void set_zero_parallel(const std::vector<size_type> &cm, parallel::distributed::Vector<Number> &vec, size_type shift = 0)
        {
-         for (unsigned int i=0; i<vec.local_size(); ++i)
-           if (cm.is_constrained (shift + vec.local_range().first + i))
-             vec.local_element(i) = 0;
 -        for (typename std::vector<unsigned int>::const_iterator it = cm.begin();
++        for (typename std::vector<size_type>::const_iterator it = cm.begin();
+              it != cm.end(); ++it)
+           if (vec.in_local_range(*it))
+             vec(*it) = 0.;
          vec.zero_out_ghosts();
        }
  
        template <class VEC>
-       void set_zero_in_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, internal::bool2type<false>)
 -      void set_zero_in_parallel(const std::vector<unsigned int> &cm, VEC &vec, internal::bool2type<false>)
++      void set_zero_in_parallel(const std::vector<size_type> &cm, VEC &vec, internal::bool2type<false>)
        {
          set_zero_parallel(cm, vec, 0);
        }
  
        // in parallel for BlockVectors
        template <class VEC>
-       void set_zero_in_parallel(const dealii::ConstraintMatrix &cm, VEC &vec, internal::bool2type<true>)
 -      void set_zero_in_parallel(const std::vector<unsigned int> &cm, VEC &vec, internal::bool2type<true>)
++      void set_zero_in_parallel(const std::vector<size_type> &cm, VEC &vec, internal::bool2type<true>)
        {
-         unsigned int start_shift = 0;
-         for (unsigned int j=0; j<vec.n_blocks(); ++j)
+         size_type start_shift = 0;
+         for (size_type j=0; j<vec.n_blocks(); ++j)
            {
              set_zero_parallel(cm, vec.block(j), start_shift);
              start_shift += vec.block(j).size();
            }
        }
  
        template <class VEC>
-       void set_zero_serial(const dealii::ConstraintMatrix &cm, VEC &vec)
 -      void set_zero_serial(const std::vector<unsigned int> &cm, VEC &vec)
++      void set_zero_serial(const std::vector<size_type> &cm, VEC &vec)
        {
-         // TODO would be faster:
-         /* std::vector<ConstraintLine>::const_iterator constraint_line = cm.lines.begin();
-            for (; constraint_line!=cm.lines.end(); ++constraint_line)
-              vec(constraint_line->line) = 0.;*/
-         for (size_type i=0; i<vec.size(); ++i)
-           if (cm.is_constrained(i))
-             vec(i) = 0;
+         for (typename std::vector<size_type>::const_iterator it = cm.begin();
+              it != cm.end(); ++it)
+           vec(*it) = 0.;
        }
  
        template <class VEC>
-       void set_zero_all(const dealii::ConstraintMatrix &cm, VEC &vec)
 -      void set_zero_all(const std::vector<unsigned int> &cm, VEC &vec)
++      void set_zero_all(const std::vector<size_type> &cm, VEC &vec)
        {
          set_zero_in_parallel(cm, vec, internal::bool2type<IsBlockVector<VEC>::value>());
          vec.compress(VectorOperation::insert);
        }
@@@ -797,13 -789,13 +791,13 @@@
        template <class T>
-       void set_zero_all(const dealii::ConstraintMatrix &cm, dealii::Vector<T> &vec)
 -      void set_zero_all(const std::vector<unsigned int> &cm, dealii::Vector<T> &vec)
++      void set_zero_all(const std::vector<size_type> &cm, dealii::Vector<T> &vec)
        {
          set_zero_serial(cm, vec);
        }
  
        template <class T>
-       void set_zero_all(const dealii::ConstraintMatrix &cm, dealii::BlockVector<T> &vec)
 -      void set_zero_all(const std::vector<unsigned int> &cm, dealii::BlockVector<T> &vec)
++      void set_zero_all(const std::vector<size_type> &cm, dealii::BlockVector<T> &vec)
        {
          set_zero_serial(cm, vec);
        }
@@@ -816,7 -808,12 +810,12 @@@
  template <class VectorType>
  void
  ConstraintMatrix::set_zero (VectorType &vec) const
  {
+   // since lines is a private member, we cannot pass it to the functions
+   // above; therefore, copy the content, which is cheap
 -  std::vector<unsigned int> constrained_lines(lines.size());
++  std::vector<size_type> constrained_lines(lines.size());
+   for (unsigned int i=0; i<lines.size(); ++i)
+     constrained_lines[i] = lines[i].line;
+   internal::ConstraintMatrix::set_zero_all(constrained_lines, vec);
-   internal::ConstraintMatrix::set_zero_all(*this, vec);
  }

diff --cc deal.II/include/deal.II/lac/petsc_block_vector.h
--- a/deal.II/include/deal.II/lac/petsc_block_vector.h
+++ b/deal.II/include/deal.II/lac/petsc_block_vector.h
       /**
        * If <tt>fast==false</tt>, the vector
        * is filled with zeros.
        */
 -     void reinit (const std::vector<unsigned int> &N,
 -                  const bool                       fast=false);
 +     void reinit (const std::vector<size_type> &N,
 +                  const bool                    fast=false);

       /**
-       * Reinit the function
-       * according to a distributed
-       * block vector. The elements
-       * will be copied in this
+       * Reinitialize the vector in the same way as the given
+       * distributed block vector. The elements will be copied in this
        * process.
        */
       void reinit (const MPI::BlockVector &V);
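The removed TODO above ("iterate over the constraints and not over all
DoFs") is exactly what the merged set_zero_* code now does: instead of
testing every locally owned DoF for being constrained, it walks the usually
much shorter list of constrained lines, i.e. O(#constraints) work instead of
O(local vector size). A standalone sketch of the idea; LocalVector and its
members are illustrative stand-ins, not deal.II API:

    #include <iostream>
    #include <vector>

    typedef unsigned long long size_type;

    struct LocalVector
    {
      size_type first, last;        // locally owned half-open range
      std::vector<double> values;   // one entry per owned index
      bool is_locally_owned(size_type i) const { return i >= first && i < last; }
      double &operator()(size_type i) { return values[i - first]; }
    };

    void set_zero(const std::vector<size_type> &constrained_lines,
                  LocalVector &vec)
    {
      for (std::vector<size_type>::const_iterator it = constrained_lines.begin();
           it != constrained_lines.end(); ++it)
        if (vec.is_locally_owned(*it))
          vec(*it) = 0.; // only touch owned, constrained entries
    }

    int main()
    {
      LocalVector v = {10, 14, std::vector<double>(4, 1.)};
      std::vector<size_type> constrained;
      constrained.push_back(2);  // not owned here: skipped
      constrained.push_back(11); // owned: zeroed
      set_zero(constrained, v);
      std::cout << v(10) << ' ' << v(11) << '\n'; // prints: 1 0
    }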
diff --cc deal.II/source/lac/constraint_matrix.cc
index d8ed22b9c3,3e7c63c384..f3176faf72
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@@ -1522,269 -1518,7 +1522,7 @@@ void ConstraintMatrix::condense (BlockC
  
- #ifdef DEAL_II_WITH_TRILINOS
- 
- // this is a specialization for a parallel (non-block) Trilinos vector. The
- // basic idea is to just work on the local range of the vector. But we need
- // access to values that the local nodes are constrained to.
- 
- template<>
- void
- ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
- {
-   Assert (sorted==true, ExcMatrixIsClosed());
- 
-   //TODO: not implemented yet, we need to fix LocalRange() first to only
-   //include "owned" indices. For this we need to keep track of the owned
-   //indices, because Trilinos doesn't. Use same constructor interface as in
-   //PETSc with two IndexSets!
-   AssertThrow (vec.vector_partitioner().IsOneToOne(),
-                ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
- 
-   typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-   ConstraintLine index_comparison;
-   index_comparison.line = vec.local_range().first;
-   const constraint_iterator begin_my_constraints =
-     Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
- 
-   index_comparison.line = vec.local_range().second;
-   const constraint_iterator end_my_constraints
-     = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- 
-   // Here we search all the indices that we need to have read-access to -
-   // the local nodes and all the nodes that the constraints indicate.
-   IndexSet my_indices (vec.size());
-   {
-     const std::pair<size_type, size_type>
-       local_range = vec.local_range();
- 
-     my_indices.add_range (local_range.first, local_range.second);
- 
-     std::set<size_type> individual_indices;
-     for (constraint_iterator it = begin_my_constraints;
-          it != end_my_constraints; ++it)
-       for (unsigned int i=0; i<it->entries.size(); ++i)
-         if ((it->entries[i].first < local_range.first)
-             ||
-             (it->entries[i].first >= local_range.second))
-           individual_indices.insert (it->entries[i].first);
- 
-     my_indices.add_indices (individual_indices.begin(),
-                             individual_indices.end());
-   }
- 
- #ifdef DEAL_II_WITH_MPI
-   const Epetra_MpiComm *mpi_comm
-     = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
- 
-   Assert (mpi_comm != 0, ExcInternalError());
- 
-   TrilinosWrappers::MPI::Vector vec_distribute
-     (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
- #else
-   TrilinosWrappers::MPI::Vector vec_distribute
-     (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
- #endif
- 
-   // here we import the data
-   vec_distribute.reinit(vec,false,true);
- 
-   for (constraint_iterator it = begin_my_constraints;
-        it != end_my_constraints; ++it)
-     {
-       // fill entry in line next_constraint.line by adding the different
-       // contributions
-       double new_value = it->inhomogeneity;
-       for (unsigned int i=0; i<it->entries.size(); ++i)
-         new_value += (vec_distribute(it->entries[i].first) *
-                       it->entries[i].second);
-       vec(it->line) = new_value;
-     }
- 
-   // some processes might not apply constraints, so we need to explicitly
-   // state, that the others are doing an insert here:
-   vec.compress (::dealii::VectorOperation::insert);
- }
- 
- 
- 
- template<>
- void
- ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
- {
-   Assert (sorted==true, ExcMatrixIsClosed());
- 
-   IndexSet my_indices (vec.size());
-   for (unsigned int block=0; block<vec.n_blocks(); ++block)
-     {
-       typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-       ConstraintLine index_comparison;
-       index_comparison.line = vec.block(block).local_range().first
-                               +vec.get_block_indices().block_start(block);
-       const constraint_iterator begin_my_constraints =
-         Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
- 
-       index_comparison.line = vec.block(block).local_range().second
-                               +vec.get_block_indices().block_start(block);
- 
-       const constraint_iterator end_my_constraints
-         = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- 
-       // Here we search all the indices that we need to have read-access to
-       // - the local nodes and all the nodes that the constraints indicate.
-       // No caching done yet. would need some more clever data structures
-       // for doing that.
-       const std::pair<size_type, size_type>
-         local_range = vec.block(block).local_range();
- 
-       my_indices.add_range (local_range.first, local_range.second);
- 
-       std::set<size_type> individual_indices;
-       for (constraint_iterator it = begin_my_constraints;
-            it != end_my_constraints; ++it)
-         for (unsigned int i=0; i<it->entries.size(); ++i)
-           if ((it->entries[i].first < local_range.first)
-               ||
-               (it->entries[i].first >= local_range.second))
-             individual_indices.insert (it->entries[i].first);
- 
-       my_indices.add_indices (individual_indices.begin(),
-                               individual_indices.end());
-     }
- 
- #ifdef DEAL_II_WITH_MPI
-   const Epetra_MpiComm *mpi_comm
-     = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
- 
-   Assert (mpi_comm != 0, ExcInternalError());
- 
-   TrilinosWrappers::MPI::Vector vec_distribute
-     (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
- #else
-   TrilinosWrappers::MPI::Vector vec_distribute
-     (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
- #endif
- 
-   // here we import the data
-   vec_distribute.reinit(vec,true);
- 
-   for (unsigned int block=0; block<vec.n_blocks(); ++block)
-     {
-       typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-       ConstraintLine index_comparison;
-       index_comparison.line = vec.block(block).local_range().first
-                               +vec.get_block_indices().block_start(block);
-       const constraint_iterator begin_my_constraints =
-         Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
- 
-       index_comparison.line = vec.block(block).local_range().second
-                               +vec.get_block_indices().block_start(block);
- 
-       const constraint_iterator end_my_constraints
-         = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- 
-       for (constraint_iterator it = begin_my_constraints;
-            it != end_my_constraints; ++it)
-         {
-           // fill entry in line next_constraint.line by adding the
-           // different contributions
-           double new_value = it->inhomogeneity;
-           for (unsigned int i=0; i<it->entries.size(); ++i)
-             new_value += (vec_distribute(it->entries[i].first) *
-                           it->entries[i].second);
-           vec(it->line) = new_value;
-         }
-       vec.block(block).compress(::dealii::VectorOperation::insert);
-     }
- }
- 
- #endif
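Both removed Trilinos specializations share the same preparatory step:
collect, per process, the locally owned range plus every foreign index that
the local constraints reference, so exactly those values can be imported. A
simplified standalone sketch of that gathering step; ConstraintLine here is
a stripped-down stand-in, not the real deal.II struct:

    #include <set>
    #include <utility>
    #include <vector>

    typedef unsigned long long size_type;

    struct ConstraintLine
    {
      size_type line;                                     // constrained DoF
      std::vector<std::pair<size_type, double> > entries; // (dof, weight)
    };

    std::set<size_type>
    needed_indices(const std::vector<ConstraintLine> &my_constraints,
                   const std::pair<size_type, size_type> &local_range)
    {
      std::set<size_type> needed;
      for (size_type i = local_range.first; i < local_range.second; ++i)
        needed.insert(i);                                 // owned entries
      for (unsigned int c = 0; c < my_constraints.size(); ++c)
        for (unsigned int e = 0; e < my_constraints[c].entries.size(); ++e)
          {
            const size_type dof = my_constraints[c].entries[e].first;
            if (dof < local_range.first || dof >= local_range.second)
              needed.insert(dof);                         // off-process entries
          }
      return needed;
    }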
- #ifdef DEAL_II_WITH_PETSC
- 
- // this is a specialization for a parallel (non-block) PETSc vector. The
- // basic idea is to just work on the local range of the vector. But we need
- // access to values that the local nodes are constrained to.
- 
- template<>
- void
- ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
- {
-   Assert (sorted==true, ExcMatrixIsClosed());
-   Assert (vec.has_ghost_elements() == false,
-           ExcMessage ("This operation can only be performed on vectors without ghost elements."));
- 
-   typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-   ConstraintLine index_comparison;
-   index_comparison.line = vec.local_range().first;
-   const constraint_iterator begin_my_constraints =
-     Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
- 
-   index_comparison.line = vec.local_range().second;
-   const constraint_iterator end_my_constraints
-     = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
- 
-   // all indices we need to read from
-   IndexSet my_indices (vec.size());
- 
-   const std::pair<size_type, size_type>
-     local_range = vec.local_range();
- 
-   my_indices.add_range (local_range.first, local_range.second);
- 
-   std::set<size_type> individual_indices;
-   for (constraint_iterator it = begin_my_constraints;
-        it != end_my_constraints; ++it)
-     for (unsigned int i=0; i<it->entries.size(); ++i)
-       if ((it->entries[i].first < local_range.first)
-           ||
-           (it->entries[i].first >= local_range.second))
-         individual_indices.insert (it->entries[i].first);
- 
-   my_indices.add_indices (individual_indices.begin(),
-                           individual_indices.end());
- 
-   IndexSet local_range_is (vec.size());
-   local_range_is.add_range(local_range.first, local_range.second);
- 
- 
-   // create a vector and import those indices
-   PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
-                                         local_range_is,
-                                         my_indices);
-   ghost_vec = vec;
-   ghost_vec.update_ghost_values();
- 
-   // finally do the distribution on own constraints
-   for (constraint_iterator it = begin_my_constraints;
-        it != end_my_constraints; ++it)
-     {
-       // fill entry in line next_constraint.line by adding the different
-       // contributions
-       PetscScalar new_value = it->inhomogeneity;
-       for (unsigned int i=0; i<it->entries.size(); ++i)
-         new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
-                       it->entries[i].second);
-       vec(it->line) = new_value;
-     }
- 
-   vec.compress (VectorOperation::insert);
- }
- 
- 
- template<>
- void
- ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
- {
-   Assert (sorted==true, ExcMatrixIsClosed());
-   AssertThrow (false, ExcNotImplemented());
- }
- 
- #endif
- 
- 
- 
 -bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
 +bool ConstraintMatrix::is_identity_constrained (const size_type index) const
  {
    if (is_constrained(index) == false)
      return false;
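Once the needed values have been imported into a ghosted vector, every one
of the removed distribute() specializations applies the same update to its
own constrained lines: x(line) = inhomogeneity + sum_k weight_k * x(entry_k).
A standalone sketch of that step, reusing the stand-in types from the sketch
above; GhostedVector is an illustrative substitute for the imported
PETSc/Trilinos vector:

    #include <map>
    #include <utility>
    #include <vector>

    typedef unsigned long long size_type;
    typedef std::map<size_type, double> GhostedVector; // index -> value

    struct ConstraintLine
    {
      size_type line;
      std::vector<std::pair<size_type, double> > entries; // (dof, weight)
      double inhomogeneity;
    };

    void distribute(const std::vector<ConstraintLine> &my_constraints,
                    const GhostedVector &ghosted,
                    GhostedVector &vec)
    {
      for (unsigned int c = 0; c < my_constraints.size(); ++c)
        {
          const ConstraintLine &cl = my_constraints[c];
          double new_value = cl.inhomogeneity;
          for (unsigned int e = 0; e < cl.entries.size(); ++e)
            new_value += ghosted.find(cl.entries[e].first)->second *
                         cl.entries[e].second;
          vec[cl.line] = new_value;
        }
      // the real code then calls vec.compress(VectorOperation::insert),
      // since processes without local constraints still take part in the
      // exchange
    }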
diff --cc deal.II/source/lac/constraint_matrix.inst.in
index 72131b5396,1a69fff4a2..9880f7044a
--- a/deal.II/source/lac/constraint_matrix.inst.in
+++ b/deal.II/source/lac/constraint_matrix.inst.in
@@@ -3,9 -3,8 +3,8 @@@ for (S: REAL_SCALARS; T : DEAL_II_VEC_T
    template void ConstraintMatrix::condense<T<S> >(const T<S> &, T<S> &) const;
    template void ConstraintMatrix::condense<T<S> >(T<S> &vec) const;
    template void ConstraintMatrix::distribute_local_to_global<T<S> > (
 -    const Vector<S>&, const std::vector<unsigned int> &, T<S> &, const FullMatrix<S>&) const;
 +    const Vector<S>&, const std::vector<size_type> &, T<S> &, const FullMatrix<S>&) const;
    template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
-   template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
    template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
  }

@@@ -15,9 -14,8 +14,8 @@@ for (S: REAL_SCALARS; T : DEAL_II_VEC_T
    template void ConstraintMatrix::condense<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
    template void ConstraintMatrix::condense<parallel::distributed::T<S> >(parallel::distributed::T<S> &vec) const;
    template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
 -    const Vector<S>&, const std::vector<unsigned int> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
 +    const Vector<S>&, const std::vector<size_type> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
    template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
-   template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
    template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
  }

@@@ -27,9 -25,8 +25,8 @@@ for (V: EXTERNAL_SEQUENTIAL_VECTORS
    template void ConstraintMatrix::condense<V>(const V&, V&) const;
    template void ConstraintMatrix::condense<V>(V&vec) const;
    template void ConstraintMatrix::distribute_local_to_global<V> (
 -    const Vector<double>&, const std::vector<unsigned int> &, V&, const FullMatrix<double>&) const;
 +    const Vector<double>&, const std::vector<size_type> &, V&, const FullMatrix<double>&) const;
    template void ConstraintMatrix::distribute<V>(const V&, V&) const;
-   template void ConstraintMatrix::distribute<V>(V&) const;
    template void ConstraintMatrix::set_zero<V>(V&) const;
  }
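For readers unfamiliar with the .inst.in files: the build system expands each
block once per combination of the listed parameters to produce explicit
template instantiations. As an illustration, one plausible expansion of the
first condense line above, for S=double and T=Vector (the actual scalar and
vector lists are supplied by the build configuration):

    // generated explicit instantiation, S=double, T=Vector
    template void ConstraintMatrix::condense<Vector<double> >(
      const Vector<double> &, Vector<double> &) const;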
diff --cc deal.II/source/lac/petsc_parallel_block_vector.cc
index fc11d85a10,49318b3840..45dbc8d1b1
--- a/deal.II/source/lac/petsc_parallel_block_vector.cc
+++ b/deal.II/source/lac/petsc_parallel_block_vector.cc
@@@ -24,6 -24,6 +24,8 @@@ namespace PETScWrapper
  {
    namespace MPI
    {
++    typedef types::global_dof_index size_type;
++
      BlockVector &
      BlockVector::operator = (const PETScWrappers::BlockVector &v)
      {
@@@ -35,6 -35,21 +37,21 @@@
        return *this;
      }
  
+ 
+ 
+     void
+     BlockVector::reinit (const unsigned int num_blocks)
+     {
 -      std::vector<unsigned int> block_sizes (num_blocks, 0);
++      std::vector<size_type> block_sizes (num_blocks, 0);
+       this->block_indices.reinit (block_sizes);
+       if (this->components.size() != this->n_blocks())
+         this->components.resize(this->n_blocks());
+ 
+       for (unsigned int i=0; i<this->n_blocks(); ++i)
+         components[i].reinit (MPI_Comm(), 0, 0);
+ 
+       collect_sizes();
+     }
  }
  }
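The new BlockVector::reinit(const unsigned int num_blocks) only establishes
the block structure and leaves every block empty, so a caller is expected to
size the blocks afterwards. A hypothetical usage sketch, assuming a deal.II
build with PETSc and MPI; the communicator and sizes are invented, and the
local size assumes a single-process run:

    #include <deal.II/lac/petsc_parallel_block_vector.h>

    void example(MPI_Comm communicator)
    {
      dealii::PETScWrappers::MPI::BlockVector v;
      v.reinit(2);                               // two empty blocks
      for (unsigned int b = 0; b < v.n_blocks(); ++b)
        v.block(b).reinit(communicator,
                          /*size=*/100,          // made-up global size
                          /*local_size=*/100);   // all local on one rank
      v.collect_sizes();                         // rebuild global block indices
    }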