From: heister
Date: Fri, 3 May 2013 23:35:19 +0000 (+0000)
Subject: write ConstraintMatrix::distribute(Petsc BlockVector) and add some reinit functions...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=6d23149b063a7a9c18c60b523ec66ad3910683a3;p=dealii-svn.git

write ConstraintMatrix::distribute(Petsc BlockVector) and add some reinit functions
for ghosted vectors

git-svn-id: https://svn.dealii.org/branches/branch_unify_linear_algebra@29451 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h b/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h
index d4b6d00e38..ba37ef1af7 100644
--- a/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h
+++ b/deal.II/include/deal.II/lac/petsc_parallel_block_vector.h
@@ -133,6 +133,15 @@ namespace PETScWrappers
       explicit BlockVector (const std::vector<IndexSet> &parallel_partitioning,
                             const MPI_Comm              &communicator = MPI_COMM_WORLD);

+      /**
+       * Same as above, but include ghost elements.
+       */
+      BlockVector (const std::vector<IndexSet> &parallel_partitioning,
+                   const std::vector<IndexSet> &ghost_indices,
+                   const MPI_Comm              &communicator);
+
+
+
       /**
        * Destructor. Clears memory
        */
@@ -279,6 +288,12 @@ namespace PETScWrappers
       void reinit (const std::vector<IndexSet> &parallel_partitioning,
                    const MPI_Comm              &communicator);

+      /**
+       * Same as above, but include ghost entries.
+       */
+      void reinit (const std::vector<IndexSet> &parallel_partitioning,
+                   const std::vector<IndexSet> &ghost_entries,
+                   const MPI_Comm              &communicator);
       /**
        * Return a reference to the MPI
        * communicator object in use with
@@ -386,6 +401,13 @@ namespace PETScWrappers
       reinit(parallel_partitioning, communicator);
     }

+    inline
+    BlockVector::BlockVector (const std::vector<IndexSet> &parallel_partitioning,
+                              const std::vector<IndexSet> &ghost_indices,
+                              const MPI_Comm              &communicator)
+    {
+      reinit(parallel_partitioning, ghost_indices, communicator);
+    }

     inline
     BlockVector &
@@ -475,6 +497,25 @@ namespace PETScWrappers
           block(i).reinit(parallel_partitioning[i], communicator);
     }

+    inline
+    void
+    BlockVector::reinit (const std::vector<IndexSet> &parallel_partitioning,
+                         const std::vector<IndexSet> &ghost_entries,
+                         const MPI_Comm              &communicator)
+    {
+      std::vector<unsigned int> sizes(parallel_partitioning.size());
+      for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
+        sizes[i] = parallel_partitioning[i].size();
+
+      this->block_indices.reinit(sizes);
+      if (this->components.size() != this->n_blocks())
+        this->components.resize(this->n_blocks());
+
+      for (unsigned int i=0; i<this->n_blocks(); ++i)
+        block(i).reinit(parallel_partitioning[i], ghost_entries[i], communicator);
+    }
+
+
     inline
     const MPI_Comm &

diff --git a/deal.II/source/lac/constraint_matrix.cc b/deal.II/source/lac/constraint_matrix.cc
index 711c024189..af5e906b81 100644
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@ -1768,10 +1768,88 @@ ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const

 template<>
 void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &vec) const
 {
   Assert (sorted==true, ExcMatrixIsClosed());
-  AssertThrow (false, ExcNotImplemented());
+
+  std::vector<IndexSet> is_ghost(vec.n_blocks());
+  std::vector<IndexSet> is_owned(vec.n_blocks());
+  unsigned int startidx = 0; // this is the global dof index of the first dof in the current block
+  for (unsigned int block=0; block<vec.n_blocks(); ++block)
+    {
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      ConstraintLine index_comparison;
+      index_comparison.line = vec.block(block).local_range().first
+                              +vec.get_block_indices().block_start(block);
+      const constraint_iterator begin_my_constraints =
+        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+      index_comparison.line = vec.block(block).local_range().second
+                              +vec.get_block_indices().block_start(block);
+
+      const constraint_iterator end_my_constraints
+        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+      // Here we search all the indices that we need to have read-access to
+      // - the local nodes and all the nodes that the constraints indicate.
+      // No caching done yet; that would need some more clever data structures.
+      const std::pair<unsigned int, unsigned int>
+      local_range = vec.block(block).local_range();
+
+      is_owned[block].add_range (local_range.first, local_range.second);
+
+      std::set<unsigned int> individual_indices;
+      for (constraint_iterator it = begin_my_constraints;
+           it != end_my_constraints; ++it)
+        for (unsigned int i=0; i<it->entries.size(); ++i)
+          if ((it->entries[i].first < local_range.first)
+              ||
+              (it->entries[i].first >= local_range.second))
+            individual_indices.insert (it->entries[i].first - startidx);
+
+      is_ghost[block].add_indices (individual_indices.begin(),
+                                   individual_indices.end());
+
+      startidx += vec.block(block).size();
+    }
+
+
+  PETScWrappers::MPI::BlockVector ghost_vec;
+  ghost_vec.reinit(is_owned, is_ghost, vec.get_mpi_communicator());
+  ghost_vec = vec;
+
+  for (unsigned int block=0; block<vec.n_blocks(); ++block)
+    {
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+      ConstraintLine index_comparison;
+      index_comparison.line = vec.block(block).local_range().first
+                              +vec.get_block_indices().block_start(block);
+      const constraint_iterator begin_my_constraints =
+        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+      index_comparison.line = vec.block(block).local_range().second
+                              +vec.get_block_indices().block_start(block);
+
+      const constraint_iterator end_my_constraints
+        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+      for (constraint_iterator it = begin_my_constraints;
+           it != end_my_constraints; ++it)
+        {
+          // fill entry in line next_constraint.line by adding the
+          // different contributions
+          double new_value = it->inhomogeneity;
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            new_value += (ghost_vec(it->entries[i].first) *
+                          it->entries[i].second);
+          vec(it->line) = new_value;
+        }
+      vec.block(block).compress(::dealii::VectorOperation::insert);
+    }
 }

 #endif
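
For reference, the two additions are meant to be used together roughly as in the sketch below. This is not part of the commit: it is a minimal illustration assuming the caller already has, per block, one IndexSet with the locally owned rows and one with the required off-processor (ghost) rows, plus a closed ConstraintMatrix. The function name finish_solution and the parameter names owned_per_block, ghost_per_block and ghosted_solution are made up for the example; following the distribute() implementation above, the ghost sets are taken to contain only rows that are not locally owned.

    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/constraint_matrix.h>
    #include <deal.II/lac/petsc_parallel_block_vector.h>

    #include <vector>

    using namespace dealii;

    // Hypothetical helper: resolve constrained entries of a distributed block
    // vector and build a ghosted copy whose ghost rows can be read afterwards.
    void
    finish_solution (const std::vector<IndexSet>     &owned_per_block,
                     const std::vector<IndexSet>     &ghost_per_block,
                     const ConstraintMatrix          &constraints,
                     PETScWrappers::MPI::BlockVector &solution,
                     PETScWrappers::MPI::BlockVector &ghosted_solution)
    {
      // New distribute() overload: overwrite every constrained entry of
      // `solution` with the value prescribed by its constraint line.
      constraints.distribute (solution);

      // New reinit() overload: size each block by its owned IndexSet and
      // additionally store the ghost entries listed for that block.
      ghosted_solution.reinit (owned_per_block, ghost_per_block,
                               solution.get_mpi_communicator());

      // Assigning a non-ghosted vector to a ghosted one imports the ghost
      // values (the same pattern distribute() uses internally), so
      // `ghosted_solution` can now be read at owned and ghost rows alike.
      ghosted_solution = solution;
    }

The same pattern is what the new distribute() relies on internally: it builds per-block owned and ghost index sets from the constraint lines, creates a ghosted work vector with the new reinit(), copies the input vector into it, and then evaluates each locally stored constraint against that ghosted copy before writing the result back into the original vector.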