From: heister
Date: Fri, 3 May 2013 00:58:30 +0000 (+0000)
Subject: add reinit() taking a BlockCompressedSimpleSparsityPattern
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=b5d02c5b1a08d6450f2f96ae17166e8733dd64c8;p=dealii-svn.git

add reinit() taking a BlockCompressedSimpleSparsityPattern

git-svn-id: https://svn.dealii.org/branches/branch_unify_linear_algebra@29434 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h b/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h
index ac317b1fc0..1d0e1903b9 100644
--- a/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h
+++ b/deal.II/include/deal.II/lac/petsc_parallel_block_sparse_matrix.h
@@ -19,6 +19,7 @@
 # include
 # include
+# include <deal.II/lac/block_sparsity_pattern.h>
 # include
 # include
 # include
@@ -172,6 +173,31 @@ namespace PETScWrappers
       void reinit (const unsigned int n_block_rows,
                    const unsigned int n_block_columns);
+
+      /**
+       * Efficiently reinitialize the block matrix for a parallel
+       * computation. Only a block sparsity pattern of the Simple type can
+       * efficiently store large sparsity patterns in parallel, so this is
+       * the only supported argument.
+       * The IndexSets describe the locally owned range of DoFs for each
+       * block. Note that each IndexSet needs to be contiguous. For a
+       * symmetric structure, hand in the same vector for the first two
+       * arguments.
+       */
+      void reinit(const std::vector<IndexSet> &rows,
+                  const std::vector<IndexSet> &cols,
+                  const BlockCompressedSimpleSparsityPattern &bcsp,
+                  const MPI_Comm &com);
+
+      /**
+       * Same as above, but for a symmetric structure only.
+       */
+      void reinit(const std::vector<IndexSet> &sizes,
+                  const BlockCompressedSimpleSparsityPattern &bcsp,
+                  const MPI_Comm &com);
+
       /**
        * Matrix-vector multiplication:
        * let $dst = M*src$ with $M$
diff --git a/deal.II/source/lac/petsc_parallel_block_sparse_matrix.cc b/deal.II/source/lac/petsc_parallel_block_sparse_matrix.cc
index e56704cb8f..03c4d56283 100644
--- a/deal.II/source/lac/petsc_parallel_block_sparse_matrix.cc
+++ b/deal.II/source/lac/petsc_parallel_block_sparse_matrix.cc
@@ -66,6 +66,57 @@ namespace PETScWrappers
       }
     }

+    void
+    BlockSparseMatrix::
+    reinit(const std::vector<IndexSet> &rows,
+           const std::vector<IndexSet> &cols,
+           const BlockCompressedSimpleSparsityPattern &bcsp,
+           const MPI_Comm &com)
+    {
+      Assert(rows.size() == bcsp.n_block_rows(), ExcMessage("invalid size"));
+      Assert(cols.size() == bcsp.n_block_cols(), ExcMessage("invalid size"));
+
+      clear();
+      this->sub_objects.reinit (bcsp.n_block_rows(),
+                                bcsp.n_block_cols());
+
+      std::vector<size_type> row_sizes;
+      for (unsigned int r=0; r<bcsp.n_block_rows(); ++r)
+        row_sizes.push_back( bcsp.block(r,0).n_rows() );
+      this->row_block_indices.reinit (row_sizes);
+
+      std::vector<size_type> col_sizes;
+      for (unsigned int c=0; c<bcsp.n_block_cols(); ++c)
+        col_sizes.push_back( bcsp.block(0,c).n_cols() );
+      this->column_block_indices.reinit (col_sizes);
+
+      for (unsigned int r=0; r<bcsp.n_block_rows(); ++r)
+        for (unsigned int c=0; c<bcsp.n_block_cols(); ++c)
+          {
+            Assert(rows[r].size() == bcsp.block(r,c).n_rows(), ExcMessage("invalid size"));
+            Assert(cols[c].size() == bcsp.block(r,c).n_cols(), ExcMessage("invalid size"));
+
+            BlockType *p = new BlockType();
+            p->reinit(rows[r],
+                      cols[c],
+                      bcsp.block(r,c),
+                      com);
+            this->sub_objects[r][c] = p;
+          }
+
+      collect_sizes();
+    }
+
+    void
+    BlockSparseMatrix::
+    reinit(const std::vector<IndexSet> &sizes,
+           const BlockCompressedSimpleSparsityPattern &bcsp,
+           const MPI_Comm &com)
+    {
+      reinit(sizes, sizes, bcsp, com);
+    }
+
     void
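
For reference, a minimal usage sketch of the symmetric overload added above
(not part of the commit itself). The function name setup_system, the vector
owned_partitioning, and the way the sparsity pattern arrives are illustrative
assumptions; only the reinit() call reflects the interface introduced here.

// Hypothetical usage sketch; setup_system and owned_partitioning are
// illustrative names, not from the commit.
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/petsc_parallel_block_sparse_matrix.h>

#include <vector>

using namespace dealii;

void setup_system (const std::vector<IndexSet> &owned_partitioning,
                   const BlockCompressedSimpleSparsityPattern &bcsp,
                   const MPI_Comm &mpi_communicator)
{
  PETScWrappers::MPI::BlockSparseMatrix system_matrix;

  // Symmetric structure: the same vector of (contiguous) IndexSets
  // describes the locally owned rows and columns of every block, so the
  // single-vector overload suffices instead of passing the vector twice.
  system_matrix.reinit (owned_partitioning, bcsp, mpi_communicator);
}

For a rectangular block structure, where the row and column partitionings
differ, the two-vector overload reinit(rows, cols, bcsp, com) would be used
instead.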