From 060aa77e90c596f2c19749bf51f0cfd726f8715a Mon Sep 17 00:00:00 2001
From: heister
Date: Thu, 18 Apr 2013 22:33:08 +0000
Subject: [PATCH] better reinit for PETSc matrix, now identical to Trilinos

git-svn-id: https://svn.dealii.org/branches/branch_unify_linear_algebra@29336 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../lac/petsc_parallel_sparse_matrix.h  |  20 ++
 .../lac/petsc_parallel_sparse_matrix.cc | 186 ++++++++++++++++++
 2 files changed, 206 insertions(+)

diff --git a/deal.II/include/deal.II/lac/petsc_parallel_sparse_matrix.h b/deal.II/include/deal.II/lac/petsc_parallel_sparse_matrix.h
index 29dded8591..7eb8b9d087 100644
--- a/deal.II/include/deal.II/lac/petsc_parallel_sparse_matrix.h
+++ b/deal.II/include/deal.II/lac/petsc_parallel_sparse_matrix.h
@@ -368,6 +368,18 @@ namespace PETScWrappers
                    const unsigned int  this_process,
                    const bool          preset_nonzero_locations = true);

+      /**
+       * Create a matrix where the size() of the IndexSets determines the
+       * global number of rows and columns and the entries of the IndexSets
+       * give the rows and columns for the calling processor.
+       * Note that only contiguous IndexSets are supported.
+       */
+      template <typename SparsityType>
+      void reinit (const IndexSet & local_rows,
+                   const IndexSet & local_columns,
+                   const SparsityType &sparsity_pattern,
+                   const MPI_Comm &communicator);
+
       /**
        * Return a reference to the MPI
        * communicator object in use with
@@ -472,6 +484,14 @@ namespace PETScWrappers
                       const unsigned int  this_process,
                       const bool          preset_nonzero_locations);

+      /**
+       * Same as previous functions.
+       */
+      template <typename SparsityType>
+      void do_reinit (const IndexSet & local_rows,
+                      const IndexSet & local_columns,
+                      const SparsityType &sparsity_pattern);
+
       /**
        * To allow calling protected
        * prepare_add() and
diff --git a/deal.II/source/lac/petsc_parallel_sparse_matrix.cc b/deal.II/source/lac/petsc_parallel_sparse_matrix.cc
index b90774a75f..405bc0c2bf 100644
--- a/deal.II/source/lac/petsc_parallel_sparse_matrix.cc
+++ b/deal.II/source/lac/petsc_parallel_sparse_matrix.cc
@@ -178,7 +178,27 @@ namespace PETScWrappers
                preset_nonzero_locations);
     }

+    template <typename SparsityType>
+    void
+    SparseMatrix::
+    reinit (const IndexSet & local_rows,
+            const IndexSet & local_columns,
+            const SparsityType &sparsity_pattern,
+            const MPI_Comm &communicator)
+    {
+      this->communicator = communicator;
+      // get rid of old matrix and generate a
+      // new one
+#if DEAL_II_PETSC_VERSION_LT(3,2,0)
+      const int ierr = MatDestroy (matrix);
+#else
+      const int ierr = MatDestroy (&matrix);
+#endif
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      do_reinit (local_rows, local_columns, sparsity_pattern);
+    }
+

     void
     SparseMatrix::do_reinit (const unsigned int m,
@@ -308,6 +328,159 @@ namespace PETScWrappers
     }


+    template <typename SparsityType>
+    void
+    SparseMatrix::
+    do_reinit (const IndexSet & local_rows,
+               const IndexSet & local_columns,
+               const SparsityType &sparsity_pattern)
+    {
+      Assert(sparsity_pattern.n_rows()==local_rows.size(),
+             ExcMessage("SparsityPattern and IndexSet have different number of rows"));
+      Assert(sparsity_pattern.n_cols()==local_columns.size(),
+             ExcMessage("SparsityPattern and IndexSet have different number of columns"));
+      Assert(local_rows.is_contiguous() && local_columns.is_contiguous(),
+             ExcMessage("PETSc only supports contiguous row/column ranges"));
+
+
+      // create the matrix. We do not set row length but set the
+      // correct SparsityPattern later.
+      int ierr;
+
+      ierr = MatCreate(communicator,&matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = MatSetSizes(matrix,
+                         local_rows.n_elements(),
+                         local_columns.n_elements(),
+                         sparsity_pattern.n_rows(),
+                         sparsity_pattern.n_cols());
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      ierr = MatSetType(matrix,MATMPIAIJ);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+
+      // next preset the exact given matrix
+      // entries with zeros. this doesn't avoid any
+      // memory allocations, but it at least
+      // avoids some searches later on. the
+      // key here is that we can use the
+      // matrix set routines that set an
+      // entire row at once, not a single
+      // entry at a time
+      //
+      // for the usefulness of this option
+      // read the documentation of this
+      // class.
+      //if (preset_nonzero_locations == true)
+      {
+        // MatMPIAIJSetPreallocationCSR
+        // can be used to allocate the sparsity
+        // pattern of a matrix
+
+        const PetscInt local_row_start = local_rows.nth_index_in_set(0);
+        const PetscInt local_col_start = local_columns.nth_index_in_set(0);
+        const PetscInt
+        local_row_end = local_row_start + local_rows.n_elements();
+
+
+        // first set up the column number
+        // array for the rows to be stored
+        // on the local processor. have one
+        // dummy entry at the end to make
+        // sure petsc doesn't read past the
+        // end
+        std::vector<PetscInt>
+        rowstart_in_window (local_row_end - local_row_start + 1, 0),
+        colnums_in_window;
+        {
+          unsigned int n_cols = 0;
+          for (PetscInt i=local_row_start; i<local_row_end; ++i)
+            {
+              const PetscInt row_length = sparsity_pattern.row_length(i);
+              rowstart_in_window[i+1-local_row_start]
+                = rowstart_in_window[i-local_row_start] + row_length;
+              n_cols += row_length;
+            }
+          colnums_in_window.resize (n_cols+1, -1);
+        }
+
+        // now copy over the information
+        // from the sparsity pattern.
+        {
+          PetscInt *ptr = & colnums_in_window[0];
+
+          for (PetscInt i=local_row_start; i<local_row_end; ++i)
+            for (typename SparsityType::row_iterator p=sparsity_pattern.row_begin(i);
+                 p != sparsity_pattern.row_end(i); ++p, ++ptr)
+              *ptr = *p;
+        }
+
+
+        // then call the petsc function
+        // that summarily allocates these
+        // entries:
+        MatMPIAIJSetPreallocationCSR (matrix,
+                                      &rowstart_in_window[0],
+                                      &colnums_in_window[0],
+                                      0);
+      }
+    }
+

     template <typename SparsityType>
     void
@@ -608,6 +781,13 @@ namespace PETScWrappers
                           const unsigned int,
                           const bool);

+    template void
+    SparseMatrix::
+    reinit (const IndexSet &,
+            const IndexSet &,
+            const CompressedSimpleSparsityPattern &,
+            const MPI_Comm &);
+
     template void
     SparseMatrix::do_reinit (const SparsityPattern &,
                              const std::vector<size_type> &,
                              const unsigned int ,
                              const bool);

+    template void
+    SparseMatrix::
+    do_reinit (const IndexSet &,
+               const IndexSet &,
+               const CompressedSimpleSparsityPattern &);
+
     PetscScalar
     SparseMatrix::matrix_norm_square (const Vector &v) const
-- 
2.39.5
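
Usage note (not part of the patch): the following is a minimal sketch of how the new reinit() overload added above could be called once the patch is applied. Everything in it (the global size of 100 rows, the even block distribution over processes, and the tridiagonal sparsity pattern) is made up for illustration, and it assumes a deal.II build with MPI and PETSc in which Utilities::MPI::MPI_InitFinalize takes care of the necessary initialization. Note how the IndexSets carry both the global sizes (via size()) and the contiguous locally owned ranges (via their entries), exactly as required by the new do_reinit().

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/compressed_simple_sparsity_pattern.h>
    #include <deal.II/lac/petsc_parallel_sparse_matrix.h>

    #include <algorithm>

    using namespace dealii;

    int main (int argc, char **argv)
    {
      // initialize MPI (and, depending on the deal.II version, PETSc as well)
      Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

      const unsigned int n_procs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
      const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);

      // distribute n_rows rows in contiguous blocks, as required by the new
      // reinit(): ranks 0..n_procs-2 get equally sized blocks, the last rank
      // gets the remainder
      const unsigned int n_rows = 100;
      const unsigned int block  = n_rows / n_procs;
      const unsigned int begin  = my_rank * block;
      const unsigned int end    = (my_rank == n_procs-1 ? n_rows : begin + block);

      IndexSet local_rows (n_rows), local_columns (n_rows);
      local_rows.add_range (begin, end);
      local_columns.add_range (begin, end);

      // global-sized sparsity pattern, filled only for the locally owned rows
      // (here: a simple tridiagonal stencil)
      CompressedSimpleSparsityPattern csp (n_rows, n_rows);
      for (unsigned int i=begin; i<end; ++i)
        for (unsigned int j=(i==0 ? 0 : i-1); j<=std::min(i+1, n_rows-1); ++j)
          csp.add (i, j);

      // the new overload: global sizes come from the IndexSets, the local
      // row/column ranges from their entries
      PETScWrappers::MPI::SparseMatrix matrix;
      matrix.reinit (local_rows, local_columns, csp, MPI_COMM_WORLD);

      return 0;
    }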