From 29e89fd131eea9baed4c930046255939ca3b565f Mon Sep 17 00:00:00 2001
From: wolf
Date: Tue, 15 Jun 2004 14:31:18 +0000
Subject: [PATCH] Fix the functions that take a sparsity pattern so that they really do what they are supposed to, i.e. avoid any later memory allocation at all.

git-svn-id: https://svn.dealii.org/trunk@9427 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../lac/petsc_parallel_sparse_matrix.h        |  59 ++++++-
 .../source/petsc_parallel_sparse_matrix.cc    | 105 +++++++++---
 tests/bits/petsc_parallel_sparse_matrix_01.cc | 155 ++++++++++++++++++
 .../petsc_parallel_sparse_matrix_01.output    |   2 +
 4 files changed, 288 insertions(+), 33 deletions(-)
 create mode 100644 tests/bits/petsc_parallel_sparse_matrix_01.cc
 create mode 100644 tests/results/i686-pc-linux-gnu+gcc3.2/bits/petsc_parallel_sparse_matrix_01.output

diff --git a/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h b/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h
index 469cb4373f..1bf16725b6 100644
--- a/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h
+++ b/deal.II/lac/include/lac/petsc_parallel_sparse_matrix.h
@@ -68,6 +68,38 @@ namespace PETScWrappers
   * does not reflect that only these columns are stored locally, but it
   * reflects the fact that these are the columns for which the elements of
   * incoming vectors are stored locally.
+  *
+  * To make things even more complicated, PETSc needs a very good estimate of
+  * the number of elements to be stored in each row to be efficient.
+  * Otherwise it spends most of its time allocating small chunks of memory, a
+  * process that can slow programs down to a crawl if it happens too often.
+  * As if one good estimate of the number of entries per row weren't enough,
+  * it even needs this estimate split up as follows: for each row it owns, it
+  * needs an estimate of the number of elements in this row that fall into
+  * the columns that are set apart for this process (see above), and of the
+  * number of elements that are in the rest of the columns.
+  *
+  * Since in general this information is not readily available, most of the
+  * initializing functions of this class assume that all of the elements
+  * whose number you give as an argument to @p n_nonzero_per_row or through
+  * @p row_lengths fall into the columns "owned" by this process, and none
+  * into the other ones. This is a fair guess for most of the rows, since in
+  * a good domain partitioning, nodes only interact with nodes that are
+  * within the same subdomain. It does not hold for nodes on the interfaces
+  * between subdomains, however, and for the rows corresponding to these
+  * nodes, PETSc will have to allocate additional memory, a costly process.
+  *
+  * The only way to avoid this is to tell PETSc where the actual entries of
+  * the matrix will be. For this, there are constructors and reinit()
+  * functions of this class that take a CompressedSparsityPattern object
+  * containing all this information. While in the general case it is
+  * sufficient if the constructors and reinit() functions know the number of
+  * local rows and columns, the functions getting a sparsity pattern also
+  * need to know the number of local rows (@p local_rows_per_process) and
+  * columns (@p local_columns_per_process) for all other processes, in order
+  * to compute which parts of the matrix are which. Thus, it is not
+  * sufficient to just count the number of degrees of freedom that belong to
+  * a particular process, but you have to have the numbers for all processes
+  * available on all processes.
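+  *
+  * As a rough sketch of how this might look in user code (the names
+  * @p n_dofs and @p rows_per_process below are purely illustrative, and we
+  * assume a square matrix whose columns are distributed in the same way as
+  * its rows):
+  * @code
+  *   // number of rows owned by each process; note that these numbers have
+  *   // to be known on every process, not only locally
+  *   std::vector<unsigned int> rows_per_process = ...;
+  *   const unsigned int this_process = ...;   // e.g. from MPI_Comm_rank
+  *
+  *   // sparsity pattern of the global matrix
+  *   CompressedSparsityPattern csp (n_dofs, n_dofs);
+  *   // ... add entries to csp ...
+  *
+  *   // create the matrix with preset nonzero locations, so that adding
+  *   // elements later does not trigger any further memory allocation
+  *   PETScWrappers::MPI::SparseMatrix matrix (MPI_COMM_WORLD, csp,
+  *                                            rows_per_process,
+  *                                            rows_per_process,
+  *                                            this_process,
+  *                                            true);
+  * @endcode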
   *
   * @ingroup PETScWrappers
   * @author Wolfgang Bangerth, 2004
   */
@@ -197,7 +229,8 @@ namespace PETScWrappers
        * provided @p communicator.
        *
        * For the meaning of the @p
-       * local_row and @p local_columns
+       * local_rows_per_process and @p
+       * local_columns_per_process
        * parameters, see the class
        * documentation.
        *
@@ -247,8 +280,9 @@ namespace PETScWrappers
        */
       SparseMatrix (const MPI_Comm                  &communicator,
                     const CompressedSparsityPattern &sparsity_pattern,
-                    const unsigned int               local_rows,
-                    const unsigned int               local_columns,
+                    const std::vector<unsigned int>  local_rows_per_process,
+                    const std::vector<unsigned int>  local_columns_per_process,
+                    const unsigned int               this_process,
                     const bool                       preset_nonzero_locations = false);

       /**
@@ -353,8 +387,9 @@ namespace PETScWrappers
        */
      void reinit (const MPI_Comm                  &communicator,
                   const CompressedSparsityPattern &sparsity_pattern,
-                  const unsigned int               local_rows,
-                  const unsigned int               local_columns,
+                  const std::vector<unsigned int>  local_rows_per_process,
+                  const std::vector<unsigned int>  local_columns_per_process,
+                  const unsigned int               this_process,
                   const bool                       preset_nonzero_locations = false);

       /**
@@ -371,7 +406,14 @@ namespace PETScWrappers
                       int, int,
                       << "The number of local rows " << arg1
                       << " must be larger than the total number of rows " << arg2);
-
+      /**
+       * Exception
+       */
+      DeclException2 (ExcInvalidSizes,
+                      int, int,
+                      << "The sizes " << arg1 << " and " << arg2
+                      << " are supposed to match, but don't.");
+
     private:

       /**
@@ -409,8 +451,9 @@ namespace PETScWrappers
        * Same as previous functions.
        */
      void do_reinit (const CompressedSparsityPattern &sparsity_pattern,
-                     const unsigned int               local_rows,
-                     const unsigned int               local_columns,
+                     const std::vector<unsigned int>  local_rows_per_process,
+                     const std::vector<unsigned int>  local_columns_per_process,
+                     const unsigned int               this_process,
                      const bool                       preset_nonzero_locations);

   };
diff --git a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
index cc313b3164..f7de1dac7b 100644
--- a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
+++ b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
@@ -72,13 +72,15 @@ namespace PETScWrappers
     SparseMatrix::
     SparseMatrix (const MPI_Comm                  &communicator,
                   const CompressedSparsityPattern &sparsity_pattern,
-                  const unsigned int               local_rows,
-                  const unsigned int               local_columns,
+                  const std::vector<unsigned int>  local_rows_per_process,
+                  const std::vector<unsigned int>  local_columns_per_process,
+                  const unsigned int               this_process,
                   const bool                       preset_nonzero_locations)
                     :
                     communicator (communicator)
     {
-      do_reinit (sparsity_pattern, local_rows, local_columns,
+      do_reinit (sparsity_pattern, local_rows_per_process,
+                 local_columns_per_process, this_process,
                  preset_nonzero_locations);
     }
@@ -140,8 +142,9 @@ namespace PETScWrappers
     SparseMatrix::
     reinit (const MPI_Comm                  &communicator,
             const CompressedSparsityPattern &sparsity_pattern,
-            const unsigned int               local_rows,
-            const unsigned int               local_columns,
+            const std::vector<unsigned int>  local_rows_per_process,
+            const std::vector<unsigned int>  local_columns_per_process,
+            const unsigned int               this_process,
             const bool                       preset_nonzero_locations)
     {
       this->communicator = communicator;
@@ -151,7 +154,8 @@ namespace PETScWrappers
       const int ierr = MatDestroy (matrix);
       AssertThrow (ierr == 0, ExcPETScError(ierr));

-      do_reinit (sparsity_pattern, local_rows, local_columns,
+      do_reinit (sparsity_pattern, local_rows_per_process,
+                 local_columns_per_process, this_process,
                  preset_nonzero_locations);
     }
@@ -205,7 +209,7 @@ namespace PETScWrappers
                                         // For the case that
                                         // local_columns is smaller
                                         // than one of the row lengths
-                                        // MatCreateMPIAIJ throughs an
+                                        // MatCreateMPIAIJ throws an
                                         // error. In this case use a
                                         // PETScWrappers::SparseMatrix
        for (unsigned int i=0; i local_rows_per_process,
+                    const std::vector<unsigned int> local_columns_per_process,
+                    const unsigned int              this_process,
+                    const bool                      preset_nonzero_locations)
    {
-      std::vector<int> row_lengths (sparsity_pattern.n_rows());
-      for (unsigned int i=0; i row_lengths_in_window (local_row_end - local_row_start);
+      std::vector<int> row_lengths_out_of_window (local_row_end - local_row_start);
+      for (unsigned int row = local_row_start; row= local_col_start) &&
+              (column < local_col_end))
+            ++row_lengths_in_window[row-local_row_start];
+          else
+            ++row_lengths_out_of_window[row-local_row_start];
+        }
+
+
+                                       // create the matrix. completely
+                                       // confusingly, PETSc wants us to pass
+                                       // arrays for the local number of
+                                       // elements that start with zero for
+                                       // the first _local_ row, i.e. they
+                                       // don't index into an array for
+                                       // _all_ rows.
+      const int ierr
+        = MatCreateMPIAIJ(communicator,
+                          local_rows_per_process[this_process],
+                          local_columns_per_process[this_process],
+                          sparsity_pattern.n_rows(),
+                          sparsity_pattern.n_cols(),
+                          0, &row_lengths_in_window[0],
+                          0, &row_lengths_out_of_window[0],
+                          &matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
                                       // next preset the exact given matrix
                                       // entries with zeros, if the user
                                       // requested so. this doesn't avoid any
                                       // memory allocations, but it eliminates
                                       // some searches later on. the key here is
                                       // that the resulting matrix has the same
                                       // number of entries per row as the
@@ -278,14 +333,14 @@ namespace PETScWrappers
          std::vector<PetscScalar> row_values;
          for (unsigned int i=0; i
+#include
+#include
+#include
+#include
+#include
+
+
+unsigned int
+get_n_mpi_processes ()
+{
+  int n_jobs;
+  MPI_Comm_size (MPI_COMM_WORLD, &n_jobs);
+
+  return n_jobs;
+}
+
+
+
+unsigned int
+get_this_mpi_process ()
+{
+  int rank;
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+
+  return rank;
+}
+
+
+void test ()
+{
+                                   // create a parallel matrix where the first
+                                   // process has 10 rows, the second one 20,
+                                   // the third one 30, and so on
+  unsigned int N = 0;
+  std::vector<unsigned int> local_rows_per_process (get_n_mpi_processes());
+  std::vector<unsigned int> start_row (get_n_mpi_processes());
+  for (unsigned int i=0; i local_rows_per_process.back())
+        csp.add (i,i-local_rows_per_process.back());
+    }
+
+                                   // here is a sparsity pattern for which no
+                                   // Inodes are used, but it doesn't allocate
+                                   // additional memory
+// for (unsigned int bi=0; bi