* does not reflect that only these columns are stored locally, but it
* reflects the fact that these are the columns for which the elements of
* incoming vectors are stored locally.
+ *
+ * To make things even more complicated, PETSc needs a very good estimate of
+ * the number of elements to be stored in each row to be efficient. Otherwise
+ * it spends most of its time allocating small chunks of memory, a process
+ * that can slow programs down to a crawl if it happens too often. As if a
+ * good estimate of the number of entries per row weren't hard enough to come
+ * by, PETSc even needs this estimate split in two: for each row it owns, it
+ * needs an estimate for the number of elements in this row that fall into
+ * the columns set apart for this process (see above), and for the number of
+ * elements that fall into the rest of the columns.
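+ *
+ * For illustration only (the sizes and entry counts below are made up, but
+ * the call is the same one the reinit() functions of this class use
+ * internally): if a process owns rows and columns [10,20) of a 30x30 matrix,
+ * and each of its rows has three entries inside this column window and two
+ * outside of it, the matrix could be created roughly like this:
+ * @code
+ *   const unsigned int n_local = 10;
+ *   std::vector<int> in_window (n_local, 3);     // entries in columns [10,20)
+ *   std::vector<int> out_of_window (n_local, 2); // entries in other columns
+ *
+ *   Mat matrix;
+ *   MatCreateMPIAIJ (MPI_COMM_WORLD,
+ *                    n_local, n_local,     // local rows and columns
+ *                    30, 30,               // global rows and columns
+ *                    0, &in_window[0],     // per-row in-window counts
+ *                    0, &out_of_window[0], // per-row out-of-window counts
+ *                    &matrix);
+ * @endcode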
+ *
+ * Since in general this information is not readily available, most of the
+ * initializing functions of this class assume that all of the elements whose
+ * number you give through the @p n_nonzero_per_row or @p row_lengths
+ * arguments fall into the columns "owned" by this process, and none into the
+ * other ones. This is a fair guess for most rows, since in a good domain
+ * partitioning, nodes only interact with nodes within the same subdomain. It
+ * does not hold for nodes on the interfaces between subdomains, however, and
+ * for the rows corresponding to these nodes, PETSc will have to allocate
+ * additional memory, a costly process.
+ *
+ * The only way to avoid this is to tell PETSc where the actual entries of the
+ * matrix will be. For this, there are constructors and reinit() functions of
+ * this class that take a CompressedSparsityPattern object containing all this
+ * information. While in the general case it is sufficient if the constructors
+ * and reinit() functions know the number of local rows and columns, the
+ * functions getting a sparsity pattern also need to know the number of local
+ * rows (@p local_rows_per_process) and columns (@p local_columns_per_process)
+ * for all other processes, in order to compute which parts of the matrix
+ * belong to which process. Thus, it is not sufficient to only count the
+ * number of degrees of freedom that belong to a particular process; these
+ * numbers have to be available for all processes on all processes.
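+ *
+ * One way to make these numbers available everywhere is to let each process
+ * count the degrees of freedom it owns and then exchange these counts, for
+ * example with MPI_Allgather. The following is only a sketch (the function
+ * name and the way the local count is passed in are made up for
+ * illustration):
+ * @code
+ *   std::vector<unsigned int>
+ *   gather_row_counts (const MPI_Comm communicator,
+ *                      unsigned int   n_local_rows)
+ *   {
+ *     int n_processes;
+ *     MPI_Comm_size (communicator, &n_processes);
+ *
+ *     // afterwards, every process knows the number of rows owned by
+ *     // every other process
+ *     std::vector<unsigned int> local_rows_per_process (n_processes);
+ *     MPI_Allgather (&n_local_rows, 1, MPI_UNSIGNED,
+ *                    &local_rows_per_process[0], 1, MPI_UNSIGNED,
+ *                    communicator);
+ *     return local_rows_per_process;
+ *   }
+ * @endcode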
*
* @ingroup PETScWrappers
* @author Wolfgang Bangerth, 2004
* provided @p communicator.
*
* For the meaning of the @p
- * local_row and @p local_columns
+ * local_rows_per_process and @p
+ * local_columns_per_process
* parameters, see the class
* documentation.
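+ *
+ * A minimal usage sketch, assuming the sparsity pattern and the
+ * per-process size vectors have already been set up as described in
+ * the class documentation, and that @p my_rank is the rank of the
+ * current process within the given communicator:
+ * @code
+ *   PETScWrappers::MPI::SparseMatrix matrix (communicator,
+ *                                            sparsity_pattern,
+ *                                            local_rows_per_process,
+ *                                            local_columns_per_process,
+ *                                            my_rank);
+ * @endcode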
*
*/
SparseMatrix (const MPI_Comm &communicator,
const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations = false);
/**
*/
void reinit (const MPI_Comm &communicator,
const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations = false);
/**
int, int,
<< "The number of local rows " << arg1
<< " must be larger than the total number of rows " << arg2);
-
+ /**
+ * Exception
+ */
+ DeclException2 (ExcInvalidSizes,
+ int, int,
+ << "The sizes " << arg1 << " and " << arg2
+ << " are supposed to match, but don't.");
+
private:
/**
* Same as previous functions.
*/
void do_reinit (const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations);
};
SparseMatrix::
SparseMatrix (const MPI_Comm &communicator,
const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations)
:
communicator (communicator)
{
- do_reinit (sparsity_pattern, local_rows, local_columns,
+ do_reinit (sparsity_pattern, local_rows_per_process,
+ local_columns_per_process, this_process,
preset_nonzero_locations);
}
SparseMatrix::
reinit (const MPI_Comm &communicator,
const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations)
{
this->communicator = communicator;
const int ierr = MatDestroy (matrix);
AssertThrow (ierr == 0, ExcPETScError(ierr));
- do_reinit (sparsity_pattern, local_rows, local_columns,
+ do_reinit (sparsity_pattern, local_rows_per_process,
+ local_columns_per_process, this_process,
preset_nonzero_locations);
}
// For the case that
// local_columns is smaller
// than one of the row lengths
- // MatCreateMPIAIJ throughs an
+ // MatCreateMPIAIJ throws an
// error. In this case use a
// PETScWrappers::SparseMatrix
for (unsigned int i=0; i<row_lengths.size(); ++i)
void
- SparseMatrix::do_reinit (const CompressedSparsityPattern &sparsity_pattern,
- const unsigned int local_rows,
- const unsigned int local_columns,
- const bool preset_nonzero_locations)
+ SparseMatrix::
+ do_reinit (const CompressedSparsityPattern &sparsity_pattern,
+ const std::vector<unsigned int> local_rows_per_process,
+ const std::vector<unsigned int> local_columns_per_process,
+ const unsigned int this_process,
+ const bool preset_nonzero_locations)
{
- std::vector<unsigned int> row_lengths (sparsity_pattern.n_rows());
- for (unsigned int i=0; i<sparsity_pattern.n_rows(); ++i)
- row_lengths[i] = sparsity_pattern.row_length (i);
-
- do_reinit (sparsity_pattern.n_rows(),
- sparsity_pattern.n_cols(),
- local_rows,
- local_columns,
- row_lengths, false);
-
+ Assert (local_rows_per_process.size() == local_columns_per_process.size(),
+ ExcInvalidSizes (local_rows_per_process.size(),
+ local_columns_per_process.size()));
+ Assert (this_process < local_rows_per_process.size(),
+ ExcInternalError());
+ // for each row that we own locally, we
+ // have to count how many of the
+ // entries in the sparsity pattern lie
+ // in the column area we have locally,
+ // and how many aren't. for this, we
+ // first have to know which areas are
+ // ours
+ unsigned int local_row_start = 0;
+ unsigned int local_col_start = 0;
+ for (unsigned int p=0; p<this_process; ++p)
+ {
+ local_row_start += local_rows_per_process[p];
+ local_col_start += local_columns_per_process[p];
+ }
+ const unsigned int
+ local_row_end = local_row_start + local_rows_per_process[this_process];
+ const unsigned int
+ local_col_end = local_col_start + local_columns_per_process[this_process];
+
+ // then count the elements in- and
+ // out-of-window for the rows we own
+ std::vector<int> row_lengths_in_window (local_row_end - local_row_start);
+ std::vector<int> row_lengths_out_of_window (local_row_end - local_row_start);
+ for (unsigned int row = local_row_start; row<local_row_end; ++row)
+ for (unsigned int c=0; c<sparsity_pattern.row_length(row); ++c)
+ {
+ const unsigned int column = sparsity_pattern.column_number(row,c);
+
+ if ((column >= local_col_start) &&
+ (column < local_col_end))
+ ++row_lengths_in_window[row-local_row_start];
+ else
+ ++row_lengths_out_of_window[row-local_row_start];
+ }
+
+
+ // create the matrix. completely
+ // confusingly, PETSc wants us to pass
+ // arrays for the local number of
+ // elements that start at index zero
+ // for the first _local_ row, i.e. they
+ // do not index into an array covering
+ // _all_ rows.
+ const int ierr
+ = MatCreateMPIAIJ(communicator,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process],
+ sparsity_pattern.n_rows(),
+ sparsity_pattern.n_cols(),
+ 0, &row_lengths_in_window[0],
+ 0, &row_lengths_out_of_window[0],
+ &matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
// next preset the exact given matrix
// entries with zeros, if the user
// requested so. this doesn't avoid any
std::vector<PetscScalar> row_values;
for (unsigned int i=0; i<sparsity_pattern.n_rows(); ++i)
{
- row_entries.resize (row_lengths[i]);
- row_values.resize (row_lengths[i], 0.0);
- for (unsigned int j=0; j<row_lengths[i]; ++j)
+ row_entries.resize (sparsity_pattern.row_length(i));
+ row_values.resize (sparsity_pattern.row_length(i), 0.0);
+ for (unsigned int j=0; j<sparsity_pattern.row_length(i); ++j)
row_entries[j] = sparsity_pattern.column_number (i,j);
const int int_row = i;
MatSetValues (matrix, 1, &int_row,
- row_lengths[i], &row_entries[0],
+ sparsity_pattern.row_length(i), &row_entries[0],
&row_values[0], INSERT_VALUES);
}
compress ();
--- /dev/null
+//---------------------------- petsc_parallel_sparse_matrix_01.cc ---------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2004 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------- petsc_parallel_sparse_matrix_01.cc ---------------------------
+
+
+// PETScWrappers::MPI::SparseMatrix::reinit(CompressedSparsityPattern) should
+// create a matrix that, when filled with elements that match the sparsity
+// pattern, doesn't require any additional memory allocation. This is
+// tricky to get right, though, and took a while until it worked.
+//
+// the test itself can only check this when run in parallel, but we can add it
+// anyway to the testsuite. in order to check that the library actually does
+// what it is supposed to do, run this program with more than one MPI process
+// and with the option -log_info. All the output should say that no additional
+// malloc calls have been performed
+
+#include "../tests.h"
+#include <lac/petsc_parallel_sparse_matrix.h>
+#include <lac/compressed_sparsity_pattern.h>
+#include <fstream>
+#include <iostream>
+#include <cstdlib>
+
+
+unsigned int
+get_n_mpi_processes ()
+{
+ int n_jobs;
+ MPI_Comm_size (MPI_COMM_WORLD, &n_jobs);
+
+ return n_jobs;
+}
+
+
+
+unsigned int
+get_this_mpi_process ()
+{
+ int rank;
+ MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+
+ return rank;
+}
+
+
+void test ()
+{
+ // create a parallel matrix where the first
+ // process has 10 rows, the second one 20,
+ // the third one 30, and so on
+ unsigned int N = 0;
+ std::vector<unsigned int> local_rows_per_process (get_n_mpi_processes());
+ std::vector<unsigned int> start_row (get_n_mpi_processes());
+ for (unsigned int i=0; i<get_n_mpi_processes(); ++i)
+ {
+ local_rows_per_process[i] = (i+1)*10;
+ start_row[i] = N;
+ N += (i+1)*10;
+ }
+
+ // here is a sparsity pattern for which we
+ // used to allocate additional memory for 2
+ // processes. note that only one of the
+ // four blocks uses Inodes
+ CompressedSparsityPattern csp (N,N);
+ for (unsigned int i=0; i<N; ++i)
+ {
+ csp.add (i,i);
+ if (i+local_rows_per_process.back() < N)
+ csp.add (i,i+local_rows_per_process.back());
+ if (i > local_rows_per_process.back())
+ csp.add (i,i-local_rows_per_process.back());
+ }
+
+ // here is a sparsity pattern for which no
+ // Inodes are used, but it doesn't allocate
+ // additional memory
+// for (unsigned int bi=0; bi<get_n_mpi_processes(); ++bi)
+// for (unsigned int bj=0; bj<get_n_mpi_processes(); ++bj)
+// for (unsigned int i=0; i<local_rows_per_process[bi]; ++i)
+// for (unsigned int k=0; k<6; ++k)
+// csp.add (start_row[bi] + i,
+// start_row[bj] + (i+2*k) % local_rows_per_process[bj]);
+
+
+
+ // now create a matrix with this sparsity
+ // pattern
+ PETScWrappers::MPI::SparseMatrix m;
+ m.reinit (MPI_COMM_WORLD, csp, local_rows_per_process,
+ local_rows_per_process, get_this_mpi_process());
+
+ // now write into the exact same matrix
+ // entries as have been created by the
+ // sparsity pattern above
+ for (unsigned int i=0; i<N; ++i)
+ for (unsigned int j=0; j<csp.row_length(i); ++j)
+ m.add (i, csp.column_number(i,j), 1.);
+
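+ // print markers before and after compress() so that the corresponding
+ // section of PETSc's -log_info output is easy to find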
+ std::cout << "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+ m.compress ();
+ std::cout << "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" << std::endl;
+
+ deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ std::ofstream logfile("petsc_parallel_sparse_matrix_01.output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+
+ try
+ {
+ PetscInitialize(&argc,&argv,0,0);
+ test ();
+ PetscFinalize();
+ }
+ catch (std::exception &exc)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << std::endl
+ << exc.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+
+ return 1;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ return 1;
+ };
+}
--- /dev/null
+JobId Mon Jun 14 22:02:32 2004
+DEAL::OK