From: kronbichler
Date: Thu, 1 Oct 2009 12:39:04 +0000 (+0000)
Subject: Fix PETSc problem when matrices are initialized with sparsity pattern.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0ab6b43dbf9d2c8e250e4a54a0d1ae8fcc47f42c;p=dealii-svn.git

Fix PETSc problem when matrices are initialized with sparsity pattern.

git-svn-id: https://svn.dealii.org/trunk@19636 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/examples/step-17/doc/intro.dox b/deal.II/examples/step-17/doc/intro.dox
index db168de9c3..481f56731f 100644
--- a/deal.II/examples/step-17/doc/intro.dox
+++ b/deal.II/examples/step-17/doc/intro.dox
@@ -10,13 +10,13 @@ provides that wrap around the linear algebra implementation of the PETSc
 library. And since PETSc allows to distribute matrices and vectors across several
 computers within an MPI network, the resulting code will even be able to solve the
-problem in parallel. If you don't know what PETSc is, then this would be a
+problem in %parallel. If you don't know what PETSc is, then this would be a
 good time to take a quick glimpse at their homepage.
 As a prerequisite of this program, you need to have PETSc installed, and if
-you want to run in parallel on a cluster, you also need
 METIS to partition meshes. The installation of deal.II together with these
 two additional libraries is described in the
 std::cout by a
 // new stream pcout which is used
- // in parallel computations for
+ // in %parallel computations for
 // generating output only on one of
 // the MPI processes.
 #include
@@ -130,7 +130,7 @@ class ElasticProblem
 void output_results (const unsigned int cycle) const;
 // The first variable is basically only
- // for convenience: in parallel program,
+ // for convenience: in %parallel program,
 // if each process outputs status
 // information, then there quickly is a
 // lot of clutter. Rather, we would want
@@ -168,9 +168,9 @@ class ElasticProblem
 // member variables for the sparsity
 // pattern, the system matrix, right
 // hand, and solution vector. We change
- // these declarations to use parallel
+ // these declarations to use %parallel
 // PETSc objects instead (note that the
- // fact that we use the parallel versions
+ // fact that we use the %parallel versions
 // is denoted the fact that we use the
 // classes from the
 // PETScWrappers::MPI namespace;
@@ -195,11 +195,11 @@ class ElasticProblem
 // then PETSc provides some dummy type
 // for MPI_Comm, so we do not have to
 // care here whether the job is really a
- // parallel one:
+ // %parallel one:
 MPI_Comm mpi_communicator;
 // Then we have two variables that tell
- // us where in the parallel world we
+ // us where in the %parallel world we
 // are. The first of the following
 // variables, n_mpi_processes tells
 // us how many MPI processes there exist
@@ -335,7 +335,7 @@ void ElasticProblem::setup_system ()
 {
 // Before we even start out setting up the
 // system, there is one thing to do for a
- // parallel program: we need to assign
+ // %parallel program: we need to assign
 // cells to each of the processes. We do
 // this by splitting (partitioning) the
 // mesh cells into as many chunks
@@ -389,7 +389,7 @@ void ElasticProblem::setup_system ()
 // Then we initialize the system matrix,
 // solution, and right hand side
 // vectors. Since they all need to work in
- // parallel, we have to pass them an MPI
+ // %parallel, we have to pass them an MPI
 // communication object, as well as their
 // global sizes (both dimensions are equal
 // to the number of degrees of freedom),
@@ -440,7 +440,7 @@ void ElasticProblem::setup_system ()
 // problem. There are some things worth
 // mentioning before we go into
 // detail. First, we will be assembling the
- // system in parallel, i.e. each process will
+ // system in %parallel, i.e. each process will
 // be responsible for assembling on cells
 // that belong to this particular
 // processor. Note that the degrees of
@@ -730,7 +730,7 @@ void ElasticProblem::assemble_system ()
 // The fourth step is to solve the linear
 // system, with its distributed matrix and
 // vector objects. Fortunately, PETSc offers
- // a variety of sequential and parallel
+ // a variety of sequential and %parallel
 // solvers, for which we have written
 // wrappers that have almost the same
 // interface as is used for the deal.II
@@ -744,7 +744,7 @@ unsigned int ElasticProblem::solve ()
 // which we would like to solve the linear
 // system. Next, an actual solver object
 // using PETSc's CG solver which also works
- // with parallel (distributed) vectors and
+ // with %parallel (distributed) vectors and
 // matrices. And finally a preconditioner;
 // we choose to use a block Jacobi
 // preconditioner which works by computing
@@ -1088,7 +1088,7 @@ void ElasticProblem::refine_grid ()
 // vector.
 //
 // So in the first step, we need to set up
- // a parallel vector. For simplicity, every
+ // a %parallel vector. For simplicity, every
 // process will own a chunk with as many
 // elements as this process owns cells, so
 // that the first chunk of elements is
diff --git a/deal.II/examples/step-18/doc/intro.dox b/deal.II/examples/step-18/doc/intro.dox
index 2e5e514061..7ced0d0123 100644
--- a/deal.II/examples/step-18/doc/intro.dox
+++ b/deal.II/examples/step-18/doc/intro.dox
@@ -7,7 +7,7 @@ that we have already started with @ref step_8 "step-8" and @ref step_17 "step-17
 different directions: first, it solves the quasistatic but time dependent
 elasticity problem for large deformations with a Lagrangian mesh movement
 approach. Secondly, it shows some more techniques for solving such problems
-using parallel processing with PETSc's linear algebra. In addition to this, we
+using %parallel processing with PETSc's linear algebra. In addition to this, we
 show how to work around the main bottleneck of @ref step_17 "step-17", namely
 that we generated graphical output from only one process, and that this scaled
 very badly with larger numbers of processes and on large problems. Finally, a good
@@ -230,7 +230,7 @@ if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order
 finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is
 in general still a function that is not easy to describe. In particular, it is
 not a piecewise constant function, and on general meshes (with cells that are
-not rectangles parallel to the coordinate axes) or with non-constant
+not rectangles %parallel to the coordinate axes) or with non-constant
 stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it
 is a priori not clear how to store $\sigma^n$ in a computer program.
@@ -306,7 +306,7 @@ Both stress update and rotation are implemented in the function

<h3>Parallel graphical output</h3>

-In the @ref step_17 "step-17" example program, the main bottleneck for parallel computations
+In the @ref step_17 "step-17" example program, the main bottleneck for %parallel computations
 was that only the first processor generated output for the entire
 domain. Since generating graphical output is expensive, this did not scale
 well when large numbers of processors were involved. However, no viable ways around this
@@ -509,7 +509,7 @@ for (unsigned int i=0; i
 // This is then simply C++ again:
@@ -657,13 +657,13 @@ namespace QuasiStaticElasticity
 // and the solution vector. Since we
 // anticipate solving big problems, we
 // use the same types as in step-17,
- // i.e. distributed parallel matrices
+ // i.e. distributed %parallel matrices
 // and vectors built on top of the
 // PETSc library. Conveniently, they
 // can also be used when running on
 // only a single machine, in which case
 // this machine happens to be the only
- // one in our parallel universe.
+ // one in our %parallel universe.
 //
 // However, as a difference to step-17,
 // we do not store the solution vector
@@ -717,7 +717,7 @@ namespace QuasiStaticElasticity
 unsigned int timestep_no;
 // Then a few variables that have to do
- // with parallel processing: first, a
+ // with %parallel processing: first, a
 // variable denoting the MPI
 // communicator we use, and then two
 // numbers telling us how many
@@ -1395,7 +1395,7 @@ namespace QuasiStaticElasticity
 // freedom whether they will be owned by
 // the processor we are on or another one
 // (in case this program is run in
- // parallel via MPI). This of course is
+ // %parallel via MPI). This of course is
 // not optimal -- it limits the size of
 // the problems we can solve, since
 // storing the entire sparsity pattern
@@ -1438,7 +1438,7 @@ namespace QuasiStaticElasticity
 // the solution vector is a local
 // one, unlike the right hand
 // side that is a distributed
- // parallel one and therefore
+ // %parallel one and therefore
 // needs to know the MPI
 // communicator over which it is
 // supposed to transmit messages:
@@ -1617,7 +1617,7 @@ namespace QuasiStaticElasticity
 // solution vector compatible
 // with the matrix and right hand
 // side (i.e. here a distributed
- // parallel vector, rather than
+ // %parallel vector, rather than
 // the sequential vector we use
 // in this program) in order to
 // preset the entries of the
@@ -2266,7 +2266,7 @@ namespace QuasiStaticElasticity
 // Then set up a global vector into which
 // we merge the local indicators from
- // each of the parallel processes:
+ // each of the %parallel processes:
 const unsigned int n_local_cells
   = GridTools::count_cells_with_subdomain_association (triangulation,
                                                        this_mpi_process);
diff --git a/deal.II/lac/include/lac/petsc_matrix_base.h b/deal.II/lac/include/lac/petsc_matrix_base.h
index 442929fd06..7291d6f84f 100644
--- a/deal.II/lac/include/lac/petsc_matrix_base.h
+++ b/deal.II/lac/include/lac/petsc_matrix_base.h
@@ -1682,7 +1682,7 @@ namespace PETScWrappers
       const int ierr = MatSetValues (matrix, 1, &petsc_i, n_columns,
                                      col_index_ptr, col_value_ptr,
                                      ADD_VALUES);
-      AssertThrow (ierr == 0, ExcPETScError(ierr));
+      Assert (ierr == 0, ExcPETScError(ierr));
     }
diff --git a/deal.II/lac/include/lac/petsc_sparse_matrix.h b/deal.II/lac/include/lac/petsc_sparse_matrix.h
index 2c0eabfeb2..7ecc3079e9 100644
--- a/deal.II/lac/include/lac/petsc_sparse_matrix.h
+++ b/deal.II/lac/include/lac/petsc_sparse_matrix.h
@@ -185,7 +185,7 @@ namespace PETScWrappers
        */
       template
       SparseMatrix (const SparsityType &sparsity_pattern,
-                    const bool preset_nonzero_locations = false);
+                    const bool preset_nonzero_locations = true);
       /**
        * This operator assigns a scalar to
@@ -281,7 +281,7 @@ namespace PETScWrappers
        */
       template
       void reinit (const SparsityType &sparsity_pattern,
-                   const bool preset_nonzero_locations = false);
+                   const bool preset_nonzero_locations = true);
       /**
        * Return a reference to the MPI
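
The user-visible effect of the petsc_sparse_matrix.h change above is that a
PETScWrappers::SparseMatrix built from a deal.II sparsity pattern now presets
its nonzero locations by default, so entries covered by the pattern can be
written into directly. The following minimal sketch, which is not part of this
commit, illustrates that usage; the include paths, the direct
PetscInitialize/PetscFinalize calls, and the small tridiagonal pattern are
illustrative assumptions based on the deal.II API of this era, not code taken
from the repository.

    // Sketch only: build a sparsity pattern, then create a sequential PETSc
    // wrapper matrix from it, relying on the new default
    // preset_nonzero_locations = true declared in petsc_sparse_matrix.h.
    // PETSc's own declarations are assumed to come in through the wrapper header.
    #include <lac/compressed_sparsity_pattern.h>
    #include <lac/petsc_sparse_matrix.h>

    int main (int argc, char **argv)
    {
      PetscInitialize (&argc, &argv, 0, 0);
      {
        const unsigned int n = 5;

        // Describe the nonzero entries of a small tridiagonal matrix.
        CompressedSparsityPattern csp (n, n);
        for (unsigned int i = 0; i < n; ++i)
          {
            csp.add (i, i);
            if (i > 0)     csp.add (i, i - 1);
            if (i < n - 1) csp.add (i, i + 1);
          }

        // With this commit, the nonzero locations described by 'csp' are
        // preset in the PETSc matrix by default.
        PETScWrappers::SparseMatrix system_matrix (csp);

        // Entries covered by the pattern can now be added to directly.
        for (unsigned int i = 0; i < n; ++i)
          {
            system_matrix.add (i, i, 2.0);
            if (i > 0)     system_matrix.add (i, i - 1, -1.0);
            if (i < n - 1) system_matrix.add (i, i + 1, -1.0);
          }
        system_matrix.compress ();
      }
      PetscFinalize ();
    }

Previously, with preset_nonzero_locations defaulting to false, the same
constructor call left the PETSc matrix without preallocated entries, which is
the problem the commit message refers to; passing the flag explicitly remains
possible if the old behavior is wanted.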