https://gitweb.dealii.org/ - dealii.git/commitdiff
Fix PETSc problem when matrices are initialized with sparsity pattern.
author Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Thu, 1 Oct 2009 12:39:04 +0000 (12:39 +0000)
committer Martin Kronbichler <kronbichler@lnm.mw.tum.de>
Thu, 1 Oct 2009 12:39:04 +0000 (12:39 +0000)
git-svn-id: https://svn.dealii.org/trunk@19636 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/examples/step-17/doc/intro.dox
deal.II/examples/step-17/step-17.cc
deal.II/examples/step-18/doc/intro.dox
deal.II/examples/step-18/step-18.cc
deal.II/lac/include/lac/petsc_matrix_base.h
deal.II/lac/include/lac/petsc_sparse_matrix.h

diff --git a/deal.II/examples/step-17/doc/intro.dox b/deal.II/examples/step-17/doc/intro.dox
index db168de9c3c91b06a811fffcc324079b481daf23..481f56731fcf661a4132388569e402b89165f7ad 100644 (file)
@@ -10,13 +10,13 @@ provides that wrap around the linear algebra implementation of the <a
 href="http://www.mcs.anl.gov/petsc/" target="_top">PETSc</a> library. And
 since PETSc allows to distribute matrices and vectors across several computers
 within an MPI network, the resulting code will even be able to solve the
-problem in parallel. If you don't know what PETSc is, then this would be a
+problem in %parallel. If you don't know what PETSc is, then this would be a
 good time to take a quick glimpse at their homepage.
 
 
 
 As a prerequisite of this program, you need to have PETSc installed, and if
-you want to run in parallel on a cluster, you also need <a
+you want to run in %parallel on a cluster, you also need <a
 href="http://www-users.cs.umn.edu/~karypis/metis/index.html"
 target="_top">METIS</a> to partition meshes. The installation of deal.II
 together with these two additional libraries is described in the <a
@@ -45,7 +45,7 @@ interchangable as possible.
 
 While the sequential PETSc wrappers classes do not have any advantage over
 their deal.II counterparts, the main point of using PETSc is that it can run
-in parallel. We will make use of this by partitioning the domain into as many
+in %parallel. We will make use of this by partitioning the domain into as many
 blocks (``subdomains'') as there are processes in the MPI network. At the same
 time, PETSc provides dummy MPI stubs that allow to run the same program on a
 single machine if so desired, without any changes.
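
The paragraph above describes splitting the domain into as many subdomains as there are MPI processes. As a point of reference, a minimal sketch of how such a partition is set up with the interfaces this tutorial relies on; it is a fragment that assumes step-17's includes and member variables (n_mpi_processes, this_mpi_process, triangulation), with METIS doing the actual partitioning as mentioned above:

  // Assign every active cell to one of n_mpi_processes subdomains.
  GridTools::partition_triangulation (n_mpi_processes, triangulation);

  // Each process then restricts its work to the cells it owns:
  for (typename Triangulation<dim>::active_cell_iterator
         cell = triangulation.begin_active();
       cell != triangulation.end(); ++cell)
    if (cell->subdomain_id() == this_mpi_process)
      {
        // ...assemble local contributions on this cell...
      }
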
diff --git a/deal.II/examples/step-17/step-17.cc b/deal.II/examples/step-17/step-17.cc
index a6e68dd501cdc5b0d4f14337b004c163604a0582..ccfb18058e3538ac322932343698c96b8511a9f8 100644 (file)
@@ -43,7 +43,7 @@
                                  // step-8. First, we replace the
                                  // standard output <code>std::cout</code> by a
                                  // new stream <code>pcout</code> which is used
-                                 // in parallel computations for
+                                 // in %parallel computations for
                                  // generating output only on one of
                                  // the MPI processes.
 #include <base/conditional_ostream.h>
@@ -130,7 +130,7 @@ class ElasticProblem
     void output_results (const unsigned int cycle) const;
 
                                      // The first variable is basically only
-                                     // for convenience: in parallel program,
+                                     // for convenience: in %parallel program,
                                      // if each process outputs status
                                      // information, then there quickly is a
                                      // lot of clutter. Rather, we would want
@@ -168,9 +168,9 @@ class ElasticProblem
                                      // member variables for the sparsity
                                      // pattern, the system matrix, right
                                      // hand, and solution vector. We change
-                                     // these declarations to use parallel
+                                     // these declarations to use %parallel
                                      // PETSc objects instead (note that the
-                                     // fact that we use the parallel versions
+                                     // fact that we use the %parallel versions
                                      // is denoted the fact that we use the
                                      // classes from the
                                      // <code>PETScWrappers::MPI</code> namespace;
@@ -195,11 +195,11 @@ class ElasticProblem
                                      // then PETSc provides some dummy type
                                      // for <code>MPI_Comm</code>, so we do not have to
                                      // care here whether the job is really a
-                                     // parallel one:
+                                     // %parallel one:
     MPI_Comm mpi_communicator;
     
                                      // Then we have two variables that tell
-                                     // us where in the parallel world we
+                                     // us where in the %parallel world we
                                      // are. The first of the following
                                      // variables, <code>n_mpi_processes</code> tells
                                      // us how many MPI processes there exist
@@ -335,7 +335,7 @@ void ElasticProblem<dim>::setup_system ()
 {
                                    // Before we even start out setting up the
                                    // system, there is one thing to do for a
-                                   // parallel program: we need to assign
+                                   // %parallel program: we need to assign
                                    // cells to each of the processes. We do
                                    // this by splitting (<code>partitioning</code>) the
                                    // mesh cells into as many chunks
@@ -389,7 +389,7 @@ void ElasticProblem<dim>::setup_system ()
                                    // Then we initialize the system matrix,
                                    // solution, and right hand side
                                    // vectors. Since they all need to work in
-                                   // parallel, we have to pass them an MPI
+                                   // %parallel, we have to pass them an MPI
                                    // communication object, as well as their
                                    // global sizes (both dimensions are equal
                                    // to the number of degrees of freedom),
@@ -440,7 +440,7 @@ void ElasticProblem<dim>::setup_system ()
                                  // problem. There are some things worth
                                  // mentioning before we go into
                                  // detail. First, we will be assembling the
-                                 // system in parallel, i.e. each process will
+                                 // system in %parallel, i.e. each process will
                                  // be responsible for assembling on cells
                                  // that belong to this particular
                                  // processor. Note that the degrees of
@@ -730,7 +730,7 @@ void ElasticProblem<dim>::assemble_system ()
                                  // The fourth step is to solve the linear
                                  // system, with its distributed matrix and
                                  // vector objects. Fortunately, PETSc offers
-                                 // a variety of sequential and parallel
+                                 // a variety of sequential and %parallel
                                  // solvers, for which we have written
                                  // wrappers that have almost the same
                                  // interface as is used for the deal.II
@@ -744,7 +744,7 @@ unsigned int ElasticProblem<dim>::solve ()
                                    // which we would like to solve the linear
                                    // system. Next, an actual solver object
                                    // using PETSc's CG solver which also works
-                                   // with parallel (distributed) vectors and
+                                   // with %parallel (distributed) vectors and
                                    // matrices. And finally a preconditioner;
                                    // we choose to use a block Jacobi
                                    // preconditioner which works by computing
@@ -1088,7 +1088,7 @@ void ElasticProblem<dim>::refine_grid ()
                                    // vector.
                                    //
                                    // So in the first step, we need to set up
-                                   // a parallel vector. For simplicity, every
+                                   // a %parallel vector. For simplicity, every
                                    // process will own a chunk with as many
                                    // elements as this process owns cells, so
                                    // that the first chunk of elements is
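
The comments in the hunks above outline initializing the distributed matrix and vectors (communicator plus global and local sizes) and later solving with PETSc's CG solver and a block Jacobi preconditioner. A condensed sketch of those two steps, written against the PETScWrappers interfaces referenced in this diff; it is a fragment that assumes the class members shown above (mpi_communicator, dof_handler, system_matrix, solution, system_rhs) and a placeholder n_local_dofs for the number of locally owned degrees of freedom:

  // Distributed matrix: communicator, global sizes, local sizes, and an
  // upper bound on the number of couplings per row.
  system_matrix.reinit (mpi_communicator,
                        dof_handler.n_dofs(), dof_handler.n_dofs(),
                        n_local_dofs, n_local_dofs,
                        dof_handler.max_couplings_between_dofs());

  // Distributed vectors: communicator, global size, locally owned size.
  solution.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);
  system_rhs.reinit (mpi_communicator, dof_handler.n_dofs(), n_local_dofs);

  // Solve with PETSc's CG and a block Jacobi preconditioner that works
  // on each processor's diagonal block separately.
  SolverControl solver_control (solution.size(),
                                1e-8 * system_rhs.l2_norm());
  PETScWrappers::SolverCG cg (solver_control, mpi_communicator);
  PETScWrappers::PreconditionBlockJacobi preconditioner (system_matrix);
  cg.solve (system_matrix, solution, system_rhs, preconditioner);
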
diff --git a/deal.II/examples/step-18/doc/intro.dox b/deal.II/examples/step-18/doc/intro.dox
index 2e5e5140619033ca2630d2c304953ecc45f9de46..7ced0d0123a1e2254a0b23752a9671c28e7205e0 100644 (file)
@@ -7,7 +7,7 @@ that we have already started with @ref step_8 "step-8" and @ref step_17 "step-17
 different directions: first, it solves the quasistatic but time dependent
 elasticity problem for large deformations with a Lagrangian mesh movement
 approach. Secondly, it shows some more techniques for solving such problems
-using parallel processing with PETSc's linear algebra. In addition to this, we
+using %parallel processing with PETSc's linear algebra. In addition to this, we
 show how to work around the main bottleneck of @ref step_17 "step-17", namely that we
 generated graphical output from only one process, and that this scaled very
 badly with larger numbers of processes and on large problems. Finally, a good
@@ -230,7 +230,7 @@ if we compute the incremental updates $\Delta\mathbf{u}^n$ using lowest-order
 finite elements, then its symmetric gradient $\varepsilon(\Delta\mathbf{u}^n)$ is
 in general still a function that is not easy to describe. In particular, it is
 not a piecewise constant function, and on general meshes (with cells that are
-not rectangles parallel to the coordinate axes) or with non-constant
+not rectangles %parallel to the coordinate axes) or with non-constant
 stress-strain tensors $C$ it is not even a bi- or trilinear function. Thus, it
 is a priori not clear how to store $\sigma^n$ in a computer program.
 
@@ -306,7 +306,7 @@ Both stress update and rotation are implemented in the function
 
 <h3>Parallel graphical output</h3>
 
-In the @ref step_17 "step-17" example program, the main bottleneck for parallel computations
+In the @ref step_17 "step-17" example program, the main bottleneck for %parallel computations
 was that only the first processor generated output for the entire domain.
 Since generating graphical output is expensive, this did not scale well when
 large numbers of processors were involved. However, no viable ways around this
@@ -509,7 +509,7 @@ for (unsigned int i=0; i<dofs_per_cell; ++i)
   Unlike the previous one, this function is not really interesting, since it
   does what similar functions have done in all previous tutorial programs --
   solving the linear system using the CG method, using an incomplete LU
-  decomposition as a preconditioner (in the parallel case, it uses an ILU of
+  decomposition as a preconditioner (in the %parallel case, it uses an ILU of
   each processor's block separately). It is virtually unchanged
   from @ref step_17 "step-17".
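
For orientation when reading the hunks above: the incremental update they discuss takes the form $\sigma^n = \sigma^{n-1} + C\varepsilon(\Delta\mathbf{u}^n)$ (prior to the rotation mentioned there), and since $\varepsilon(\Delta\mathbf{u}^n)$ is in general neither piecewise constant nor bi- or trilinear, step-18 ends up storing $\sigma^n$ as data attached to the quadrature points rather than as a finite element field. This is a summary of the step-18 introduction, not part of the diff itself.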
 
diff --git a/deal.II/examples/step-18/step-18.cc b/deal.II/examples/step-18/step-18.cc
index 0aca5141eefc4a6a9584b5384e9d063a06138e51..58409b789d861415281cf9957c450bf6c1a36092 100644 (file)
@@ -58,7 +58,7 @@
                                  // iterators looping over all cells. We will
                                  // use this when selecting only those cells
                                  // for output that are owned by the present
-                                 // process in a parallel program:
+                                 // process in a %parallel program:
 #include <grid/filtered_iterator.h>
 
                                  // This is then simply C++ again:
@@ -657,13 +657,13 @@ namespace QuasiStaticElasticity
                                        // and the solution vector. Since we
                                        // anticipate solving big problems, we
                                        // use the same types as in step-17,
-                                       // i.e. distributed parallel matrices
+                                       // i.e. distributed %parallel matrices
                                        // and vectors built on top of the
                                        // PETSc library. Conveniently, they
                                        // can also be used when running on
                                        // only a single machine, in which case
                                        // this machine happens to be the only
-                                       // one in our parallel universe.
+                                       // one in our %parallel universe.
                                        //
                                        // However, as a difference to step-17,
                                        // we do not store the solution vector
@@ -717,7 +717,7 @@ namespace QuasiStaticElasticity
       unsigned int timestep_no;
 
                                        // Then a few variables that have to do
-                                       // with parallel processing: first, a
+                                       // with %parallel processing: first, a
                                        // variable denoting the MPI
                                        // communicator we use, and then two
                                        // numbers telling us how many
@@ -1395,7 +1395,7 @@ namespace QuasiStaticElasticity
                                      // freedom whether they will be owned by
                                      // the processor we are on or another one
                                      // (in case this program is run in
-                                     // parallel via MPI). This of course is
+                                     // %parallel via MPI). This of course is
                                      // not optimal -- it limits the size of
                                      // the problems we can solve, since
                                      // storing the entire sparsity pattern
@@ -1438,7 +1438,7 @@ namespace QuasiStaticElasticity
                                      // the solution vector is a local
                                      // one, unlike the right hand
                                      // side that is a distributed
-                                     // parallel one and therefore
+                                     // %parallel one and therefore
                                      // needs to know the MPI
                                      // communicator over which it is
                                      // supposed to transmit messages:
@@ -1617,7 +1617,7 @@ namespace QuasiStaticElasticity
                                     // solution vector compatible
                                     // with the matrix and right hand
                                     // side (i.e. here a distributed
-                                    // parallel vector, rather than
+                                    // %parallel vector, rather than
                                     // the sequential vector we use
                                     // in this program) in order to
                                     // preset the entries of the
@@ -2266,7 +2266,7 @@ namespace QuasiStaticElasticity
 
                                      // Then set up a global vector into which
                                      // we merge the local indicators from
-                                     // each of the parallel processes:
+                                     // each of the %parallel processes:
     const unsigned int n_local_cells
       = GridTools::count_cells_with_subdomain_association (triangulation,
                                                           this_mpi_process);
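
The last hunk above leads into merging the locally computed error indicators into one distributed vector. A minimal sketch of how that vector can be set up, assuming the PETScWrappers::MPI::Vector constructor taking a communicator, a global size, and a locally owned size (the variable names follow the surrounding code):

  // One entry per active cell globally; this process owns a chunk with
  // as many entries as it has cells.
  PETScWrappers::MPI::Vector
    distributed_error_per_cell (mpi_communicator,
                                triangulation.n_active_cells(),
                                n_local_cells);
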
diff --git a/deal.II/lac/include/lac/petsc_matrix_base.h b/deal.II/lac/include/lac/petsc_matrix_base.h
index 442929fd06b2b1322a1bed88bc559fe055f53769..7291d6f84febe49e2680e898f5643e1a5734df09 100644 (file)
@@ -1682,7 +1682,7 @@ namespace PETScWrappers
     const int ierr
       = MatSetValues (matrix, 1, &petsc_i, n_columns, col_index_ptr,
                      col_value_ptr, ADD_VALUES);
-    AssertThrow (ierr == 0, ExcPETScError(ierr));
+    Assert (ierr == 0, ExcPETScError(ierr));
   }
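
The hunk above replaces AssertThrow by Assert for the return value of MatSetValues. A schematic illustration of the difference, assuming the usual deal.II semantics of the two macros (Assert is compiled out in optimized mode, AssertThrow also fires in optimized mode); ExcMessage and the function name are used here only to keep the sketch self-contained:

  #include <base/exceptions.h>
  using namespace dealii;

  void check_error_code (const int ierr)
  {
    // Active only when deal.II is built in debug mode; no overhead in
    // optimized builds, which matters in hot paths such as adding
    // entries to a matrix.
    Assert (ierr == 0, ExcMessage ("nonzero error code"));

    // Active in debug and optimized mode alike; throws an exception:
    AssertThrow (ierr == 0, ExcMessage ("nonzero error code"));
  }
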
 
 
diff --git a/deal.II/lac/include/lac/petsc_sparse_matrix.h b/deal.II/lac/include/lac/petsc_sparse_matrix.h
index 2c0eabfeb27fb7ad3d3bdf1bf71caa207aabc4ec..7ecc3079e96ca0962daf40d1f426b9b19d015c76 100644 (file)
@@ -185,7 +185,7 @@ namespace PETScWrappers
                                         */
       template <typename SparsityType>
       SparseMatrix (const SparsityType &sparsity_pattern,
-                    const bool          preset_nonzero_locations = false);
+                    const bool          preset_nonzero_locations = true);
 
                                        /**
                                         * This operator assigns a scalar to
@@ -281,7 +281,7 @@ namespace PETScWrappers
                                         */
       template <typename SparsityType>
       void reinit (const SparsityType &sparsity_pattern,
-                   const bool          preset_nonzero_locations = false);
+                   const bool          preset_nonzero_locations = true);
 
                                        /**
                                         * Return a reference to the MPI
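
The default change above ties in with the commit subject: preset_nonzero_locations now defaults to true, so a PETScWrappers::SparseMatrix initialized from a deal.II sparsity pattern has its nonzero locations preset in the underlying PETSc matrix. A minimal usage sketch against the constructor and reinit signatures shown in this hunk; the include paths and the sparsity-pattern setup follow the style of the tutorial programs, and n_dofs and max_couplings_per_row are placeholders:

  #include <lac/sparsity_pattern.h>
  #include <lac/petsc_sparse_matrix.h>

  // Build and compress a sparsity pattern as usual (e.g. via
  // DoFTools::make_sparsity_pattern); details omitted here.
  SparsityPattern sparsity_pattern (n_dofs, n_dofs, max_couplings_per_row);
  // ...
  sparsity_pattern.compress ();

  // With this change, the nonzero locations are preset by default:
  PETScWrappers::SparseMatrix system_matrix;
  system_matrix.reinit (sparsity_pattern);

  // Passing false restores the previous behavior:
  // system_matrix.reinit (sparsity_pattern, false);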
