<ol>
+<li> The class Utilities::MPI::MPI_InitFinalize now also initializes
+and finalizes PETSc, if deal.II is configured with PETSc.
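+As a sketch of the new usage (mirroring the tutorial program changes
+below), a PETSc-based program no longer calls
+<code>PetscInitialize</code> and <code>PetscFinalize</code> itself but
+simply puts
+<pre>
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
+</pre>
+near the top of <code>main()</code>; PETSc and MPI are then finalized
+automatically when this object goes out of scope.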
+<br>
+(Timo Heister, 2012/11/02)
+
<li> step-6 now uses ConstraintMatrix::distribute_local_to_global()
instead of condense(), which is the preferred way to use a ConstraintMatrix
(and the only sensible way in parallel).
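A hedged sketch of the idiom this entry refers to (the names
<code>cell_matrix</code>, <code>cell_rhs</code>,
<code>local_dof_indices</code>, <code>system_matrix</code>,
<code>system_rhs</code>, and <code>constraints</code> follow the usual
tutorial conventions and are assumed here rather than quoted from step-6):
<pre>
  // inside the assembly loop, once the local contributions are computed:
  cell->get_dof_indices (local_dof_indices);

  // copy the local matrix and right hand side into the global objects and
  // eliminate constrained degrees of freedom in one step, instead of
  // calling constraints.condense(system_matrix, system_rhs) afterwards:
  constraints.distribute_local_to_global (cell_matrix,
                                          cell_rhs,
                                          local_dof_indices,
                                          system_matrix,
                                          system_rhs);
</pre>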
// Here is the only real difference:
// PETSc requires that we initialize it
// at the beginning of the program, and
- // un-initialize it at the end. So we
- // call <code>PetscInitialize</code> and
- // <code>PetscFinalize</code>. The original code
+ // un-initialize it at the end. The
+ // class MPI_InitFinalize takes care
+ // of that. The original code
// sits in between, enclosed in braces
// to make sure that the
// <code>elastic_problem</code> variable goes
// out of scope (and is destroyed)
- // before we call
+ // before PETSc is closed with
  // <code>PetscFinalize</code>. (If we didn't
  // use braces, the destructor of
  // <code>elastic_problem</code> would run after
  // <code>PetscFinalize</code>; since the
  // destructor involves calls to PETSc
// functions, we would get strange
// error messages from PETSc.)
- PetscInitialize(&argc,&argv,0,0);
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
{
deallog.depth_console (0);
ElasticProblem<2> elastic_problem;
elastic_problem.run ();
}
-
- PetscFinalize();
}
catch (std::exception &exc)
{
using namespace dealii;
using namespace Step18;
- PetscInitialize(&argc,&argv,0,0);
-
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
{
deallog.depth_console (0);
TopLevel<3> elastic_problem;
elastic_problem.run ();
}
-
- PetscFinalize();
}
catch (std::exception &exc)
{
// step-6. Like in the other programs
// that use PETSc, we have to
  // initialize and finalize PETSc, which
- // also initializes and finalizes the
- // MPI subsystem.
+ // is done using the helper object
+ // MPI_InitFinalize.
//
  // Note how we enclose the use of
  // the LaplaceProblem class in
// a pair of braces. This makes sure
// that all member variables of the
// object are destroyed by the time
- // we hit the
- // <code>PetscFinalize</code>
- // call. Not doing this will lead to
+ // we destroy the mpi_initialization
+ // object. Not doing this will lead to
// strange and hard to debug errors
// when <code>PetscFinalize</code>
// first deletes all PETSc vectors
using namespace dealii;
using namespace Step40;
- PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
deallog.depth_console (0);
{
LaplaceProblem<2> laplace_problem_2d;
laplace_problem_2d.run ();
}
-
- PetscFinalize();
}
catch (std::exception &exc)
{
* program and to shut it down again at
* the end.
*
+ * If deal.II is configured with PETSc,
+ * PETSc will also be initialized at the
+ * beginning and finalized at the end
+ * automatically (internally, by calling
+ * PetscInitialize() and PetscFinalize()).
+ *
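+ * As a sketch (assuming deal.II was
+ * configured with PETSc), such a
+ * program then needs no explicit
+ * PetscInitialize() or PetscFinalize()
+ * calls of its own:
+ * @code
+ *   int main (int argc, char **argv)
+ *   {
+ *     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);
+ *     // ... set up and run the program; PETSc and MPI are finalized
+ *     // automatically when mpi_initialization goes out of scope ...
+ *   }
+ * @endcode
+ *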
* If a program uses MPI one would
* typically just create an object of
* this type at the beginning of
# endif
#endif
+#ifdef DEAL_II_USE_PETSC
+# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+# include <petscsys.h>
+# endif
+#endif
+
+
DEAL_II_NAMESPACE_OPEN
"in a program since it initializes the MPI system."));
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ // if we have PETSc, we initialize it here and let it initialize MPI for us.
+ // Otherwise, we initialize MPI ourselves.
+#ifdef DEAL_II_USE_PETSC
+ PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
+#else
int MPI_has_been_started = 0;
MPI_Initialized(&MPI_has_been_started);
AssertThrow (MPI_has_been_started == 0,
mpi_err = MPI_Init (&argc, &argv);
AssertThrow (mpi_err == 0,
ExcMessage ("MPI could not be initialized."));
+#endif
#else
// make sure the compiler doesn't warn
// about these variables
::release_unused_memory ();
# endif
+#ifdef DEAL_II_USE_PETSC
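+ // as in the constructor, let PETSc take care of shutting down MPI for us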
+ PetscFinalize();
+#else
+
+
int mpi_err = 0;
int MPI_has_been_started = 0;
AssertThrow (mpi_err == 0,
ExcMessage ("An error occurred while calling MPI_Finalize()"));
+#endif
#endif
}