// already be familiar friends:
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/function.h>
-
+ #include <deal.II/base/timer.h>
+
+ #include <deal.II/lac/generic_linear_algebra.h>
+
+ #define USE_PETSC_LA
+
+ namespace LA
+ {
+ #ifdef USE_PETSC_LA
+   using namespace dealii::LinearAlgebraPETSc;
+ #else
+   using namespace dealii::LinearAlgebraTrilinos;
+ #endif
+ }
+
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/solver_cg.h>
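Keeping the backend choice in a preprocessor symbol, rather than only in the namespace alias, lets the few places where the PETSc and Trilinos interfaces still differ branch on it. The following is a small sketch of that pattern, not part of the diff, assuming the algebraic-multigrid preconditioner is configured somewhere later in the program:

  LA::MPI::PreconditionAMG::AdditionalData data;
#ifdef USE_PETSC_LA
  // PETSc's BoomerAMG can exploit a symmetric operator.
  data.symmetric_operator = true;
#else
  // The Trilinos ML defaults are fine here.
#endif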
void refine_grid ();
void output_results (const unsigned int cycle) const;
MPI_Comm mpi_communicator;
parallel::distributed::Triangulation<dim> triangulation;
DoFHandler<dim> dof_handler;
FE_Q<dim> fe;
IndexSet locally_owned_dofs;
IndexSet locally_relevant_dofs;
ConstraintMatrix constraints;
- PETScWrappers::MPI::SparseMatrix system_matrix;
- PETScWrappers::MPI::Vector locally_relevant_solution;
- PETScWrappers::MPI::Vector system_rhs;
+ LA::MPI::SparseMatrix system_matrix;
+ LA::MPI::Vector locally_relevant_solution;
+ LA::MPI::Vector system_rhs;
ConditionalOStream pcout;
+ TimerOutput computing_timer;
};
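The new computing_timer member then needs to be initialized next to pcout. This is a sketch, not taken from the diff, of how the constructor of a program like this one typically sets the two up:

  template <int dim>
  LaplaceProblem<dim>::LaplaceProblem ()
    :
    mpi_communicator (MPI_COMM_WORLD),
    triangulation (mpi_communicator,
                   typename Triangulation<dim>::MeshSmoothing
                   (Triangulation<dim>::smoothing_on_refinement |
                    Triangulation<dim>::smoothing_on_coarsening)),
    dof_handler (triangulation),
    fe (2),
    // pcout only produces output on the first MPI process...
    pcout (std::cout,
           Utilities::MPI::this_mpi_process(mpi_communicator) == 0),
    // ...and the timer prints a wall-time summary through it at the end.
    computing_timer (mpi_communicator, pcout,
                     TimerOutput::summary, TimerOutput::wall_times)
  {}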
template <int dim>
void LaplaceProblem<dim>::solve ()
{
- PETScWrappers::MPI::Vector
-   completely_distributed_solution (mpi_communicator,
-                                    dof_handler.n_dofs(),
-                                    dof_handler.n_locally_owned_dofs());
+ TimerOutput::Scope t(computing_timer, "solve");
+ LA::MPI::Vector
+   completely_distributed_solution (locally_owned_dofs, mpi_communicator);
SolverControl solver_control (dof_handler.n_dofs(), 1e-12);
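From here on, solve() proceeds much as before, only spelled through LA::. Roughly, and hedging on the exact backend-specific branches, the remainder of the function looks like this sketch (not the diff's actual continuation):

#ifdef USE_PETSC_LA
  LA::SolverCG solver (solver_control, mpi_communicator);
#else
  LA::SolverCG solver (solver_control);
#endif

  LA::MPI::PreconditionAMG preconditioner;
  // Backend-specific AdditionalData tweaks would go in an #ifdef, as
  // sketched after the include block above; the defaults are used here.
  preconditioner.initialize (system_matrix);

  solver.solve (system_matrix, completely_distributed_solution,
                system_rhs, preconditioner);

  pcout << "   Solved in " << solver_control.last_step()
        << " iterations." << std::endl;

  // Constraints are resolved on the fully distributed solution, which is
  // then copied into the ghosted vector the rest of the program reads from.
  constraints.distribute (completely_distributed_solution);
  locally_relevant_solution = completely_distributed_solution;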