// simply map to sequential, local vectors and matrices if there is only a
// single process, i.e. if you are running on only one machine, and without
// MPI support):
-#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
// Then we also need interfaces for solvers and preconditioners that PETSc
// In step-8, this would have been the place where we would have declared
// the member variables for the sparsity pattern, the system matrix, right
- // hand, and solution vector. We change these declarations to use
- // parallel PETSc objects instead (note that the fact that we use the
- // parallel versions is denoted the fact that we use the classes from the
- // <code>PETScWrappers::MPI</code> namespace; sequential versions of these
- // classes are in the <code>PETScWrappers</code> namespace, i.e. without
- // the <code>MPI</code> part, be aware that these classes are deprecated).
- // Note also that we do not use a separate sparsity pattern, since PETSc
- // manages that as part of its matrix data structures.
+ // hand, and solution vector. We change these declarations to use parallel
+ // PETSc objects instead. Note that we do not use a separate sparsity
+ // pattern, since PETSc manages that as part of its matrix data structures.
PETScWrappers::MPI::SparseMatrix system_matrix;
PETScWrappers::MPI::Vector solution;
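As an aside, the remark above about not needing a separate sparsity pattern can be made concrete with a short sketch. This is only an illustration: the names dof_handler, locally_owned_dofs and mpi_communicator are assumed to exist in the surrounding class, and the reinit() overload shown is the one taking two IndexSet arguments plus a communicator. The point is that the DynamicSparsityPattern is a purely local, temporary object; PETSc copies the couplings into its own storage, so no SparsityPattern member has to be kept around.

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp);
  system_matrix.reinit(locally_owned_dofs,
                       locally_owned_dofs,
                       dsp,
                       mpi_communicator);
  // dsp may now go out of scope -- the pattern lives inside the PETSc matrix.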
#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
-#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_parallel_vector.h>
#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/lac/petsc_solver.h>
+ // When using a TrilinosWrappers::MPI::Vector or a
+ // TrilinosWrappers::MPI::BlockVector, the Vector is initialized using an
+ // IndexSet. IndexSet is used not only to resize the
+ // TrilinosWrappers::MPI::Vector but it also associates an index in the
+ // TrilinosWrappers::MPI::Vector with a degree of freedom (see step-40 for
+ // a more detailed explanation). The function complete_index_set() creates
+ // an IndexSet where every valid index is part of the set. Note that this
+ // program can only be run sequentially and will throw an exception if used
+ // in parallel.
template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::
BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
:
stokes_matrix (&S),
m_inverse (&Mpinv),
- a_preconditioner (Apreconditioner)
- {
- // When using a TrilinosWrappers::MPI::Vector or a
- // TrilinosWrappers::MPI::BlockVector, the Vector is initialized using an
- // IndexSet. IndexSet is used not only to resize the
- // TrilinosWrappers::MPI::Vector but it also associates an index in the
- // TrilinosWrappers::MPI::Vector with a degree of freedom (see step-40 for
- // a more detailed explanation). This assocation is done by the add_range()
- // function.
- IndexSet tmp_index_set(stokes_matrix->block(1,1).m());
- tmp_index_set.add_range(0,stokes_matrix->block(1,1).m());
- tmp.reinit(tmp_index_set, MPI_COMM_WORLD);
- }
+ a_preconditioner (Apreconditioner),
+ tmp (complete_index_set(stokes_matrix->block(1,1).m()))
+ {}
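For readers unfamiliar with complete_index_set(), a minimal sketch (with a hypothetical size n) of the equivalence that makes the initializer above work: the function returns an IndexSet of the given size in which every index has been added, which is exactly what the removed set_size()/add_range() calls produced by hand.

  const unsigned int n = 100;               // hypothetical size
  IndexSet manual_set(n);
  manual_set.add_range(0, n);               // every index 0..n-1 is in the set
  // complete_index_set(n) builds exactly the same set in one call:
  TrilinosWrappers::MPI::Vector by_hand(manual_set, MPI_COMM_WORLD);
  TrilinosWrappers::MPI::Vector shorthand(complete_index_set(n), MPI_COMM_WORLD);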
// Next is the <code>vmult</code> function. We implement the action of
DoFHandler<dim> stokes_dof_handler;
ConstraintMatrix stokes_constraints;
- std::vector<IndexSet> stokes_block_sizes;
+ std::vector<IndexSet> stokes_partitioning;
TrilinosWrappers::BlockSparseMatrix stokes_matrix;
TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
// Trilinos matrices store the sparsity pattern internally, there is no
// need to keep the sparsity pattern around after the initialization of
// the matrix.
- stokes_block_sizes.clear();
- stokes_block_sizes.resize (2);
- stokes_block_sizes[0].set_size(n_u);
- stokes_block_sizes[1].set_size(n_p);
- stokes_block_sizes[0].add_range(0,n_u);
- stokes_block_sizes[1].add_range(0,n_p);
+ stokes_partitioning.resize (2);
+ stokes_partitioning[0] = complete_index_set (n_u);
+ stokes_partitioning[1] = complete_index_set (n_p);
{
stokes_matrix.clear ();
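To illustrate the remark that Trilinos matrices keep the sparsity pattern internally, here is a simplified sketch in the spirit of this (largely elided) block, reusing the n_u/n_p block sizes and the stokes_* members from above; the real step-31 code additionally passes a coupling table to make_sparsity_pattern(). The essential point is that dsp is a local object that can be discarded once reinit() has copied it.

  BlockDynamicSparsityPattern dsp(2, 2);
  dsp.block(0, 0).reinit(n_u, n_u);
  dsp.block(0, 1).reinit(n_u, n_p);
  dsp.block(1, 0).reinit(n_p, n_u);
  dsp.block(1, 1).reinit(n_p, n_p);
  dsp.collect_sizes();
  DoFTools::make_sparsity_pattern(stokes_dof_handler, dsp,
                                  stokes_constraints, false);
  stokes_matrix.reinit(dsp);   // pattern is copied; dsp is no longer needed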
// and $\mathbf u^{n-2}$, as well as for the temperatures $T^{n}$,
// $T^{n-1}$ and $T^{n-2}$ (required for time stepping) and all the system
// right hand sides to their correct sizes and block structure:
- IndexSet temperature_partitioning (n_T);
- temperature_partitioning.add_range(0,n_T);
- stokes_solution.reinit (stokes_block_sizes, MPI_COMM_WORLD);
- old_stokes_solution.reinit (stokes_block_sizes, MPI_COMM_WORLD);
- stokes_rhs.reinit (stokes_block_sizes, MPI_COMM_WORLD);
+ IndexSet temperature_partitioning = complete_index_set (n_T);
+ stokes_solution.reinit (stokes_partitioning, MPI_COMM_WORLD);
+ old_stokes_solution.reinit (stokes_partitioning, MPI_COMM_WORLD);
+ stokes_rhs.reinit (stokes_partitioning, MPI_COMM_WORLD);
temperature_solution.reinit (temperature_partitioning, MPI_COMM_WORLD);
old_temperature_solution.reinit (temperature_partitioning, MPI_COMM_WORLD);
numbers::invalid_unsigned_int);
// This program can only be run in serial. Otherwise, throw an exception.
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
- " use mpirun -np 1 ./step-31"));
+ AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) == 1,
+             ExcMessage("This program can only be run in serial, "
+                        "use mpirun -np 1 ./step-31"));

BoussinesqFlowProblem<2> flow_problem;
flow_problem.run ();
// - In a bit of naming confusion, you will notice below that some of the
// variables from namespace TrilinosWrappers are taken from namespace
// TrilinosWrappers::MPI (such as the right hand side vectors) whereas
- // others are not (such as the various matrices). For the matrices, we
- // happen to use the same class names for %parallel and sequential data
- // structures, i.e., all matrices will actually be considered %parallel
- // below. On the other hand, for vectors, only those from namespace
- // TrilinosWrappers::MPI are actually distributed (be aware that
- // TrilinosWrappers::Vector and TrilinosWrappers::BlockVector are
- // deprecated). In particular, we will frequently have to query velocities
+ // others are not (such as the various matrices). This is due to legacy
+ // reasons. We will frequently have to query velocities
// and temperatures at arbitrary quadrature points; consequently, rather
// than importing ghost information of a vector whenever we need access
// to degrees of freedom that are relevant locally but owned by another
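A two-line sketch of the naming scheme described above (declarations only, mirroring the members used elsewhere in this patch): the matrix class carries no MPI in its name even though it is the parallel Trilinos object, while among the vectors only the MPI variant is distributed.

  TrilinosWrappers::BlockSparseMatrix matrix;        // parallel, despite the name
  TrilinosWrappers::MPI::BlockVector  distributed_v; // distributed vector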
// PETSc appears here because SLEPc depends on this library:
#include <deal.II/lac/petsc_sparse_matrix.h>
-#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
// And then we need to actually import the interfaces for solvers that SLEPc
// provides:
// is initialized using an IndexSet. IndexSet is used not only to resize the
// PETScWrappers::MPI::Vector but it also associates an index in the
// PETScWrappers::MPI::Vector with a degree of freedom (see step-40 for a
- // more detailed explanation). This assocation is done by the add_range()
- // function:
- IndexSet eigenfunction_index_set(dof_handler.n_dofs ());
- eigenfunction_index_set.add_range(0, dof_handler.n_dofs ());
+ // more detailed explanation). The function complete_index_set() creates
+ // an IndexSet where every valid index is part of the set. Note that this
+ // program can only be run sequentially and will throw an exception if used
+ // in parallel.
+ IndexSet eigenfunction_partitioning = complete_index_set(dof_handler.n_dofs ());
eigenfunctions
.resize (parameters.get_integer ("Number of eigenvalues/eigenfunctions"));
for (unsigned int i=0; i<eigenfunctions.size (); ++i)
- eigenfunctions[i].reinit (eigenfunction_index_set, MPI_COMM_WORLD);
+ eigenfunctions[i].reinit (eigenfunction_partitioning, MPI_COMM_WORLD);
eigenvalues.resize (eigenfunctions.size ());
}
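For comparison, a sketch of what the change buys us here: the deprecated sequential class would have been sized with a plain integer, e.g. reinit(dof_handler.n_dofs()), whereas the parallel class must be told which indices the current process owns. With complete_index_set() that is all of them, which is why the program is restricted to a single process.

  PETScWrappers::MPI::Vector example;
  example.reinit(complete_index_set(dof_handler.n_dofs()), MPI_COMM_WORLD);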
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
// This program can only be run in serial. Otherwise, throw an exception.
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
- " use mpirun -np 1 ./step-36"));
+ AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) == 1,
+             ExcMessage("This program can only be run in serial, "
+                        "use mpirun -np 1 ./step-36"));
{
deallog.depth_console (0);
DoFHandler<dim> dof_handler;
ConstraintMatrix constraints;
IndexSet active_set;
- IndexSet solution_index_set;
TrilinosWrappers::SparseMatrix system_matrix;
TrilinosWrappers::SparseMatrix complete_system_matrix;
system_matrix.reinit (dsp);
complete_system_matrix.reinit (dsp);
- solution_index_set.set_size(dof_handler.n_dofs());
- solution_index_set.add_range(0, dof_handler.n_dofs());
- solution.reinit (solution_index_set, MPI_COMM_WORLD);
- system_rhs.reinit (solution_index_set, MPI_COMM_WORLD);
- complete_system_rhs.reinit (solution_index_set, MPI_COMM_WORLD);
- contact_force.reinit (solution_index_set, MPI_COMM_WORLD);
+ IndexSet solution_partitioning = complete_index_set(dof_handler.n_dofs());
+ solution.reinit (solution_partitioning, MPI_COMM_WORLD);
+ system_rhs.reinit (solution_partitioning, MPI_COMM_WORLD);
+ complete_system_rhs.reinit (solution_partitioning, MPI_COMM_WORLD);
+ contact_force.reinit (solution_partitioning, MPI_COMM_WORLD);
// The only other thing to do here is to compute the factors in the $B$
// matrix which is used to scale the residual. As discussed in the
TrilinosWrappers::SparseMatrix mass_matrix;
mass_matrix.reinit (dsp);
assemble_mass_matrix_diagonal (mass_matrix);
- diagonal_of_mass_matrix.reinit (solution_index_set);
+ diagonal_of_mass_matrix.reinit (solution_partitioning);
for (unsigned int j=0; j<solution.size (); j++)
diagonal_of_mass_matrix (j) = mass_matrix.diag_element (j);
}
const double penalty_parameter = 100.0;
- TrilinosWrappers::MPI::Vector lambda (solution_index_set);
+ TrilinosWrappers::MPI::Vector lambda (complete_index_set(dof_handler.n_dofs()));
complete_system_matrix.residual (lambda,
solution, complete_system_rhs);
contact_force.ratio (lambda, diagonal_of_mass_matrix);
numbers::invalid_unsigned_int);
// This program can only be run in serial. Otherwise, throw an exception.
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
- " use mpirun -np 1 ./step-41"));
+ AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) == 1,
+             ExcMessage("This program can only be run in serial, "
+                        "use mpirun -np 1 ./step-41"));
ObstacleProblem<2> obstacle_problem;
obstacle_problem.run ();
:
darcy_matrix (&S),
m_inverse (&Mpinv),
- a_preconditioner (Apreconditioner)
- {
- IndexSet tmp_index_set(darcy_matrix->block(1,1).m());
- tmp_index_set.add_range(0,darcy_matrix->block(1,1).m());
- tmp.reinit(tmp_index_set, MPI_COMM_WORLD);
- }
+ a_preconditioner (Apreconditioner),
+ tmp (complete_index_set(darcy_matrix->block(1,1).m()))
+ {}
template <class PreconditionerA, class PreconditionerMp>
FESystem<dim> darcy_fe;
DoFHandler<dim> darcy_dof_handler;
ConstraintMatrix darcy_constraints;
- std::vector<IndexSet> darcy_index_set;
ConstraintMatrix darcy_preconditioner_constraints;
FE_Q<dim> saturation_fe;
DoFHandler<dim> saturation_dof_handler;
ConstraintMatrix saturation_constraints;
- IndexSet saturation_index_set;
TrilinosWrappers::SparseMatrix saturation_matrix;
saturation_matrix.reinit (dsp);
}
- darcy_index_set.clear();
- darcy_index_set.resize(2);
- darcy_index_set[0].set_size(n_u);
- darcy_index_set[1].set_size(n_p);
- darcy_index_set[0].add_range(0,n_u);
- darcy_index_set[1].add_range(0,n_p);
- darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+ std::vector<IndexSet> darcy_partitioning(2);
+ darcy_partitioning[0] = complete_index_set (n_u);
+ darcy_partitioning[1] = complete_index_set (n_p);
+ darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
darcy_solution.collect_sizes ();
- last_computed_darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+ last_computed_darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
last_computed_darcy_solution.collect_sizes ();
- second_last_computed_darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+ second_last_computed_darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
second_last_computed_darcy_solution.collect_sizes ();
- darcy_rhs.reinit (darcy_index_set, MPI_COMM_WORLD);
+ darcy_rhs.reinit (darcy_partitioning, MPI_COMM_WORLD);
darcy_rhs.collect_sizes ();
- saturation_index_set.clear();
- saturation_index_set.set_size(n_s);
- saturation_index_set.add_range(0,n_s);
- saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
- old_saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
- old_old_saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
+ IndexSet saturation_partitioning = complete_index_set(n_s);
+ saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
+ old_saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
+ old_old_saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
- saturation_matching_last_computed_darcy_solution.reinit (saturation_index_set,
+ saturation_matching_last_computed_darcy_solution.reinit (saturation_partitioning,
MPI_COMM_WORLD);
- saturation_rhs.reinit (saturation_index_set, MPI_COMM_WORLD);
+ saturation_rhs.reinit (saturation_partitioning, MPI_COMM_WORLD);
}
numbers::invalid_unsigned_int);
// This program can only be run in serial. Otherwise, throw an exception.
- int size;
- MPI_Comm_size(MPI_COMM_WORLD,&size);
- AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
- " use mpirun -np 1 ./step-43"));
+ AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) == 1,
+             ExcMessage("This program can only be run in serial, "
+                        "use mpirun -np 1 ./step-43"));
TwoPhaseFlowProblem<2> two_phase_flow_problem(1);
two_phase_flow_problem.run ();
* interface, this class handles the actual allocation of vectors and
* provides functions that are specific to the underlying vector type.
*
- * This class is deprecated use PETScWrappers::MPI::BlockVector.
+ * This class is deprecated, use PETScWrappers::MPI::BlockVector instead.
*
* @ingroup Vectors
*
* of different sizes.
*/
explicit BlockVector (const unsigned int num_blocks = 0,
- const size_type block_size = 0) DEAL_II_DEPRECATED;
+ const size_type block_size = 0);
/**
* Copy-Constructor. Dimension set to that of V, all components are copied
* from V
*/
- BlockVector (const BlockVector &V) DEAL_II_DEPRECATED;
+ BlockVector (const BlockVector &V);
/**
* Copy-constructor: copy the values from a PETSc wrapper parallel block
* It is not sufficient if only one processor tries to copy the elements
* from the other processors over to its own process space.
*/
- explicit BlockVector (const MPI::BlockVector &v) DEAL_II_DEPRECATED;
+ explicit BlockVector (const MPI::BlockVector &v);
/**
* Constructor. Set the number of blocks to <tt>n.size()</tt> and
* initialize each block with <tt>n[i]</tt> zero elements.
*/
- BlockVector (const std::vector<size_type> &n) DEAL_II_DEPRECATED;
+ BlockVector (const std::vector<size_type> &n);
/**
* Constructor. Set the number of blocks to <tt>n.size()</tt>. Initialize
template <typename InputIterator>
BlockVector (const std::vector<size_type> &n,
const InputIterator first,
- const InputIterator end) DEAL_II_DEPRECATED;
+ const InputIterator end);
/**
* Destructor. Clears memory
*/
DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
///@}
- };
+ } DEAL_II_DEPRECATED;
/*@}*/
* virtual functions). Only the functions creating a vector of specific type
* differ, and are implemented in this particular class.
*
- * This class is deprecated use PETScWrappers::MPI::Vector instead.
+ * This class is deprecated, use PETScWrappers::MPI::Vector instead.
*
* @ingroup Vectors
* @author Wolfgang Bangerth, 2004
/**
* Default constructor. Initialize the vector as empty.
*/
- Vector () DEAL_II_DEPRECATED;
+ Vector ();
/**
* Constructor. Set dimension to @p n and initialize all elements with
* <tt>v=Vector@<number@>(0);</tt>, i.e. the vector is replaced by one of
* length zero.
*/
- explicit Vector (const size_type n) DEAL_II_DEPRECATED;
+ explicit Vector (const size_type n);
/**
* Copy-constructor from deal.II vectors. Sets the dimension to that of
* the given vector, and copies all elements.
*/
template <typename Number>
- explicit Vector (const dealii::Vector<Number> &v) DEAL_II_DEPRECATED;
+ explicit Vector (const dealii::Vector<Number> &v);
/**
* Construct it from an existing PETSc Vector of type Vec. Note: this does
* the vector is not used twice at the same time or destroyed while in
* use. This class does not destroy the PETSc object. Handle with care!
*/
- explicit Vector (const Vec &v) DEAL_II_DEPRECATED;
+ explicit Vector (const Vec &v);
/**
* Copy-constructor the values from a PETSc wrapper vector class.
*/
- Vector (const Vector &v) DEAL_II_DEPRECATED;
+ Vector (const Vector &v);
/**
* Copy-constructor: copy the values from a PETSc wrapper parallel vector
* It is not sufficient if only one processor tries to copy the elements
* from the other processors over to its own process space.
*/
- explicit Vector (const MPI::Vector &v) DEAL_II_DEPRECATED;
+ explicit Vector (const MPI::Vector &v);
/**
* Copy the given vector. Resize the present vector if necessary.
* vector. @p n denotes the total size of the vector to be created.
*/
void create_vector (const size_type n);
- };
+ } DEAL_II_DEPRECATED;
/*@}*/
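A migration sketch for users of this deprecated class (an illustration only, not part of the header; n is a hypothetical size): code that previously wrote PETScWrappers::Vector v(n); can instead create a parallel vector that, on a single process, owns every index.

  const unsigned int n = 100;                       // hypothetical size
  PETScWrappers::MPI::Vector v;
  v.reinit(complete_index_set(n), MPI_COMM_WORLD);  // replaces Vector v(n)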
* block vector class do only work in case the program is run on only one
* processor, since the Trilinos matrices are inherently parallel.
*
- * This class is deprecated use TrilinosWrappers::MPI::BlockVector instead.
+ * This class is deprecated, use TrilinosWrappers::MPI::BlockVector instead.
*
* @ingroup Vectors
* @ingroup TrilinosWrappers @see
/**
* Default constructor. Generate an empty vector without any blocks.
*/
- BlockVector () DEAL_II_DEPRECATED;
+ BlockVector ();
/**
* Constructor. Generate a block vector with as many blocks as there are
* entries in Input_Maps. For this non-distributed vector, the %parallel
* partitioning is not used, just the global size of the partitioner.
*/
- explicit BlockVector (const std::vector<Epetra_Map> &partitioner) DEAL_II_DEPRECATED;
+ explicit BlockVector (const std::vector<Epetra_Map> &partitioner);
/**
* Constructor. Generate a block vector with as many blocks as there are
* partitioning is not used, just the global size of the partitioner.
*/
explicit BlockVector (const std::vector<IndexSet> &partitioner,
- const MPI_Comm &communicator = MPI_COMM_WORLD) DEAL_II_DEPRECATED;
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Copy-Constructor. Set all the properties of the non-%parallel vector to
* those of the given %parallel vector and import the elements.
*/
- BlockVector (const MPI::BlockVector &V) DEAL_II_DEPRECATED;
+ BlockVector (const MPI::BlockVector &V);
/**
* Copy-Constructor. Set all the properties of the vector to those of the
* given input vector and copy the elements.
*/
- BlockVector (const BlockVector &V) DEAL_II_DEPRECATED;
+ BlockVector (const BlockVector &V);
/**
* Creates a block vector consisting of <tt>num_blocks</tt> components,
* but there is no content in the individual components and the user has
* to fill appropriate data using a reinit of the blocks.
*/
- explicit BlockVector (const size_type num_blocks) DEAL_II_DEPRECATED;
+ explicit BlockVector (const size_type num_blocks);
/**
* Constructor. Set the number of blocks to <tt>n.size()</tt> and
*
* References BlockVector.reinit().
*/
- explicit BlockVector (const std::vector<size_type> &N) DEAL_II_DEPRECATED;
+ explicit BlockVector (const std::vector<size_type> &N);
/**
* Constructor. Set the number of blocks to <tt>n.size()</tt>. Initialize
template <typename InputIterator>
BlockVector (const std::vector<size_type> &n,
const InputIterator first,
- const InputIterator end) DEAL_II_DEPRECATED;
+ const InputIterator end);
/**
* Destructor. Clears memory
<< "local_size = global_size is a necessary condition, but"
<< arg1 << " != " << arg2 << " was given!");
- };
+ } DEAL_II_DEPRECATED;
* in order to be able to access all elements in the vector or to apply
* certain deal.II functions.
*
- * This class is deprecated use TrilinosWrappers::MPI::Vector instead.
+ * This class is deprecated, use TrilinosWrappers::MPI::Vector instead.
*
* @ingroup TrilinosWrappers
* @ingroup Vectors
* function <tt>reinit()</tt> will have to give the vector the correct
* size.
*/
- Vector () DEAL_II_DEPRECATED;
+ Vector ();
/**
* This constructor takes as input the number of elements in the vector.
*/
- explicit Vector (const size_type n) DEAL_II_DEPRECATED;
+ explicit Vector (const size_type n);
/**
* This constructor takes as input the number of elements in the vector.
* ignored, the only thing that matters is the size of the index space
* described by this argument.
*/
- explicit Vector (const Epetra_Map &partitioning) DEAL_II_DEPRECATED;
+ explicit Vector (const Epetra_Map &partitioning);
/**
* This constructor takes as input the number of elements in the vector.
* size of the index space described by this argument.
*/
explicit Vector (const IndexSet &partitioning,
- const MPI_Comm &communicator = MPI_COMM_WORLD) DEAL_II_DEPRECATED;
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* This constructor takes a (possibly parallel) Trilinos Vector and
* generates a localized version of the whole content on each processor.
*/
- explicit Vector (const VectorBase &V) DEAL_II_DEPRECATED;
+ explicit Vector (const VectorBase &V);
/**
* Copy-constructor from deal.II vectors. Sets the dimension to that of
* the given vector, and copies all elements.
*/
template <typename Number>
- explicit Vector (const dealii::Vector<Number> &v) DEAL_II_DEPRECATED;
+ explicit Vector (const dealii::Vector<Number> &v);
/**
* Reinit function that resizes the vector to the size specified by
* thus an empty function.
*/
void update_ghost_values () const;
- };
+ } DEAL_II_DEPRECATED;
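Analogously, a migration sketch for this deprecated class (an illustration only; n is a hypothetical size): the localized TrilinosWrappers::Vector v(n); becomes a TrilinosWrappers::MPI::Vector that, on a single process, owns every index.

  const unsigned int n = 100;                       // hypothetical size
  TrilinosWrappers::MPI::Vector v(complete_index_set(n), MPI_COMM_WORLD);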