From: Bruno Turcksin
Date: Fri, 1 May 2015 17:43:32 +0000 (-0500)
Subject: Deprecate the serial TrilinosWrappers and PETScWrappers classes instead of the constr...
X-Git-Tag: v8.3.0-rc1~196^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=d249e17c649f32e0d5e448006da7c5bf022c7a49;p=dealii.git

Deprecate the serial TrilinosWrappers and PETScWrappers classes instead of the constructors.
Use complete_index_set() in the tutorials.
---
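The tutorial changes below all apply one idiom: instead of constructing an
IndexSet and then calling add_range() over its whole size, a single call to
complete_index_set() produces the set containing every valid index. A minimal
stand-alone sketch of the two equivalent forms (not part of the patch; assumes
a deal.II build with MPI and Trilinos; the size n is arbitrary):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/trilinos_vector.h>

int main (int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  const unsigned int n = 100;          // arbitrary size, illustration only

  // Old style: build the "every index is in the set" IndexSet by hand.
  IndexSet manual_set (n);
  manual_set.add_range (0, n);

  // New style: one call produces the same set.
  const IndexSet complete_set = complete_index_set (n);

  // Either set can be used to initialize a (here sequential) vector.
  TrilinosWrappers::MPI::Vector v;
  v.reinit (complete_set, MPI_COMM_WORLD);

  return 0;
}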
diff --git a/examples/step-17/step-17.cc b/examples/step-17/step-17.cc
index 469b00bca7..5298d9dd8a 100644
--- a/examples/step-17/step-17.cc
+++ b/examples/step-17/step-17.cc
@@ -65,7 +65,6 @@
 // simply map to sequential, local vectors and matrices if there is only a
 // single process, i.e. if you are running on only one machine, and without
 // MPI support):
-#include
 #include
 #include
 // Then we also need interfaces for solvers and preconditioners that PETSc
@@ -143,14 +142,9 @@ namespace Step17
 
     // In step-8, this would have been the place where we would have declared
     // the member variables for the sparsity pattern, the system matrix, right
-    // hand, and solution vector. We change these declarations to use
-    // parallel PETSc objects instead (note that the fact that we use the
-    // parallel versions is denoted the fact that we use the classes from the
-    // PETScWrappers::MPI namespace; sequential versions of these
-    // classes are in the PETScWrappers namespace, i.e. without
-    // the MPI part, be aware that these classes are deprecated).
-    // Note also that we do not use a separate sparsity pattern, since PETSc
-    // manages that as part of its matrix data structures.
+    // hand, and solution vector. We change these declarations to use parallel
+    // PETSc objects instead. Note that we do not use a separate sparsity
+    // pattern, since PETSc manages that as part of its matrix data structures.
     PETScWrappers::MPI::SparseMatrix system_matrix;
 
     PETScWrappers::MPI::Vector solution;
diff --git a/examples/step-18/step-18.cc b/examples/step-18/step-18.cc
index ea1906eac4..38ae6af245 100644
--- a/examples/step-18/step-18.cc
+++ b/examples/step-18/step-18.cc
@@ -30,7 +30,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
diff --git a/examples/step-31/step-31.cc b/examples/step-31/step-31.cc
index 653d8c31d0..be44f33ef5 100644
--- a/examples/step-31/step-31.cc
+++ b/examples/step-31/step-31.cc
@@ -393,6 +393,15 @@ namespace Step31
 
+    // When using a TrilinosWrappers::MPI::Vector or a
+    // TrilinosWrappers::MPI::BlockVector, the Vector is initialized using an
+    // IndexSet. The IndexSet is used not only to resize the
+    // TrilinosWrappers::MPI::Vector but also to associate each index in the
+    // TrilinosWrappers::MPI::Vector with a degree of freedom (see step-40 for
+    // a more detailed explanation). The function complete_index_set() creates
+    // an IndexSet where every valid index is part of the set. Note that this
+    // program can only be run sequentially and will throw an exception if used
+    // in parallel.
     template
     BlockSchurPreconditioner::
     BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
@@ -402,19 +411,9 @@ namespace Step31
       :
       stokes_matrix (&S),
       m_inverse (&Mpinv),
-      a_preconditioner (Apreconditioner)
-    {
-      // When using a TrilinosWrappers::MPI::Vector or a
-      // TrilinosWrappers::MPI::BlockVector, the Vector is initialized using an
-      // IndexSet. IndexSet is used not only to resize the
-      // TrilinosWrappers::MPI::Vector but it also associates an index in the
-      // TrilinosWrappers::MPI::Vector with a degree of freedom (see step-40 for
-      // a more detailed explanation). This assocation is done by the add_range()
-      // function.
-      IndexSet tmp_index_set(stokes_matrix->block(1,1).m());
-      tmp_index_set.add_range(0,stokes_matrix->block(1,1).m());
-      tmp.reinit(tmp_index_set, MPI_COMM_WORLD);
-    }
+      a_preconditioner (Apreconditioner),
+      tmp (complete_index_set(stokes_matrix->block(1,1).m()))
+    {}
 
     // Next is the vmult function. We implement the action of
@@ -513,7 +512,7 @@ namespace Step31
     DoFHandler stokes_dof_handler;
     ConstraintMatrix stokes_constraints;
 
-    std::vector stokes_block_sizes;
+    std::vector stokes_partitioning;
     TrilinosWrappers::BlockSparseMatrix stokes_matrix;
     TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
@@ -963,12 +962,9 @@ namespace Step31
     // Trilinos matrices store the sparsity pattern internally, there is no
     // need to keep the sparsity pattern around after the initialization of
     // the matrix.
-    stokes_block_sizes.clear();
-    stokes_block_sizes.resize (2);
-    stokes_block_sizes[0].set_size(n_u);
-    stokes_block_sizes[1].set_size(n_p);
-    stokes_block_sizes[0].add_range(0,n_u);
-    stokes_block_sizes[1].add_range(0,n_p);
+    stokes_partitioning.resize (2);
+    stokes_partitioning[0] = complete_index_set (n_u);
+    stokes_partitioning[1] = complete_index_set (n_p);
     {
       stokes_matrix.clear ();
@@ -1054,11 +1050,10 @@ namespace Step31
     // and $\mathbf u^{n-2}$, as well as for the temperatures $T^{n}$,
     // $T^{n-1}$ and $T^{n-2}$ (required for time stepping) and all the system
     // right hand sides to their correct sizes and block structure:
-    IndexSet temperature_partitioning (n_T);
-    temperature_partitioning.add_range(0,n_T);
-    stokes_solution.reinit (stokes_block_sizes, MPI_COMM_WORLD);
-    old_stokes_solution.reinit (stokes_block_sizes, MPI_COMM_WORLD);
-    stokes_rhs.reinit (stokes_block_sizes, MPI_COMM_WORLD);
+    IndexSet temperature_partitioning = complete_index_set (n_T);
+    stokes_solution.reinit (stokes_partitioning, MPI_COMM_WORLD);
+    old_stokes_solution.reinit (stokes_partitioning, MPI_COMM_WORLD);
+    stokes_rhs.reinit (stokes_partitioning, MPI_COMM_WORLD);
 
     temperature_solution.reinit (temperature_partitioning, MPI_COMM_WORLD);
     old_temperature_solution.reinit (temperature_partitioning, MPI_COMM_WORLD);
@@ -2219,10 +2214,8 @@ int main (int argc, char *argv[])
                                                       numbers::invalid_unsigned_int);
 
       // This program can only be run in serial. Otherwise, throw an exception.
-      int size;
-      MPI_Comm_size(MPI_COMM_WORLD,&size);
-      AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
-                                      " use mpirun -np 1 ./step-31"));
+      AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)==1,
+                  ExcMessage("This program can only be run in serial, use mpirun -np 1 ./step-31"));
 
       BoussinesqFlowProblem<2> flow_problem;
       flow_problem.run ();
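For block objects, the same idea extends to one complete IndexSet per block,
exactly as setup_dofs() above fills stokes_partitioning; the
BlockSchurPreconditioner change additionally shows that tmp can now be built
directly in the member-initializer list instead of through an
add_range()/reinit() pair in the constructor body. A hedged sketch of the
block form (initialize_blocks is an illustrative helper, not from the patch):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_block_vector.h>

#include <vector>

void initialize_blocks (dealii::TrilinosWrappers::MPI::BlockVector &v,
                        const unsigned int n_u,
                        const unsigned int n_p)
{
  using namespace dealii;

  // One "everything is local" IndexSet per block.
  std::vector<IndexSet> partitioning (2);
  partitioning[0] = complete_index_set (n_u);   // velocity block
  partitioning[1] = complete_index_set (n_p);   // pressure block

  v.reinit (partitioning, MPI_COMM_WORLD);
  v.collect_sizes ();
}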
diff --git a/examples/step-32/step-32.cc b/examples/step-32/step-32.cc
index dab38039d2..baea0d2bc0 100644
--- a/examples/step-32/step-32.cc
+++ b/examples/step-32/step-32.cc
@@ -861,13 +861,8 @@ namespace Step32
   // - In a bit of naming confusion, you will notice below that some of the
   //   variables from namespace TrilinosWrappers are taken from namespace
   //   TrilinosWrappers::MPI (such as the right hand side vectors) whereas
-  //   others are not (such as the various matrices). For the matrices, we
-  //   happen to use the same class names for %parallel and sequential data
-  //   structures, i.e., all matrices will actually be considered %parallel
-  //   below. On the other hand, for vectors, only those from namespace
-  //   TrilinosWrappers::MPI are actually distributed (be aware that
-  //   TrilinosWrappers::Vector and TrilinosWrappers::BlockVector are
-  //   deprecated). In particular, we will frequently have to query velocities
+  //   others are not (such as the various matrices). This is for historical
+  //   reasons. We will frequently have to query velocities
   //   and temperatures at arbitrary quadrature points; consequently, rather
   //   than importing ghost information of a vector whenever we need access
   //   to degrees of freedom that are relevant locally but owned by another
diff --git a/examples/step-36/step-36.cc b/examples/step-36/step-36.cc
index 6b5a539508..438ae79a62 100644
--- a/examples/step-36/step-36.cc
+++ b/examples/step-36/step-36.cc
@@ -49,7 +49,7 @@
 // PETSc appears here because SLEPc depends on this library:
 #include
-#include
+#include
 
 // And then we need to actually import the interfaces for solvers that SLEPc
 // provides:
@@ -201,14 +201,15 @@ namespace Step36
     // is initialized using an IndexSet. IndexSet is used not only to resize the
     // PETScWrappers::MPI::Vector but it also associates an index in the
     // PETScWrappers::MPI::Vector with a degree of freedom (see step-40 for a
-    // more detailed explanation). This assocation is done by the add_range()
-    // function:
-    IndexSet eigenfunction_index_set(dof_handler.n_dofs ());
-    eigenfunction_index_set.add_range(0, dof_handler.n_dofs ());
+    // more detailed explanation). The function complete_index_set() creates
+    // an IndexSet where every valid index is part of the set. Note that this
+    // program can only be run sequentially and will throw an exception if used
+    // in parallel.
+    IndexSet eigenfunction_partitioning = complete_index_set(dof_handler.n_dofs ());
 
     eigenfunctions
     .resize (parameters.get_integer ("Number of eigenvalues/eigenfunctions"));
     for (unsigned int i=0; i<eigenfunctions.size (); ++i)
-      eigenfunctions[i].reinit (eigenfunction_index_set, MPI_COMM_WORLD);
+      eigenfunctions[i].reinit (eigenfunction_partitioning, MPI_COMM_WORLD);
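The PETSc wrappers take the same IndexSet-based initialization as the
Trilinos ones; a hedged sketch mirroring the step-36 loop above
(initialize_eigenfunctions is an illustrative name; a PETSc-enabled deal.II
build is assumed):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/petsc_parallel_vector.h>

#include <vector>

void initialize_eigenfunctions
  (std::vector<dealii::PETScWrappers::MPI::Vector> &eigenfunctions,
   const unsigned int n_dofs)
{
  using namespace dealii;

  // In the sequential special case every index is locally owned.
  const IndexSet partitioning = complete_index_set (n_dofs);

  for (unsigned int i = 0; i < eigenfunctions.size (); ++i)
    eigenfunctions[i].reinit (partitioning, MPI_COMM_WORLD);
}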
diff --git a/examples/step-41/step-41.cc b/examples/step-41/step-41.cc
     DoFHandler dof_handler;
     ConstraintMatrix constraints;
     IndexSet active_set;
-    IndexSet solution_index_set;
 
     TrilinosWrappers::SparseMatrix system_matrix;
     TrilinosWrappers::SparseMatrix complete_system_matrix;
@@ -256,12 +255,11 @@
     system_matrix.reinit (dsp);
     complete_system_matrix.reinit (dsp);
 
-    solution_index_set.set_size(dof_handler.n_dofs());
-    solution_index_set.add_range(0, dof_handler.n_dofs());
-    solution.reinit (solution_index_set, MPI_COMM_WORLD);
-    system_rhs.reinit (solution_index_set, MPI_COMM_WORLD);
-    complete_system_rhs.reinit (solution_index_set, MPI_COMM_WORLD);
-    contact_force.reinit (solution_index_set, MPI_COMM_WORLD);
+    IndexSet solution_partitioning = complete_index_set(dof_handler.n_dofs());
+    solution.reinit (solution_partitioning, MPI_COMM_WORLD);
+    system_rhs.reinit (solution_partitioning, MPI_COMM_WORLD);
+    complete_system_rhs.reinit (solution_partitioning, MPI_COMM_WORLD);
+    contact_force.reinit (solution_partitioning, MPI_COMM_WORLD);
 
     // The only other thing to do here is to compute the factors in the $B$
     // matrix which is used to scale the residual. As discussed in the
@@ -271,7 +269,7 @@
     TrilinosWrappers::SparseMatrix mass_matrix;
     mass_matrix.reinit (dsp);
     assemble_mass_matrix_diagonal (mass_matrix);
-    diagonal_of_mass_matrix.reinit (solution_index_set);
+    diagonal_of_mass_matrix.reinit (solution_partitioning);
     for (unsigned int j=0; j
 
       ObstacleProblem obstacle_problem;
       obstacle_problem.run ();
diff --git a/examples/step-43/step-43.cc b/examples/step-43/step-43.cc
index dbea7124eb..d17e8db85b 100644
--- a/examples/step-43/step-43.cc
+++ b/examples/step-43/step-43.cc
@@ -445,12 +445,9 @@ namespace Step43
       :
       darcy_matrix (&S),
       m_inverse (&Mpinv),
-      a_preconditioner (Apreconditioner)
-    {
-      IndexSet tmp_index_set(darcy_matrix->block(1,1).m());
-      tmp_index_set.add_range(0,darcy_matrix->block(1,1).m());
-      tmp.reinit(tmp_index_set, MPI_COMM_WORLD);
-    }
+      a_preconditioner (Apreconditioner),
+      tmp (complete_index_set(darcy_matrix->block(1,1).m()))
+    {}
 
     template
@@ -550,7 +547,6 @@ namespace Step43
     FESystem darcy_fe;
     DoFHandler darcy_dof_handler;
     ConstraintMatrix darcy_constraints;
-    std::vector darcy_index_set;
 
     ConstraintMatrix darcy_preconditioner_constraints;
@@ -568,7 +564,6 @@ namespace Step43
     FE_Q saturation_fe;
     DoFHandler saturation_dof_handler;
     ConstraintMatrix saturation_constraints;
-    IndexSet saturation_index_set;
 
     TrilinosWrappers::SparseMatrix saturation_matrix;
@@ -808,35 +803,30 @@ namespace Step43
       saturation_matrix.reinit (dsp);
     }
 
-    darcy_index_set.clear();
-    darcy_index_set.resize(2);
-    darcy_index_set[0].set_size(n_u);
-    darcy_index_set[1].set_size(n_p);
-    darcy_index_set[0].add_range(0,n_u);
-    darcy_index_set[1].add_range(0,n_p);
-    darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+    std::vector darcy_partitioning(2);
+    darcy_partitioning[0] = complete_index_set (n_u);
+    darcy_partitioning[1] = complete_index_set (n_p);
+    darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
     darcy_solution.collect_sizes ();
 
-    last_computed_darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+    last_computed_darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
     last_computed_darcy_solution.collect_sizes ();
 
-    second_last_computed_darcy_solution.reinit (darcy_index_set, MPI_COMM_WORLD);
+    second_last_computed_darcy_solution.reinit (darcy_partitioning, MPI_COMM_WORLD);
     second_last_computed_darcy_solution.collect_sizes ();
 
-    darcy_rhs.reinit (darcy_index_set, MPI_COMM_WORLD);
+    darcy_rhs.reinit (darcy_partitioning, MPI_COMM_WORLD);
     darcy_rhs.collect_sizes ();
 
-    saturation_index_set.clear();
-    saturation_index_set.set_size(n_s);
-    saturation_index_set.add_range(0,n_s);
-    saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
-    old_saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
-    old_old_saturation_solution.reinit (saturation_index_set, MPI_COMM_WORLD);
+    IndexSet saturation_partitioning = complete_index_set(n_s);
+    saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
+    old_saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
+    old_old_saturation_solution.reinit (saturation_partitioning, MPI_COMM_WORLD);
 
-    saturation_matching_last_computed_darcy_solution.reinit (saturation_index_set,
+    saturation_matching_last_computed_darcy_solution.reinit (saturation_partitioning,
                                                              MPI_COMM_WORLD);
-    saturation_rhs.reinit (saturation_index_set, MPI_COMM_WORLD);
+    saturation_rhs.reinit (saturation_partitioning, MPI_COMM_WORLD);
   }
 
@@ -2262,10 +2252,8 @@ int main (int argc, char *argv[])
                                                       numbers::invalid_unsigned_int);
 
       // This program can only be run in serial. Otherwise, throw an exception.
-      int size;
-      MPI_Comm_size(MPI_COMM_WORLD,&size);
-      AssertThrow(size==1, ExcMessage("This program can only be run in serial,"
-                                      " use mpirun -np 1 ./step-43"));
+      AssertThrow(Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)==1,
+                  ExcMessage("This program can only be run in serial, use mpirun -np 1 ./step-43"));
 
       TwoPhaseFlowProblem<2> two_phase_flow_problem(1);
       two_phase_flow_problem.run ();
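The header changes that follow all make the same move: DEAL_II_DEPRECATED is
taken off each individual constructor and attached to the class definition
itself, so the compiler can warn on any use of the type rather than only on
construction. A schematic of the pattern (OldStyleVector and NewStyleVector
are illustrative, not the actual deal.II classes; DEAL_II_DEPRECATED typically
expands to a compiler attribute such as __attribute__((deprecated))):

// Stand-in for deal.II's configuration macro, only to keep the sketch
// self-contained.
#ifndef DEAL_II_DEPRECATED
#  define DEAL_II_DEPRECATED __attribute__((deprecated))
#endif

// Before: each constructor carried the tag, so only construction warned.
class OldStyleVector
{
public:
  OldStyleVector () DEAL_II_DEPRECATED;
  explicit OldStyleVector (const unsigned int n) DEAL_II_DEPRECATED;
};

// After: one tag on the class covers every use of the type, including
// members, function arguments, and typedefs.
class NewStyleVector
{
public:
  NewStyleVector ();
  explicit NewStyleVector (const unsigned int n);
} DEAL_II_DEPRECATED;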
diff --git a/include/deal.II/lac/petsc_block_vector.h b/include/deal.II/lac/petsc_block_vector.h
index 38080427dd..084218813f 100644
--- a/include/deal.II/lac/petsc_block_vector.h
+++ b/include/deal.II/lac/petsc_block_vector.h
@@ -43,7 +43,7 @@ namespace PETScWrappers
    * interface, this class handles the actual allocation of vectors and
    * provides functions that are specific to the underlying vector type.
    *
-   * This class is deprecated use PETScWrappers::MPI::BlockVector.
+   * This class is deprecated; use PETScWrappers::MPI::BlockVector instead.
    *
    * @ingroup Vectors
    *
@@ -87,13 +87,13 @@ namespace PETScWrappers
      * of different sizes.
      */
     explicit BlockVector (const unsigned int num_blocks = 0,
-                          const size_type block_size = 0) DEAL_II_DEPRECATED;
+                          const size_type block_size = 0);
 
     /**
      * Copy-Constructor. Dimension set to that of V, all components are copied
      * from V
      */
-    BlockVector (const BlockVector &V) DEAL_II_DEPRECATED;
+    BlockVector (const BlockVector &V);
 
     /**
      * Copy-constructor: copy the values from a PETSc wrapper parallel block
      * vector.
      *
@@ -105,13 +105,13 @@ namespace PETScWrappers
      * It is not sufficient if only one processor tries to copy the elements
      * from the other processors over to its own process space.
      */
-    explicit BlockVector (const MPI::BlockVector &v) DEAL_II_DEPRECATED;
+    explicit BlockVector (const MPI::BlockVector &v);
 
     /**
      * Constructor. Set the number of blocks to n.size() and
      * initialize each block with n[i] zero elements.
      */
-    BlockVector (const std::vector &n) DEAL_II_DEPRECATED;
+    BlockVector (const std::vector &n);
 
     /**
      * Constructor. Set the number of blocks to n.size(). Initialize
      * each block with n[i] zero elements.
      */
     template
     BlockVector (const std::vector &n,
                  const InputIterator first,
-                 const InputIterator end) DEAL_II_DEPRECATED;
+                 const InputIterator end);
 
     /**
      * Destructor. Clears memory
@@ -244,7 +244,7 @@ namespace PETScWrappers
      */
     DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
     ///@}
-  };
+  } DEAL_II_DEPRECATED;
 
   /*@}*/
diff --git a/include/deal.II/lac/petsc_vector.h b/include/deal.II/lac/petsc_vector.h
index 5e953c2810..c923ffc22e 100644
--- a/include/deal.II/lac/petsc_vector.h
+++ b/include/deal.II/lac/petsc_vector.h
@@ -45,7 +45,7 @@ namespace PETScWrappers
    * virtual functions). Only the functions creating a vector of specific type
    * differ, and are implemented in this particular class.
    *
-   * This class is deprecated use PETScWrappers::MPI::Vector instead.
+   * This class is deprecated; use PETScWrappers::MPI::Vector instead.
    *
    * @ingroup Vectors
    * @author Wolfgang Bangerth, 2004
@@ -73,7 +73,7 @@ namespace PETScWrappers
     /**
      * Default constructor. Initialize the vector as empty.
      */
-    Vector () DEAL_II_DEPRECATED;
+    Vector ();
 
     /**
      * Constructor. Set dimension to @p n and initialize all elements with
      * zero.
      *
@@ -85,14 +85,14 @@ namespace PETScWrappers
      * v=Vector@(0);, i.e. the vector is replaced by one of
      * length zero.
      */
-    explicit Vector (const size_type n) DEAL_II_DEPRECATED;
+    explicit Vector (const size_type n);
 
     /**
      * Copy-constructor from deal.II vectors. Sets the dimension to that of
      * the given vector, and copies all elements.
      */
     template
-    explicit Vector (const dealii::Vector &v) DEAL_II_DEPRECATED;
+    explicit Vector (const dealii::Vector &v);
 
     /**
      * Construct it from an existing PETSc Vector of type Vec. Note: this does
      *
@@ -100,12 +100,12 @@ namespace PETScWrappers
      * the vector is not used twice at the same time or destroyed while in
      * use. This class does not destroy the PETSc object. Handle with care!
      */
-    explicit Vector (const Vec &v) DEAL_II_DEPRECATED;
+    explicit Vector (const Vec &v);
 
     /**
      * Copy-constructor: copy the values from a PETSc wrapper vector class.
      */
-    Vector (const Vector &v) DEAL_II_DEPRECATED;
+    Vector (const Vector &v);
 
     /**
      * Copy-constructor: copy the values from a PETSc wrapper parallel vector
      * class.
      *
@@ -116,7 +116,7 @@ namespace PETScWrappers
      * It is not sufficient if only one processor tries to copy the elements
      * from the other processors over to its own process space.
      */
-    explicit Vector (const MPI::Vector &v) DEAL_II_DEPRECATED;
+    explicit Vector (const MPI::Vector &v);
 
     /**
      * Copy the given vector. Resize the present vector if necessary.
@@ -183,7 +183,7 @@ namespace PETScWrappers
      * vector. @p n denotes the total size of the vector to be created.
      */
     void create_vector (const size_type n);
-  };
+  } DEAL_II_DEPRECATED;
 
   /*@}*/
diff --git a/include/deal.II/lac/trilinos_block_vector.h b/include/deal.II/lac/trilinos_block_vector.h
index f9eb32422f..0d388d7c25 100644
--- a/include/deal.II/lac/trilinos_block_vector.h
+++ b/include/deal.II/lac/trilinos_block_vector.h
@@ -59,7 +59,7 @@ namespace TrilinosWrappers
    * block vector class do only work in case the program is run on only one
    * processor, since the Trilinos matrices are inherently parallel.
    *
-   * This class is deprecated use TrilinosWrappers::MPI::BlockVector instead.
+   * This class is deprecated; use TrilinosWrappers::MPI::BlockVector instead.
    *
    * @ingroup Vectors
    * @ingroup TrilinosWrappers
   * @see @
@@ -94,14 +94,14 @@ namespace TrilinosWrappers
     /**
      * Default constructor. Generate an empty vector without any blocks.
      */
-    BlockVector () DEAL_II_DEPRECATED;
+    BlockVector ();
 
     /**
      * Constructor. Generate a block vector with as many blocks as there are
      * entries in Input_Maps. For this non-distributed vector, the %parallel
      * partitioning is not used, just the global size of the partitioner.
      */
-    explicit BlockVector (const std::vector &partitioner) DEAL_II_DEPRECATED;
+    explicit BlockVector (const std::vector &partitioner);
 
     /**
      * Constructor. Generate a block vector with as many blocks as there are
      * entries in Input_Maps. For this non-distributed vector, the %parallel
      * partitioning is not used, just the global size of the partitioner.
      */
     explicit BlockVector (const std::vector &partitioner,
-                          const MPI_Comm &communicator = MPI_COMM_WORLD) DEAL_II_DEPRECATED;
+                          const MPI_Comm &communicator = MPI_COMM_WORLD);
 
     /**
      * Copy-Constructor. Set all the properties of the non-%parallel vector to
      * those of the given %parallel vector and import the elements.
      */
-    BlockVector (const MPI::BlockVector &V) DEAL_II_DEPRECATED;
+    BlockVector (const MPI::BlockVector &V);
 
     /**
      * Copy-Constructor. Set all the properties of the vector to those of the
      * given input vector and copy the elements.
      */
-    BlockVector (const BlockVector &V) DEAL_II_DEPRECATED;
+    BlockVector (const BlockVector &V);
 
     /**
      * Creates a block vector consisting of num_blocks components,
      * but there is no content in the individual components and the user has
      * to fill appropriate data using a reinit of the blocks.
      */
-    explicit BlockVector (const size_type num_blocks) DEAL_II_DEPRECATED;
+    explicit BlockVector (const size_type num_blocks);
 
     /**
      * Constructor. Set the number of blocks to n.size() and
      * initialize each block with n[i] zero elements.
      *
      * References BlockVector.reinit().
      */
-    explicit BlockVector (const std::vector &N) DEAL_II_DEPRECATED;
+    explicit BlockVector (const std::vector &N);
 
     /**
      * Constructor. Set the number of blocks to n.size(). Initialize
      * each block with n[i] zero elements.
      */
     template
     BlockVector (const std::vector &n,
                  const InputIterator first,
-                 const InputIterator end) DEAL_II_DEPRECATED;
+                 const InputIterator end);
 
     /**
      * Destructor. Clears memory
@@ -303,7 +303,7 @@ namespace TrilinosWrappers
              << "local_size = global_size is a necessary condition, but"
              << arg1 << " != " << arg2 << " was given!");
 
-  };
+  } DEAL_II_DEPRECATED;
diff --git a/include/deal.II/lac/trilinos_vector.h b/include/deal.II/lac/trilinos_vector.h
index 241a334d84..447065b813 100644
--- a/include/deal.II/lac/trilinos_vector.h
+++ b/include/deal.II/lac/trilinos_vector.h
@@ -710,7 +710,7 @@ namespace TrilinosWrappers
    * in order to be able to access all elements in the vector or to apply
    * certain deal.II functions.
    *
-   * This class is deprecated use TrilinosWrappers::MPI::Vector instead.
+   * This class is deprecated; use TrilinosWrappers::MPI::Vector instead.
    *
    * @ingroup TrilinosWrappers
    * @ingroup Vectors
@@ -741,12 +741,12 @@ namespace TrilinosWrappers
      * function reinit() will have to give the vector the correct
      * size.
      */
-    Vector () DEAL_II_DEPRECATED;
+    Vector ();
 
     /**
      * This constructor takes as input the number of elements in the vector.
      */
-    explicit Vector (const size_type n) DEAL_II_DEPRECATED;
+    explicit Vector (const size_type n);
 
     /**
      * This constructor takes as input the number of elements in the vector.
      *
@@ -757,7 +757,7 @@ namespace TrilinosWrappers
      * ignored, the only thing that matters is the size of the index space
      * described by this argument.
      */
-    explicit Vector (const Epetra_Map &partitioning) DEAL_II_DEPRECATED;
+    explicit Vector (const Epetra_Map &partitioning);
 
     /**
      * This constructor takes as input the number of elements in the vector.
      *
@@ -769,20 +769,20 @@ namespace TrilinosWrappers
      * size of the index space described by this argument.
      */
     explicit Vector (const IndexSet &partitioning,
-                     const MPI_Comm &communicator = MPI_COMM_WORLD) DEAL_II_DEPRECATED;
+                     const MPI_Comm &communicator = MPI_COMM_WORLD);
 
     /**
      * This constructor takes a (possibly parallel) Trilinos Vector and
      * generates a localized version of the whole content on each processor.
      */
-    explicit Vector (const VectorBase &V) DEAL_II_DEPRECATED;
+    explicit Vector (const VectorBase &V);
 
     /**
      * Copy-constructor from deal.II vectors. Sets the dimension to that of
      * the given vector, and copies all elements.
      */
     template
-    explicit Vector (const dealii::Vector &v) DEAL_II_DEPRECATED;
+    explicit Vector (const dealii::Vector &v);
 
     /**
      * Reinit function that resizes the vector to the size specified by
@@ -870,7 +870,7 @@ namespace TrilinosWrappers
      * thus an empty function.
      */
     void update_ghost_values () const;
-  };
+  } DEAL_II_DEPRECATED;
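For user code that still holds one of the now-deprecated serial vectors, the
migration mirrors the tutorial changes above: switch to the corresponding MPI
class and initialize it from a complete IndexSet. A hedged sketch (migrate is
an illustrative helper, not from the patch):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_vector.h>

void migrate (const unsigned int n)
{
  using namespace dealii;

  // Deprecated by this commit:
  //   TrilinosWrappers::Vector v_old (n);

  // Replacement, running on a single process:
  TrilinosWrappers::MPI::Vector v_new;
  v_new.reinit (complete_index_set (n), MPI_COMM_WORLD);
}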