From: kronbichler Date: Thu, 11 Sep 2008 11:20:57 +0000 (+0000) Subject: Updated a few functions. There still seems to be a problem in refinement when running... X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=7cef2d27abddfb13f497c960dd8036f23e4e05ff;p=dealii-svn.git Updated a few functions. There still seems to be a problem in refinement when running in parallel. git-svn-id: https://svn.dealii.org/trunk@16807 0785d39b-7218-0410-832d-ea1e28bc413d --- diff --git a/deal.II/examples/step-32/step-32.cc b/deal.II/examples/step-32/step-32.cc index 5038e3ffd2..c0c13e2f4a 100644 --- a/deal.II/examples/step-32/step-32.cc +++ b/deal.II/examples/step-32/step-32.cc @@ -406,7 +406,7 @@ double BoussinesqFlowProblem::get_maximal_velocity () const const QGauss quadrature_formula(stokes_degree+2); const unsigned int n_q_points = quadrature_formula.size(); - BlockVector stokes_vector (stokes_solution); + BlockVector localized_stokes_solution (stokes_solution); FEValues fe_values (stokes_fe, quadrature_formula, update_values); std::vector > stokes_values(n_q_points, @@ -420,7 +420,7 @@ double BoussinesqFlowProblem::get_maximal_velocity () const if (cell->subdomain_id() == (unsigned int)trilinos_communicator.MyPID()) { fe_values.reinit (cell); - fe_values.get_function_values (stokes_vector, stokes_values); + fe_values.get_function_values (localized_stokes_solution, stokes_values); for (unsigned int q=0; q::get_extrapolated_temperature_range () const std::vector old_temperature_values(n_q_points); std::vector old_old_temperature_values(n_q_points); - Vector old_temperature_vector (old_temperature_solution); - Vector old_old_temperature_vector (old_old_temperature_solution); + Vector localized_old_temperature_solution (old_temperature_solution); + Vector old_localized_old_temperature_solution (old_old_temperature_solution); double min_temperature = (1. 
+ time_step/old_time_step) * old_temperature_solution.linfty_norm() @@ -468,8 +468,8 @@ BoussinesqFlowProblem::get_extrapolated_temperature_range () const if (cell->subdomain_id() == (unsigned int)trilinos_communicator.MyPID()) { fe_values.reinit (cell); - fe_values.get_function_values (old_temperature_vector, old_temperature_values); - fe_values.get_function_values (old_old_temperature_vector, old_old_temperature_values); + fe_values.get_function_values (localized_old_temperature_solution, old_temperature_values); + fe_values.get_function_values (old_localized_old_temperature_solution, old_old_temperature_values); for (unsigned int q=0; q::assemble_stokes_system () const FEValuesExtractors::Vector velocities (0); const FEValuesExtractors::Scalar pressure (dim); - Vector old_temperature_vector (old_temperature_solution); + Vector localized_old_temperature_solution (old_temperature_solution); typename DoFHandler::active_cell_iterator cell = stokes_dof_handler.begin_active(), @@ -892,7 +892,7 @@ void BoussinesqFlowProblem::assemble_stokes_system () local_matrix = 0; local_rhs = 0; - temperature_fe_values.get_function_values (old_temperature_vector, + temperature_fe_values.get_function_values (localized_old_temperature_solution, old_temperature_values); for (unsigned int q=0; q::assemble_temperature_system () global_T_range = get_extrapolated_temperature_range(); const double global_Omega_diameter = GridTools::diameter (triangulation); - const Vector old_temperature_vector (old_temperature_solution); - const Vector old_old_temperature_vector (old_old_temperature_solution); - const BlockVector stokes_vector (stokes_solution); + const Vector localized_old_temperature_solution (old_temperature_solution); + const Vector localized_old_old_temperature_solution (old_old_temperature_solution); + const BlockVector localized_stokes_solution (stokes_solution); typename DoFHandler::active_cell_iterator cell = temperature_dof_handler.begin_active(), @@ -1134,25 +1134,25 @@ void BoussinesqFlowProblem::assemble_temperature_system () temperature_fe_values.reinit (cell); stokes_fe_values.reinit (stokes_cell); - temperature_fe_values.get_function_values (old_temperature_vector, + temperature_fe_values.get_function_values (localized_old_temperature_solution, old_temperature_values); - temperature_fe_values.get_function_values (old_old_temperature_vector, + temperature_fe_values.get_function_values (localized_old_old_temperature_solution, old_old_temperature_values); - temperature_fe_values.get_function_gradients (old_temperature_vector, + temperature_fe_values.get_function_gradients (localized_old_temperature_solution, old_temperature_grads); - temperature_fe_values.get_function_gradients (old_old_temperature_vector, + temperature_fe_values.get_function_gradients (localized_old_old_temperature_solution, old_old_temperature_grads); - temperature_fe_values.get_function_hessians (old_temperature_vector, + temperature_fe_values.get_function_hessians (localized_old_temperature_solution, old_temperature_hessians); - temperature_fe_values.get_function_hessians (old_old_temperature_vector, + temperature_fe_values.get_function_hessians (localized_old_old_temperature_solution, old_old_temperature_hessians); temperature_right_hand_side.value_list (temperature_fe_values.get_quadrature_points(), gamma_values); - stokes_fe_values.get_function_values (stokes_vector, + stokes_fe_values.get_function_values (localized_stokes_solution, present_stokes_values); const double nu @@ -1276,7 +1276,7 @@ void BoussinesqFlowProblem::solve 
() << " GMRES iterations for Stokes subsystem." << std::endl; - const BlockVector localized_stokes_solution (stokes_solution); + BlockVector localized_stokes_solution (stokes_solution); stokes_constraints.distribute (localized_stokes_solution); stokes_solution = localized_stokes_solution; } @@ -1303,7 +1303,7 @@ void BoussinesqFlowProblem::solve () temperature_rhs, preconditioner); - const Vector localized_temperature_solution (temperature_solution); + Vector localized_temperature_solution (temperature_solution); temperature_constraints.distribute (localized_temperature_solution); temperature_solution = localized_temperature_solution; @@ -1346,6 +1346,8 @@ void BoussinesqFlowProblem::output_results () const ExcInternalError()); Vector joint_solution (joint_dof_handler.n_dofs()); + BlockVector localized_stokes_solution (stokes_solution); + Vector localized_temperature_solution (temperature_solution); { std::vector local_joint_dof_indices (joint_fe.dofs_per_cell); @@ -1371,7 +1373,7 @@ void BoussinesqFlowProblem::output_results () const local_stokes_dof_indices.size(), ExcInternalError()); joint_solution(local_joint_dof_indices[i]) - = stokes_solution(local_stokes_dof_indices[joint_fe.system_to_base_index(i).second]); + = localized_stokes_solution(local_stokes_dof_indices[joint_fe.system_to_base_index(i).second]); } else { @@ -1382,7 +1384,7 @@ void BoussinesqFlowProblem::output_results () const local_stokes_dof_indices.size(), ExcInternalError()); joint_solution(local_joint_dof_indices[i]) - = temperature_solution(local_temperature_dof_indices[joint_fe.system_to_base_index(i).second]); + = localized_temperature_solution(local_temperature_dof_indices[joint_fe.system_to_base_index(i).second]); } } } @@ -1423,10 +1425,12 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) { Vector estimated_error_per_cell (triangulation.n_active_cells()); + Vector localized_temperature_solution (temperature_solution); + KellyErrorEstimator::estimate (temperature_dof_handler, QGauss(temperature_degree+1), typename FunctionMap::type(), - temperature_solution, + localized_temperature_solution, estimated_error_per_cell); GridRefinement::refine_and_coarsen_fixed_fraction (triangulation, @@ -1438,13 +1442,11 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) cell != triangulation.end(); ++cell) cell->clear_refine_flag (); - std::vector x_solution (2); - x_solution[0].reinit (temperature_solution); + std::vector > x_solution (2); x_solution[0] = temperature_solution; - x_solution[1].reinit (temperature_solution); x_solution[1] = old_temperature_solution; - SolutionTransfer soltrans(temperature_dof_handler); + SolutionTransfer > soltrans(temperature_dof_handler); triangulation.prepare_coarsening_and_refinement(); soltrans.prepare_for_coarsening_and_refinement(x_solution); @@ -1452,9 +1454,9 @@ void BoussinesqFlowProblem::refine_mesh (const unsigned int max_grid_level) triangulation.execute_coarsening_and_refinement (); setup_dofs (); - std::vector tmp (2); - tmp[0].reinit (temperature_solution); - tmp[1].reinit (temperature_solution); + std::vector > tmp (2); + tmp[0] = temperature_solution; + tmp[1] = temperature_solution; soltrans.interpolate(x_solution, tmp); temperature_solution = tmp[0]; diff --git a/deal.II/lac/include/lac/block_vector.h b/deal.II/lac/include/lac/block_vector.h index 90958f5ea3..345f555c4b 100644 --- a/deal.II/lac/include/lac/block_vector.h +++ b/deal.II/lac/include/lac/block_vector.h @@ -31,6 +31,7 @@ DEAL_II_NAMESPACE_OPEN namespace 
TrilinosWrappers { class Vector; + class BlockVector; } #endif diff --git a/deal.II/lac/include/lac/trilinos_block_vector.h b/deal.II/lac/include/lac/trilinos_block_vector.h index 8674754723..2be6d4507d 100644 --- a/deal.II/lac/include/lac/trilinos_block_vector.h +++ b/deal.II/lac/include/lac/trilinos_block_vector.h @@ -18,12 +18,16 @@ #include #include #include +#include #include #ifdef DEAL_II_USE_TRILINOS DEAL_II_NAMESPACE_OPEN + // forward declaration +template class BlockVector; + /*! @addtogroup TrilinosWrappers *@{ */ @@ -36,16 +40,18 @@ namespace TrilinosWrappers /** * An implementation of block vectors based on the vector class - * implemented in TrilinosWrappers. While the base class provides for most of the - * interface, this class handles the actual allocation of vectors and provides - * functions that are specific to the underlying vector type. + * implemented in TrilinosWrappers. While the base class provides for + * most of the interface, this class handles the actual allocation of + * vectors and provides functions that are specific to the underlying + * vector type. * - * The model of distribution of data is such that each of the blocks is - * distributed across all MPI processes named in the MPI communicator. I.e. we - * don't just distribute the whole vector, but each component. In the - * constructors and reinit() functions, one therefore not only has to specify - * the sizes of the individual blocks, but also the number of elements of each - * of these blocks to be stored on the local process. + * The model of distribution of data is such that each of the blocks + * is distributed across all MPI processes named in the MPI + * communicator. I.e. we don't just distribute the whole vector, but + * each component. In the constructors and reinit() functions, one + * therefore not only has to specify the sizes of the individual + * blocks, but also the number of elements of each of these blocks to + * be stored on the local process. * * @ingroup Vectors * @ingroup TrilinosWrappers @@ -54,22 +60,22 @@ namespace TrilinosWrappers class BlockVector : public BlockVectorBase { public: - /** - * Typedef the base class for simpler - * access to its own typedefs. - */ + /** + * Typedef the base class for simpler + * access to its own typedefs. + */ typedef BlockVectorBase BaseClass; - /** - * Typedef the type of the underlying - * vector. - */ + /** + * Typedef the type of the underlying + * vector. + */ typedef BaseClass::BlockType BlockType; - /** - * Import the typedefs from the base - * class. - */ + /** + * Import the typedefs from the base + * class. + */ typedef BaseClass::value_type value_type; typedef BaseClass::pointer pointer; typedef BaseClass::const_pointer const_pointer; @@ -79,58 +85,86 @@ namespace TrilinosWrappers typedef BaseClass::iterator iterator; typedef BaseClass::const_iterator const_iterator; - /** - * Default constructor. Generate an - * empty vector without any blocks. - */ + /** + * Default constructor. Generate an + * empty vector without any blocks. + */ BlockVector (); - /** - * Constructor. Generate a block - * vector with as many blocks as - * there are entries in Input_Maps. - * Each Epetra_Map already knows - * the distribution of data among - * the MPI processes. - */ + /** + * Constructor. Generate a block + * vector with as many blocks as + * there are entries in Input_Maps. + * Each Epetra_Map already knows + * the distribution of data among + * the MPI processes. 
+ */ explicit BlockVector (const std::vector &InputMaps); - /** - * Copy-Constructor. Set all the - * properties of the parallel vector - * to those of the given argument and - * copy the elements. - */ + /** + * Copy-Constructor. Set all the + * properties of the parallel vector + * to those of the given argument and + * copy the elements. + */ BlockVector (const BlockVector &V); - /** - * Creates a block vector consisting - * of num_blocks components, - * but there is no content in the - * individual components and the - * user has to fill appropriate data - * using a reinit of the blocks. - */ + /** + * Creates a block vector + * consisting of + * num_blocks + * components, but there is no + * content in the individual + * components and the user has to + * fill appropriate data using a + * reinit of the blocks. + */ BlockVector (const unsigned int num_blocks); - /** - * Destructor. Clears memory - */ + /** + * Destructor. Clears memory + */ ~BlockVector (); - /** - * Copy operator: fill all components - * of the vector that are locally - * stored with the given scalar value. - */ - BlockVector & operator = (const value_type s); + /** + * Copy operator: fill all + * components of the vector that + * are locally stored with the + * given scalar value. + */ + BlockVector & + operator = (const value_type s); - /** - * Copy operator for arguments of the - * same type. - */ + /** + * Copy operator for arguments of + * the same type. + */ BlockVector & - operator= (const BlockVector &V); + operator = (const BlockVector &V); + + /** + * Another copy function. This + * one takes a deal.II block + * vector and copies it into a + * TrilinosWrappers block + * vector. Note that the number + * of blocks has to be the same + * in the vector as in the input + * vector. Use the reinit() + * command for resizing the + * BlockVector or for changing + * the internal structure of the + * block components. + * + * Since Trilinos only works on + * doubles, this function is + * limited to accept only one + * possible number type in the + * deal.II vector. + */ + template + BlockVector & + operator = (const ::dealii::BlockVector &V); /** * Reinitialize the BlockVector to @@ -295,7 +329,7 @@ namespace TrilinosWrappers reinit(v.n_blocks()); for (unsigned int i=0; in_blocks(); ++i) - this->block(i) = v.block(i); + this->components[i] = v.block(i); collect_sizes(); @@ -304,6 +338,22 @@ namespace TrilinosWrappers + template + inline + BlockVector & + BlockVector::operator = (const ::dealii::BlockVector &v) + { + Assert (n_blocks() == v.n_blocks(), + ExcDimensionMismatch(n_blocks(),v.n_blocks())); + + for (unsigned int i=0; in_blocks(); ++i) + this->components[i] = v.block(i); + + return *this; + } + + + inline BlockVector::~BlockVector () {} diff --git a/deal.II/lac/include/lac/trilinos_vector.h b/deal.II/lac/include/lac/trilinos_vector.h index 94275380cf..7ddcf36136 100755 --- a/deal.II/lac/include/lac/trilinos_vector.h +++ b/deal.II/lac/include/lac/trilinos_vector.h @@ -38,7 +38,7 @@ DEAL_II_NAMESPACE_OPEN - // forward declaration + // forward declaration template class Vector; @@ -48,7 +48,7 @@ template class Vector; */ namespace TrilinosWrappers { - // forward declaration + // forward declaration class Vector; /** @@ -205,14 +205,14 @@ namespace TrilinosWrappers /** * This class implements a wrapper to use the Trilinos distributed - * vector class Epetra_FEVector. This is precisely the kind of vector - * we deal with all the time - we probably get it from some assembly + * vector class Epetra_FEVector. 
This is precisely the kind of vector we + * deal with all the time - we probably get it from some assembly * process, where also entries not locally owned might need to written * and hence need to be forwarded to the owner. This class is designed - * to be used in a distributed memory architecture with an MPI - * compiler on the bottom, but works equally well also for serial - * processes. The only requirement for this class to work is that - * Trilinos is installed with the respective compiler as a basis. + * to be used in a distributed memory architecture with an MPI compiler + * on the bottom, but works equally well also for serial processes. The + * only requirement for this class to work is that Trilinos is installed + * with the respective compiler as a basis. * * The interface of this class is modeled after the existing Vector * class in deal.II. It has almost the same member functions, and is @@ -220,36 +220,34 @@ namespace TrilinosWrappers * scalar type (double), it is not templated, and only works with that * type. * - * Note that Trilinos only guarantees that operations do what you - * expect if the function @p GlobalAssemble has been called after - * vector assembly in order to distribute the data. Therefore, you - * need to call Vector::compress() before you actually use the - * vectors. + * Note that Trilinos only guarantees that operations do what you expect + * if the function @p GlobalAssemble has been called after vector + * assembly in order to distribute the data. Therefore, you need to call + * Vector::compress() before you actually use the vectors. * *
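[Editor's note: a minimal sketch of the compress() requirement stated above. Header paths and the helper function name are illustrative; the calls themselves (local_range(), operator(), compress(), l2_norm()) are the ones documented in this class.]
@verbatim
#include <lac/trilinos_vector.h>
#include <Epetra_Map.h>

void fill_and_use (const Epetra_Map &map)
{
  TrilinosWrappers::Vector v (map);

  // element-by-element writes into the locally owned range
  const std::pair<unsigned int, unsigned int> range = v.local_range ();
  for (unsigned int i=range.first; i<range.second; ++i)
    v(i) = 1.0*i;

  v.compress ();                     // flush the write buffers on every process

  const double norm = v.l2_norm ();  // collective operation, safe only after compress()
  (void)norm;
}
@endverbatim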

Parallel communication model

* - * The parallel functionality of Trilinos is built on top of the - * Message Passing Interface (MPI). MPI's communication model is built - * on collective communications: if one process wants something from + * The parallel functionality of Trilinos is built on top of the Message + * Passing Interface (MPI). MPI's communication model is built on + * collective communications: if one process wants something from * another, that other process has to be willing to accept this * communication. A process cannot query data from another process by - * calling a remote function, without that other process expecting - * such a transaction. The consequence is that most of the operations - * in the base class of this class have to be called collectively. For - * example, if you want to compute the l2 norm of a parallel vector, - * @em all processes across which this vector is shared have to call - * the @p l2_norm function. If you don't do this, but instead only - * call the @p l2_norm function on one process, then the following - * happens: This one process will call one of the collective MPI - * functions and wait for all the other processes to join in on - * this. Since the other processes don't call this function, you will - * either get a time-out on the first process, or, worse, by the time - * the next a callto a Trilinos function generates an MPI message on - * the other processes , you will get a cryptic message that only a - * subset of processes attempted a communication. These bugs can be - * very hard to figure out, unless you are well-acquainted with the - * communication model of MPI, and know which functions may generate - * MPI messages. + * calling a remote function, without that other process expecting such + * a transaction. The consequence is that most of the operations in the + * base class of this class have to be called collectively. For example, + * if you want to compute the l2 norm of a parallel vector, @em all + * processes across which this vector is shared have to call the @p + * l2_norm function. If you don't do this, but instead only call the @p + * l2_norm function on one process, then the following happens: This one + * process will call one of the collective MPI functions and wait for + * all the other processes to join in on this. Since the other processes + * don't call this function, you will either get a time-out on the first + * process, or, worse, by the time the next a callto a Trilinos function + * generates an MPI message on the other processes , you will get a + * cryptic message that only a subset of processes attempted a + * communication. These bugs can be very hard to figure out, unless you + * are well-acquainted with the communication model of MPI, and know + * which functions may generate MPI messages. * * One particular case, where an MPI message may be generated * unexpectedly is discussed below. @@ -266,27 +264,29 @@ namespace TrilinosWrappers * wrapper classes) allow to write (or add) to individual elements of * vectors, even if they are stored on a different process. You can do * this writing, for example, vec(i)=d or vec(i)+=d, - * or similar operations. There is one catch, however, that may lead - * to very confusing error messages: Trilinos requires application - * programs to call the compress() function when they switch from - * adding, to elements to writing to elements. The reasoning is that - * all processes might accumulate addition operations to elements, - * even if multiple processes write to the same elements. 
By the time - * we call compress() the next time, all these additions are - * executed. However, if one process adds to an element, and another - * overwrites to it, the order of execution would yield - * non-deterministic behavior if we don't make sure that a - * synchronisation with compress() happens in between. + * or similar operations. There is one catch, however, that may lead to + * very confusing error messages: Trilinos requires application programs + * to call the compress() function when they switch from adding, to + * elements to writing to elements. The reasoning is that all processes + * might accumulate addition operations to elements, even if multiple + * processes write to the same elements. By the time we call compress() + * the next time, all these additions are executed. However, if one + * process adds to an element, and another overwrites to it, the order + * of execution would yield non-deterministic behavior if we don't make + * sure that a synchronisation with compress() happens in between. * * In order to make sure these calls to compress() happen at the * appropriate time, the deal.II wrappers keep a state variable that * store which is the presently allowed operation: additions or - * writes. If it encounters an operation of the opposite kind, it - * calls compress() and flips the state. This can sometimes lead to - * very confusing behavior, in code that may for example look like - * this: @verbatim TrilinosWrappers::Vector vector; ... // do some - * write operations on the vector for (unsigned int i=0; - * isize(); ++i) vector(i) = i; + * writes. If it encounters an operation of the opposite kind, it calls + * compress() and flips the state. This can sometimes lead to very + * confusing behavior, in code that may for example look like this: + * + * @verbatim + * TrilinosWrappers::Vector vector; + * // do some write operations on the vector + * for (unsigned int i=0; isize(); ++i) + * vector(i) = i; * * // do some additions to vector elements, but * // only for some elements @@ -298,24 +298,24 @@ namespace TrilinosWrappers * const double norm = vector->l2_norm(); * @endverbatim * - * This code can run into trouble: by the time we see the first - * addition operation, we need to flush the overwrite buffers for the - * vector, and the deal.II library will do so by calling - * compress(). However, it will only do so for all processes that - * actually do an addition -- if the condition is never true for one - * of the processes, then this one will not get to the actual - * compress() call, whereas all the other ones do. This gets us into - * trouble, since all the other processes hang in the call to flush - * the write buffers, while the one other process advances to the call - * to compute the l2 norm. At this time, you will get an error that - * some operation was attempted by only a subset of processes. This - * behavior may seem surprising, unless you know that write/addition - * operations on single elements may trigger this behavior. + * This code can run into trouble: by the time we see the first addition + * operation, we need to flush the overwrite buffers for the vector, and + * the deal.II library will do so by calling compress(). However, it + * will only do so for all processes that actually do an addition -- if + * the condition is never true for one of the processes, then this one + * will not get to the actual compress() call, whereas all the other + * ones do. 
This gets us into trouble, since all the other processes + * hang in the call to flush the write buffers, while the one other + * process advances to the call to compute the l2 norm. At this time, + * you will get an error that some operation was attempted by only a + * subset of processes. This behavior may seem surprising, unless you + * know that write/addition operations on single elements may trigger + * this behavior. * - * The problem described here may be avoided by placing additional - * calls to compress(), or making sure that all processes do the same - * type of operations at the same time, for example by placing zero - * additions if necessary. + * The problem described here may be avoided by placing additional calls + * to compress(), or making sure that all processes do the same type of + * operations at the same time, for example by placing zero additions if + * necessary. * * @ingroup TrilinosWrappers * @ingroup Vectors @@ -330,8 +330,7 @@ namespace TrilinosWrappers * types used in all * containers. These types * parallel those in the - * C standard - * libraries + * C standard libraries * vector<...> class. */ typedef TrilinosScalar value_type; @@ -342,36 +341,35 @@ namespace TrilinosWrappers /** * Default constructor that - * generates an empty (zero - * size) vector. The function - * reinit() will have - * to give the vector the - * correct size and - * distribution among processes - * in case of an MPI run. + * generates an empty (zero size) + * vector. The function + * reinit() will have to + * give the vector the correct + * size and distribution among + * processes in case of an MPI + * run. */ Vector (); /** * This constructor takes an - * Epetra_Map that already - * knows how to distribute the + * Epetra_Map that already knows + * how to distribute the * individual components among * the MPI processors. It also - * includes information about - * the size of the vector. + * includes information about the + * size of the vector. */ Vector (const Epetra_Map &InputMap); /** * Copy constructor. Sets the - * dimension to that of the - * given vector and uses the - * map of that vector, but does - * not copy any - * element. Instead, the memory - * will remain untouched in - * case fast is false + * dimension to that of the given + * vector and uses the map of + * that vector, but does not copy + * any element. Instead, the + * memory will remain untouched + * in case fast is false * and initialized with zero * otherwise. */ @@ -384,17 +382,18 @@ namespace TrilinosWrappers virtual ~Vector (); /** - * Reinit functionality. This function - * destroys the old vector content - * and generates a new one based on - * the input map. + * Reinit functionality. This + * function destroys the old + * vector content and generates a + * new one based on the input + * map. */ void reinit (const Epetra_Map &input_map); /** - * Reinit functionality. This function - * copies the vector v to the current - * one. + * Reinit functionality. This + * function copies the vector v + * to the current one. */ void reinit (const Vector &v, const bool fast = false); @@ -409,75 +408,113 @@ namespace TrilinosWrappers /** * Compress the underlying - * representation of the Trilinos object, - * i.e. flush the buffers of the vector - * object if it has any. This function - * is necessary after writing into a - * vector element-by-element and before - * anything else can be done on it. + * representation of the Trilinos + * object, i.e. flush the buffers + * of the vector object if it has + * any. 
This function is + * necessary after writing into a + * vector element-by-element and + * before anything else can be + * done on it. */ void compress (); /** - * Set all components of the vector to - * the given number @p s. Simply pass - * this down to the Trilinos Epetra - * object, but we still need to declare - * this function to make the example - * given in the discussion about making - * the constructor explicit work. + * Set all components of the + * vector to the given number @p + * s. Simply pass this down to + * the Trilinos Epetra object, + * but we still need to declare + * this function to make the + * example given in the + * discussion about making the + * constructor explicit work. * - * Since the semantics of assigning a - * scalar to a vector are not - * immediately clear, this operator - * should really only be used if you - * want to set the entire vector to - * zero. This allows the intuitive - * notation v=0. Assigning - * other values is deprecated and may + * Since the semantics of + * assigning a scalar to a vector + * are not immediately clear, + * this operator should really + * only be used if you want to + * set the entire vector to + * zero. This allows the + * intuitive notation + * v=0. Assigning other + * values is deprecated and may * be disallowed in the future. */ - Vector & operator = (const TrilinosScalar s); + Vector & + operator= (const TrilinosScalar s); /** - * Copy the given vector. Resize the - * present vector if necessary. + * Copy the given vector. Resize + * the present vector if + * necessary. */ - Vector & operator = (const Vector &v); + Vector & + operator= (const Vector &v); + + /** + * Another copy function. This + * one takes a deal.II vector and + * copies it into a + * TrilinosWrapper vector. Note + * that since we do not provide + * any Epetra_map that tells + * about the partitioning of the + * vector among the MPI + * processes, the size of the + * TrilinosWrapper vector has to + * be the same as the size of the + * input vector. In order to + * change the map, use the + * reinit(const Epetra_Map + * &input_map) function. + * + * Since Trilinos only works on + * doubles, this function is + * limited to accept only one + * possible number type in the + * deal.II vector. + */ + Vector & + operator= (const ::dealii::Vector &v); /** - * Test for equality. This function - * assumes that the present vector and - * the one to compare with have the same - * size already, since comparing vectors - * of different sizes makes not much - * sense anyway. + * Test for equality. This + * function assumes that the + * present vector and the one to + * compare with have the same + * size already, since comparing + * vectors of different sizes + * makes not much sense anyway. */ bool operator == (const Vector &v) const; /** - * Test for inequality. This function - * assumes that the present vector and - * the one to compare with have the same - * size already, since comparing vectors - * of different sizes makes not much - * sense anyway. + * Test for inequality. This + * function assumes that the + * present vector and the one to + * compare with have the same + * size already, since comparing + * vectors of different sizes + * makes not much sense anyway. */ bool operator != (const Vector &v) const; /** - * Return the global dimension of the - * vector. + * Return the global dimension of + * the vector. */ unsigned int size () const; /** - * Return the local dimension of the - * vector, i.e. 
the number of elements - * stored on the present MPI - * process. For sequential vectors, - * this number is the same as size(), - * but for parallel vectors it may be + * Return the local dimension of + * the vector, i.e. the number of + * elements stored on the present + * MPI process. For sequential + * vectors, this number is the + * same as size(), but for + * parallel vectors it may be * smaller. * * To figure out which elements @@ -491,29 +528,28 @@ namespace TrilinosWrappers * indicating which elements of * this vector are stored * locally. The first number is - * the index of the first - * element stored, the second - * the index of the one past - * the last one that is stored - * locally. If this is a - * sequential vector, then the - * result will be the pair - * (0,N), otherwise it will be - * a pair (i,i+n), where + * the index of the first element + * stored, the second the index + * of the one past the last one + * that is stored locally. If + * this is a sequential vector, + * then the result will be the + * pair (0,N), otherwise it will + * be a pair (i,i+n), where * n=local_size(). */ std::pair local_range () const; /** - * Return whether @p index is - * in the local range or not, - * see also local_range(). + * Return whether @p index is in + * the local range or not, see + * also local_range(). */ bool in_local_range (const unsigned int index) const; /** - * Provide access to a given element, - * both read and write. + * Provide access to a given + * element, both read and write. */ reference operator () (const unsigned int index); @@ -521,93 +557,105 @@ namespace TrilinosWrappers /** * Provide read-only access to an * element. This is equivalent to - * the el() command. + * * the el() + * command. */ TrilinosScalar operator () (const unsigned int index) const; /** - * Return the value of the vector entry - * i. Note that this function - * does only work properly when - * we request a data stored on the - * local processor. The function will - * throw an exception in case the - * elements sits on another process. + * Return the value of the vector + * entry i. Note that this + * function does only work + * properly when we request a + * data stored on the local + * processor. The function will + * throw an exception in case the + * elements sits on another + * process. */ TrilinosScalar el (const unsigned int index) const; /** - * A collective set operation: instead - * of setting individual elements of a - * vector, this function allows to set - * a whole set of elements at once. The - * indices of the elements to be set - * are stated in the first argument, - * the corresponding values in the - * second. + * A collective set operation: + * instead of setting individual + * elements of a vector, this + * function allows to set a whole + * set of elements at once. The + * indices of the elements to be + * set are stated in the first + * argument, the corresponding + * values in the second. */ void set (const std::vector &indices, const std::vector &values); /** - * This is a second collective set - * operation. As a difference, this - * function takes a deal.II vector - * of values. + * This is a second collective + * set operation. As a + * difference, this function + * takes a deal.II vector of + * values. 
*/ void set (const std::vector &indices, const ::dealii::Vector &values); /** - * This collective set operation is - * of lower level and can handle - * anything else – the only - * thing you have to provide is - * an address where all the indices - * are stored and the number of - * elements to be set. + * This collective set operation + * is of lower level and can + * handle anything else – + * the only thing you have to + * provide is an address where + * all the indices are stored and + * the number of elements to be + * set. */ void set (const unsigned int n_elements, const unsigned int *indices, const TrilinosScalar *values); /** - * A collective add operation: This - * function adds a whole set of values - * stored in @p values to the vector - * components specified by @p indices. + * A collective add operation: + * This funnction adds a whole + * set of values stored in @p + * values to the vector + * components specified by @p + * indices. */ void add (const std::vector &indices, const std::vector &values); /** - * This is a second collective add - * operation. As a difference, this - * function takes a deal.II vector - * of values. + * This is a second collective + * add operation. As a + * difference, this function + * takes a deal.II vector of + * values. */ void add (const std::vector &indices, const ::dealii::Vector &values); /** - * Take an address where n_elements - * are stored contiguously and add - * them into the vector. + * Take an address where + * n_elements are stored + * contiguously and add them into + * the vector. */ void add (const unsigned int n_elements, const unsigned int *indices, const TrilinosScalar *values); /** - * Return the scalar (inner) product of - * two vectors. The vectors must have the - * same size. + * Return the scalar (inner) + * product of two vectors. The + * vectors must have the same + * size. */ TrilinosScalar operator * (const Vector &vec) const; /** - * Return square of the $l_2$-norm. + * Return square of the + * $l_2$-norm. */ real_type norm_sqr () const; @@ -618,8 +666,8 @@ namespace TrilinosWrappers TrilinosScalar mean_value () const; /** - * $l_1$-norm of the vector. - * The sum of the absolute values. + * $l_1$-norm of the vector. The + * sum of the absolute values. */ real_type l1_norm () const; @@ -632,10 +680,10 @@ namespace TrilinosWrappers /** * $l_p$-norm of the vector. The - * pth root of the sum of the - * pth - * powers of the absolute values - * of the elements. + * pth root of the sum of + * the pth powers of the + * absolute values of the + * elements. */ real_type lp_norm (const TrilinosScalar p) const; @@ -646,28 +694,32 @@ namespace TrilinosWrappers real_type linfty_norm () const; /** - * Return whether the vector contains - * only elements with value zero. This - * function is mainly for internal + * Return whether the vector + * contains only elements with + * value zero. This function is + * mainly for internal * consistency checks and should - * seldomly be used when not in debug - * mode since it uses quite some time. + * seldomly be used when not in + * debug mode since it uses quite + * some time. */ bool all_zero () const; /** - * Return @p true if the vector has no - * negative entries, i.e. all entries - * are zero or positive. This function - * is used, for example, to check - * whether refinement indicators are - * really all positive (or zero). + * Return @p true if the vector + * has no negative entries, + * i.e. all entries are zero or + * positive. 
This function is + * used, for example, to check + * whether refinement indicators + * are really all positive (or + * zero). */ bool is_non_negative () const; /** - * Multiply the entire vector by a - * fixed factor. + * Multiply the entire vector by + * a fixed factor. */ Vector & operator *= (const TrilinosScalar factor); @@ -678,43 +730,41 @@ namespace TrilinosWrappers Vector & operator /= (const TrilinosScalar factor); /** - * Add the given vector to the present - * one. + * Add the given vector to the + * present one. */ Vector & operator += (const Vector &V); /** - * Subtract the given vector - * from the present one. + * Subtract the given vector from + * the present one. */ Vector & operator -= (const Vector &V); /** * Addition of @p s to all - * components. Note that @p s - * is a scalar and not a - * vector. + * components. Note that @p s is + * a scalar and not a vector. */ void add (const TrilinosScalar s); /** - * Simple vector addition, - * equal to the operator - * +=. + * Simple vector addition, equal + * to the operator +=. */ void add (const Vector &V); /** - * Simple addition of a - * multiple of a vector, - * i.e. *this = a*V. + * Simple addition of a multiple + * of a vector, i.e. *this = + * a*V. */ void add (const TrilinosScalar a, const Vector &V); /** * Multiple addition of scaled - * vectors, i.e. *this = - * a*V + b*W. + * vectors, i.e. *this = a*V + * + b*W. */ void add (const TrilinosScalar a, const Vector &V, const TrilinosScalar b, const Vector &W); @@ -729,16 +779,15 @@ namespace TrilinosWrappers /** * Scaling and simple addition, - * i.e. *this = s*(*this) - * + a*V. + * i.e. *this = s*(*this) + + * a*V. */ void sadd (const TrilinosScalar s, const TrilinosScalar a, const Vector &V); /** - * Scaling and multiple - * addition. + * Scaling and multiple addition. */ void sadd (const TrilinosScalar s, const TrilinosScalar a, @@ -747,10 +796,9 @@ namespace TrilinosWrappers const Vector &W); /** - * Scaling and multiple - * addition. *this = - * s*(*this) + a*V + b*W + - * c*X. + * Scaling and multiple addition. + * *this = s*(*this) + a*V + + * b*W + c*X. */ void sadd (const TrilinosScalar s, const TrilinosScalar a, @@ -763,12 +811,11 @@ namespace TrilinosWrappers /** * Scale each element of this * vector by the corresponding - * element in the - * argument. This function is - * mostly meant to simulate - * multiplication (and - * immediate re-assignment) by - * a diagonal scaling matrix. + * element in the argument. This + * function is mostly meant to + * simulate multiplication (and + * immediate re-assignment) by a + * diagonal scaling matrix. */ void scale (const Vector &scaling_factors); @@ -786,49 +833,45 @@ namespace TrilinosWrappers const TrilinosScalar b, const Vector &W); /** - * Compute the elementwise - * ratio of the two given - * vectors, that is let - * this[i] = - * a[i]/b[i]. This is - * useful for example if you - * want to compute the cellwise - * ratio of true to estimated - * error. + * Compute the elementwise ratio + * of the two given vectors, that + * is let this[i] = + * a[i]/b[i]. This is useful + * for example if you want to + * compute the cellwise ratio of + * true to estimated error. * * This vector is appropriately * scaled to hold the result. * - * If any of the b[i] - * is zero, the result is - * undefined. No attempt is - * made to catch such - * situations. + * If any of the b[i] is + * zero, the result is + * undefined. No attempt is made + * to catch such situations. 
*/ void ratio (const Vector &a, const Vector &b); /** * Output of vector in - * user-defined format in - * analogy to the - * dealii::Vector class. + * user-defined format in analogy + * to the dealii::Vector + * class. */ void print (const char* format = 0) const; /** * Print to a stream. @p - * precision denotes the - * desired precision with which - * values shall be printed, @p - * scientific whether - * scientific notation shall be - * used. If @p across is @p - * true then the vector is - * printed in a line, while if - * @p false then the elements - * are printed on a separate - * line each. + * precision denotes the desired + * precision with which values + * shall be printed, @p + * scientific whether scientific + * notation shall be used. If @p + * across is @p true then the + * vector is printed in a line, + * while if @p false then the + * elements are printed on a + * separate line each. */ void print (std::ostream &out, const unsigned int precision = 3, @@ -837,29 +880,29 @@ namespace TrilinosWrappers /** * Swap the contents of this - * vector and the other vector - * @p v. One could do this - * operation with a temporary - * variable and copying over - * the data elements, but this - * function is significantly - * more efficient since it only - * swaps the pointers to the - * data of the two vectors and - * therefore does not need to - * allocate temporary storage - * and move data around. Note - * that the vectors need to be - * of the same size and base on - * the same map. + * vector and the other vector @p + * v. One could do this operation + * with a temporary variable and + * copying over the data + * elements, but this function is + * significantly more efficient + * since it only swaps the + * pointers to the data of the + * two vectors and therefore does + * not need to allocate temporary + * storage and move data + * around. Note that the vectors + * need to be of the same size + * and base on the same map. * * This function is analog to the * the @p swap function of all C * standard containers. Also, * there is a global function - * swap(u,v) that simply calls - * u.swap(v), again in analogy - * to standard functions. + * swap(u,v) that simply + * calls u.swap(v), + * again in analogy to standard + * functions. */ void swap (Vector &v); @@ -891,13 +934,14 @@ namespace TrilinosWrappers private: /** - * The Epetra map used to map - * vector data accross multiple + * The Epetra map is used to map + * (or rather, partition) vector + * data accross multiple * processes. This is the * communicator and data - * distribution object common - * to all Trilinos objects used - * by deal.II. + * distribution object common to + * all Trilinos objects used by + * deal.II. */ Epetra_Map map; @@ -931,7 +975,7 @@ namespace TrilinosWrappers public: /** * An Epetra distibuted vector - * type. Requires an existing + * type. Requires an existing * Epetra_Map for storing data. * TODO: Should become private * at some point. 
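[Editor's note: a sketch of the new deal.II-to-Trilinos copy declared above. Header paths and the function name are assumptions; the map must describe a vector of the same global size as the input, otherwise reinit(const Epetra_Map&) has to be called first.]
@verbatim
#include <lac/vector.h>
#include <lac/trilinos_vector.h>
#include <Epetra_Map.h>

void copy_to_trilinos (const dealii::Vector<double> &serial_vector,
                       const Epetra_Map             &partitioning)
{
  // distributed vector of the same global size as 'serial_vector'
  TrilinosWrappers::Vector distributed (partitioning);

  // the new assignment operator copies the deal.II data into the
  // Trilinos vector, process by process
  distributed = serial_vector;
}
@endverbatim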
diff --git a/deal.II/lac/source/trilinos_vector.cc b/deal.II/lac/source/trilinos_vector.cc index 623788c09a..7a0233ab32 100755 --- a/deal.II/lac/source/trilinos_vector.cc +++ b/deal.II/lac/source/trilinos_vector.cc @@ -160,6 +160,29 @@ namespace TrilinosWrappers } + Vector & + Vector::operator = (const ::dealii::Vector &v) + { + Assert (size() == v.size(), + ExcDimensionMismatch(size(),v.size())); + + const unsigned int n_local_elements = local_size(); + const unsigned int first_local_element = local_range().first; + + std::vector local_indices (n_local_elements); + for (unsigned int i=0; iReplaceGlobalValues(n_local_elements, + &local_indices[0], + v.begin()+first_local_element); + + Assert (ierr==0, ExcTrilinosError(ierr)); + + return *this; + } + + bool Vector::operator == (const Vector &v) const
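[Editor's note: for context, a hedged fragment of the localization pattern the step-32 changes above introduce. Names follow the tutorial; the surrounding objects (constraints, FEValues, solver call) are assumed to exist as in step-32.]
@verbatim
// After solving into the distributed Trilinos vector, copy it to a
// localized deal.II vector, apply the constraints there, and copy it
// back; the localized copy is also the one handed to FEValues.
Vector<double> localized_temperature_solution (temperature_solution);
temperature_constraints.distribute (localized_temperature_solution);
temperature_solution = localized_temperature_solution;   // new Trilinos operator=

// ... later, on each cell owned by this process:
fe_values.reinit (cell);
fe_values.get_function_values (localized_temperature_solution,
                               old_temperature_values);
@endverbatim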