From: kronbichler
Date: Tue, 21 May 2013 16:47:31 +0000 (+0000)
Subject: Improve inner products in parallel vector: Use only one MPI_Allreduce instead of...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=af3d18ac3c35a0a56a1e3a71da9e4f03a9fdc235;p=dealii-svn.git

Improve inner products in parallel vector: Use only one MPI_Allreduce
instead of n_blocks() many.

git-svn-id: https://svn.dealii.org/trunk@29542 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/lac/parallel_block_vector.h b/deal.II/include/deal.II/lac/parallel_block_vector.h
index 221570efd0..63b2395979 100644
--- a/deal.II/include/deal.II/lac/parallel_block_vector.h
+++ b/deal.II/include/deal.II/lac/parallel_block_vector.h
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012 by the deal.II authors
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012, 2013 by the deal.II authors
 //
 //    This file is subject to QPL and may not be distributed
 //    without copyright and license information. Please refer
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
@@ -24,9 +25,6 @@
 DEAL_II_NAMESPACE_OPEN

-// TODO: global reduction operations (operator *, {l1,l2,lp,linfty}norm, mean
-// value) should use one MPI communication with several Number values, not use
-// the parallel::distributed::Vector operation directly.

 namespace parallel
 {
@@ -56,20 +54,17 @@
   {
   public:
     /**
-     * Typedef the base class for simpler
-     * access to its own typedefs.
+     * Typedef the base class for simpler access to its own typedefs.
      */
    typedef BlockVectorBase<Vector<Number> > BaseClass;

    /**
-     * Typedef the type of the underlying
-     * vector.
+     * Typedef the type of the underlying vector.
      */
    typedef typename BaseClass::BlockType  BlockType;

    /**
-     * Import the typedefs from the base
-     * class.
+     * Import the typedefs from the base class.
      */
    typedef typename BaseClass::value_type      value_type;
    typedef typename BaseClass::real_type       real_type;
@@ -82,55 +77,37 @@
    typedef typename BaseClass::const_iterator  const_iterator;

    /**
-     * Constructor. There are three
-     * ways to use this
-     * constructor. First, without
-     * any arguments, it generates
-     * an object with no
-     * blocks. Given one argument,
-     * it initializes num_blocks
-     * blocks, but these blocks have
-     * size zero. The third variant
-     * finally initializes all
-     * blocks to the same size
-     * block_size.
+     * Constructor. There are three ways to use this constructor. First,
+     * without any arguments, it generates an object with no blocks. Given
+     * one argument, it initializes num_blocks blocks, but these
+     * blocks have size zero. The third variant finally initializes all
+     * blocks to the same size block_size.
      *
-     * Confer the other constructor
-     * further down if you intend to
-     * use blocks of different
-     * sizes.
+     * Confer the other constructor further down if you intend to use
+     * blocks of different sizes.
      */
    explicit BlockVector (const unsigned int num_blocks = 0,
                          const unsigned int block_size = 0);

    /**
-     * Copy-Constructor. Dimension set to
-     * that of V, all components are copied
-     * from V
+     * Copy-Constructor. Dimension set to that of V; all components are
+     * copied from V.
      */
    BlockVector (const BlockVector<Number> &V);

 #ifndef DEAL_II_EXPLICIT_CONSTRUCTOR_BUG
    /**
-     * Copy constructor taking a BlockVector of
-     * another data type. This will fail if
-     * there is no conversion path from
-     * OtherNumber to Number. Note that
-     * you may lose accuracy when copying
-     * to a BlockVector with data elements with
-     * less accuracy.
+     * Copy constructor taking a BlockVector of another data type. This will
+     * fail if there is no conversion path from OtherNumber to
+     * Number. Note that you may lose accuracy when copying to a
+     * BlockVector with data elements of less accuracy.
      *
-     * Older versions of gcc did not honor
-     * the @p explicit keyword on template
-     * constructors. In such cases, it is
-     * easy to accidentally write code that
-     * can be very inefficient, since the
-     * compiler starts performing hidden
-     * conversions. To avoid this, this
-     * function is disabled if we have
-     * detected a broken compiler during
-     * configuration.
+     * Older versions of gcc did not honor the @p explicit keyword on
+     * template constructors. In such cases, it is easy to accidentally
+     * write code that can be very inefficient, since the compiler starts
+     * performing hidden conversions. To avoid this, this function is
+     * disabled if we have detected a broken compiler during configuration.
      */
    template <typename OtherNumber>
    explicit
@@ -138,38 +115,31 @@
 #endif

    /**
-     * Constructor. Set the number of
-     * blocks to
-     * block_sizes.size() and
-     * initialize each block with
-     * block_sizes[i] zero
-     * elements.
+     * Constructor. Set the number of blocks to block_sizes.size()
+     * and initialize each block with block_sizes[i] zero elements.
      */
    BlockVector (const std::vector<size_type> &block_sizes);

    /**
-     * Destructor. Clears memory
+     * Destructor. Clears memory.
      */
    ~BlockVector ();

    /**
-     * Copy operator: fill all components of
-     * the vector with the given scalar
-     * value.
+     * Copy operator: fill all components of the vector with the given
+     * scalar value.
      */
    BlockVector &operator = (const value_type s);

    /**
-     * Copy operator for arguments of the
-     * same type. Resize the
-     * present vector if necessary.
+     * Copy operator for arguments of the same type. Resize the present
+     * vector if necessary.
      */
    BlockVector &
    operator= (const BlockVector &V);

    /**
-     * Copy operator for template arguments
-     * of different types. Resize the
+     * Copy operator for template arguments of different types. Resize the
      * present vector if necessary.
      */
    template <class Number2>
@@ -177,152 +147,173 @@
    operator= (const BlockVector<Number2> &V);

    /**
-     * Copy a regular vector into a
-     * block vector.
+     * Copy a regular vector into a block vector.
      */
    BlockVector &
    operator= (const Vector<Number> &V);

    /**
-     * Reinitialize the BlockVector to
-     * contain num_blocks blocks of
+     * Reinitialize the BlockVector to contain num_blocks blocks of
      * size block_size each.
      *
-     * If the second argument is left
-     * at its default value, then the
-     * block vector allocates the
-     * specified number of blocks but
-     * leaves them at zero size. You
-     * then need to later
-     * reinitialize the individual
-     * blocks, and call
-     * collect_sizes() to update the
-     * block system's knowledge of
+     * If the second argument is left at its default value, then the block
+     * vector allocates the specified number of blocks but leaves them at
+     * zero size. You then need to later reinitialize the individual blocks,
+     * and call collect_sizes() to update the block system's knowledge of
      * its individual blocks' sizes.
      *
-     * If fast==false, the vector
-     * is filled with zeros.
+     * If fast==false, the vector is filled with zeros.
      */
    void reinit (const unsigned int num_blocks,
                 const unsigned int block_size = 0,
                 const bool fast = false);

    /**
-     * Reinitialize the BlockVector such that
-     * it contains
-     * block_sizes.size()
-     * blocks. Each block is reinitialized to
+     * Reinitialize the BlockVector such that it contains
+     * block_sizes.size() blocks. Each block is reinitialized to
      * dimension block_sizes[i].
      *
-     * If the number of blocks is the
-     * same as before this function
-     * was called, all vectors remain
-     * the same and reinit() is
-     * called for each vector.
+     * If the number of blocks is the same as before this function was
+     * called, all vectors remain the same and reinit() is called for each
+     * vector.
      *
-     * If fast==false, the vector
-     * is filled with zeros.
+     * If fast==false, the vector is filled with zeros.
      *
-     * Note that you must call this
-     * (or the other reinit()
-     * functions) function, rather
-     * than calling the reinit()
-     * functions of an individual
-     * block, to allow the block
-     * vector to update its caches of
-     * vector sizes. If you call
-     * reinit() on one of the
-     * blocks, then subsequent
-     * actions on this object may
-     * yield unpredictable results
-     * since they may be routed to
-     * the wrong block.
+     * Note that you must call this function (or one of the other reinit()
+     * functions), rather than calling the reinit() functions of an
+     * individual block, to allow the block vector to update its caches of
+     * vector sizes. If you call reinit() on one of the blocks, then
+     * subsequent actions on this object may yield unpredictable results
+     * since they may be routed to the wrong block.
      */
    void reinit (const std::vector<size_type> &N,
                 const bool fast=false);

    /**
-     * Change the dimension to that
-     * of the vector V. The same
-     * applies as for the other
-     * reinit() function.
+     * Change the dimension to that of the vector V. The same
+     * applies as for the other reinit() function.
      *
-     * The elements of V are not
-     * copied, i.e. this function is
-     * the same as calling reinit
-     * (V.size(), fast).
+     * The elements of V are not copied, i.e. this function is the
+     * same as calling reinit (V.size(), fast).
      *
-     * Note that you must call this
-     * (or the other reinit()
-     * functions) function, rather
-     * than calling the reinit()
-     * functions of an individual
-     * block, to allow the block
-     * vector to update its caches of
-     * vector sizes. If you call
-     * reinit() of one of the
-     * blocks, then subsequent
-     * actions of this object may
-     * yield unpredictable results
-     * since they may be routed to
-     * the wrong block.
+     * Note that you must call this function (or one of the other reinit()
+     * functions), rather than calling the reinit() functions of an
+     * individual block, to allow the block vector to update its caches of
+     * vector sizes. If you call reinit() on one of the blocks, then
+     * subsequent actions on this object may yield unpredictable results
+     * since they may be routed to the wrong block.
      */
    template <typename Number2>
    void reinit (const BlockVector<Number2> &V,
                 const bool fast=false);

    /**
-     * Scale each element of the
-     * vector by the given factor.
+     * Return whether the vector contains only elements with value
+     * zero. This function is mainly for internal consistency checks and
+     * should seldom be used when not in debug mode since it takes quite
+     * some time.
+     */
+    bool all_zero () const;
+
+    /**
+     * Return @p true if the vector has no negative entries, i.e. all
+     * entries are zero or positive. This function is used, for example, to
+     * check whether refinement indicators are really all positive (or
+     * zero).
+     *
+     * The function obviously only makes sense if the template argument of
+     * this class is a real type. If it is a complex type, then an exception
+     * is thrown.
+     */
+    bool is_non_negative () const;
+
+    /**
+     * Checks for equality of the two vectors.
+     */
+    template <typename Number2>
+    bool operator == (const BlockVector<Number2> &v) const;
+
+    /**
+     * Checks for inequality of the two vectors.
+     */
+    template <typename Number2>
+    bool operator != (const BlockVector<Number2> &v) const;
+
+    /**
+     * Perform the inner product of two vectors.
+     */
+    template <typename Number2>
+    Number operator * (const BlockVector<Number2> &V) const;
+
+    /**
+     * Computes the square of the l2 norm of the vector (i.e., the sum of
+     * the squares of all entries among all processors).
+     */
+    real_type norm_sqr () const;
+
+    /**
+     * Computes the mean value of all the entries in the vector.
+     */
+    Number mean_value () const;
+
+    /**
+     * Returns the l1 norm of the vector (i.e., the sum of the absolute
+     * values of all entries among all processors).
+     */
+    real_type l1_norm () const;
+
+    /**
+     * Returns the l2 norm of the vector (i.e., the square root of the sum
+     * of the squares of all entries among all processors).
+     */
+    real_type l2_norm () const;
+
+    /**
+     * Returns the lp norm with real @p p of the vector (i.e., the pth root
+     * of the sum of the pth powers of all entries among all processors).
+     */
+    real_type lp_norm (const real_type p) const;
+
+    /**
+     * Returns the maximum norm of the vector (i.e., the maximum absolute
+     * value among all entries and among all processors).
+     */
+    real_type linfty_norm () const;
+
+    /**
+     * Scale each element of the vector by the given factor.
      *
-     * This function is deprecated
-     * and will be removed in a
-     * future version. Use
-     * operator *= and
-     * operator /= instead.
+     * This function is deprecated and will be removed in a future
+     * version. Use operator *= and operator /= instead.
      *
-     * @deprecated Use operator*=
-     * instead.
+     * @deprecated Use operator*= instead.
      */
    void scale (const value_type factor) DEAL_II_DEPRECATED;

    /**
-     * Multiply each element of this
-     * vector by the corresponding
-     * element of v.
+     * Multiply each element of this vector by the corresponding element of
+     * v.
      */
    template <class BlockVector2>
    void scale (const BlockVector2 &v);

    /**
-     * Swap the contents of this
-     * vector and the other vector
-     * v. One could do this
-     * operation with a temporary
-     * variable and copying over the
-     * data elements, but this
-     * function is significantly more
-     * efficient since it only swaps
-     * the pointers to the data of
-     * the two vectors and therefore
-     * does not need to allocate
-     * temporary storage and move
-     * data around.
+     * Swap the contents of this vector and the other vector v. One
+     * could do this operation with a temporary variable and copying over
+     * the data elements, but this function is significantly more efficient
+     * since it only swaps the pointers to the data of the two vectors and
+     * therefore does not need to allocate temporary storage and move data
+     * around.
      *
-     * Limitation: right now this
-     * function only works if both
-     * vectors have the same number
-     * of blocks. If needed, the
-     * numbers of blocks should be
+     * Limitation: right now this function only works if both vectors have
+     * the same number of blocks. If needed, the numbers of blocks should be
      * exchanged, too.
      *
-     * This function is analog to the
-     * the swap() function of all C++
-     * standard containers. Also,
-     * there is a global function
-     * swap(u,v) that simply calls
-     * u.swap(v), again in analogy
-     * to standard functions.
+     * This function is analogous to the swap() function of all C++
+     * standard containers. Also, there is a global function swap(u,v) that
+     * simply calls u.swap(v), again in analogy to standard
+     * functions.
      */
    void swap (BlockVector<Number> &v);

@@ -488,6 +479,221 @@
  }


+  template <typename Number>
+  inline
+  bool
+  BlockVector<Number>::all_zero () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    // use int instead of bool. in order to make global reduction operations
+    // work also when MPI_Init was not called, only call MPI_Allreduce
+    // commands when there is more than one processor (note that reinit()
+    // functions handle this case correctly through the job_supports_mpi()
+    // query). this is the same in all the functions below
+    int local_result = -1;
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result = std::max(local_result,
+                              -static_cast<int>(this->block(i).all_zero_local()));
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return -Utilities::MPI::max(local_result,
+                                  this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
+
+  template <typename Number>
+  inline
+  bool
+  BlockVector<Number>::is_non_negative () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+    int local_result = -1;
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result = std::max(local_result,
+                              -static_cast<int>(this->block(i).is_non_negative_local()));
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::max(local_result,
+                                 this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
+
+  template <typename Number>
+  template <typename Number2>
+  inline
+  bool
+  BlockVector<Number>::operator == (const BlockVector<Number2> &v) const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+    AssertDimension (this->n_blocks(), v.n_blocks());
+
+    // MPI does not support bools, so use unsigned int instead. Two vectors
+    // are equal if the check for non-equal fails on all processors
+    unsigned int local_result = 0;
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result = std::max(local_result,
+                              static_cast<unsigned int>(!this->block(i).vectors_equal_local(v.block(i))));
+    unsigned int result =
+      this->block(0).partitioner->n_mpi_processes() > 1
+      ?
+      Utilities::MPI::max(local_result,
+                          this->block(0).partitioner->get_communicator())
+      :
+      local_result;
+    return result==0;
+  }
+
+
+
+  template <typename Number>
+  template <typename Number2>
+  inline
+  bool
+  BlockVector<Number>::operator != (const BlockVector<Number2> &v) const
+  {
+    return !(operator == (v));
+  }
+
+
+
+  template <typename Number>
+  template <typename Number2>
+  inline
+  Number
+  BlockVector<Number>::operator * (const BlockVector<Number2> &v) const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+    AssertDimension (this->n_blocks(), v.n_blocks());
+
+    Number local_result = Number();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result += this->block(i).inner_product_local(v.block(i));
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::sum (local_result,
+                                  this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
+
+  template <typename Number>
+  inline
+  typename BlockVector<Number>::real_type
+  BlockVector<Number>::norm_sqr () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    real_type local_result = real_type();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result += this->block(i).norm_sqr_local();
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::sum (local_result,
+                                  this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
+
+  template <typename Number>
+  inline
+  Number
+  BlockVector<Number>::mean_value () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    Number local_result = Number();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result += this->block(i).mean_value_local()*
+                      (real_type)this->block(i).partitioner->local_size();
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::sum (local_result,
+                                  this->block(0).partitioner->get_communicator())/
+             (real_type)this->size();
+    else
+      return local_result/(real_type)this->size();
+  }
+
+
+
+  template <typename Number>
+  inline
+  typename BlockVector<Number>::real_type
+  BlockVector<Number>::l1_norm () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    real_type local_result = real_type();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result += this->block(i).l1_norm_local();
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::sum (local_result,
+                                  this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
+
+  template <typename Number>
+  inline
+  typename BlockVector<Number>::real_type
+  BlockVector<Number>::l2_norm () const
+  {
+    return std::sqrt(norm_sqr());
+  }
+
+
+
+  template <typename Number>
+  inline
+  typename BlockVector<Number>::real_type
+  BlockVector<Number>::lp_norm (const real_type p) const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    real_type local_result = real_type();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result += std::pow(this->block(i).lp_norm_local(p), p);
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return std::pow (Utilities::MPI::sum(local_result,
+                                           this->block(0).partitioner->get_communicator()),
+                       static_cast<real_type>(1.0/p));
+    else
+      return std::pow (local_result, static_cast<real_type>(1.0/p));
+  }
+
+
+
+  template <typename Number>
+  inline
+  typename BlockVector<Number>::real_type
+  BlockVector<Number>::linfty_norm () const
+  {
+    Assert (this->n_blocks() > 0, ExcEmptyObject());
+
+    real_type local_result = real_type();
+    for (unsigned int i=0; i<this->n_blocks(); ++i)
+      local_result = std::max(local_result, this->block(i).linfty_norm_local());
+
+    if (this->block(0).partitioner->n_mpi_processes() > 1)
+      return Utilities::MPI::max (local_result,
+                                  this->block(0).partitioner->get_communicator());
+    else
+      return local_result;
+  }
+
+
  template <typename Number>
  inline
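[Editorial sketch] The new BlockVector reductions above all follow the
scheme announced in the commit message: first accumulate the
communication-free per-block results, then reduce once. The following
standalone sketch -- plain MPI with hypothetical names such as
block_l2_norm and blocks, not deal.II API -- illustrates why this costs a
single MPI_Allreduce where the previous code issued one per block:

    #include <mpi.h>
    #include <cmath>
    #include <vector>

    // Sketch only: 'blocks' holds the locally owned entries of each block.
    double block_l2_norm (const std::vector<std::vector<double> > &blocks,
                          MPI_Comm communicator)
    {
      // accumulate the local contribution of every block first ...
      double local_sum = 0.;
      for (std::size_t b = 0; b < blocks.size(); ++b)
        for (std::size_t i = 0; i < blocks[b].size(); ++i)
          local_sum += blocks[b][i] * blocks[b][i];

      // ... then perform one collective reduction for the whole block
      // vector, instead of one MPI_Allreduce per block as before
      double global_sum = 0.;
      MPI_Allreduce (&local_sum, &global_sum, 1, MPI_DOUBLE, MPI_SUM,
                     communicator);
      return std::sqrt (global_sum);
    }

Since the latency of a global reduction is roughly independent of the
message size, fusing n_blocks() reductions into one removes n_blocks()-1
synchronization points per inner product or norm.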
diff --git a/deal.II/include/deal.II/lac/parallel_vector.h b/deal.II/include/deal.II/lac/parallel_vector.h
index 9440ccc1d5..7154c94b24 100644
--- a/deal.II/include/deal.II/lac/parallel_vector.h
+++ b/deal.II/include/deal.II/lac/parallel_vector.h
@@ -33,6 +33,7 @@ namespace parallel
 {
  namespace distributed
  {
+    template <typename Number> class BlockVector;

    /*! @addtogroup Vectors
     *@{
@@ -309,7 +310,7 @@
      void compress_finish (::dealii::VectorOperation::values operation);

      /**
-       * @deprecated: use compress(VectorOperation::values) instead.
+       * @deprecated: use compress_finish(VectorOperation::values) instead.
       */
      void compress_finish (const bool add_ghost_data = true) DEAL_II_DEPRECATED;

@@ -756,6 +757,53 @@
      //@}

    private:
+      /**
+       * Local part of all_zero().
+       */
+      bool all_zero_local () const;
+
+      /**
+       * Local part of is_non_negative().
+       */
+      bool is_non_negative_local () const;
+
+      /**
+       * Local part of operator==.
+       */
+      template <typename Number2>
+      bool vectors_equal_local (const Vector<Number2> &v) const;
+
+      /**
+       * Local part of the inner product of two vectors.
+       */
+      template <typename Number2>
+      Number inner_product_local (const Vector<Number2> &V) const;
+
+      /**
+       * Local part of norm_sqr().
+       */
+      real_type norm_sqr_local () const;
+
+      /**
+       * Local part of mean_value().
+       */
+      Number mean_value_local () const;
+
+      /**
+       * Local part of l1_norm().
+       */
+      real_type l1_norm_local () const;
+
+      /**
+       * Local part of lp_norm().
+       */
+      real_type lp_norm_local (const real_type p) const;
+
+      /**
+       * Local part of linfty_norm().
+       */
+      real_type linfty_norm_local () const;
+
      /**
       * Shared pointer to store the parallel partitioning information. This
       * information can be shared between several vectors that have the same
@@ -826,6 +874,11 @@
       * Make all other vector types friends.
       */
      template <typename Number2> friend class Vector;
+
+      /**
+       * Make BlockVector type friends.
+       */
+      template <typename Number2> friend class BlockVector;
    };

    /*@}*/
@@ -1048,19 +1101,42 @@
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::all_zero_local () const
+    {
+      return partitioner->local_size()>0 ? vector_view.all_zero () : true;
+    }
+
+
+
    template <typename Number>
    inline
    bool
    Vector<Number>::all_zero () const
    {
-      // use int instead of bool
-      int local_result = (partitioner->local_size()>0 ?
-                          -vector_view.all_zero () : -1);
+      // use int instead of bool. in order to make global reduction operations
+      // work also when MPI_Init was not called, only call MPI_Allreduce
+      // commands when there is more than one processor (note that reinit()
+      // functions handle this case correctly through the job_supports_mpi()
+      // query). this is the same in all the functions below
+      int local_result = -static_cast<int>(all_zero_local());
      if (partitioner->n_mpi_processes() > 1)
        return -Utilities::MPI::max(local_result,
                                    partitioner->get_communicator());
      else
-        return local_result;
+        return -local_result;
    }
+
+
+
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::is_non_negative_local () const
+    {
+      return partitioner->local_size()>0 ? vector_view.is_non_negative () : true;
+    }
@@ -1070,17 +1146,12 @@
    bool
    Vector<Number>::is_non_negative () const
    {
-      // use int instead of bool. in order to make this operation work also
-      // when MPI_Init was not called, only call MPI_Allreduce commands when
-      // there is more than one processor (note that reinit() functions handle
-      // this case correctly through the job_supports_mpi() query)
-      int local_result = (partitioner->local_size()>0 ?
-                          -vector_view.is_non_negative () : -1);
+      int local_result = -static_cast<int>(is_non_negative_local());
      if (partitioner->n_mpi_processes() > 1)
-        return -Utilities::MPI::max(local_result,
-                                    partitioner->get_communicator());
+        return -Utilities::MPI::max(local_result,
+                                    partitioner->get_communicator());
      else
-        return local_result;
+        return -local_result;
    }
@@ -1089,16 +1160,24 @@
    template <typename Number>
    template <typename Number2>
    inline
    bool
-    Vector<Number>::operator == (const Vector<Number2> &v) const
+    Vector<Number>::vectors_equal_local (const Vector<Number2> &v) const
    {
-      AssertDimension (local_size(), v.local_size());
+      return partitioner->local_size()>0 ?
+             vector_view.template operator == <Number2>(v.vector_view)
+             : true;
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    bool
+    Vector<Number>::operator == (const Vector<Number2> &v) const
+    {
      // MPI does not support bools, so use unsigned int instead. Two vectors
      // are equal if the check for non-equal fails on all processors
-      unsigned int local_result = (partitioner->local_size()>0 ?
-                                   vector_view.template operator !=
-                                   (v.vector_view)
-                                   : 0 );
+      unsigned int local_result = static_cast<unsigned int>(!vectors_equal_local(v));
      unsigned int result =
        partitioner->n_mpi_processes() > 1
        ?
@@ -1121,15 +1200,28 @@
+    template <typename Number>
+    template <typename Number2>
+    inline
+    Number
+    Vector<Number>::inner_product_local(const Vector<Number2> &V) const
+    {
+      // on some processors, the size might be zero, which is not allowed by
+      // the dealii::Vector class. Therefore, insert a check here
+      return (partitioner->local_size()>0 ?
+              vector_view.operator* (V.vector_view)
+              : Number());
+    }
+
+
+
    template <typename Number>
    template <typename Number2>
    inline
    Number
    Vector<Number>::operator * (const Vector<Number2> &V) const
    {
-      Number local_result = (partitioner->local_size()>0 ?
-                             vector_view.operator* (V.vector_view)
-                             : 0);
+      Number local_result = inner_product_local(V);
      if (partitioner->n_mpi_processes() > 1)
        return Utilities::MPI::sum (local_result,
                                    partitioner->get_communicator());
@@ -1139,17 +1231,22 @@
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::norm_sqr_local () const
+    {
+      return partitioner->local_size()>0 ? vector_view.norm_sqr() : real_type();
+    }
+
+
+
    template <typename Number>
    inline
    typename Vector<Number>::real_type
    Vector<Number>::norm_sqr () const
    {
-      // on some processors, the size might be zero,
-      // which is not allowed by the base
-      // class. Therefore, insert a check here
-      real_type local_result = (partitioner->local_size()>0 ?
-                                vector_view.norm_sqr()
-                                : 0);
+      real_type local_result = norm_sqr_local();
      if (partitioner->n_mpi_processes() > 1)
        return Utilities::MPI::sum(local_result,
                                   partitioner->get_communicator());
@@ -1159,33 +1256,52 @@
+    template <typename Number>
+    inline
+    Number
+    Vector<Number>::mean_value_local () const
+    {
+      Assert (partitioner->size()!=0, ExcEmptyObject());
+      return (partitioner->local_size()>0 ?
+              vector_view.mean_value()
+              : Number());
+    }
+
+
+
    template <typename Number>
    inline
    Number
    Vector<Number>::mean_value () const
    {
-      Number local_result =
-        (partitioner->local_size()>0 ?
-         vector_view.mean_value()
-         : 0)
-        *((real_type)partitioner->local_size()/(real_type)partitioner->size());
+      Number local_result = mean_value_local();
      if (partitioner->n_mpi_processes() > 1)
-        return Utilities::MPI::sum (local_result,
-                                    partitioner->get_communicator());
+        return Utilities::MPI::sum (local_result *
+                                    (real_type)partitioner->local_size(),
+                                    partitioner->get_communicator())
+               /(real_type)partitioner->size();
      else
        return local_result;
    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::l1_norm_local () const
+    {
+      return partitioner->local_size()>0 ? vector_view.l1_norm() : real_type();
+    }



    template <typename Number>
    inline
    typename Vector<Number>::real_type
    Vector<Number>::l1_norm () const
    {
-      real_type local_result = (partitioner->local_size()>0 ?
-                                vector_view.l1_norm()
-                                : 0);
+      real_type local_result = l1_norm_local();
      if (partitioner->n_mpi_processes() > 1)
        return Utilities::MPI::sum(local_result,
                                   partitioner->get_communicator());
@@ -1205,14 +1321,22 @@
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::lp_norm_local (const real_type p) const
+    {
+      return partitioner->local_size()>0 ? vector_view.lp_norm(p) : real_type();
+    }
+
+
+
    template <typename Number>
    inline
    typename Vector<Number>::real_type
    Vector<Number>::lp_norm (const real_type p) const
    {
-      const real_type local_result = (partitioner->local_size()>0 ?
-                                      vector_view.lp_norm(p)
-                                      : 0);
+      const real_type local_result = lp_norm_local(p);
      if (partitioner->n_mpi_processes() > 1)
        return std::pow (Utilities::MPI::sum(std::pow(local_result,p),
                                             partitioner->get_communicator()),
@@ -1223,14 +1347,22 @@
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::linfty_norm_local () const
+    {
+      return partitioner->local_size()>0 ? vector_view.linfty_norm() : real_type();
+    }
+
+
+
    template <typename Number>
    inline
    typename Vector<Number>::real_type
    Vector<Number>::linfty_norm () const
    {
-      const real_type local_result = (partitioner->local_size()>0 ?
-                                      vector_view.linfty_norm()
-                                      : 0);
+      const real_type local_result = linfty_norm_local();
      if (partitioner->n_mpi_processes() > 1)
        return Utilities::MPI::max (local_result,
                                    partitioner->get_communicator());
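[Editorial sketch] The changes to parallel_vector.h apply one pattern
throughout: every global reduction is split into a private,
communication-free *_local() kernel plus a public wrapper that performs
the MPI reduction, and the new BlockVector is made a friend so it can
combine the kernels of all its blocks before reducing. A minimal sketch of
that structure -- simplified types, hypothetical class names, plain MPI
instead of Utilities::MPI -- for illustration only:

    #include <mpi.h>

    class DistributedVector
    {
    public:
      // public interface: one collective operation on the local result
      double norm_sqr (MPI_Comm communicator) const
      {
        double local = norm_sqr_local (), global = 0.;
        MPI_Allreduce (&local, &global, 1, MPI_DOUBLE, MPI_SUM, communicator);
        return global;
      }

    private:
      // communication-free kernel; a block vector can sum these over its
      // blocks and then reduce only once, which is what the commit exploits
      double norm_sqr_local () const
      {
        double sum = 0.;
        for (unsigned int i = 0; i < n_local; ++i)
          sum += values[i] * values[i];
        return sum;
      }

      unsigned int  n_local;
      const double *values;

      // grants the block vector access to the local kernels
      friend class DistributedBlockVector;
    };

Note also the reworked mean_value(): each process now contributes its
local mean weighted by its local size, and the single division by the
global size happens after the reduction, so the result is unchanged while
the local part stays communication-free.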
diff --git a/tests/mpi/parallel_block_vector_01.cc b/tests/mpi/parallel_block_vector_01.cc
new file mode 100644
index 0000000000..450c701e5b
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_01.cc
@@ -0,0 +1,251 @@
+//-------------------------- parallel_block_vector_01.cc -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+//
+//-------------------------- parallel_block_vector_01.cc -----------------------
+
+// check global reduction operation (norms, operator==, operator!=) on
+// parallel block vector (similar test case as parallel_vector_06).
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+  // each processor from processor 1 to 8
+  // owns 2 indices (the other processors do
+  // not own any dof), and all processors are
+  // ghosting element 1 (the second)
+  IndexSet local_owned(std::min(16U,numproc*2));
+  if (myid < 8)
+    local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_owned,
+                                          MPI_COMM_WORLD);
+
+  // set local values
+  if (myid < 8)
+    {
+      v(myid*2)=myid*2.0;
+      v(myid*2+1)=myid*2.0+1.0;
+    }
+  v.compress(VectorOperation::insert);
+  v*=2.0;
+  if (myid < 8)
+    {
+      Assert(v(myid*2) == myid*4.0, ExcInternalError());
+      Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+    }
+
+  parallel::distributed::BlockVector<double> w(3);
+  for (unsigned int i=0; i<3; ++i)
+    w.block(i) = v;
+  w.collect_sizes();
+
+  // check l2 norm
+  {
+    const double l2_norm = w.l2_norm();
+    if (myid == 0)
+      deallog << "l2 norm: " << l2_norm << std::endl;
+    Assert(std::abs(v.l2_norm()*std::sqrt(3.)-w.l2_norm()) < 1e-13,
+           ExcInternalError());
+  }
+
+  // check l1 norm
+  {
+    const double l1_norm = w.l1_norm();
+    if (myid == 0)
+      deallog << "l1 norm: " << l1_norm << std::endl;
+    Assert(std::abs(v.l1_norm()*3.-w.l1_norm()) < 1e-14,
+           ExcInternalError());
+  }
+
+  // check linfty norm
+  {
+    const double linfty_norm = w.linfty_norm();
+    if (myid == 0)
+      deallog << "linfty norm: " << linfty_norm << std::endl;
+    Assert(v.linfty_norm()==w.linfty_norm(),
+           ExcInternalError());
+  }
+
+  // check lp norm
+  {
+    const double lp_norm = w.lp_norm(2.2);
+    if (myid == 0)
+      deallog << "l2.2 norm: " << lp_norm << std::endl;
+
+    Assert (std::fabs (w.l2_norm() - w.lp_norm(2.0)) < 1e-14,
+            ExcInternalError());
+  }
+
+  // check mean value (should be equal to l1
+  // norm divided by vector size here since we
+  // have no negative entries)
+  {
+    const double mean = w.mean_value();
+    if (myid == 0)
+      deallog << "Mean value: " << mean << std::endl;
+
+    Assert (std::fabs (mean * w.size() - w.l1_norm()) < 1e-15,
+            ExcInternalError());
+  }
+
+  // check inner product
+  {
+    const double norm_sqr = w.norm_sqr();
+    Assert (std::fabs(w * w - norm_sqr) < 1e-15,
+            ExcInternalError());
+    parallel::distributed::BlockVector<double> w2;
+    w2 = w;
+    Assert (std::fabs(w2 * w - norm_sqr) < 1e-15,
+            ExcInternalError());
+
+    if (myid < 8)
+      w2.block(0).local_element(0) = -1;
+    const double inner_prod = w * w2;
+    if (myid == 0)
+      deallog << "Inner product: " << inner_prod << std::endl;
+  }
+
+  // check operator ==
+  {
+    parallel::distributed::BlockVector<double> w2 (w);
+    bool equal = (w2 == w);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+
+    bool not_equal = (w2 != w);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+    // change v2 on one proc only
+    if (myid == 0)
+      w2.block(0).local_element(1) = 2.2212;
+
+    equal = (w2 == w);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (w2 != w);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+    // reset
+    w2 = w;
+    equal = (w2 == w);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (w2 != w);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+    // change some value on all procs
+    if (myid < 8)
+      w2.block(1).local_element(0) = -1;
+    equal = (w2 == w);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (w2 != w);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+  }
+
+  // check all_zero
+  {
+    bool allzero = w.all_zero();
+    if (myid == 0)
+      deallog << " v==0 ? " << allzero << std::endl;
+    parallel::distributed::BlockVector<double> w2;
+    w2.reinit (w);
+    allzero = w2.all_zero();
+    if (myid == 0)
+      deallog << " v2==0 ? " << allzero << std::endl;
+
+    // now change one element to nonzero
+    if (myid == 0)
+      w2.block(1).local_element(1) = 1;
+    allzero = w2.all_zero();
+    if (myid == 0)
+      deallog << " v2==0 ? " << allzero << std::endl;
+  }
+
+
+  // check all_non_negative
+  {
+    bool allnonneg = w.is_non_negative();
+    if (myid == 0)
+      deallog << " v>=0 ? " << allnonneg << std::endl;
+    parallel::distributed::BlockVector<double> w2, w3;
+
+    // vector where all processors that own
+    // elements have a negative entry
+    w2 = w;
+    if (myid < 8)
+      w2.block(0).local_element(0) = -1;
+    allnonneg = w2.is_non_negative();
+    if (myid == 0)
+      deallog << " v2>=0 ? " << allnonneg << std::endl;
+
+    // zero vector
+    w3.reinit (w2);
+    allnonneg = w3.is_non_negative();
+    if (myid == 0)
+      deallog << " v3>=0 ? " << allnonneg << std::endl;
+
+    // only one processor has a negative entry
+    w3 = w;
+    if (myid == 1 || numproc==1)
+      w3.block(0).local_element(0) = -1;
+    allnonneg = w3.is_non_negative();
+    if (myid == 0)
+      deallog << " v3>=0 ? " << allnonneg << std::endl;
+  }
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_block_vector_01").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_block_vector_01/ncpu_1/cmp/generic b/tests/mpi/parallel_block_vector_01/ncpu_1/cmp/generic
new file mode 100644
index 0000000000..2b1e9bf7df
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_01/ncpu_1/cmp/generic
@@ -0,0 +1,24 @@
+
+DEAL:0::numproc=1
+DEAL:0::l2 norm: 3.464
+DEAL:0::l1 norm: 6.000
+DEAL:0::linfty norm: 2.000
+DEAL:0::l2.2 norm: 3.295
+DEAL:0::Mean value: 1.000
+DEAL:0::Inner product: 12.00
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0:: v>=0 ? 1
+DEAL:0:: v2>=0 ? 0
+DEAL:0:: v3>=0 ? 1
+DEAL:0:: v3>=0 ? 0
+DEAL:0::OK
diff --git a/tests/mpi/parallel_block_vector_01/ncpu_10/cmp/generic b/tests/mpi/parallel_block_vector_01/ncpu_10/cmp/generic
new file mode 100644
index 0000000000..53261a12e3
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_01/ncpu_10/cmp/generic
@@ -0,0 +1,24 @@
+
+DEAL:0::numproc=10
+DEAL:0::l2 norm: 122.0
+DEAL:0::l1 norm: 720.0
+DEAL:0::linfty norm: 30.00
+DEAL:0::l2.2 norm: 104.6
+DEAL:0::Mean value: 15.00
+DEAL:0::Inner product: 1.253e+04
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0:: v>=0 ? 1
+DEAL:0:: v2>=0 ? 0
+DEAL:0:: v3>=0 ? 1
+DEAL:0:: v3>=0 ? 0
+DEAL:0::OK
diff --git a/tests/mpi/parallel_block_vector_01/ncpu_4/cmp/generic b/tests/mpi/parallel_block_vector_01/ncpu_4/cmp/generic
new file mode 100644
index 0000000000..8fd4464a52
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_01/ncpu_4/cmp/generic
@@ -0,0 +1,24 @@
+
+DEAL:0::numproc=4
+DEAL:0::l2 norm: 40.99
+DEAL:0::l1 norm: 168.0
+DEAL:0::linfty norm: 14.00
+DEAL:0::l2.2 norm: 36.31
+DEAL:0::Mean value: 7.000
+DEAL:0::Inner product: 1432.
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0:: v>=0 ? 1
+DEAL:0:: v2>=0 ? 0
+DEAL:0:: v3>=0 ? 1
+DEAL:0:: v3>=0 ? 0
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_interpolate.cc b/tests/mpi/parallel_vector_interpolate.cc
index 53b333f8e2..1efaa6a087 100644
--- a/tests/mpi/parallel_vector_interpolate.cc
+++ b/tests/mpi/parallel_vector_interpolate.cc
@@ -61,7 +61,6 @@ void test ()
  v1.update_ghost_values();

  FETools::interpolate(dof1, v1, dof2, v2);
-  v2.print(std::cout);

  for (unsigned int i=0; i
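[Editorial sketch] For reference, the new block-vector interface is
exercised exactly like the plain-vector one. A condensed usage sketch
along the lines of the test above, assuming the same deal.II headers and
an MPI launch (this is illustrative, not part of the commit):

    #include <deal.II/base/utilities.h>
    #include <deal.II/base/index_set.h>
    #include <deal.II/lac/parallel_vector.h>
    #include <deal.II/lac/parallel_block_vector.h>
    #include <iostream>

    using namespace dealii;

    int main (int argc, char **argv)
    {
      Utilities::System::MPI_InitFinalize mpi_initialization (argc, argv);
      const unsigned int myid    = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
      const unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);

      // every process owns two consecutive entries
      IndexSet local_owned (numproc*2);
      local_owned.add_range (myid*2, myid*2+2);

      parallel::distributed::Vector<double> v (local_owned, local_owned,
                                               MPI_COMM_WORLD);
      v(myid*2)   = 1.;
      v(myid*2+1) = 2.;
      v.compress (VectorOperation::insert);

      // three blocks sharing the same parallel layout
      parallel::distributed::BlockVector<double> w (3);
      for (unsigned int b=0; b<3; ++b)
        w.block(b) = v;
      w.collect_sizes();

      // each of these reductions now costs a single MPI_Allreduce,
      // independent of the number of blocks
      if (myid == 0)
        std::cout << "l2 norm: " << w.l2_norm()
                  << ", w*w: "   << w * w << std::endl;
    }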