From b8fbe719692e4e31c00c4e1bebea1c245a0466b7 Mon Sep 17 00:00:00 2001 From: Bruno Turcksin Date: Thu, 19 Nov 2015 14:35:34 -0600 Subject: [PATCH] Add EpetraWrappers::Vector. --- include/deal.II/lac/la_vector.h | 4 +- include/deal.II/lac/read_write_vector.h | 26 + .../deal.II/lac/read_write_vector.templates.h | 27 +- include/deal.II/lac/trilinos_epetra_vector.h | 289 ++++++++++ include/deal.II/lac/vector_space_vector.h | 2 +- source/lac/CMakeLists.txt | 1 + source/lac/trilinos_epetra_vector.cc | 529 ++++++++++++++++++ tests/lac/la_vector_norms.cc | 2 +- tests/trilinos/epetra_vector_01.cc | 215 +++++++ .../trilinos/epetra_vector_01.mpirun=2.output | 2 + tests/trilinos/epetra_vector_02.cc | 201 +++++++ .../trilinos/epetra_vector_02.mpirun=2.output | 2 + 12 files changed, 1291 insertions(+), 9 deletions(-) create mode 100644 include/deal.II/lac/trilinos_epetra_vector.h create mode 100644 source/lac/trilinos_epetra_vector.cc create mode 100644 tests/trilinos/epetra_vector_01.cc create mode 100644 tests/trilinos/epetra_vector_01.mpirun=2.output create mode 100644 tests/trilinos/epetra_vector_02.cc create mode 100644 tests/trilinos/epetra_vector_02.mpirun=2.output diff --git a/include/deal.II/lac/la_vector.h b/include/deal.II/lac/la_vector.h index cfb00525fb..09c70394af 100644 --- a/include/deal.II/lac/la_vector.h +++ b/include/deal.II/lac/la_vector.h @@ -216,7 +216,7 @@ namespace LinearAlgebra * vec.locally_owned_elements() == complete_index_set(vec.size()) * @endcode */ - const dealii::IndexSet &locally_owned_elements() const; + const dealii::IndexSet locally_owned_elements() const; /** * Prints the vector to the output stream @p out. @@ -391,7 +391,7 @@ namespace LinearAlgebra template inline - const dealii::IndexSet &Vector::locally_owned_elements() const + const dealii::IndexSet Vector::locally_owned_elements() const { return ReadWriteVector::get_stored_elements(); } diff --git a/include/deal.II/lac/read_write_vector.h b/include/deal.II/lac/read_write_vector.h index 207ccb289c..a2574caa07 100644 --- a/include/deal.II/lac/read_write_vector.h +++ b/include/deal.II/lac/read_write_vector.h @@ -29,6 +29,9 @@ #include #include +#ifdef DEAL_II_WITH_TRILINOS +#include "Epetra_MultiVector.h" +#endif DEAL_II_NAMESPACE_OPEN @@ -50,6 +53,14 @@ namespace TrilinosWrappers class Vector; } } + +namespace LinearAlgebra +{ + namespace EpetraWrappers + { + class Vector; + } +} #endif namespace LinearAlgebra @@ -221,6 +232,13 @@ namespace LinearAlgebra */ ReadWriteVector & operator= (const TrilinosWrappers::MPI::Vector &trilinos_vec); + + /** + * Imports all the elements present in the vector's IndexSet from the input + * vector @p epetra_vec. + */ + ReadWriteVector & + operator= (const EpetraWrappers::Vector &epetra_vec); #endif /** @@ -405,6 +423,14 @@ namespace LinearAlgebra //@} protected: +#ifdef DEAL_II_WITH_TRILINOS + /** + * Imports all the elements present in the vector's IndexSet from the input + * vector @p multivector. + */ + ReadWriteVector &import(const Epetra_MultiVector &multivector, + const IndexSet &locally_owned_elements); +#endif /** * Return the local position of @p global_index. 
diff --git a/include/deal.II/lac/read_write_vector.templates.h b/include/deal.II/lac/read_write_vector.templates.h index cd2420111f..936574e918 100644 --- a/include/deal.II/lac/read_write_vector.templates.h +++ b/include/deal.II/lac/read_write_vector.templates.h @@ -21,6 +21,7 @@ #include #include #include +#include #ifdef DEAL_II_WITH_TRILINOS # include @@ -164,11 +165,12 @@ namespace LinearAlgebra #ifdef DEAL_II_WITH_TRILINOS template ReadWriteVector & - ReadWriteVector::operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec) + ReadWriteVector::import(const Epetra_MultiVector &multivector, + const IndexSet &locally_owned_elements) { // Copy the local elements std::vector trilinos_indices, readwrite_indices; - trilinos_vec.locally_owned_elements().fill_index_vector(trilinos_indices); + locally_owned_elements.fill_index_vector(trilinos_indices); stored_elements.fill_index_vector(readwrite_indices); std::vector intersection(trilinos_indices.size()); std::vector::iterator end; @@ -185,7 +187,7 @@ namespace LinearAlgebra ++j; trilinos_position[i] = j; } - double *values = trilinos_vec.trilinos_vector().Values(); + double *values = multivector.Values(); for (size_t i=0; i off_proc_indices(readwrite_indices.size()); // Subtract the elements of readwrite that are in intersection. end = std::set_difference(readwrite_indices.begin(), readwrite_indices.end(), @@ -211,7 +213,7 @@ namespace LinearAlgebra delete [] off_proc_array; Epetra_Import import(target_map, source_map); Epetra_FEVector target_vector(target_map); - target_vector.Import(trilinos_vec.trilinos_vector(), import, Insert); + target_vector.Import(multivector, import, Insert); values = target_vector.Values(); for (unsigned int i=0; i + ReadWriteVector & + ReadWriteVector::operator = (const TrilinosWrappers::MPI::Vector &trilinos_vec) + { + return import(trilinos_vec.trilinos_vector(), trilinos_vec.locally_owned_elements()); + } + + + template + ReadWriteVector & + ReadWriteVector::operator = (const LinearAlgebra::EpetraWrappers::Vector &trilinos_vec) + { + return import(trilinos_vec.trilinos_vector(), trilinos_vec.locally_owned_elements()); + } + template void diff --git a/include/deal.II/lac/trilinos_epetra_vector.h b/include/deal.II/lac/trilinos_epetra_vector.h new file mode 100644 index 0000000000..61301b6f6b --- /dev/null +++ b/include/deal.II/lac/trilinos_epetra_vector.h @@ -0,0 +1,289 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2015 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef dealii__trilinos_epetra_vector_h +#define dealii__trilinos_epetra_vector_h + + +#include + +#ifdef DEAL_II_WITH_TRILINOS + +#ifdef DEAL_II_WITH_MPI + +#include +#include +#include +#include +#include "mpi.h" +#include "Epetra_FEVector.h" + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + // Forward declaration + template + class ReadWriteVector; + + namespace EpetraWrappers + { + /** + * This class implements a wrapper to the the Trilinos distributed vector + * class Epetra_FEVector. 
This class is derived from the + * LinearAlgebra::VectorSpaceVector class. Note however that Epetra only + * works with Number = double. + * + * @ingroup TrilinosWrappers + * @ingroup Vectors + * @author Bruno Turcksin, 2015 + */ + class Vector : public VectorSpaceVector + { + public: + /** + * Constructor. Create a vector of dimension zero. + */ + Vector(); + + /** + * Copy constructor. Sets the dimension and the partitioning to that of + * the given vector and copies all elements. + */ + Vector(const Vector &V); + + /** + * This constructor takes an IndexSet that defines how to distribute the + * individual components among the MPI processors. Since it also + * includes information about the size of the vector, this is all we + * need to generate a %parallel vector. + */ + Vector(const IndexSet &parallel_partitioner, + const MPI_Comm &communicator = MPI_COMM_WORLD); + + /** + * Reinit functionality. This function destroys the old vector content + * and generates a new one based on the input partitioning. The flag + * omit_zeroing_entries determines whether the vector should be + * filled with zero (false) or left untouched (true). + */ + void reinit (const IndexSet &parallel_partitioner, + const MPI_Comm &communicator = MPI_COMM_WORLD, + const bool omit_zeroing_entries = false); + + /** + * Copy function. This function takes a Vector and copies all the + * elements. The Vector will have the same parallel distribution as @p + * V. + */ + Vector &operator= (const Vector &V); + + /** + * Copy function. Copy the elements from the ReadWriteVector @p V into the + * Vector. If elements of @p V are not locally owned, there is a + * communication. + */ + Vector &operator= (const ::dealii::LinearAlgebra::ReadWriteVector &V); + + /** + * Multiply the entire vector by a fixed factor. + */ + virtual VectorSpaceVector &operator*= (const double factor); + + /** + * Divide the entire vector by a fixed factor. + */ + virtual VectorSpaceVector &operator/= (const double factor); + + /** + * Add the vector @p V to the present one. + */ + virtual VectorSpaceVector &operator+= (const VectorSpaceVector &V); + + /** + * Subtract the vector @p V from the present one. + */ + virtual VectorSpaceVector &operator-= (const VectorSpaceVector &V); + + /** + * Return the scalar product of two vectors. The vectors need to have the + * same layout. + */ + virtual double operator* (const VectorSpaceVector &V); + + /** + * Add @p a to all components. Note that @p a is a scalar, not a vector. + */ + virtual void add(const double a); + + /** + * Simple addition of a multiple of a vector, i.e. *this += + * a*V. The vectors need to have the same layout. + */ + virtual void add(const double a, const VectorSpaceVector &V); + + /** + * Multiple addition of scaled vectors, i.e. *this += + * a*V+b*W. The vectors need to have the same layout. + */ + virtual void add(const double a, const VectorSpaceVector &V, + const double b, const VectorSpaceVector &W); + + /** + * Scaling and simple addition of a multiple of a vector, i.e. *this + * = s*(*this)+a*V. + */ + virtual void sadd(const double s, const double a, + const VectorSpaceVector &V); + + /** + * Scale each element of this vector by the corresponding element in the + * argument. This function is mostly meant to simulate multiplication + * (and immediate re-assignment) by a diagonal scaling matrix. The + * vectors need to have the same layout. + */ + virtual void scale(const VectorSpaceVector &scaling_factors); + + /** + * Assignment *this = a*V.
+ */ + virtual void equ(const double a, const VectorSpaceVector &V); + + + /** + * Returns the l1 norm of the vector (i.e., the sum of the + * absolute values of all entries among all processors). + */ + virtual double l1_norm(); + + /** + * Returns the l2 norm of the vector (i.e., the square root of + * the sum of the squares of all entries among all processors). + */ + virtual double l2_norm(); + + /** + * Returns the maximum norm of the vector (i.e., the maximum absolute value + * among all entries and among all processors). + */ + virtual double linfty_norm(); + + /** + * Performs a combined operation of a vector addition and a subsequent + * inner product, returning the value of the inner product. In other + * words, the result of this function is the same as if the user called + * @code + * this->add(a, V); + * return_value = *this * W; + * @endcode + * + * The reason this function exists is that this operation involves less + * memory transfer than calling the two functions separately. This + * method only needs to load three vectors, @p this, @p V, @p W, whereas + * calling separate methods means to load the calling vector @p this + * twice. Since most vector operations are memory transfer limited, this + * reduces the time by 25\% (or 50\% if @p W equals @p this). + */ + virtual double add_and_dot(const double a, + const VectorSpaceVector &V, + const VectorSpaceVector &W); + + /** + * Returns the global size of the vector, equal to the sum of the number of + * locally owned indices among all processors. + */ + virtual size_type size() const; + + /** + * Return the MPI communicator object in use with this object. + */ + MPI_Comm get_mpi_communicator() const; + + /** + * Return an index set that describes which elements of this vector are + * owned by the current processor. As a consequence, the index sets returned + * on different processors if this is a distributed vector will form disjoint + * sets that add up to the complete index set. Obviously, if a vector is + * created on only one processor, then the result would satisfy + * @code + * vec.locally_owned_elements() == complete_index_set(vec.size()) + * @endcode + */ + virtual const ::dealii::IndexSet locally_owned_elements() const; + + /** + * Return a const reference to the underlying Trilinos + * Epetra_FEVector class. + */ + const Epetra_FEVector &trilinos_vector() const; + + /** + * Return a (modifiable) reference to the underlying Trilinos + * Epetra_FEVector class. + */ + Epetra_FEVector &trilinos_vector(); + + /** + * Prints the vector to the output stream @p out. + */ + virtual void print(std::ostream &out, + const unsigned int precision=3, + const bool scientific=true, + const bool across=true) const; + + /** + * Returns the memory consumption of this class in bytes. + */ + virtual std::size_t memory_consumption() const; + + /** + * The vectors have different partitioning, i.e. they use different + * IndexSets. + */ + DeclException0(ExcDifferentParallelPartitioning); + + /** + * Attempt to perform an operation between two incompatible vector types. + * + * @ingroup Exceptions + */ + DeclException0(ExcVectorTypeNotCompatible); + + /** + * Exception thrown by an error in Trilinos. + * + * @ingroup Exceptions + */ + DeclException1 (ExcTrilinosError, + int, + << "An error with error number " << arg1 + << " occurred while calling a Trilinos function"); + + private: + /** + * Pointer to the actual Epetra vector object.
+ */ + std_cxx11::shared_ptr vector; + }; + } +} + +DEAL_II_NAMESPACE_CLOSE + +#endif + +#endif + +#endif diff --git a/include/deal.II/lac/vector_space_vector.h b/include/deal.II/lac/vector_space_vector.h index 54f818fe9e..ec008f89d3 100644 --- a/include/deal.II/lac/vector_space_vector.h +++ b/include/deal.II/lac/vector_space_vector.h @@ -164,7 +164,7 @@ namespace LinearAlgebra * vec.locally_owned_elements() == complete_index_set(vec.size()) * @endcode */ - virtual const dealii::IndexSet &locally_owned_elements() const = 0; + virtual const dealii::IndexSet locally_owned_elements() const = 0; /** * Print the vector to the output stream @p out. diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt index 648edcfc28..b131f2746f 100644 --- a/source/lac/CMakeLists.txt +++ b/source/lac/CMakeLists.txt @@ -111,6 +111,7 @@ IF(DEAL_II_WITH_TRILINOS) ${_src} trilinos_block_sparse_matrix.cc trilinos_block_vector.cc + trilinos_epetra_vector.cc trilinos_parallel_block_vector.cc trilinos_precondition.cc trilinos_precondition_ml.cc diff --git a/source/lac/trilinos_epetra_vector.cc b/source/lac/trilinos_epetra_vector.cc new file mode 100644 index 0000000000..4bf7b32ea9 --- /dev/null +++ b/source/lac/trilinos_epetra_vector.cc @@ -0,0 +1,529 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2015 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#include + +#ifdef DEAL_II_WITH_TRILINOS +#ifdef DEAL_II_WITH_MPI + +#include +#include "Epetra_Import.h" +#include "Epetra_Map.h" +#include "Epetra_MpiComm.h" + + +DEAL_II_NAMESPACE_OPEN + +namespace LinearAlgebra +{ + namespace EpetraWrappers + { + Vector::Vector() + : + vector(new Epetra_FEVector(Epetra_Map(0,1,0,Epetra_MpiComm(MPI_COMM_SELF)))) + {} + + + + Vector::Vector(const Vector &V) + : + vector(new Epetra_FEVector(V.trilinos_vector())) + {} + + + + Vector::Vector(const IndexSet &parallel_partitioner, + const MPI_Comm &communicator) + : + vector(new Epetra_FEVector(parallel_partitioner.make_trilinos_map(communicator,false))) + {} + + + + void Vector::reinit(const IndexSet &parallel_partitioner, + const MPI_Comm &communicator, + const bool omit_zeroing_entries) + { + Epetra_Map input_map = parallel_partitioner.make_trilinos_map(communicator,false); + if (vector->Map().SameAs(input_map)==false) + vector.reset(new Epetra_FEVector(input_map)); + else if (omit_zeroing_entries==false) + { + const int ierr = vector->PutScalar(0.); + (void) ierr; + Assert(ierr==0, ExcTrilinosError(ierr)); + } + } + + + + Vector &Vector::operator= (const Vector &V) + { + // Distinguish three cases: + // - First case: both vectors have the same layout. + // - Second case: both vectors have the same size but different layout. + // - Third case: the vectors have different size.
+ if (vector->Map().SameAs(V.trilinos_vector().Map())) + *vector = V.trilinos_vector(); + else + { + if (size()==V.size()) + { + Epetra_Import data_exchange(vector->Map(), V.trilinos_vector().Map()); + + const int ierr = vector->Import(V.trilinos_vector(), data_exchange, Insert); + Assert(ierr==0, ExcTrilinosError(ierr)); + } + else + vector.reset(new Epetra_FEVector(V.trilinos_vector())); + } + + return *this; + } + + + + Vector &Vector::operator= (const ::dealii::LinearAlgebra::ReadWriteVector &V) + { + IndexSet elements_to_write(V.get_stored_elements()); + IndexSet locally_owned_elements(this->locally_owned_elements()); + IndexSet off_proc_elements(elements_to_write); + off_proc_elements.subtract_set(locally_owned_elements); + IndexSet on_proc_elements(elements_to_write); + on_proc_elements.subtract_set(off_proc_elements); + + // Write the elements that are locally owned. + IndexSet::ElementIterator elem = on_proc_elements.begin(), + elem_end = on_proc_elements.end(); + for (; elem!=elem_end; ++elem) + { + const TrilinosWrappers::types::int_type local_index = + vector->Map().LID(static_cast(*elem)); + (*vector)[0][local_index] = V[*elem]; + } + + + // Write the elements off-processor. Cannot use the Import function of Trilinos + // because V is not a Trilinos object. The most straightforward way to send + // the off-processor elements to the other processors is to use AllGather. This way + // every processor has access to all the off-processor elements. The + // problem with this method is that if every element in V is + // off-processor, it is possible that the buffers used to receive the data + // have twice the global size of the current Vector. + // TODO This won't scale past a few 10,000's of processors. + + // Get recv_count and the size of the buffer needed to receive the data. + const Epetra_MpiComm *epetra_comm + = dynamic_cast(&(vector->Comm())); + MPI_Comm comm = epetra_comm->GetMpiComm(); + int send_buffer_size(off_proc_elements.n_elements()); + int n_procs(0); + MPI_Comm_size(comm, &n_procs); + std::vector recv_count(n_procs); + MPI_Allgather(&send_buffer_size, 1, MPI_INT, &recv_count[0], 1, + MPI_INT, comm); + std::vector displacement(n_procs,0); + for (int i=1; i send_index_buffer(send_buffer_size); + elem = off_proc_elements.begin(); + for (int i=0; i recv_index_buffer(recv_buffer_size); + MPI_Allgatherv(&send_index_buffer[0], send_buffer_size, DEAL_II_DOF_INDEX_MPI_TYPE, + &recv_index_buffer[0], &recv_count[0], &displacement[0], + DEAL_II_DOF_INDEX_MPI_TYPE, comm); + + // Write the values in the send buffer + std::vector send_value_buffer(send_buffer_size); + elem = off_proc_elements.begin(); + for (int i=0; i recv_value_buffer(recv_buffer_size); + MPI_Allgatherv(&send_value_buffer[0], send_buffer_size, MPI_DOUBLE, + &recv_value_buffer[0], &recv_count[0], &displacement[0], MPI_DOUBLE, comm); + + // Write the data in the vector + for (unsigned int i=0; iMap().LID(static_cast( + recv_index_buffer[i])); + (*vector)[0][local_index] = recv_value_buffer[i]; + } + } + + return *this; + } + + + + VectorSpaceVector &Vector::operator*= (const double factor) + { + AssertIsFinite(factor); + vector->Scale(factor); + + return *this; + } + + + + VectorSpaceVector &Vector::operator/= (const double factor) + { + AssertIsFinite(factor); + Assert(factor!=0., ExcZero()); + *this *= 1./factor; + + return *this; + } + + + + VectorSpaceVector &Vector::operator+= (const VectorSpaceVector &V) + { + // Check that casting will work. + Assert(dynamic_cast(&V)!=NULL, ExcVectorTypeNotCompatible()); + + // Downcast V.
If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + // If the maps are the same we can Update right away. + if (vector->Map().SameAs(down_V.trilinos_vector().Map())) + vector->Update(1., down_V.trilinos_vector(), 1.); + else + { + Assert(this->size()==down_V.size(), + ExcDimensionMismatch(this->size(), down_V.size())); + +#if DEAL_II_TRILINOS_VERSION_GTE(11,11,0) + Epetra_Import data_exchange (vector->Map(), down_V.trilinos_vector().Map()); + int ierr = vector->Import(down_V.trilinos_vector(), data_exchange, Epetra_AddLocalAlso); + (void) ierr; + Assert(ierr==0, ExcTrilinosError(ierr)); +#else + // In versions older than 11.11 the Import function is broken for adding. + // Hence, we provide a workaround in this case. + + Epetra_MultiVector dummy(vector->Map(), 1, false); + Epetra_Import data_exchange(dummy.Map(), down_V.trilinos_vector().Map()); + + int ierr = dummy.Import(down_V.trilinos_vector(), data_exchange, Insert); + (void) ierr; + Assert(ierr==0, ExcTrilinosError(ierr)); + + ierr = vector->Update(1.0, dummy, 1.0); + (void) ierr; + Assert(ierr==0, ExcTrilinosError(ierr)); +#endif + } + + return *this; + } + + + + VectorSpaceVector &Vector::operator-= (const VectorSpaceVector &V) + { + this->add(-1.,V); + + return *this; + } + + + + double Vector::operator* (const VectorSpaceVector &V) + { + // Check that casting will work. + Assert(dynamic_cast(&V)!=NULL, + ExcVectorTypeNotCompatible()); + + // Downcast V. If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + Assert(this->size()==down_V.size(), + ExcDimensionMismatch(this->size(), down_V.size())); + Assert(vector->Map().SameAs(down_V.trilinos_vector().Map()), + ExcDifferentParallelPartitioning()); + + double result(0.); + const int ierr = vector->Dot(down_V.trilinos_vector(), &result); + (void) ierr; + Assert(ierr==0, ExcTrilinosError(ierr)); + + return result; + } + + + + void Vector::add(const double a) + { + AssertIsFinite(a); + const unsigned int local_size(vector->MyLength()); + for (unsigned int i=0; i<local_size; ++i) + (*vector)[0][i] += a; + } + + + + void Vector::add(const double a, const VectorSpaceVector &V) + { + // Check that casting will work. + Assert(dynamic_cast(&V)!=NULL, + ExcVectorTypeNotCompatible()); + + // Downcast V. If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + AssertIsFinite(a); + Assert(vector->Map().SameAs(down_V.trilinos_vector().Map()), + ExcDifferentParallelPartitioning()); + + const int ierr = vector->Update(a, down_V.trilinos_vector(), 1.); + Assert(ierr==0, ExcTrilinosError(ierr)); + } + + + + void Vector::add(const double a, const VectorSpaceVector &V, + const double b, const VectorSpaceVector &W) + { + // Check that casting will work. + Assert(dynamic_cast(&V)!=NULL, + ExcVectorTypeNotCompatible()); + // Check that casting will work. + Assert(dynamic_cast(&W)!=NULL, + ExcVectorTypeNotCompatible()); + + // Downcast V. If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + // Downcast W. If it fails, an exception is thrown. + const Vector &down_W = dynamic_cast(W); + Assert(vector->Map().SameAs(down_V.trilinos_vector().Map()), + ExcDifferentParallelPartitioning()); + Assert(vector->Map().SameAs(down_W.trilinos_vector().Map()), + ExcDifferentParallelPartitioning()); + AssertIsFinite(a); + AssertIsFinite(b); + + const int ierr = vector->Update(a, down_V.trilinos_vector(), b, + down_W.trilinos_vector(), 1.); + Assert(ierr==0, ExcTrilinosError(ierr)); + } + + + + void Vector::sadd(const double s, const double a, + const VectorSpaceVector &V) + { + // Check that casting will work.
+ Assert(dynamic_cast(&V)!=NULL, + ExcVectorTypeNotCompatible()); + + *this *= s; + // Downcast V. If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + Vector tmp(down_V); + tmp *= a; + *this += tmp; + } + + + + void Vector::scale(const VectorSpaceVector &scaling_factors) + { + // Check that casting will work. + Assert(dynamic_cast(&scaling_factors)!=NULL, + ExcVectorTypeNotCompatible()); + + // Downcast scaling_factors. If it fails, an exception is thrown. + const Vector &down_scaling_factors = + dynamic_cast(scaling_factors); + Assert(vector->Map().SameAs(down_scaling_factors.trilinos_vector().Map()), + ExcDifferentParallelPartitioning()); + + const int ierr = vector->Multiply(1.0, down_scaling_factors.trilinos_vector(), + *vector, 0.0); + Assert(ierr==0, ExcTrilinosError(ierr)); + } + + + + void Vector::equ(const double a, const VectorSpaceVector &V) + { + // Check that casting will work. + Assert(dynamic_cast(&V)!=NULL, + ExcVectorTypeNotCompatible()); + + // Downcast V. If it fails, an exception is thrown. + const Vector &down_V = dynamic_cast(V); + // If we don't have the same map, copy. + if (vector->Map().SameAs(down_V.trilinos_vector().Map())==false) + this->sadd(0., a, V); + else + { + // Otherwise, just update + int ierr = vector->Update(a, down_V.trilinos_vector(), 0.); + Assert(ierr==0, ExcTrilinosError(ierr)); + } + } + + + + double Vector::l1_norm() + { + double norm(0.); + int ierr = vector->Norm1(&norm); + Assert(ierr==0, ExcTrilinosError(ierr)); + + return norm; + } + + + + double Vector::l2_norm() + { + double norm(0.); + int ierr = vector->Norm2(&norm); + Assert(ierr==0, ExcTrilinosError(ierr)); + + return norm; + } + + + + double Vector::linfty_norm() + { + double norm(0.); + int ierr = vector->NormInf(&norm); + Assert(ierr==0, ExcTrilinosError(ierr)); + + return norm; + } + + + + double Vector::add_and_dot(const double a, + const VectorSpaceVector &V, + const VectorSpaceVector &W) + { + this->add(a, V); + + return *this * W; + } + + + + Vector::size_type Vector::size() const + { +#ifndef DEAL_II_WITH_64BIT_INDICES + return vector->GlobalLength(); +#else + return vector->GlobalLength64(); +#endif + } + + + + MPI_Comm Vector::get_mpi_communicator() const + { + const Epetra_MpiComm *epetra_comm + = dynamic_cast(&(vector->Comm())); + return epetra_comm->GetMpiComm(); + } + + + + const ::dealii::IndexSet Vector::locally_owned_elements() const + { + const Epetra_Map *map = dynamic_cast(&(vector->Map())); + return IndexSet(*map); + } + + + + const Epetra_FEVector &Vector::trilinos_vector() const + { + return *vector; + } + + + + Epetra_FEVector &Vector::trilinos_vector() + { + return *vector; + } + + + + void Vector::print(std::ostream &out, + const unsigned int precision, + const bool scientific, + const bool across) const + { + AssertThrow(out, ExcIO()); + + // Get a representation of the vector and loop over all + // the elements + double *val; + int leading_dimension; + int ierr = vector->ExtractView(&val, &leading_dimension); + + Assert(ierr==0, ExcTrilinosError(ierr)); + out.precision (precision); + if (scientific) + out.setf(std::ios::scientific, std::ios::floatfield); + else + out.setf(std::ios::fixed, std::ios::floatfield); + + if (across) + for (size_type i=0; i<vector->MyLength(); ++i) + out << val[i] << ' '; + else + for (size_type i=0; i<vector->MyLength(); ++i) + out << val[i] << std::endl; + out << std::endl; + } + + + + std::size_t Vector::memory_consumption() const + { + return sizeof(*this) + vector->MyLength()*(sizeof(double)+ + sizeof(TrilinosWrappers::types::int_type)); + } + } +} + +DEAL_II_NAMESPACE_CLOSE + +#endif + +#endif diff --git a/tests/lac/la_vector_norms.cc b/tests/lac/la_vector_norms.cc index c9da83d144..73944b4ebd 100644 --- a/tests/lac/la_vector_norms.cc +++
b/tests/lac/la_vector_norms.cc @@ -14,7 +14,7 @@ // --------------------------------------------------------------------- -// checks that the l1, l2, lp norm is computed correctly for deal.II vectors +// checks that the l1 and l2 norm are computed correctly for deal.II vectors // (check the summation algorithm), including an accuracy test (should not // lose more than 1 decimal also for 200000 vector entries) diff --git a/tests/trilinos/epetra_vector_01.cc b/tests/trilinos/epetra_vector_01.cc new file mode 100644 index 0000000000..c8fe4ee422 --- /dev/null +++ b/tests/trilinos/epetra_vector_01.cc @@ -0,0 +1,215 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2015 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#include "../tests.h" +#include +#include +#include +#include +#include +#include + +// Check LinearAlgebra::EpetraWrappers::Vector assignement and the operator +// overloading. + + +void test() +{ + IndexSet parallel_partitioner_1(10); + IndexSet parallel_partitioner_2(10); + unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + if (rank==0) + { + parallel_partitioner_1.add_range(0,5); + parallel_partitioner_2.add_range(0,3); + } + else + { + parallel_partitioner_1.add_range(5,10); + parallel_partitioner_2.add_range(3,10); + } + parallel_partitioner_1.compress(); + parallel_partitioner_2.compress(); + LinearAlgebra::EpetraWrappers::Vector a; + LinearAlgebra::EpetraWrappers::Vector b(parallel_partitioner_1); + LinearAlgebra::EpetraWrappers::Vector c(b); + + AssertThrow(a.size()==0, ExcMessage("Vector has the wrong size.")); + AssertThrow(b.size()==10, ExcMessage("Vector has the wrong size.")); + AssertThrow(c.size()==10, ExcMessage("Vector has the wrong size.")); + + a.reinit(parallel_partitioner_2); + AssertThrow(a.size()==10, ExcMessage("Vector has the wrong size.")); + + AssertThrow(parallel_partitioner_1==b.locally_owned_elements(), + ExcMessage("IndexSet has been modified.")); + AssertThrow(parallel_partitioner_2==a.locally_owned_elements(), + ExcMessage("IndexSet has been modified.")); + + IndexSet read_write_index_set(10); + if (rank==0) + read_write_index_set.add_range(0,5); + else + read_write_index_set.add_range(5,10); + read_write_index_set.compress(); + + LinearAlgebra::ReadWriteVector read_write_1(read_write_index_set); + LinearAlgebra::ReadWriteVector read_write_2(read_write_index_set); + LinearAlgebra::ReadWriteVector read_write_3(read_write_index_set); + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + { + read_write_1[i] = i; + read_write_2[i] = 5.+i; + } + } + else + { + for (unsigned int i=5; i<10; ++i) + { + read_write_1[i] = i; + read_write_2[i] = 5.+i; + } + } + + a = read_write_2; + b = read_write_1; + c = read_write_2; + + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(read_write_2[i]==read_write_3[i], + ExcMessage("Vector a has been modified.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(read_write_2[i]==read_write_3[i], + 
ExcMessage("Vector a has been modified.")); + } + + read_write_3 = b; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(read_write_1[i]==read_write_3[i], + ExcMessage("Vector b has been modified.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(read_write_1[i]==read_write_3[i], + ExcMessage("Vector b has been modified.")); + } + + read_write_3 = c; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(read_write_2[i]==read_write_3[i], + ExcMessage("Vector c has been modified.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(read_write_2[i]==read_write_3[i], + ExcMessage("Vector c has been modified.")); + } + + + a *= 2; + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in operator *=.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in operator *=.")); + } + + c /= 2.; + read_write_3 = c; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(0.5*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in operator /=.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(0.5*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in operator /=.")); + } + + b += a; + read_write_3 = b; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(2.*read_write_2[i]+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in operator +=.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(2.*read_write_2[i]+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in operator +=.")); + } + + b -= c; + read_write_3 = b; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(1.5*read_write_2[i]+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in operator -=.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(1.5*read_write_2[i]+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in operator -=.")); + } + + b = read_write_1; + c = read_write_1; + const double val = b*c; + AssertThrow(val==285., ExcMessage("Problem in operator *.")); +} + + +int main(int argc, char **argv) +{ + std::ofstream logfile("output"); + deallog.attach(logfile); + deallog.depth_console(0); + deallog.threshold_double(1.e-10); + + Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1); + + deallog << "OK" < +#include +#include +#include +#include +#include + + +void test() +{ + IndexSet parallel_partitioner_1(10); + IndexSet parallel_partitioner_2(10); + unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + if (rank==0) + { + parallel_partitioner_1.add_range(0,5); + parallel_partitioner_2.add_range(0,3); + } + else + { + parallel_partitioner_1.add_range(5,10); + parallel_partitioner_2.add_range(3,10); + } + parallel_partitioner_1.compress(); + parallel_partitioner_2.compress(); + LinearAlgebra::EpetraWrappers::Vector a(parallel_partitioner_1); + LinearAlgebra::EpetraWrappers::Vector b(parallel_partitioner_1); + LinearAlgebra::EpetraWrappers::Vector c(parallel_partitioner_2); + + IndexSet read_write_index_set(10); + if (rank==0) + read_write_index_set.add_range(0,5); + else + read_write_index_set.add_range(5,10); + read_write_index_set.compress(); + + LinearAlgebra::ReadWriteVector read_write_1(read_write_index_set); + LinearAlgebra::ReadWriteVector read_write_2(read_write_index_set); + LinearAlgebra::ReadWriteVector read_write_3(read_write_index_set); + if (rank==0) + { + 
for (unsigned int i=0; i<5; ++i) + { + read_write_1[i] = i; + read_write_2[i] = 5.+i; + } + } + else + { + for (unsigned int i=5; i<10; ++i) + { + read_write_1[i] = i; + read_write_2[i] = 5.+i; + } + } + + a = read_write_1; + b = read_write_2; + c = read_write_2; + + a.add(1.); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(1.+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in add(scalar).")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(1.+read_write_1[i]==read_write_3[i], + ExcMessage("Problem in add(scalar).")); + } + + a.add(2.,b); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(1.+read_write_1[i]+2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in add(scalar,Vector).")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(1.+read_write_1[i]+2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in add(scalar,Vector).")); + } + + + LinearAlgebra::EpetraWrappers::Vector d(a); + a.add(2.,b,3.,d); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(3.+read_write_1[i]+8.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in add(scalar,Vector,scalar,Vector).")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(3.+read_write_1[i]+8.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in add(scalar,Vector,scalar,Vector).")); + } + + + a = read_write_1; + a.sadd(3.,2.,c); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(3.+read_write_1[i]+2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in sadd(scalar,scalar,Vector).")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(3.+read_write_1[i]+2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in sadd(scalar,scalar,Vector).")); + } + + + a = read_write_1; + a.scale(b); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(read_write_1[i]*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in scale.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(read_write_1[i]*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in scale.")); + } + + + a.equ(2.,c); + read_write_3 = a; + if (rank==0) + { + for (unsigned int i=0; i<5; ++i) + AssertThrow(2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in scale.")); + } + else + { + for (unsigned int i=5; i<10; ++i) + AssertThrow(2.*read_write_2[i]==read_write_3[i], + ExcMessage("Problem in scale.")); + } + + + AssertThrow(b.l1_norm()==45., ExcMessage("Problem in l1_norm.")); + + const double eps=1e-6; + AssertThrow(std::fabs(b.l2_norm()-16.881943016)
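For readers who want to try the new class outside the test suite, the following standalone snippet is a minimal usage sketch, not part of the patch. It assumes deal.II is configured with Trilinos and MPI and, like the tests above, is run on exactly two MPI processes; the names owned, rw, and v are illustrative. It creates a distributed EpetraWrappers::Vector from an IndexSet, fills it through a ReadWriteVector, and computes a norm.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/read_write_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
  const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

  // Two processes each own one half of a vector of global size 10.
  IndexSet owned(10);
  owned.add_range(rank==0 ? 0 : 5, rank==0 ? 5 : 10);
  owned.compress();

  // Distributed vector backed by an Epetra_FEVector.
  LinearAlgebra::EpetraWrappers::Vector v(owned, MPI_COMM_WORLD);

  // Element access goes through a ReadWriteVector: fill the locally owned
  // entries, then copy them into the distributed vector.
  LinearAlgebra::ReadWriteVector<double> rw(owned);
  for (IndexSet::ElementIterator it=owned.begin(); it!=owned.end(); ++it)
    rw[*it] = *it;
  v = rw;

  // Vector-space operations and reductions act on the distributed data.
  v *= 2.;
  const double l1 = v.l1_norm();   // 2*(0+1+...+9) = 90 on every process
  (void) l1;

  // Copy the (scaled) locally owned entries back for inspection.
  rw = v;

  return 0;
}

Note that the class declared in this patch exposes no direct element access; all element-wise reads and writes are routed through a ReadWriteVector via the two assignment operators added here, which is also how the tests above exercise it.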