#include <deal.II/lac/block_sparse_matrix.h>
#include <deal.II/lac/parallel_vector.h>
#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/petsc_vector.h>
+#include <deal.II/lac/trilinos_vector.h>
#include <iomanip>
}
+namespace internal
+{
+ namespace
+ {
+ // create an output vector that consists of the input vector's locally owned
+ // elements plus some ghost elements that need to be imported from elsewhere
+ //
+ // this is an operation that is different for all vector types and so we
+ // need a few overloads
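+ //
+ // as a rough usage sketch (index values made up for illustration): a
+ // process that owns elements [0,50) of a vector of global size 100 but
+ // also needs read access to element 60 would do something like
+ //
+ //   IndexSet owned (100), needed (100);
+ //   owned.add_range (0, 50);
+ //   needed.add_range (0, 50);
+ //   needed.add_index (60);
+ //   import_vector_with_ghost_elements (vec, owned, needed, ghosted);
+ //
+ // after which 'ghosted' allows reading elements [0,50) as well as 60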
+#ifdef DEAL_II_WITH_TRILINOS
+ void
+ import_vector_with_ghost_elements (const TrilinosWrappers::MPI::Vector &vec,
+ const IndexSet &/*locally_owned_elements*/,
+ const IndexSet &needed_elements,
+ TrilinosWrappers::MPI::Vector &output)
+ {
+ output.reinit (needed_elements,
+ dynamic_cast<const Epetra_MpiComm *>(&vec.vector_partitioner().Comm())->GetMpiComm());
+ output = vec;
+ }
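+
+ // (note: in the overload above, it is presumably the copy assignment
+ // 'output = vec' that performs the actual data exchange, since the two
+ // vectors are built on different parallel partitionings)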
-template<class VectorType>
+
+ void
+ import_vector_with_ghost_elements (const TrilinosWrappers::Vector &vec,
+ const IndexSet &,
+ const IndexSet &,
+ TrilinosWrappers::Vector &output)
+ {
+ output.reinit (vec.size());
+ output = vec;
+ }
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+ void
+ import_vector_with_ghost_elements (const PETScWrappers::MPI::Vector &vec,
+ const IndexSet &locally_owned_elements,
+ const IndexSet &needed_elements,
+ PETScWrappers::MPI::Vector &output)
+ {
+ output.reinit (vec.get_mpi_communicator(), locally_owned_elements, needed_elements);
+ output = vec;
+ }
+
+
+
+ void
+ import_vector_with_ghost_elements (const PETScWrappers::Vector &vec,
+ const IndexSet &,
+ const IndexSet &,
+ PETScWrappers::Vector &output)
+ {
+ output.reinit (vec.size());
+ output = vec;
+ }
+#endif
+
+ template <typename number>
+ void
+ import_vector_with_ghost_elements (const parallel::distributed::Vector<number> &vec,
+ const IndexSet &locally_owned_elements,
+ const IndexSet &needed_elements,
+ parallel::distributed::Vector<number> &output)
+ {
+ output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
+ output = vec;
+ }
+
+
+ template <typename number>
+ void
+ import_vector_with_ghost_elements (const dealii::Vector<number> &vec,
+ const IndexSet &locally_owned_elements,
+ const IndexSet &needed_elements,
+ dealii::Vector<number> &output)
+ {
+ Assert (false, ExcMessage ("We shouldn't even get here!"));
+ }
+
+
+ // for block vectors, simply dispatch to the individual blocks
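+ //
+ // IndexSet::get_view(begin,end) extracts the indices that fall into
+ // [begin,end) and renumbers them to start at zero. e.g., with two
+ // blocks of size 50 each, a set containing the global index 60 yields,
+ // for the second block, a view of size 50 containing index 10 -- which
+ // is exactly the block-local numbering the per-block overloads expect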
+ template <class VectorType>
+ void
+ import_vector_with_ghost_elements (const BlockVectorBase<VectorType> &vec,
+ const IndexSet &locally_owned_elements,
+ const IndexSet &needed_elements,
+ BlockVectorBase<VectorType> &output)
+ {
+ types::global_dof_index block_start = 0;
+ for (unsigned int b=0; b<vec.n_blocks(); ++b)
+ {
+ import_vector_with_ghost_elements (vec.block(b),
+ locally_owned_elements.get_view (block_start, block_start+vec.block(b).size()),
+ needed_elements.get_view (block_start, block_start+vec.block(b).size()),
+ output.block(b));
+ block_start += vec.block(b).size();
+ }
+ }
+ }
+}
+
+
+template <class VectorType>
void
ConstraintMatrix::distribute (VectorType &vec) const
{
- Assert (sorted == true, ExcMatrixNotClosed());
+ Assert (sorted==true, ExcMatrixIsClosed());
+
+ // if the vector type supports parallel storage and the vector actually
+ // stores only part of its elements locally, distributing is slightly
+ // more complicated. we can skip the complicated part if the local
+ // processor stores the *entire* vector and pretend that this is a
+ // sequential vector, but we then need to pay attention that the other
+ // processors don't actually do anything (in particular that they do
+ // not call compress(), because the processor that owns everything
+ // doesn't do so either); this is the first if case here, the second
+ // is for the complicated case, the last else is for the simple case
+ // (sequential vector, or distributed vector where the current
+ // processor stores everything)
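+ //
+ // as a concrete example (assuming a typical parallel setup): a
+ // parallel::distributed::Vector<double> spread across several
+ // processors ends up in the first or second branch, depending on
+ // whether the current processor owns any elements at all, whereas a
+ // plain dealii::Vector<double> (whose supports_distributed_data flag
+ // is false) always ends up in the final else branch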
+ if ((vec.supports_distributed_data == true)
+ &&
+ (vec.locally_owned_elements().n_elements() == 0))
+ {
+ // do nothing, in particular don't call compress()
+ }
+ else if ((vec.supports_distributed_data == true)
+ &&
+ (vec.locally_owned_elements() != complete_index_set(vec.size())))
+ {
+ // This processor owns only part of the vector. One may think that
+ // every processor should be able to simply communicate those elements
+ // it owns, and that it knows act as sources for constrained DoFs, to
+ // the owners of those DoFs. This would lead to a scheme where all
+ // we need to do is to add some local elements to (possibly non-local) ones
+ // and then call compress().
+ //
+ // Alas, this scheme does not work, as evidenced by the disaster of bug #51
+ // (see http://code.google.com/p/dealii/issues/detail?id=51) and the
+ // reversion in r29662 of one attempt to implement it. Rather, we
+ // need to get a vector that has all the *sources* of the constraints we
+ // own locally, possibly as ghost vector elements, then read from them,
+ // and finally throw away the ghosted vector. This is implemented in the following.
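+ //
+ // a small worked example with made-up numbers: say the current
+ // processor owns element 3, constrained as x_3 = 0.5*x_60 + 0.5*x_61,
+ // with elements 60 and 61 owned by another processor. we then import
+ // 60 and 61 as ghost elements into 'ghosted_vector' below, evaluate
+ // the constraint locally, and write the result into the locally owned
+ // element 3 of 'vec'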
+ const IndexSet vec_owned_elements = vec.locally_owned_elements();
+ IndexSet needed_elements = vec_owned_elements;
+
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ if (vec_owned_elements.is_element(it->line))
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ needed_elements.add_index(it->entries[i].first);
+
+ VectorType ghosted_vector;
+ internal::import_vector_with_ghost_elements (vec,
+ vec_owned_elements, needed_elements,
+ ghosted_vector);
+
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ if (vec_owned_elements.is_element(it->line))
+ {
+ // fill the entry at index it->line by adding up the different
+ // contributions, read from the ghosted vector
+ typename VectorType::value_type
+ new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (static_cast<typename VectorType::value_type>
+ (ghosted_vector(it->entries[i].first)) *
+ it->entries[i].second);
+ Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+ vec(it->line) = new_value;
+ }
- std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- for (; next_constraint != lines.end(); ++next_constraint)
+ // now compress to communicate the entries that we added
+ // and that weren't local to their owning processors
+ //
+ // this shouldn't be strictly necessary but it probably doesn't
+ // hurt either
+ vec.compress (VectorOperation::insert);
+ }
+ else
+ // purely sequential vector (either because the type doesn't
+ // support anything else or because it's completely stored
+ // locally)
{
- // fill entry in line
- // next_constraint.line by adding the
- // different contributions
- typename VectorType::value_type
- new_value = next_constraint->inhomogeneity;
- for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
- new_value += (static_cast<typename VectorType::value_type>
- (vec(next_constraint->entries[i].first)) *
- next_constraint->entries[i].second);
- Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
- vec(next_constraint->line) = new_value;
+ std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+ for (; next_constraint != lines.end(); ++next_constraint)
+ {
+ // fill entry in line
+ // next_constraint.line by adding the
+ // different contributions
+ typename VectorType::value_type
+ new_value = next_constraint->inhomogeneity;
+ for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+ new_value += (static_cast<typename VectorType::value_type>
+ (vec(next_constraint->entries[i].first)) *
+ next_constraint->entries[i].second);
+ Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+ vec(next_constraint->line) = new_value;
+ }
}
}
//---------------------------------------------------------------------------
// $Id$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
-#ifdef DEAL_II_WITH_TRILINOS
-
-// this is a specialization for a parallel (non-block) Trilinos vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- //TODO: not implemented yet, we need to fix LocalRange() first to only
- //include "owned" indices. For this we need to keep track of the owned
- //indices, because Trilinos doesn't. Use same constructor interface as in
- //PETSc with two IndexSets!
- AssertThrow (vec.vector_partitioner().IsOneToOne(),
- ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
-
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.local_range().first;
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.local_range().second;
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- // Here we search all the indices that we need to have read-access to -
- // the local nodes and all the nodes that the constraints indicate.
- IndexSet my_indices (vec.size());
- {
- const std::pair<unsigned int, unsigned int>
- local_range = vec.local_range();
-
- my_indices.add_range (local_range.first, local_range.second);
-
- std::set<unsigned int> individual_indices;
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if ((it->entries[i].first < local_range.first)
- ||
- (it->entries[i].first >= local_range.second))
- individual_indices.insert (it->entries[i].first);
-
- my_indices.add_indices (individual_indices.begin(),
- individual_indices.end());
- }
-
-#ifdef DEAL_II_WITH_MPI
- const Epetra_MpiComm *mpi_comm
- = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
-
- Assert (mpi_comm != 0, ExcInternalError());
-
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
- // here we import the data
- vec_distribute.reinit(vec,false,true);
-
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- {
- // fill entry in line next_constraint.line by adding the different
- // contributions
- double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (vec_distribute(it->entries[i].first) *
- it->entries[i].second);
- vec(it->line) = new_value;
- }
-
- // some processes might not apply constraints, so we need to explicitly
- // state, that the others are doing an insert here:
- vec.compress (::dealii::VectorOperation::insert);
-}
-
-
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- IndexSet my_indices (vec.size());
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
- {
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.block(block).local_range().first
- +vec.get_block_indices().block_start(block);
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.block(block).local_range().second
- +vec.get_block_indices().block_start(block);
-
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- // Here we search all the indices that we need to have read-access to
- // - the local nodes and all the nodes that the constraints indicate.
- // No caching done yet. would need some more clever data structures
- // for doing that.
- const std::pair<unsigned int, unsigned int>
- local_range = vec.block(block).local_range();
-
- my_indices.add_range (local_range.first, local_range.second);
-
- std::set<unsigned int> individual_indices;
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if ((it->entries[i].first < local_range.first)
- ||
- (it->entries[i].first >= local_range.second))
- individual_indices.insert (it->entries[i].first);
-
- my_indices.add_indices (individual_indices.begin(),
- individual_indices.end());
- }
-
-#ifdef DEAL_II_WITH_MPI
- const Epetra_MpiComm *mpi_comm
- = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
-
- Assert (mpi_comm != 0, ExcInternalError());
-
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
- // here we import the data
- vec_distribute.reinit(vec,true);
-
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
- {
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.block(block).local_range().first
- +vec.get_block_indices().block_start(block);
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.block(block).local_range().second
- +vec.get_block_indices().block_start(block);
-
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- {
- // fill entry in line next_constraint.line by adding the
- // different contributions
- double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (vec_distribute(it->entries[i].first) *
- it->entries[i].second);
- vec(it->line) = new_value;
- }
- vec.block(block).compress(::dealii::VectorOperation::insert);
- }
-}
-
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-
-// this is a specialization for a parallel (non-block) PETSc vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
- Assert (vec.has_ghost_elements() == false,
- ExcMessage ("This operation can only be performed on vectors without ghost elements."));
-
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.local_range().first;
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.local_range().second;
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- // all indices we need to read from
- IndexSet my_indices (vec.size());
-
- const std::pair<unsigned int, unsigned int>
- local_range = vec.local_range();
-
- my_indices.add_range (local_range.first, local_range.second);
-
- std::set<unsigned int> individual_indices;
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if ((it->entries[i].first < local_range.first)
- ||
- (it->entries[i].first >= local_range.second))
- individual_indices.insert (it->entries[i].first);
-
- my_indices.add_indices (individual_indices.begin(),
- individual_indices.end());
-
- IndexSet local_range_is (vec.size());
- local_range_is.add_range(local_range.first, local_range.second);
-
-
- // create a vector and import those indices
- PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
- local_range_is,
- my_indices);
- ghost_vec = vec;
- ghost_vec.update_ghost_values();
-
- // finally do the distribution on own constraints
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- {
- // fill entry in line next_constraint.line by adding the different
- // contributions
- PetscScalar new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
- it->entries[i].second);
- vec(it->line) = new_value;
- }
-
- vec.compress (VectorOperation::insert);
-}
-
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
- AssertThrow (false, ExcNotImplemented());
-}
-
-#endif
-
-
-
bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
{
if (is_constrained(index) == false)
VectorType &, \
const FullMatrix<double> &) const; \
template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
- VectorType &uncondensed) const;\
- template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
+ VectorType &uncondensed) const
#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
template void ConstraintMatrix:: \
const FullMatrix<double> &) const
-// TODO: Can PETSc really do all the operations required by the above
-// condense/distribute function etc also on distributed vectors? Trilinos
-// can't do that - we have to rewrite those functions by hand if we want to
-// use them. The key is to use local ranges etc., which still needs to be
-// implemented.
#ifdef DEAL_II_WITH_PETSC
VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);