void
ConstraintMatrix::distribute (VectorType &vec) const
{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- // if the vector type supports parallel storage and if the
- // vector actually does store only part of the vector, distributing
- // is slightly more complicated. we can skip the complicated part
- // if the local processor stores the *entire* vector and pretend
- // that this is a sequential vector but we need to pay attention that
- // in that case the other processors don't actually do anything (in
- // particular that they do not call compress because the processor
- // that owns everything doesn't do so either); this is the first if
- // case here, the second is for the complicated case, the last else
- // is for the simple case (sequential vector or distributed vector
- // where the current processor stores everything)
- if ((vec.supports_distributed_data == true)
- &&
- (vec.locally_owned_elements().n_elements() == 0))
- {
- // do nothing, in particular don't call compress()
- }
- else if ((vec.supports_distributed_data == true)
- &&
- (vec.locally_owned_elements() != complete_index_set(vec.size())))
- {
- const IndexSet vec_owned_elements = vec.locally_owned_elements();
-
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-
-#ifdef DEBUG
- {
- // the algorithm below pushes contributions c_ij x_j to a constrained
- // DoF x_j *from the side of the owner of x_j*, as opposed to pulling
- // it from the owner of the target side x_i. this relies on the
- // assumption that both target and source know about the constraint
- // on x_i.
- //
- // the disadvantage is that it is incredibly difficult to find out what
- // is happening if the assumption is not satisfied. to help debug
- // problems of this kind, we do the following in a first step if
- // debugging is enabled:
- // - every processor who owns an x_j where c_ij!=0 sends a one
- // to the owner of x_i to add up
- // - the owner of x_i knows how many nonzero entries exist, so can
- // verify that the correct number of ones has been added
- // - if the sum is not correct, then apparently one of the owners
- // of the x_j's did not know to send its one and, consequently,
- // will also not know to send the correct c_ij*x_j later on,
- // leading to a bug
- set_zero (vec);
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if (vec_owned_elements.is_element (it->entries[i].first))
- vec(it->line) += 1;
- vec.compress (VectorOperation::add);
-
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- if (vec_owned_elements.is_element(it->line))
- Assert (vec(it->line) == it->entries.size(),
- ExcIncorrectConstraint(it->line, it->entries.size()));
- }
-#endif
+ Assert (sorted == true, ExcMatrixNotClosed());
- // set the values of all constrained DoFs to zero, then add the
- // contributions for each constrained DoF of all the ones we
- // own locally
- set_zero (vec);
-
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if (vec_owned_elements.is_element(it->entries[i].first))
- vec(it->line) += it->entries[i].second *
- vec(it->entries[i].first);
-
- // in a final step, add the inhomogeneities to the elements we
- // own locally
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- if (vec_owned_elements.is_element(it->line))
- vec(it->line) += it->inhomogeneity;
-
- // now compress to communicate the entries that we added to
- // and that weren't to local processors to the owner
- vec.compress (VectorOperation::add);
- }
- else
- // purely sequential vector (either because the type doesn't
- // support anything else or because it's completely stored
- // locally
+ std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+ for (; next_constraint != lines.end(); ++next_constraint)
{
- std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- for (; next_constraint != lines.end(); ++next_constraint)
- {
- // fill entry in line
- // next_constraint.line by adding the
- // different contributions
- typename VectorType::value_type
- new_value = next_constraint->inhomogeneity;
- for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
- new_value += (static_cast<typename VectorType::value_type>
- (vec(next_constraint->entries[i].first)) *
- next_constraint->entries[i].second);
- Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
- vec(next_constraint->line) = new_value;
- }
+      // fill the entry in line next_constraint->line by adding up
+      // the different contributions
+ typename VectorType::value_type
+ new_value = next_constraint->inhomogeneity;
+ for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+ new_value += (static_cast<typename VectorType::value_type>
+ (vec(next_constraint->entries[i].first)) *
+ next_constraint->entries[i].second);
+ Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+ vec(next_constraint->line) = new_value;
}
}
//---------------------------------------------------------------------------
// $Id$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
+#ifdef DEAL_II_WITH_TRILINOS
+
+// this is a specialization for a parallel (non-block) Trilinos vector. The
+// basic idea is to just work on the local range of the vector. But we need
+// access to values that the local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+
+ //TODO: not implemented yet, we need to fix LocalRange() first to only
+ //include "owned" indices. For this we need to keep track of the owned
+ //indices, because Trilinos doesn't. Use same constructor interface as in
+ //PETSc with two IndexSets!
+ AssertThrow (vec.vector_partitioner().IsOneToOne(),
+ ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
+
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.local_range().first;
+ const constraint_iterator begin_my_constraints =
+ Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.local_range().second;
+ const constraint_iterator end_my_constraints
+ = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
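+
+  // note: the two binary searches above rely on the member 'lines' being
+  // sorted by the index of the constrained DoF, which close() guarantees
+  // (and which the Assert above checks); they therefore bracket exactly
+  // those constraints whose constrained DoF lies in the local range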
+
+  // Here we collect all the indices that we need read access to: the
+  // locally owned entries and all entries that the local constraints
+  // refer to.
+ IndexSet my_indices (vec.size());
+ {
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+ }
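+
+  // as a concrete illustration (with hypothetical numbers): if this
+  // process owns the range [100,200) and DoF 150 is constrained to DoFs
+  // 40 and 170, then my_indices now contains [100,200) plus the single
+  // off-processor index 40, which is all we need read access to below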
+
+#ifdef DEAL_II_WITH_MPI
+ const Epetra_MpiComm *mpi_comm
+ = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
+
+ Assert (mpi_comm != 0, ExcInternalError());
+
+ TrilinosWrappers::MPI::Vector vec_distribute
+ (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
+#else
+ TrilinosWrappers::MPI::Vector vec_distribute
+ (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
+#endif
+
+  // here we import the data, including the off-processor entries collected above
+ vec_distribute.reinit(vec,false,true);
+
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+      // fill the entry in line it->line by adding up the different
+      // contributions
+ double new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (vec_distribute(it->entries[i].first) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+
+  // some processes might not apply any constraints, so we need to state
+  // explicitly that all the other processes do an insert here:
+ vec.compress (::dealii::VectorOperation::insert);
+}
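+
+
+// As an illustrative sketch (not part of this file), a typical use of the
+// specialization above is to fix up a parallel solution vector once the
+// condensed linear system has been solved:
+//
+//   ConstraintMatrix constraints;
+//   // ... fill via add_line()/add_entry(), then:
+//   constraints.close ();
+//   // ... assemble and solve the condensed system ...
+//   constraints.distribute (solution);
+//
+// where 'solution' is a hypothetical TrilinosWrappers::MPI::Vector whose
+// parallel partitioning is one-to-one, as required by the AssertThrow
+// above.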
+
+
+
+template<>
+void
+ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+
+ IndexSet my_indices (vec.size());
+ for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ {
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.block(block).local_range().first
+ +vec.get_block_indices().block_start(block);
+ const constraint_iterator begin_my_constraints =
+ Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.block(block).local_range().second
+ +vec.get_block_indices().block_start(block);
+
+ const constraint_iterator end_my_constraints
+ = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+      // Here we collect all the indices that we need read access to: the
+      // locally owned entries and all entries that the local constraints
+      // refer to. No caching is done yet; that would require some more
+      // clever data structures.
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.block(block).local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+ }
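+
+  // note that the lower_bound searches above add the block offset
+  // (block_start) to the within-block local range, so the constraints are
+  // bracketed in the global, non-block numbering that the constraint
+  // lines use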
+
+#ifdef DEAL_II_WITH_MPI
+ const Epetra_MpiComm *mpi_comm
+ = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
+
+ Assert (mpi_comm != 0, ExcInternalError());
+
+ TrilinosWrappers::MPI::Vector vec_distribute
+ (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
+#else
+ TrilinosWrappers::MPI::Vector vec_distribute
+ (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
+#endif
+
+  // here we import the data, including the off-processor entries collected above
+ vec_distribute.reinit(vec,true);
+
+ for (unsigned int block=0; block<vec.n_blocks(); ++block)
+ {
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.block(block).local_range().first
+ +vec.get_block_indices().block_start(block);
+ const constraint_iterator begin_my_constraints =
+ Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.block(block).local_range().second
+ +vec.get_block_indices().block_start(block);
+
+ const constraint_iterator end_my_constraints
+ = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+      // fill the entry in line it->line by adding up the different
+      // contributions
+ double new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (vec_distribute(it->entries[i].first) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+ vec.block(block).compress(::dealii::VectorOperation::insert);
+ }
+}
+
+#endif
+
+#ifdef DEAL_II_WITH_PETSC
+
+// this is a specialization for a parallel (non-block) PETSc vector. The
+// basic idea is to just work on the local range of the vector. But we need
+// access to values that the local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+ Assert (vec.has_ghost_elements() == false,
+ ExcMessage ("This operation can only be performed on vectors without ghost elements."));
+
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.local_range().first;
+ const constraint_iterator begin_my_constraints =
+ Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.local_range().second;
+ const constraint_iterator end_my_constraints
+ = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ // all indices we need to read from
+ IndexSet my_indices (vec.size());
+
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+
+ IndexSet local_range_is (vec.size());
+ local_range_is.add_range(local_range.first, local_range.second);
+
+
+ // create a vector and import those indices
+ PETScWrappers::MPI::Vector ghost_vec (vec.get_mpi_communicator(),
+ local_range_is,
+ my_indices);
+ ghost_vec = vec;
+ ghost_vec.update_ghost_values();
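+
+  // at this point, ghost_vec mirrors the locally owned range of vec and,
+  // after the copy and update_ghost_values() above, also provides read
+  // access to the off-processor entries in my_indices, which is all the
+  // distribution loop below needs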
+
+ // finally do the distribution on own constraints
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+      // fill the entry in line it->line by adding up the different
+      // contributions
+ PetscScalar new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+
+ vec.compress (VectorOperation::insert);
+}
+
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+{
+  Assert (sorted == true, ExcMatrixNotClosed());
+ AssertThrow (false, ExcNotImplemented());
+}
+
+#endif
+
+
+
bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
{
if (is_constrained(index) == false)
VectorType &, \
const FullMatrix<double> &) const; \
template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
- VectorType &uncondensed) const
+ VectorType &uncondensed) const;\
+ template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
template void ConstraintMatrix:: \
const FullMatrix<double> &) const
+// TODO: Can PETSc really do all the operations required by the above
+// condense/distribute functions etc. also on distributed vectors? Trilinos
+// can't do that - we would have to rewrite those functions by hand if we
+// want to use them. The key is to use local ranges etc., which still needs
+// to be implemented.
#ifdef DEAL_II_WITH_PETSC
VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);