<h3>Specific improvements</h3>
<ol>
+<li> New: ConstraintMatrix::distribute is now also implemented for
+arguments of type PETScWrappers::MPI::BlockVector.
+<br>
+(Wolfgang Bangerth, 2013/05/25)
+</li>
+
<li> Fixed: IndexSet::operator== returned the wrong results
in some cases.
<br>
void
ConstraintMatrix::distribute (VectorType &vec) const
{
- Assert (sorted == true, ExcMatrixNotClosed());
+ Assert (sorted==true, ExcMatrixIsClosed());
+
+ // if the vector type supports parallel storage and if the
+ // vector actually stores its elements in a distributed way, we
+ // need to be a bit more careful about how we do things
+ if ((vec.supports_distributed_data == true)
+ &&
+ (vec.locally_owned_elements() != complete_index_set(vec.size())))
+ {
+ const IndexSet vec_owned_elements = vec.locally_owned_elements();
- std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
- for (; next_constraint != lines.end(); ++next_constraint)
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+
+#ifdef DEBUG
+ {
+ // the algorithm below pushes contributions c_ij x_j to a constrained
+ // DoF x_i *from the side of the owner of x_j*, as opposed to having
+ // the owner of x_i pull them in. this relies on the
+ // assumption that both target and source know about the constraint
+ // on x_i.
+ //
+ // the disadvantage is that it is incredibly difficult to find out what
+ // is happening if the assumption is not satisfied. to help debug
+ // problems of this kind, we do the following in a first step if
+ // debugging is enabled:
+ // - every processor that owns an x_j where c_ij!=0 sends a one
+ // to the owner of x_i to add up
+ // - the owner of x_i knows how many nonzero entries exist, so it can
+ // verify that the correct number of ones has been added
+ // - if the sum is not correct, then apparently one of the owners
+ // of the x_j's did not know to send its one and, consequently,
+ // will also not know to send the correct c_ij*x_j later on,
+ // leading to a bug
+ set_zero (vec);
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if (vec_owned_elements.is_element (it->entries[i].first))
+ vec(it->line) += 1;
+ vec.compress (VectorOperation::add);
+
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ if (vec_owned_elements.is_element(it->line))
+ Assert (vec(it->line) == it->entries.size(),
+ ExcIncorrectConstraint(it->line, it->entries.size()));
+ }
+#endif
+
+ // set the values of all constrained DoFs to zero; then, for each
+ // constrained DoF, add the contributions that stem from entries
+ // we own locally
+ set_zero (vec);
+
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if (vec_owned_elements.is_element(it->entries[i].first))
+ vec(it->line) += it->entries[i].second *
+ vec(it->entries[i].first);
+
+ // in a final step, add the inhomogeneities to the elements we
+ // own locally
+ for (constraint_iterator it = lines.begin();
+ it != lines.end(); ++it)
+ if (vec_owned_elements.is_element(it->line))
+ vec(it->line) += it->inhomogeneity;
+
+ // now compress to communicate the contributions we added to rows
+ // that are owned by other processors
+ vec.compress (VectorOperation::add);
+ }
+ else
+ // purely sequential vector (either because the type doesn't
+ // support anything else or because it's completely stored
+ // locally)
{
- // fill entry in line
- // next_constraint.line by adding the
- // different contributions
- typename VectorType::value_type
- new_value = next_constraint->inhomogeneity;
- for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
- new_value += (static_cast<typename VectorType::value_type>
- (vec(next_constraint->entries[i].first)) *
- next_constraint->entries[i].second);
- Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
- vec(next_constraint->line) = new_value;
+ std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+ for (; next_constraint != lines.end(); ++next_constraint)
+ {
+ // fill entry in line
+ // next_constraint.line by adding the
+ // different contributions
+ typename VectorType::value_type
+ new_value = next_constraint->inhomogeneity;
+ for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+ new_value += (static_cast<typename VectorType::value_type>
+ (vec(next_constraint->entries[i].first)) *
+ next_constraint->entries[i].second);
+ Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+ vec(next_constraint->line) = new_value;
+ }
}
}
-#ifdef DEAL_II_WITH_TRILINOS
-
-// this is a specialization for a parallel (non-block) Trilinos vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- //TODO: not implemented yet, we need to fix LocalRange() first to only
- //include "owned" indices. For this we need to keep track of the owned
- //indices, because Trilinos doesn't. Use same constructor interface as in
- //PETSc with two IndexSets!
- AssertThrow (vec.vector_partitioner().IsOneToOne(),
- ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
-
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.local_range().first;
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.local_range().second;
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- // Here we search all the indices that we need to have read-access to -
- // the local nodes and all the nodes that the constraints indicate.
- IndexSet my_indices (vec.size());
- {
- const std::pair<unsigned int, unsigned int>
- local_range = vec.local_range();
-
- my_indices.add_range (local_range.first, local_range.second);
-
- std::set<unsigned int> individual_indices;
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if ((it->entries[i].first < local_range.first)
- ||
- (it->entries[i].first >= local_range.second))
- individual_indices.insert (it->entries[i].first);
-
- my_indices.add_indices (individual_indices.begin(),
- individual_indices.end());
- }
-
-#ifdef DEAL_II_WITH_MPI
- const Epetra_MpiComm *mpi_comm
- = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
-
- Assert (mpi_comm != 0, ExcInternalError());
-
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
- // here we import the data
- vec_distribute.reinit(vec,false,true);
-
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- {
- // fill entry in line next_constraint.line by adding the different
- // contributions
- double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (vec_distribute(it->entries[i].first) *
- it->entries[i].second);
- vec(it->line) = new_value;
- }
-
- // some processes might not apply constraints, so we need to explicitly
- // state, that the others are doing an insert here:
- vec.compress (::dealii::VectorOperation::insert);
-}
-
-
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- IndexSet my_indices (vec.size());
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
- {
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.block(block).local_range().first
- +vec.get_block_indices().block_start(block);
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.block(block).local_range().second
- +vec.get_block_indices().block_start(block);
-
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- // Here we search all the indices that we need to have read-access to
- // - the local nodes and all the nodes that the constraints indicate.
- // No caching done yet. would need some more clever data structures
- // for doing that.
- const std::pair<unsigned int, unsigned int>
- local_range = vec.block(block).local_range();
-
- my_indices.add_range (local_range.first, local_range.second);
-
- std::set<unsigned int> individual_indices;
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if ((it->entries[i].first < local_range.first)
- ||
- (it->entries[i].first >= local_range.second))
- individual_indices.insert (it->entries[i].first);
-
- my_indices.add_indices (individual_indices.begin(),
- individual_indices.end());
- }
-
-#ifdef DEAL_II_WITH_MPI
- const Epetra_MpiComm *mpi_comm
- = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
-
- Assert (mpi_comm != 0, ExcInternalError());
-
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
- TrilinosWrappers::MPI::Vector vec_distribute
- (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
- // here we import the data
- vec_distribute.reinit(vec,true);
-
- for (unsigned int block=0; block<vec.n_blocks(); ++block)
- {
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
- ConstraintLine index_comparison;
- index_comparison.line = vec.block(block).local_range().first
- +vec.get_block_indices().block_start(block);
- const constraint_iterator begin_my_constraints =
- Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
- index_comparison.line = vec.block(block).local_range().second
- +vec.get_block_indices().block_start(block);
-
- const constraint_iterator end_my_constraints
- = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
- for (constraint_iterator it = begin_my_constraints;
- it != end_my_constraints; ++it)
- {
- // fill entry in line next_constraint.line by adding the
- // different contributions
- double new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (vec_distribute(it->entries[i].first) *
- it->entries[i].second);
- vec(it->line) = new_value;
- }
- vec.block(block).compress(::dealii::VectorOperation::insert);
- }
-}
-
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-
-// this is a specialization for a parallel (non-block) PETSc vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
- Assert (vec.has_ghost_elements() == false,
- ExcMessage ("This operation can only be performed on vectors without ghost elements."));
-
- IndexSet vec_owned_elements (vec.size());
- vec_owned_elements.add_range(vec.local_range().first,
- vec.local_range().second);
-
- typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-
-#ifdef DEBUG
- if (vec.supports_distributed_data == true)
- {
- // the algorithm below pushes contributions c_ij x_j to a constrained
- // DoF x_j *from the side of the owner of x_j*, as opposed to pulling
- // it from the owner of the target side x_i. this relies on the
- // assumption that both target and source know about the constraint
- // on x_i.
- //
- // the disadvantage is that it is incredibly difficult to find out what
- // is happening if the assumption is not satisfied. to help debug
- // problems of this kind, we do the following in a first step if
- // debugging is enabled:
- // - every processor who owns an x_j where c_ij!=0 sends a one
- // to the owner of x_i to add up
- // - the owner of x_i knows how many nonzero entries exist, so can
- // verify that the correct number of ones has been added
- // - if the sum is not correct, then apparently one of the owners
- // of the x_j's did not know to send its one and, consequently,
- // will also not know to send the correct c_ij*x_j later on,
- // leading to a bug
- set_zero (vec);
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if (vec_owned_elements.is_element (it->entries[i].first))
- vec(it->line) += 1;
- vec.compress (VectorOperation::add);
-
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- if (vec_owned_elements.is_element(it->line))
- Assert (vec(it->line) == it->entries.size(),
- ExcIncorrectConstraint(it->line, it->entries.size()));
- }
-#endif
-
- // set the values of all constrained DoFs to zero, then add the
- // contributions for each constrained DoF of all the ones we
- // own locally
- set_zero (vec);
-
- for (constraint_iterator it = lines.begin();
- it != lines.end(); ++it)
- for (unsigned int i=0; i<it->entries.size(); ++i)
- if (vec_owned_elements.is_element(it->entries[i].first))
- vec(it->line) += it->entries[i].second *
- vec(it->entries[i].first);
-
- // now compress to communicate the entries that we added to
- // and that weren't to local processors to the owner
- vec.compress (VectorOperation::add);
-}
-
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
-{
- Assert (sorted==true, ExcMatrixIsClosed());
- AssertThrow (false, ExcNotImplemented());
-}
-
-#endif
-
-
-
bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
{
if (is_constrained(index) == false)
VectorType &, \
const FullMatrix<double> &) const; \
template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
- VectorType &uncondensed) const;\
- template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
+ VectorType &uncondensed) const
#define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
template void ConstraintMatrix:: \
const FullMatrix<double> &) const
-// TODO: Can PETSc really do all the operations required by the above
-// condense/distribute function etc also on distributed vectors? Trilinos
-// can't do that - we have to rewrite those functions by hand if we want to
-// use them. The key is to use local ranges etc., which still needs to be
-// implemented.
#ifdef DEAL_II_WITH_PETSC
VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
template void ConstraintMatrix::distribute_local_to_global<T<S> > (
const Vector<double>&, const std::vector<unsigned int> &, T<S> &, const FullMatrix<double>&) const;
template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
- template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
}
template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
const Vector<double>&, const std::vector<unsigned int> &, parallel::distributed::T<S> &, const FullMatrix<double>&) const;
template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
- template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
}
template void ConstraintMatrix::distribute_local_to_global<V > (
const Vector<double>&, const std::vector<unsigned int> &, V&, const FullMatrix<double>&) const;
template void ConstraintMatrix::distribute<V >(const V&, V&) const;
- template void ConstraintMatrix::distribute<V >(V&) const;
template void ConstraintMatrix::set_zero<V >(V&) const;
}
template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
}
+
+
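+// Instantiate the whole-vector version of distribute() in one place for
+// every vector type in the SERIAL_VECTORS list, instead of repeating the
+// instantiation in each of the blocks above.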
+for (Vec : SERIAL_VECTORS)
+ {
+ template void ConstraintMatrix::distribute<Vec>(Vec &) const;
+ }
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a petsc vector
+//
+// like _01, but with an inhomogeneity
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ // create a vector that consists of elements indexed from 0 to n
+ PETScWrappers::MPI::Vector vec (MPI_COMM_WORLD, 100 * n_processes, 100);
+ Assert (vec.local_size() == 100, ExcInternalError());
+ Assert (vec.local_range().first == 100*myid, ExcInternalError());
+ Assert (vec.local_range().second == 100*myid+100, ExcInternalError());
+ for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+ vec(i) = i;
+ vec.compress();
+
+ // verify correctness so far
+ {
+ double exact_l1 = 0;
+ for (unsigned int i=0; i<vec.size(); ++i)
+ exact_l1 += i;
+ Assert (vec.l1_norm() == exact_l1, ExcInternalError());
+ }
+
+
+ // create a ConstraintMatrix with a range that exceeds the locally
+ // owned range by 50 on each side
+ IndexSet locally_relevant_range (vec.size());
+ locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+ std::min (100*myid+150, vec.size()));
+ ConstraintMatrix cm (locally_relevant_range);
+
+ // add constraints that constrain an element in the middle of the
+ // local range of each processor against an element outside, both in
+ // the ghost range before and after
+ //
+ // note that we tell each processor about all constraints, but most
+ // of them will throw away this information since it is not for a
+ // DoF inside the locally relevant range
+ for (unsigned int p=0; p<n_processes; ++p)
+ {
+ if ((p != 0) && locally_relevant_range.is_element (p*100+10))
+ {
+ cm.add_line (p*100+10);
+ cm.add_entry (p*100+10,
+ p*100-25,
+ 1);
+ cm.set_inhomogeneity (p*100+10, 1);
+ }
+
+ if ((p != n_processes-1) && locally_relevant_range.is_element (p*100+90))
+ {
+ cm.add_line (p*100+90);
+ cm.add_entry (p*100+90,
+ p*100+105,
+ 1);
+ cm.set_inhomogeneity (p*100+90, -1);
+ }
+ }
+ cm.close ();
+
+ // now distribute these constraints
+ cm.distribute (vec);
+
+ // verify correctness
+ vec.compress ();
+
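+ // the constrained entries should now equal the value of the entry they
+ // are constrained to, shifted by the respective inhomogeneity (+1 or -1)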
+ if (myid != 0)
+ Assert (vec(vec.local_range().first+10) == vec.local_range().first-25+1,
+ ExcInternalError());
+
+ if (myid != n_processes-1)
+ Assert (vec(vec.local_range().first+90) == vec.local_range().first+105-1,
+ ExcInternalError());
+
+ for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+ {
+ if ((i != vec.local_range().first+10)
+ &&
+ (i != vec.local_range().first+90))
+ {
+ double val = vec(i);
+ Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+ }
+ }
+
+ {
+ double exact_l1 = 0;
+
+ // add up original values of vector entries
+ for (unsigned int i=0; i<vec.size(); ++i)
+ exact_l1 += i;
+
+ // but then correct for the constrained values
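+ // (the +1 and -1 inhomogeneities applied to the two kinds of constrained
+ // entries cancel when summed over all processors, so they do not appear
+ // in this correction)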
+ for (unsigned int p=0; p<n_processes; ++p)
+ {
+ if (p != 0)
+ exact_l1 = exact_l1 - (p*100+10) + (p*100-25);
+ if (p != n_processes-1)
+ exact_l1 = exact_l1 - (p*100+90) + (p*100+105);
+ }
+
+ const double l1_norm = vec.l1_norm();
+ Assert (l1_norm == exact_l1, ExcInternalError());
+
+ if (myid == 0)
+ deallog << "Norm = " << l1_norm << std::endl;
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("petsc_distribute_01_inhomogenous").c_str());
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ test();
+ }
+ else
+ test();
+
+}
--- /dev/null
+
+DEAL:0::Norm = 4950.00
--- /dev/null
+
+DEAL:0::Norm = 499320.
--- /dev/null
+
+DEAL:0::Norm = 19880.0
--- /dev/null
+
+DEAL:0::Norm = 79740.0
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a trilinos vector
+//
+// like _01, but with an inhomogeneity
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ // create a vector that consists of elements indexed from 0 to n
+ TrilinosWrappers::MPI::Vector vec;
+ {
+ IndexSet is (100*n_processes);
+ is.add_range (100*myid, 100*myid+100);
+ vec.reinit (is, MPI_COMM_WORLD);
+ }
+ Assert (vec.local_size() == 100, ExcInternalError());
+ Assert (vec.local_range().first == 100*myid, ExcInternalError());
+ Assert (vec.local_range().second == 100*myid+100, ExcInternalError());
+ for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+ vec(i) = i;
+ vec.compress();
+
+ // verify correctness so far
+ {
+ double exact_l1 = 0;
+ for (unsigned int i=0; i<vec.size(); ++i)
+ exact_l1 += i;
+ Assert (vec.l1_norm() == exact_l1, ExcInternalError());
+ }
+
+
+ // create a ConstraintMatrix with a range that exceeds the locally
+ // owned range by 50 on each side
+ IndexSet locally_relevant_range (vec.size());
+ locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+ std::min (100*myid+150, vec.size()));
+ ConstraintMatrix cm (locally_relevant_range);
+
+ // add constraints that constrain an element in the middle of the
+ // local range of each processor against an element outside, both in
+ // the ghost range before and after
+ //
+ // note that we tell each processor about all constraints, but most
+ // of them will throw away this information since it is not for a
+ // DoF inside the locally relevant range
+ for (unsigned int p=0; p<n_processes; ++p)
+ {
+ if ((p != 0) && locally_relevant_range.is_element (p*100+10))
+ {
+ cm.add_line (p*100+10);
+ cm.add_entry (p*100+10,
+ p*100-25,
+ 1);
+ cm.set_inhomogeneity (p*100+10, 1);
+ }
+
+ if ((p != n_processes-1) && locally_relevant_range.is_element (p*100+90))
+ {
+ cm.add_line (p*100+90);
+ cm.add_entry (p*100+90,
+ p*100+105,
+ 1);
+ cm.set_inhomogeneity (p*100+90, -1);
+ }
+ }
+ cm.close ();
+
+ // now distribute these constraints
+ cm.distribute (vec);
+
+ // verify correctness
+ vec.compress ();
+
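+ // the constrained entries should now equal the value of the entry they
+ // are constrained to, shifted by the respective inhomogeneity (+1 or -1)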
+ if (myid != 0)
+ Assert (vec(vec.local_range().first+10) == vec.local_range().first-25+1,
+ ExcInternalError());
+
+ if (myid != n_processes-1)
+ Assert (vec(vec.local_range().first+90) == vec.local_range().first+105-1,
+ ExcInternalError());
+
+ for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+ {
+ if ((i != vec.local_range().first+10)
+ &&
+ (i != vec.local_range().first+90))
+ {
+ double val = vec(i);
+ Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+ }
+ }
+
+ {
+ double exact_l1 = 0;
+
+ // add up original values of vector entries
+ for (unsigned int i=0; i<vec.size(); ++i)
+ exact_l1 += i;
+
+ // but then correct for the constrained values
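+ // (the +1 and -1 inhomogeneities applied to the two kinds of constrained
+ // entries cancel when summed over all processors, so they do not appear
+ // in this correction)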
+ for (unsigned int p=0; p<n_processes; ++p)
+ {
+ if (p != 0)
+ exact_l1 = exact_l1 - (p*100+10) + (p*100-25);
+ if (p != n_processes-1)
+ exact_l1 = exact_l1 - (p*100+90) + (p*100+105);
+ }
+
+ const double l1_norm = vec.l1_norm();
+ Assert (l1_norm == exact_l1, ExcInternalError());
+
+ if (myid == 0)
+ deallog << "Norm = " << l1_norm << std::endl;
+ }
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("trilinos_distribute_01_inhomogenous").c_str());
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ test();
+ }
+ else
+ test();
+
+}
--- /dev/null
+
+DEAL:0::Norm = 4950.00
--- /dev/null
+
+DEAL:0::Norm = 499320.
--- /dev/null
+
+DEAL:0::Norm = 19880.0
--- /dev/null
+
+DEAL:0::Norm = 79740.0
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a petsc vector
+//
+// we do this by creating a vector where each processor has 100
+// elements but no ghost elements. then we add constraints on each
+// processor that constrain elements within each processor's local
+// range to ones outside. these have to be added on all
+ // processors. then call distribute() and verify that the result is
+ // correct.
+//
+// we use constraints of the form x_i = x_j with sequentially growing
+// x_j's so that we can verify the correctness analytically
+//
+// compared to the _01 test, here the ConstraintMatrix object acts on an index
+// set that only includes the locally owned vector elements, without any
+// overlap. this means that the owners of a source DoF in a constraint
+// do not know about the constraint if the constrained DoF is owned by
+// another processor. This is not allowed, and should trigger an
+// exception.
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ // create a vector that consists of elements indexed from 0 to n
+ PETScWrappers::MPI::Vector vec (MPI_COMM_WORLD, 100 * n_processes, 100);
+ Assert (vec.local_size() == 100, ExcInternalError());
+ Assert (vec.local_range().first == 100*myid, ExcInternalError());
+ Assert (vec.local_range().second == 100*myid+100, ExcInternalError());
+ for (unsigned int i=vec.local_range().first; i<vec.local_range().second; ++i)
+ vec(i) = i;
+ vec.compress();
+
+ // verify correctness so far
+ {
+ double exact_l1 = 0;
+ for (unsigned int i=0; i<vec.size(); ++i)
+ exact_l1 += i;
+ Assert (vec.l1_norm() == exact_l1, ExcInternalError());
+ }
+
+
+ // create a ConstraintMatrix with a range that only includes locally owned
+ // DoFs
+ IndexSet locally_owned_range (vec.size());
+ locally_owned_range.add_range (100*myid,
+ 100*myid+100);
+ ConstraintMatrix cm (locally_owned_range);
+
+ // add constraints that constrain an element in the middle of the
+ // local range of each processor against an element outside, both in
+ // the ghost range before and after
+ //
+ // note that we tell each processor about all constraints, but most
+ // of them will throw away this information since it is not for a
+ // DoF inside the locally relevant range
+ for (unsigned int p=0; p<n_processes; ++p)
+ {
+ if ((p != 0) && locally_owned_range.is_element (p*100+10))
+ {
+ cm.add_line (p*100+10);
+ cm.add_entry (p*100+10,
+ p*100-25,
+ 1);
+ }
+
+ if ((p != n_processes-1) && locally_owned_range.is_element (p*100+90))
+ {
+ cm.add_line (p*100+90);
+ cm.add_entry (p*100+90,
+ p*100+105,
+ 1);
+ }
+ }
+ cm.close ();
+
+ // now distribute these constraints. this should trigger an exception
+ bool exception_caught = false;
+ deal_II_exceptions::disable_abort_on_exception();
+ try
+ {
+ cm.distribute (vec);
+ }
+ catch (...)
+ {
+ exception_caught = true;
+ }
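+ // with a single processor no constraint references a DoF outside the
+ // locally owned range (in fact, none is added at all), so no exception
+ // is expected in that case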
+ if (n_processes > 1)
+ Assert (exception_caught == true, ExcInternalError());
+
+ if (myid == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("petsc_distribute_02").c_str());
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ test();
+ }
+ else
+ test();
+
+}
--- /dev/null
+
+DEAL:0::OK
--- /dev/null
+
+DEAL:0::OK
--- /dev/null
+
+DEAL:0::OK
--- /dev/null
+
+DEAL:0::OK