From: bangerth
Date: Mon, 7 Jun 2010 20:47:13 +0000 (+0000)
Subject: Take over many of the changes in base/ and lac/ from branch_distributed_grids.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=86dadaf67042063c530c538529e14d0c94a68fae;p=dealii-svn.git
Take over many of the changes in base/ and lac/ from branch_distributed_grids.
git-svn-id: https://svn.dealii.org/trunk@21162 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/base/include/base/config.h.in b/deal.II/base/include/base/config.h.in
index c5bb06ecc9..52a470e0b3 100644
--- a/deal.II/base/include/base/config.h.in
+++ b/deal.II/base/include/base/config.h.in
@@ -241,6 +241,9 @@
functionality is available. */
#undef DEAL_II_USE_MT_POSIX_NO_BARRIERS
+/* Defined if a MUMPS installation was found and is going to be used */
+#undef DEAL_II_USE_MUMPS
+
/* Defined if a PETSc installation was found and is going to be used */
#undef DEAL_II_USE_PETSC
@@ -254,9 +257,6 @@
/* Defined if a Trilinos installation was found and is going to be used */
#undef DEAL_II_USE_TRILINOS
-/* Defined if a MUMPS installation was found and is going to be used */
-#undef DEAL_II_USE_MUMPS
-
/* Define if vector iterators are just plain pointers */
#undef DEAL_II_VECTOR_ITERATOR_IS_POINTER
@@ -606,10 +606,9 @@
* compatibility; and (ii) DEAL_II_PETSC_VERSION_GTE is used to add
* functionality to the PETScWrappers that does not exist in previous
* versions of PETSc. Examples of usage can be found in
- * lac/source/petsc_matrix_base.h.
- *
- * Note: SLEPcWrappers do not need their own anological macros, since
- * SLEPc and PETSc must have identical version numbers anyways.
+ * lac/source/petsc_matrix_base.h. Note: SLEPcWrappers do not need
+ * their own analogous macros, since SLEPc and PETSc must have
+ * identical version numbers anyway.
*/
#define DEAL_II_PETSC_VERSION_LT(major,minor,subminor) \
((PETSC_VERSION_MAJOR * 10000 + \
@@ -626,6 +625,7 @@
(major)*10000 + (minor)*100 + (subminor))
#include
+#include
/**
* If the compiler supports the upcoming C++1x standard, allow us to refer
diff --git a/deal.II/base/include/base/index_set.h b/deal.II/base/include/base/index_set.h
index c5dad3458b..19311fefb1 100644
--- a/deal.II/base/include/base/index_set.h
+++ b/deal.II/base/include/base/index_set.h
@@ -224,6 +224,24 @@ class IndexSet
IndexSet get_view (const unsigned int begin,
const unsigned int end) const;
+
+ /**
+ * Removes all elements contained in @p
+ * other from this set. In other words,
+ * if $x$ is the current object and $o$
+ * the argument, then we compute $x
+ * \leftarrow x \backslash o$.
+ */
+ void subtract_set (const IndexSet & other);
+
+
+ /**
+ * Fills the given vector with all
+ * indices contained in this IndexSet.
+ */
+ void fill_index_vector(std::vector<unsigned int> & indices) const;
+
+
/**
* Outputs a text representation of this
* IndexSet to the given stream. Used for
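The two new IndexSet members above are easiest to see in a small usage sketch (not part of the patch; the include path and index values are only illustrative):

  #include <base/index_set.h>
  #include <vector>

  void example ()
  {
    IndexSet owned (100);              // index space [0,100)
    owned.add_range (0, 50);           // this process owns [0,50)

    IndexSet ghosts (100);
    ghosts.add_range (40, 60);

    ghosts.subtract_set (owned);       // ghosts now holds [50,60) only

    std::vector<unsigned int> indices;
    ghosts.fill_index_vector (indices);    // indices = {50, 51, ..., 59}
  }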
diff --git a/deal.II/base/include/base/parallel.h b/deal.II/base/include/base/parallel.h
index 3c0aa8dc0b..a2daa4ba43 100644
--- a/deal.II/base/include/base/parallel.h
+++ b/deal.II/base/include/base/parallel.h
@@ -37,13 +37,6 @@
DEAL_II_NAMESPACE_OPEN
-/**
- * A namespace in which we define a few algorithms that can run in parallel
- * when deal.II is configured to use multiple threads.
- *
- * @ingroup threads
- * @author Wolfgang Bangerth, 2008, 2009
- */
namespace parallel
{
namespace internal
diff --git a/deal.II/base/include/base/timer.h b/deal.II/base/include/base/timer.h
index 43462a7ee7..26775ee1a2 100644
--- a/deal.II/base/include/base/timer.h
+++ b/deal.II/base/include/base/timer.h
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -19,6 +19,7 @@
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
#include
+#include
#endif
#include
@@ -89,11 +90,89 @@ class Timer
*
* Starts the timer at 0 sec.
*
+ * If @p sync_wall_time is true, the wall
+ * time is synchronized between all CPUs
+ * using an MPI_Barrier() and a collective
+ * operation. Note that this only works
+ * if you stop() the timer before
+ * querying for the wall time. The time
+ * for the MPI operations is not
+ * included in the timing but may slow
+ * down your program.
+ *
* This constructor is only available
* if the deal.II compiler is an MPI
* compiler.
*/
- Timer (MPI_Comm mpi_communicator);
+ Timer (MPI_Comm mpi_communicator,
+ bool sync_wall_time = false);
+
+
+ /**
+ * Structure to save collective data
+ * measured by this timer class. Queried
+ * by get_data() or printed with
+ * print_data() after calling stop().
+ */
+ struct TimeMinMaxAvg
+ {
+ double sum;
+ double min;
+ double max;
+ unsigned int min_index;
+ unsigned int max_index;
+ double avg;
+
+ /**
+ * Set the time values to @p
+ * val, and the MPI rank to
+ * the given @p rank.
+ */
+ void set(const double val,
+ const unsigned int rank)
+ {
+ sum = min = max = val;
+ min_index = max_index = rank;
+ }
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ /**
+ * Given two structures
+ * indicating timer
+ * information, do the
+ * reduction by choosing the
+ * one with the longer time
+ * interval. This is a max
+ * operation and therefore a
+ * reduction.
+ *
+ * Arguments are passed as
+ * void pointers to satisfy
+ * the MPI requirement of
+ * reduction operations.
+ */
+ static void max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * );
+#endif
+ };
+
+ /**
+ * Returns a reference to the data
+ * structure with global timing
+ * information. Filled after calling
+ * stop().
+ */
+ const TimeMinMaxAvg & get_data() const;
+
+ /**
+ * Prints the data to the given stream.
+ */
+ template <class STREAM>
+ void print_data(STREAM & stream) const;
+
+
#endif
/**
@@ -200,6 +279,14 @@ class Timer
* running.
*/
MPI_Comm mpi_communicator;
+
+ /**
+ * Store whether the wall time is
+ * synchronized between machines.
+ */
+ bool sync_wall_time;
+
+ TimeMinMaxAvg mpi_data;
#endif
};
@@ -333,6 +420,10 @@ class TimerOutput
ConditionalOStream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type);
+
+
+
+
#endif
/**
@@ -472,6 +563,27 @@ class TimerOutput
/* ---------------- inline functions ----------------- */
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+const Timer::TimeMinMaxAvg & Timer::get_data() const
+{
+ return mpi_data;
+}
+
+template <class STREAM>
+void Timer::print_data(STREAM & stream) const
+{
+ unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+ if (my_id==0)
+ stream << mpi_data.max << " wall,"
+ << " max @" << mpi_data.max_index
+ << ", min=" << mpi_data.min << " @" << mpi_data.min_index
+ << ", avg=" << mpi_data.avg
+ << std::endl;
+}
+
+#endif
+
inline
void
TimerOutput::enter_section (const std::string &section_name)
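A minimal sketch of how the synchronized Timer is meant to be used (assumes an MPI-enabled build; the work between start and stop is only a placeholder):

  Timer timer (MPI_COMM_WORLD, /*sync_wall_time=*/ true);

  // ... do the parallel work to be measured ...

  timer.stop ();                                   // performs the MPI reduction
  const double slowest = timer.get_data ().max;    // wall time of the slowest rank
  timer.print_data (std::cout);                    // rank 0 prints max/min/avg wall time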
diff --git a/deal.II/base/include/base/utilities.h b/deal.II/base/include/base/utilities.h
index a0c69f54f4..ee7d56796c 100644
--- a/deal.II/base/include/base/utilities.h
+++ b/deal.II/base/include/base/utilities.h
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -245,7 +245,7 @@ namespace Utilities
* MPI.
*/
bool job_supports_mpi ();
-
+
/**
* Return the number of MPI processes
* there exist in the given communicator
@@ -266,6 +266,51 @@ namespace Utilities
*/
unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Consider an unstructured
+ * communication pattern where
+ * every process in an MPI
+ * universe wants to send some
+ * data to a subset of the other
+ * processors. To do that, the
+ * other processors need to know
+ * who to expect messages
+ * from. This function computes
+ * this information.
+ *
+ * @param mpi_comm A communicator
+ * that describes the processors
+ * that are going to communicate
+ * with each other.
+ *
+ * @param destinations The list
+ * of processors the current
+ * process wants to send
+ * information to. This list need
+ * not be sorted in any way. If
+ * it contains duplicate entries
+ * that means that multiple
+ * messages are intended for a
+ * given destination.
+ *
+ * @return A list of processors
+ * that have indicated that they
+ * want to send something to the
+ * current processor. The
+ * resulting list is not
+ * sorted. It may contain
+ * duplicate entries if
+ * processors enter the same
+ * destination more than once in
+ * their destinations list.
+ */
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations);
+
+
+
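A sketch of the intended call pattern (the ranks used here are made up): each process lists the ranks it wants to send to and gets back the ranks it will receive from:

  std::vector<unsigned int> destinations;
  destinations.push_back (1);
  destinations.push_back (2);

  const std::vector<unsigned int> origins
    = Utilities::System::compute_point_to_point_communication_pattern (MPI_COMM_WORLD,
                                                                       destinations);
  // 'origins' now lists the ranks that named this process among their
  // destinations, i.e. the senders we have to post receives for.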
/**
* Given a communicator, generate a new
* communicator that contains the same
@@ -502,7 +547,7 @@ namespace Utilities
*/
void
destroy_communicator (Epetra_Comm &communicator);
-
+
/**
* Return the number of MPI processes
* there exist in the given communicator
diff --git a/deal.II/base/source/index_set.cc b/deal.II/base/source/index_set.cc
index ac5e5cf71f..174c9ff591 100644
--- a/deal.II/base/source/index_set.cc
+++ b/deal.II/base/source/index_set.cc
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -13,6 +13,7 @@
#include
+#include
#ifdef DEAL_II_USE_TRILINOS
# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
@@ -75,6 +76,8 @@ IndexSet::do_compress () const
i != ranges.end();
++i)
{
+ Assert(i->begin < i->end, ExcInternalError());
+
i->nth_index_in_set = next_index;
next_index += (i->end - i->begin);
}
@@ -197,6 +200,103 @@ IndexSet::get_view (const unsigned int begin,
}
+void
+IndexSet::subtract_set (const IndexSet & other)
+{
+ compress();
+ other.compress();
+ is_compressed = false;
+
+
+ // we save new ranges to be added to our
+ // IndexSet in a temporary list and add
+ // all of them in one go at the end. This
+ // is necessary because a growing ranges
+ // vector invalidates iterators.
+ std::list<Range> temp_list;
+
+ std::vector<Range>::iterator own_it = ranges.begin();
+ std::vector<Range>::iterator other_it = other.ranges.begin();
+
+ while (own_it != ranges.end() && other_it != other.ranges.end())
+ {
+ //advance own iterator until we get an
+ //overlap
+ if (own_it->end <= other_it->begin)
+ {
+ ++own_it;
+ continue;
+ }
+ //we are done with other_it, so advance
+ if (own_it->begin >= other_it->end)
+ {
+ ++other_it;
+ continue;
+ }
+
+ //Now own_it and other_it overlap.
+ //First save the part of own_it that is
+ //before other_it (if not empty).
+ if (own_it->begin < other_it->begin)
+ {
+ Range r(own_it->begin, other_it->begin);
+ r.nth_index_in_set = 0; //fix warning of unused variable
+ temp_list.push_back(r);
+ }
+ // change own_it to the sub range
+ // behind other_it. Do not delete
+ // own_it in any case. As removal would
+ // invalidate iterators, we just shrink
+ // the range to an empty one.
+ own_it->begin = other_it->end;
+ if (own_it->begin > own_it->end)
+ {
+ own_it->begin = own_it->end;
+ ++own_it;
+ }
+
+ // continue without advancing
+ // iterators, the right one will be
+ // advanced next.
+ }
+
+ // Now delete all empty ranges we might
+ // have created.
+ for (std::vector<Range>::iterator it = ranges.begin();
+ it != ranges.end(); )
+ {
+ if (it->begin >= it->end)
+ it = ranges.erase(it);
+ else
+ ++it;
+ }
+
+ // done, now add the temporary ranges
+ for (std::list<Range>::iterator it = temp_list.begin();
+ it != temp_list.end();
+ ++it)
+ add_range(it->begin, it->end);
+
+ compress();
+}
+
+
+void IndexSet::fill_index_vector(std::vector<unsigned int> & indices) const
+{
+ compress();
+
+ indices.clear();
+ indices.reserve(n_elements());
+
+ for (std::vector<Range>::iterator it = ranges.begin();
+ it != ranges.end();
+ ++it)
+ for (unsigned int i=it->begin; i<it->end; ++i)
+ indices.push_back (i);
+
+ Assert (indices.size() == n_elements(), ExcInternalError());
+}
+
#ifdef DEAL_II_USE_TRILINOS
@@ -217,19 +317,14 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
#endif
else
{
- std::vector indices;
- indices.reserve(n_elements());
- for (std::vector::iterator
- i = ranges.begin();
- i != ranges.end();
- ++i)
- for (unsigned int j=i->begin; jend; ++j)
- indices.push_back (j);
- Assert (indices.size() == n_elements(), ExcInternalError());
+ std::vector<unsigned int> indices;
+ fill_index_vector(indices);
+
+ int * indices_ptr = reinterpret_cast<int*>(&indices[0]);
return Epetra_Map (-1,
n_elements(),
- &indices[0],
+ indices_ptr,
0,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm(communicator));
@@ -243,5 +338,4 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
#endif
-
DEAL_II_NAMESPACE_CLOSE
diff --git a/deal.II/base/source/timer.cc b/deal.II/base/source/timer.cc
index 8677131357..a41ce678f6 100644
--- a/deal.II/base/source/timer.cc
+++ b/deal.II/base/source/timer.cc
@@ -14,10 +14,12 @@
#include
#include
+#include
#include
#include
#include
#include
+#include
// these includes should probably be properly
// ./configure'd using the AC_HEADER_TIME macro:
@@ -46,6 +48,7 @@ Timer::Timer()
cumulative_wall_time (0.)
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
, mpi_communicator (MPI_COMM_SELF)
+ , sync_wall_time (false)
#endif
{
start();
@@ -56,11 +59,13 @@ Timer::Timer()
// in case we use an MPI compiler, use
// the communicator given from input
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-Timer::Timer(MPI_Comm mpi_communicator)
+Timer::Timer(MPI_Comm mpi_communicator,
+ bool sync_wall_time_)
:
cumulative_time (0.),
cumulative_wall_time (0.),
- mpi_communicator (mpi_communicator)
+ mpi_communicator (mpi_communicator),
+ sync_wall_time(sync_wall_time_)
{
start();
}
@@ -72,6 +77,11 @@ void Timer::start ()
{
running = true;
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (sync_wall_time)
+ MPI_Barrier(mpi_communicator);
+#endif
+
struct timeval wall_timer;
gettimeofday(&wall_timer, NULL);
start_wall_time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec;
@@ -86,6 +96,33 @@ void Timer::start ()
}
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+void Timer::TimeMinMaxAvg::max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * )
+{
+ const Timer::TimeMinMaxAvg * in_lhs = static_cast<const Timer::TimeMinMaxAvg*>(in_lhs_);
+ Timer::TimeMinMaxAvg * inout_rhs = static_cast<Timer::TimeMinMaxAvg*>(inout_rhs_);
+
+ Assert(*len==1, ExcInternalError());
+
+ inout_rhs->sum += in_lhs->sum;
+ if (inout_rhs->min>in_lhs->min)
+ {
+ inout_rhs->min = in_lhs->min;
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+ if (inout_rhs->max < in_lhs->max)
+ {
+ inout_rhs->max = in_lhs->max;
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+}
+
+
+#endif
double Timer::stop ()
{
@@ -105,8 +142,50 @@ double Timer::stop ()
struct timeval wall_timer;
gettimeofday(&wall_timer, NULL);
- cumulative_wall_time += wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
- - start_wall_time;
+ double time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
+ - start_wall_time;
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (sync_wall_time)
+ {
+ unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+
+ MPI_Op op;
+ int ierr = MPI_Op_create((MPI_User_function *)&Timer::TimeMinMaxAvg::max_reduce,
+ false, &op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ TimeMinMaxAvg in;
+ in.set(time, my_id);
+
+ MPI_Datatype type;
+ int lengths[]={3,2};
+ MPI_Aint displacements[]={0,offsetof(TimeMinMaxAvg, min_index)};
+ MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+ ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_commit(&type);
+
+ ierr = MPI_Reduce ( &in, &this->mpi_data, 1, type, op, 0, mpi_communicator );
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_free (&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Op_free(&op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ this->mpi_data.avg = this->mpi_data.sum / dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+
+ cumulative_wall_time += this->mpi_data.max;
+ }
+ else
+ cumulative_wall_time += time;
+#else
+ cumulative_wall_time += time;
+#endif
}
return cumulative_time;
}
diff --git a/deal.II/base/source/utilities.cc b/deal.II/base/source/utilities.cc
index e543ee3838..ab8e94d807 100644
--- a/deal.II/base/source/utilities.cc
+++ b/deal.II/base/source/utilities.cc
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -516,6 +516,74 @@ namespace Utilities
}
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations)
+ {
+ unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+ unsigned int n_procs = Utilities::System::get_n_mpi_processes(mpi_comm);
+
+ for (unsigned int i=0; i<destinations.size(); ++i)
+ Assert (destinations[i] < n_procs,
+ ExcIndexRange (destinations[i], 0, n_procs));
+
+ // let all processors communicate the
+ // maximal number of destinations they have
+ unsigned int my_n_destinations = destinations.size();
+ unsigned int max_n_destinations = 0;
+
+ MPI_Allreduce (&my_n_destinations, &max_n_destinations,
+ 1, MPI_UNSIGNED, MPI_MAX, mpi_comm);
+
+ // now copy our own destinations into a
+ // buffer of that maximal size, padded
+ // with invalid values
+ std::vector<unsigned int> my_destinations(max_n_destinations,
+ numbers::invalid_unsigned_int);
+ std::copy (destinations.begin(), destinations.end(),
+ my_destinations.begin());
+
+ // now exchange these (we could
+ // communicate less data if we
+ // used MPI_Allgatherv, but
+ // we'd have to communicate
+ // my_n_destinations to all
+ // processors in this case,
+ // which is more expensive than
+ // the reduction operation
+ // above in MPI_Allreduce)
+ std::vector all_destinations (max_n_destinations * n_procs);
+ MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ mpi_comm);
+
+ // now we know who is going to
+ // communicate with
+ // whom. collect who is going
+ // to communicate with us!
+ std::vector<unsigned int> origins;
+ for (unsigned int i=0; i<n_procs; ++i)
+ for (unsigned int j=0; j<max_n_destinations; ++j)
+ if (all_destinations[i*max_n_destinations + j] == myid)
+ origins.push_back (i);
+
+ return origins;
+ }
diff --git a/deal.II/configure b/deal.II/configure
--- a/deal.II/configure
+++ b/deal.II/configure
 { $as_echo "$as_me:$LINENO: ---------------- configuring additional libs ----------------" >&5
$as_echo "---------------- configuring additional libs ----------------" >&6; }
-
{ $as_echo "$as_me:$LINENO: checking for PETSc library directory" >&5
$as_echo_n "checking for PETSc library directory... " >&6; }
@@ -14011,6 +14032,117 @@ done
+ { $as_echo "$as_me:$LINENO: checking for MUMPS library directory" >&5
+$as_echo_n "checking for MUMPS library directory... " >&6; }
+
+# Check whether --with-mumps was given.
+if test "${with_mumps+set}" = set; then
+ withval=$with_mumps; if test "x$withval" = "xno" ; then
+ { $as_echo "$as_me:$LINENO: result: explicitly disabled" >&5
+$as_echo "explicitly disabled" >&6; }
+ USE_CONTRIB_MUMPS=no
+ else
+ USE_CONTRIB_MUMPS=yes
+ DEAL_II_MUMPS_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_MUMPS_DIR" >&5
+$as_echo "$DEAL_II_MUMPS_DIR" >&6; }
+ if test ! -d $DEAL_II_MUMPS_DIR \
+ -o ! -d $DEAL_II_MUMPS_DIR/include \
+ -o ! -d $DEAL_II_MUMPS_DIR/lib \
+ ; then
+ { { $as_echo "$as_me:$LINENO: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&5
+$as_echo "$as_me: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+
+else
+ USE_CONTRIB_MUMPS=no
+
+fi
+
+
+ if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+ { $as_echo "$as_me:$LINENO: checking for SCALAPACK library directory" >&5
+$as_echo_n "checking for SCALAPACK library directory... " >&6; }
+
+# Check whether --with-scalapack was given.
+if test "${with_scalapack+set}" = set; then
+ withval=$with_scalapack; DEAL_II_SCALAPACK_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_SCALAPACK_DIR" >&5
+$as_echo "$DEAL_II_SCALAPACK_DIR" >&6; }
+ if test ! -d $DEAL_II_SCALAPACK_DIR ; then
+ { { $as_echo "$as_me:$LINENO: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&5
+$as_echo "$as_me: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+ { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking for BLACS library directory" >&5
+$as_echo_n "checking for BLACS library directory... " >&6; }
+
+# Check whether --with-blacs was given.
+if test "${with_blacs+set}" = set; then
+ withval=$with_blacs; DEAL_II_BLACS_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_DIR" >&5
+$as_echo "$DEAL_II_BLACS_DIR" >&6; }
+ if test ! -d $DEAL_II_BLACS_DIR \
+ -o ! -d $DEAL_II_BLACS_DIR/LIB ; then
+ { { $as_echo "$as_me:$LINENO: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&5
+$as_echo "$as_me: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+ { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking for BLACS library architecture" >&5
+$as_echo_n "checking for BLACS library architecture... " >&6; }
+ BLACS_COMM=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "COMMLIB = " \
+ | perl -pi -e 's/.*LIB =\s+//g;'`
+ BLACS_PLAT=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "PLAT = " \
+ | perl -pi -e 's/.*PLAT =\s+//g;'`
+ BLACS_DEBUG=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "BLACSDBGLVL = " \
+ | perl -pi -e 's/.*DBGLVL =\s+//g;'`
+ DEAL_II_BLACS_ARCH="$BLACS_COMM-$BLACS_PLAT-$BLACS_DEBUG"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_ARCH" >&5
+$as_echo "$DEAL_II_BLACS_ARCH" >&6; }
+
+ fi
+
+ if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define DEAL_II_USE_MUMPS 1
+_ACEOF
+
+ DEAL_II_DEFINE_DEAL_II_USE_MUMPS=DEAL_II_USE_MUMPS
+ if test "x$with_mumps" = "x" ; then
+ with_mumps="yes"
+ fi
+ fi
+
+
+
+
+
+
if test "x$with_umfpack" != "x" -a "x$with_umfpack" != "xno" ; then
@@ -16662,6 +16794,7 @@ fi
+
for ac_func in daxpy_ saxpy_ dgemv_ sgemv_ dgeev_ sgeev_ dgeevx_ sgeevx_
do
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
diff --git a/deal.II/configure.in b/deal.II/configure.in
index f34464ffaa..8ff1210d02 100644
--- a/deal.II/configure.in
+++ b/deal.II/configure.in
@@ -433,7 +433,7 @@ AC_SUBST(USE_CONTRIB_MUMPS)
AC_SUBST(DEAL_II_MUMPS_DIR)
AC_SUBST(DEAL_II_SCALAPACK_DIR) dnl MUMPS dependency
AC_SUBST(DEAL_II_BLACS_DIR) dnl MUMPS dependency
-AC_SUBST(DEAL_II_BLACS_ARCH)
+AC_SUBST(DEAL_II_BLACS_ARCH)
AC_SUBST(DEAL_II_DEFINE_DEAL_II_USE_MUMPS)
dnl Make sure we configure for libraries used by other libraries. For
@@ -727,6 +727,7 @@ AH_BOTTOM(
(major)*10000 + (minor)*100 + (subminor))
#include
+#include
/**
* If the compiler supports the upcoming C++1x standard, allow us to refer
diff --git a/deal.II/doc/doxygen/headers/glossary.h b/deal.II/doc/doxygen/headers/glossary.h
index cea9638436..dd2bc097ee 100644
--- a/deal.II/doc/doxygen/headers/glossary.h
+++ b/deal.II/doc/doxygen/headers/glossary.h
@@ -18,7 +18,7 @@
* documentation of classes of deal.II. The glossary often only gives
* a microscopic view of a particular concept; if you struggle with
* the bigger picture, it may therefore also be worth to consult the
- * global overview of classes on the main/@ref index page.
+ * global overview of classes on the @ref index page.
*
*
*
@@ -26,6 +26,29 @@
* - Mesh cells not refined any further in the hierarchy.
*
*
+ *
+ * - @anchor GlossArtificialCell Artificial cells
+ * -
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors (called @ref GlossGhostCell "ghost cells"), all coarse level
+ * cells, and all cells that are necessary to maintain the invariant
+ * that adjacent cells must differ by at most one refinement
+ * level. The cells stored on each process that are not owned by this
+ * process and that are not ghost cells are called "artificial cells",
+ * and for these cells the predicate
+ * <code>cell-@>is_artificial()</code> returns true. Artificial cells
+ * are guaranteed to exist in the globally distributed mesh but they
+ * may be further refined on other processors. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of artificial cells has no meaning for triangulations
+ * that store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class.
+ *
+ *
* - @anchor GlossBlockLA Block (linear algebra)
* - It is often convenient to treat a matrix or vector as a collection of
@@ -267,10 +290,14 @@
*
* The way out of a situation like this is to use one of the two following
* ways:
+ *
* - You tell the object that you want to compress what operation is
- * intended. The TrilinosWrappers::VectorBase::compress() can take such an
- * additional argument. Or,
- * - You do a fake addition or set operation on the object in question.
+ * intended. The TrilinosWrappers::VectorBase::compress() can take
+ * such an additional argument.
+ * - You do a fake addition or set operation on the object in question. For
+ * example, you can add a zero to an element of the matrix or vector,
+ * which has no effect other than telling the object that the next
+ * compress operation should be in <code>Add</code> mode.
*
* Some of the objects are also indifferent and can figure out what to
* do without being told. The TrilinosWrappers::SparseMatrix can do that,
@@ -391,6 +418,22 @@
* flag.
*
*
+ * - @anchor distributed_paper
+ * Distributed computing paper
+
+ * - The "distributed computing paper" is a paper by W. Bangerth,
+ * C. Burstedde, T. Heister and M. Kronbichler titled "Algorithms and Data
+ * Structures for Massively Parallel Generic Finite Element Codes" that
+ * described the implementation of parallel distributed computing in deal.II,
+ * i.e. computations where not only the linear system is split onto different
+ * machines as in, for example, step-18, but also the Triangulation and
+ * DoFHandler objects. In essence, it is a guide to the parallel::distributed
+ * namespace.
+ *
+ * The paper is currently in preparation.
+ *
+ *
+ *
* - @anchor GlossFaceOrientation Face orientation
* - In a triangulation, the normal vector to a face
* can be deduced from the face orientation by
@@ -448,6 +491,28 @@
*
*
*
+ * - @anchor GlossGhostCell Ghost cells
+ * -
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors, all coarse level cells, and all cells that are
+ * necessary to maintain the invariant that adjacent cells must differ
+ * by at most one refinement level. The cells stored on each process
+ * that are not owned by this process but that are adjacent to the
+ * ones owned by this process are called "ghost cells", and for these
+ * cells the predicate <code>cell-@>is_ghost()</code> returns
+ * true. Ghost cells are guaranteed to exist in the globally
+ * distributed mesh, i.e. these cells are actually owned by another
+ * process and are not further refined there. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of ghost cells has no meaning for triangulations that
+ * store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class.
+ *
+ *
* - @anchor hp_paper %hp paper
* - The "hp paper" is a paper by W. Bangerth and O. Kayser-Herold, titled
* "Data Structures and Requirements for hp Finite Element Software", that
@@ -577,8 +642,9 @@ Article{JK10,
* element shape functions are defined.
*
*
- * - @anchor GlossShape Shape functions
- The restriction of
- * the finite element basis functions to a single grid cell.
+ * - @anchor GlossShape Shape functions
+ * - The restriction of the finite element basis functions to a single
+ * grid cell.
*
*
* - @anchor GlossSubdomainId Subdomain id
@@ -600,8 +666,26 @@ Article{JK10,
* coincides with the rank of the MPI process within the MPI
* communicator). Partitioning is typically done using the
* GridTools::partition() function, but any other method can also be used to
- * do this though most other ideas will likely lead to less well balanced
+ * do this though most simple ideas will likely lead to less well balanced
* numbers of degrees of freedom on the various subdomains.
+ *
+ * On the other hand, for programs that are parallelized using MPI but
+ * where meshes are held distributed across several processors using
+ * the parallel::distributed::Triangulation and
+ * parallel::distributed::DoFHandler classes, the subdomain id of
+ * cells are tied to the processor that owns the cell. In other words,
+ * querying the subdomain id of a cell tells you if the cell is owned
+ * by the current processor (i.e. if <code>cell-@>subdomain_id() ==
+ * triangulation.parallel::distributed::Triangulation::locally_owned_subdomain()</code>)
+ * or by another processor. In the parallel distributed case,
+ * subdomain ids are only assigned to cells that the current processor
+ * owns as well as the immediately adjacent @ref GlossGhostCell "ghost cells".
+ * Cells further away are held on each processor to ensure
+ * that every MPI process has access to the full coarse grid as well
+ * as to ensure the invariant that neighboring cells differ by at most
+ * one refinement level. These cells are called "artificial" (see
+ * @ref GlossArtificialCell "here") and have the special subdomain id value
+ * types::artificial_subdomain_id.
*
*
*
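To make the ghost/artificial cell terminology introduced above concrete, a typical loop over only the locally owned cells looks roughly as follows (a sketch, assuming a parallel::distributed::Triangulation<dim> named triangulation inside a function template over dim):

  typename parallel::distributed::Triangulation<dim>::active_cell_iterator
    cell = triangulation.begin_active(),
    endc = triangulation.end();
  for (; cell!=endc; ++cell)
    if (!cell->is_ghost() && !cell->is_artificial())
      {
        // this cell is owned by the current MPI process
      }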
diff --git a/deal.II/doc/doxygen/headers/multithreading.h b/deal.II/doc/doxygen/headers/multithreading.h
index 4a3368bb64..895fee0116 100644
--- a/deal.II/doc/doxygen/headers/multithreading.h
+++ b/deal.II/doc/doxygen/headers/multithreading.h
@@ -12,7 +12,8 @@
//-------------------------------------------------------------------------
/**
- * @defgroup threads Parallel computing with multiple processors
+ * @defgroup threads Parallel computing with multiple processors accessing the shared memory
+ * @ingroup Parallel
*
* @brief A module discussing the use of parallelism on shared memory
* machines. See the detailed documentation and
diff --git a/deal.II/doc/doxygen/headers/parallel.h b/deal.II/doc/doxygen/headers/parallel.h
new file mode 100644
index 0000000000..b61f9f58ea
--- /dev/null
+++ b/deal.II/doc/doxygen/headers/parallel.h
@@ -0,0 +1,36 @@
+//-------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//-------------------------------------------------------------------------
+
+/**
+ * @defgroup Parallel Parallel computing
+ *
+ * @brief A module discussing the use of multiple processors.
+ *
+ * This module contains information on %parallel computing. It is
+ * subdivided into parts on @ref threads and on @ref distributed.
+ */
+
+
+/**
+ * A namespace in which we define classes and algorithms that deal
+ * with running in %parallel on shared memory machines when deal.II is
+ * configured to use multiple threads (see @ref threads), as well as
+ * running things in %parallel on %distributed memory machines (see @ref
+ * distributed).
+ *
+ * @ingroup threads
+ * @author Wolfgang Bangerth, 2008, 2009
+ */
+namespace parallel
+{
+}
diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h
index 9d0d4e17b2..2dbe0009f3 100644
--- a/deal.II/doc/news/changes.h
+++ b/deal.II/doc/news/changes.h
@@ -277,6 +277,19 @@ inconvenience this causes.
base
+ New: The Timer class can now accumulate and average run times of
+ pieces of code across multiple MPI processes.
+
+ (Timo Heister 2010/06/07)
+
+
+ New: The Utilities::System::compute_point_to_point_communication_pattern
+ function can be used to compute who wants to send messages to the
+ current processor in unstructured point-to-point MPI communications.
+
+ (WB 2010/06/07)
+
+
New: The DataOutBase class (and all derived classes such as DataOut,
MatrixOut, etc) can now produce the XML-based version of the VTK file format
(the so-called VTU format). Furthermore, the
@@ -436,10 +449,30 @@ inconvenience this causes.
lac
+ New: The ConstraintMatrix class can now handle storing only
+ a subset of all constraints, for example only for degrees of
+ freedom that are relevant for the subdomain that is owned by one
+ process in an MPI universe.
+
+ (Timo Heister, Martin Kronbichler 2010/06/07)
+
+
+ New: The PETScWrappers::MPI::Vector and TrilinosWrappers::MPI::Vector
+ classes can now handle ghost elements, i.e. elements that are not
+ owned by the current processor but are available for reading
+ anyway. The simplest form of ghosting would be to simply import
+ an entire vector to local memory, but the new functions allow one to
+ select the elements we need to support the case of computations
+ where importing all elements of even a single vector would
+ exceed available memory.
+
+ (Timo Heister 2010/06/07)
+
+
-
- New: A class SparseDirectMumps that provides an interface to
- the MUltifrontal Massively Parallel sparse direct Solver (MUMPS).
+ New: A class SparseDirectMumps that provides an interface to
+ the MUltifrontal Massively Parallel sparse direct Solver (MUMPS).
(Markus Buerg 2010/05/10)
diff --git a/deal.II/doc/readme-petsc-trilinos.html b/deal.II/doc/readme-petsc-trilinos.html
index 1484cf759a..a426571f59 100644
--- a/deal.II/doc/readme-petsc-trilinos.html
+++ b/deal.II/doc/readme-petsc-trilinos.html
@@ -200,6 +200,16 @@ make install
want to use Trilinos with MPI on parallel machines, you also need to
flip the value of the TPL_ENABLE_MPI
flag above.
+
+ Note: if the deal.II ./configure reports an error related to
+ HAVE_INTTYPES_H, edit include/ml_config.h in the Trilinos installation directory and comment out the
+ line
+
+
+#define HAVE_INTTYPES_H
+
+
+
Configuring for installed Trilinos packages
diff --git a/deal.II/lac/include/lac/block_matrix_base.h b/deal.II/lac/include/lac/block_matrix_base.h
index d336d1d94c..8c63c06eea 100644
--- a/deal.II/lac/include/lac/block_matrix_base.h
+++ b/deal.II/lac/include/lac/block_matrix_base.h
@@ -97,7 +97,7 @@ struct IsBlockMatrix
* indicates whether the template
* argument to this class is a block
* matrix (in fact whether the type is
- * derived from BlockMatrix).
+ * derived from BlockMatrixBase).
*/
static const bool value = (sizeof(check_for_block_matrix
((MatrixType*)0))
diff --git a/deal.II/lac/include/lac/block_vector_base.h b/deal.II/lac/include/lac/block_vector_base.h
index 632b8d1e6f..4bd235721b 100644
--- a/deal.II/lac/include/lac/block_vector_base.h
+++ b/deal.II/lac/include/lac/block_vector_base.h
@@ -33,6 +33,69 @@ DEAL_II_NAMESPACE_OPEN
*@{
*/
+template <typename> class BlockVectorBase;
+
+
+/**
+ * A class that can be used to determine whether a given type is a block
+ * vector type or not. For example,
+ * @code
+ * IsBlockVector<Vector<double> >::value
+ * @endcode
+ * has the value false, whereas
+ * @code
+ * IsBlockVector<BlockVector<double> >::value
+ * @endcode
+ * is true. This is sometimes useful in template contexts where we may
+ * want to do things differently depending on whether a template type
+ * denotes a regular or a block vector type.
+ *
+ * @author Wolfgang Bangerth, 2010
+ */
+template <typename VectorType>
+struct IsBlockVector
+{
+ private:
+ struct yes_type { char c[1]; };
+ struct no_type { char c[2]; };
+
+ /**
+ * Overload returning true if the class
+ * is derived from BlockVectorBase,
+ * which is what block vectors do.
+ */
+ template <typename T>
+ static yes_type check_for_block_vector (const BlockVectorBase<T> *);
+
+ /**
+ * Catch all for all other potential
+ * vector types that are not block
+ * vectors.
+ */
+ static no_type check_for_block_vector (...);
+
+ public:
+ /**
+ * A statically computable value that
+ * indicates whether the template
+ * argument to this class is a block
+ * vector (in fact whether the type is
+ * derived from BlockVectorBase).
+ */
+ static const bool value = (sizeof(check_for_block_vector
+ ((VectorType*)0))
+ ==
+ sizeof(yes_type));
+};
+
+
+// instantiation of the static member
+template <typename VectorType>
+const bool IsBlockVector<VectorType>::value;
+
+
+
+
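A short sketch of the kind of compile-time dispatch IsBlockVector enables (the helper function is hypothetical, not part of the patch):

  template <class VectorType>
  void do_something (VectorType &v)
  {
    if (IsBlockVector<VectorType>::value == true)
      {
        // block vector: e.g. treat each of its blocks separately
      }
    else
      {
        // plain (non-block) vector path
      }
  }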
namespace internal
{
diff --git a/deal.II/lac/include/lac/constraint_matrix.h b/deal.II/lac/include/lac/constraint_matrix.h
index 9e745bb0cb..dd7a32f0c9 100644
--- a/deal.II/lac/include/lac/constraint_matrix.h
+++ b/deal.II/lac/include/lac/constraint_matrix.h
@@ -344,15 +344,28 @@ class ConstraintMatrix : public Subscriptor
ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
/**
- * Reinit the ConstraintMatrix
- * object. This function is only relevant
- * in the distributed case, to supply a
- * different IndexSet. Otherwise this
- * routine is equivalent to calling
- * clear().
+ * Reinit the ConstraintMatrix object and
+ * supply an IndexSet with lines that may
+ * be constrained. This function is only
+ * relevant in the distributed case, to
+ * supply a different IndexSet. Otherwise
+ * this routine is equivalent to calling
+ * clear(). Normally an IndexSet with all
+ * locally_active_dofs should be supplied
+ * here.
*/
void reinit (const IndexSet & local_constraints = IndexSet());
+ /**
+ * Determines if we can store a
+ * constraint for the given @p
+ * line_index. This routine only matters
+ * in the distributed case and checks if
+ * the IndexSet allows storage of this
+ * line. Always returns true if not in
+ * the distributed case.
+ */
+ bool can_store_line(unsigned int line_index) const;
/**
* This function copies the content of @p
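A sketch of how the new reinit()/can_store_line() pair is meant to be used in an MPI run (all index values and names below are placeholders):

  IndexSet locally_relevant (n_dofs);
  locally_relevant.add_range (local_begin, local_end);   // plus ghost DoFs

  ConstraintMatrix constraints;
  constraints.reinit (locally_relevant);

  if (constraints.can_store_line (dof_index))
    constraints.add_line (dof_index);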
@@ -2093,7 +2106,9 @@ void ConstraintMatrix::
lines[lines_cache[calculate_line_index(*local_indices_begin)]];
for (unsigned int j=0; j &local_vector,
lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
for (unsigned int q=0; q &local_vector,
// the entries of fixed dofs
 for (unsigned int j=0; j<position->entries.size(); ++j)
{
- Assert (is_constrained(position->entries[j].first) == false,
+ Assert (!(!local_lines.size()
+ || local_lines.is_element(position->entries[j].first))
+ || is_constrained(position->entries[j].first) == false,
ExcMessage ("Tried to distribute to a fixed dof."));
global_vector(position->entries[j].first)
+= local_vector(i) * position->entries[j].second;
diff --git a/deal.II/lac/include/lac/petsc_parallel_vector.h b/deal.II/lac/include/lac/petsc_parallel_vector.h
index d05859da3c..337f94429e 100644
--- a/deal.II/lac/include/lac/petsc_parallel_vector.h
+++ b/deal.II/lac/include/lac/petsc_parallel_vector.h
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -22,6 +22,7 @@
# include
# include
# include
+# include
DEAL_II_NAMESPACE_OPEN
@@ -29,6 +30,7 @@ DEAL_II_NAMESPACE_OPEN
// forward declaration
template class Vector;
+class IndexSet;
/*! @addtogroup PETScWrappers
@@ -186,6 +188,7 @@ namespace PETScWrappers
const unsigned int n,
const unsigned int local_size);
+
/**
* Copy-constructor from deal.II
* vectors. Sets the dimension to that
@@ -206,6 +209,7 @@ namespace PETScWrappers
const dealii::Vector &v,
const unsigned int local_size);
+
/**
* Copy-constructor the
* values from a PETSc wrapper vector
@@ -224,6 +228,29 @@ namespace PETScWrappers
const VectorBase &v,
const unsigned int local_size);
+
+ /**
+ * Constructs a new parallel PETSc
+ * vector from an IndexSet. Note that
+ * @p local must be contiguous and
+ * the global size of the vector is
+ * determined by local.size(). The
+ * global indices in @p ghost are
+ * supplied as ghost indices that can
+ * also be read locally. Note that
+ * the @p ghost IndexSet may be empty
+ * and that any indices already
+ * contained in @p local are ignored
+ * during construction. That way you
+ * can construct with
+ * locally_relevant_dofs() for
+ * example.
+ */
+ explicit Vector (const MPI_Comm &communicator,
+ const IndexSet & local,
+ const IndexSet & ghost = IndexSet(0));
+
+
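A sketch of creating and reading a ghosted vector with this constructor (the index sets and the ghosted index are placeholders):

  IndexSet locally_owned (n_dofs);
  locally_owned.add_range (local_begin, local_end);

  IndexSet ghost_indices (n_dofs);
  ghost_indices.add_index (some_remote_dof);

  PETScWrappers::MPI::Vector v (MPI_COMM_WORLD, locally_owned, ghost_indices);

  // ... write into v ...
  v.compress ();
  v.update_ghost_values ();            // make ghost entries readable locally
  const double value = v (some_remote_dof);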
/**
* Copy the given vector. Resize the
* present vector if necessary. Also
@@ -232,6 +259,7 @@ namespace PETScWrappers
*/
Vector & operator = (const Vector &v);
+
/**
* Copy the given sequential
* (non-distributed) vector
@@ -346,6 +374,16 @@ namespace PETScWrappers
void reinit (const Vector &v,
const bool fast = false);
+ /**
+ * Reinit as a ghosted vector. See
+ * constructor with same signature
+ * for more details.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const IndexSet & local,
+ const IndexSet & ghost = IndexSet(0));
+
+
/**
* Return a reference to the MPI
* communicator object in use with
@@ -366,6 +404,21 @@ namespace PETScWrappers
virtual void create_vector (const unsigned int n,
const unsigned int local_size);
+
+
+ /**
+ * Create a vector of global length
+ * @p n, local size @p local_size and
+ * with the specified ghost
+ * indices. Note that you need to
+ * call update_ghost_values() before
+ * accessing those.
+ */
+ virtual void create_vector (const unsigned int n,
+ const unsigned int local_size,
+ const IndexSet & ghostnodes);
+
+
private:
/**
* Copy of the communicator object to
diff --git a/deal.II/lac/include/lac/petsc_vector_base.h b/deal.II/lac/include/lac/petsc_vector_base.h
index 30a2036778..919b2fccad 100644
--- a/deal.II/lac/include/lac/petsc_vector_base.h
+++ b/deal.II/lac/include/lac/petsc_vector_base.h
@@ -25,6 +25,7 @@
# include
# include
+# include
DEAL_II_NAMESPACE_OPEN
@@ -656,6 +657,14 @@ namespace PETScWrappers
void ratio (const VectorBase &a,
const VectorBase &b);
+ /**
+ * Updates the ghost values of this
+ * vector. This is necessary after any
+ * modification before reading ghost
+ * values.
+ */
+ void update_ghost_values() const;
+
/**
* Print to a
* stream. @p precision denotes
@@ -728,6 +737,23 @@ namespace PETScWrappers
*/
Vec vector;
+ /**
+ * Denotes if this vector has ghost
+ * indices associated with it. This
+ * means that at least one of the
+ * processes in a parallel program has
+ * at least one ghost index.
+ */
+ bool ghosted;
+
+ /**
+ * This vector contains the global
+ * indices of the ghost values. The
+ * location in this vector denotes the
+ * local numbering, which is used in
+ * PETSc.
+ */
+ IndexSet ghost_indices;
/**
* PETSc doesn't allow to mix additions
@@ -783,6 +809,8 @@ namespace PETScWrappers
const unsigned int *indices,
const PetscScalar *values,
const bool add_values);
+
+
};
diff --git a/deal.II/lac/include/lac/sparsity_tools.h b/deal.II/lac/include/lac/sparsity_tools.h
index 70f0a7ea46..442ac45bf7 100644
--- a/deal.II/lac/include/lac/sparsity_tools.h
+++ b/deal.II/lac/include/lac/sparsity_tools.h
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008, 2009 by the deal.II authors
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -19,6 +19,11 @@
#include
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include
+#include
+#endif
+
DEAL_II_NAMESPACE_OPEN
class SparsityPattern;
@@ -190,6 +195,33 @@ namespace SparsityTools
std::vector &new_indices,
const std::vector &starting_indices = std::vector());
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ /**
+ * Communicate rows in a compressed
+ * sparsity pattern over MPI. The @param
+ * csp is modified in place. All entries in
+ * rows that belong to a different
+ * processor are sent to them and added
+ * there. The ownership is determined by
+ * parameter @param rows_per_cpu. The
+ * IndexSet @param myrange should be the
+ * one used in the constructor of the
+ * CompressedSimpleSparsityPattern. All
+ * rows contained in @param myrange are
+ * checked in @param csp. This function
+ * needs to be used with
+ * PETScWrappers::MPI::SparseMatrix for it
+ * to work correctly.
+ */
+ template <class CSP_t>
+ void distribute_sparsity_pattern(CSP_t & csp,
+ const std::vector<unsigned int> & rows_per_cpu,
+ const MPI_Comm & mpi_comm,
+ const IndexSet & myrange);
+#endif
+
+
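A sketch of the intended workflow (names are placeholders; the CompressedSimpleSparsityPattern constructor taking an IndexSet is the one referred to in the comment above):

  CompressedSimpleSparsityPattern csp (locally_relevant.size(),
                                       locally_relevant.size(),
                                       locally_relevant);

  // ... add entries for locally owned rows and for coupling ghost rows ...

  SparsityTools::distribute_sparsity_pattern (csp,
                                              rows_per_cpu,     // DoFs owned by each process
                                              MPI_COMM_WORLD,
                                              locally_relevant);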
/**
* Exception
*/
diff --git a/deal.II/lac/source/constraint_matrix.cc b/deal.II/lac/source/constraint_matrix.cc
index 9c43849525..14bcc92661 100644
--- a/deal.II/lac/source/constraint_matrix.cc
+++ b/deal.II/lac/source/constraint_matrix.cc
@@ -1944,6 +1944,16 @@ template<>
void
ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
{
+ //TODO: not implemented yet, we need to fix
+ //LocalRange() first to only include
+ //"owned" indices. For this we need to keep
+ //track of the owned indices, because
+ //Trilinos doesn't. Use same constructor
+ //interface as in PETSc with two IndexSets!
+ AssertThrow (vec.vector_partitioner().IsOneToOne(),
+ ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
+
+
typedef std::vector::const_iterator constraint_iterator;
ConstraintLine index_comparison;
index_comparison.line = vec.local_range().first;
@@ -2011,6 +2021,8 @@ ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
it->entries[i].second);
vec(it->line) = new_value;
}
+
+ vec.compress ();
}
@@ -2103,11 +2115,99 @@ ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
vec(it->line) = new_value;
}
}
+
+ vec.compress ();
+}
+
+#endif
+
+#ifdef DEAL_II_USE_PETSC
+
+ // this is a specialization for a
+ // parallel (non-block) PETSc
+ // vector. The basic idea is to just work
+ // on the local range of the vector. But
+ // we need access to values that the
+ // local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
+{
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.local_range().first;
+ const constraint_iterator begin_my_constraints =
+ std::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.local_range().second;
+ const constraint_iterator end_my_constraints
+ = std::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ // all indices we need to read from
+ IndexSet my_indices (vec.size());
+
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+
+ IndexSet local_range_is (vec.size());
+ local_range_is.add_range(local_range.first, local_range.second);
+
+
+ // create a vector and import those indices
+ PETScWrappers::MPI::Vector ghost_vec(vec.get_mpi_communicator(),
+ local_range_is,
+ my_indices);
+ ghost_vec = vec;
+ ghost_vec.update_ghost_values();
+
+ // finally do the distribution on own
+ // constraints
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+ // fill entry in line
+ // next_constraint.line by adding the
+ // different contributions
+ double new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (ghost_vec(it->entries[i].first) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+
+ // force every processor to write something
+ vec(local_range.first) = vec(local_range.first);
+
+ vec.compress ();
+}
+
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+{
+ AssertThrow (false, ExcNotImplemented());
}
#endif
+
unsigned int ConstraintMatrix::n_constraints () const
{
return lines.size();
diff --git a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
index 02328ba853..ac4163e11d 100644
--- a/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
+++ b/deal.II/lac/source/petsc_parallel_sparse_matrix.cc
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -302,6 +302,13 @@ namespace PETScWrappers
}
const unsigned int
local_row_end = local_row_start + local_rows_per_process[this_process];
+
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+ //old version to create the matrix, we
+ //can skip calculating the row length
+ //at least starting from 2.3.3 (tested,
+ //see below)
+
const unsigned int
local_col_end = local_col_start + local_columns_per_process[this_process];
@@ -345,6 +352,27 @@ namespace PETScWrappers
&matrix);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else //PETSC_VERSION>=2.3.3
+ // new version to create the matrix. We
+ // do not set row length but set the
+ // correct SparsityPattern later.
+ int ierr;
+
+ ierr = MatCreate(communicator,&matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetSizes(matrix,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process],
+ sparsity_pattern.n_rows(),
+ sparsity_pattern.n_cols());
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetType(matrix,MATMPIAIJ);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+
+
// next preset the exact given matrix
// entries with zeros, if the user
// requested so. this doesn't avoid any
@@ -422,13 +450,24 @@ namespace PETScWrappers
// now copy over the information
// from the sparsity pattern.
{
- unsigned int index=0;
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ * ptr = & colnums_in_window[0];
+
for (unsigned int i=local_row_start; i ghostindices;
+ ghostnodes.fill_index_vector(ghostindices);
+
+ const PetscInt * ptr= (const PetscInt*)(&(ghostindices[0]));
+
+ int ierr
+ = VecCreateGhost(communicator,
+ local_size,
+ PETSC_DETERMINE,
+ ghostindices.size(),
+ ptr,
+ &vector);
+
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Assert (size() == n,
+ ExcDimensionMismatch (size(), n));
+
+#if DEBUG
+ // test ghost allocation in debug mode
+
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ begin, end;
+
+ ierr = VecGetOwnershipRange (vector, &begin, &end);
+
+ Assert(local_size==(unsigned int)(end-begin), ExcInternalError());
+
+ Vec l;
+ ierr = VecGhostGetLocalForm(vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt lsize;
+ ierr = VecGetSize(l, &lsize);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = VecGhostRestoreLocalForm(vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Assert( lsize==end-begin+(PetscInt)ghost_indices.n_elements() ,ExcInternalError());
+
+#endif
+
+
}
+
+
+}
+
}
DEAL_II_NAMESPACE_CLOSE
diff --git a/deal.II/lac/source/petsc_vector_base.cc b/deal.II/lac/source/petsc_vector_base.cc
index ff60b8fa12..53c1df3e70 100644
--- a/deal.II/lac/source/petsc_vector_base.cc
+++ b/deal.II/lac/source/petsc_vector_base.cc
@@ -2,7 +2,7 @@
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
@@ -31,63 +31,139 @@ namespace PETScWrappers
Assert (index < vector.size(),
ExcIndexRange (index, 0, vector.size()));
- // this is clumsy: there is no simple
- // way in PETSc to read an element from
- // a vector, i.e. there is no function
- // VecGetValue or so. The only way is
- // to obtain a pointer to a contiguous
+ // old versions of PETSc appear to be
+ // missing the function VecGetValues(),
+ // so the workaround consists of
+ // obtaining a pointer to a contiguous
// representation of the vector and
- // read from it. Subsequently, the
- // vector representation has to be
- // restored. In addition, we can only
- // get access to the local part of the
- // vector, so we have to guard against
- // that
+ // read from it. In addition, we can
+ // only get access to the local part of
+ // the vector, so we have to guard
+ // against that
 if (dynamic_cast<const PETScWrappers::Vector *>(&vector) != 0)
{
+ #if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
PetscScalar *ptr;
int ierr
- = VecGetArray (static_cast(vector), &ptr);
+ = VecGetArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
const PetscScalar value = *(ptr+index);
- ierr = VecRestoreArray (static_cast(vector), &ptr);
+ ierr = VecRestoreArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-
return value;
+#else
+ PetscInt idx = index;
+ PetscScalar value;
+ int ierr = VecGetValues(vector.vector, 1, &idx, &value);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ return value;
+#endif
}
 else if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&vector) != 0)
{
+ int ierr;
+
+ if (vector.ghosted)
+ {
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ begin, end;
+ ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Vec l;
+ ierr = VecGhostGetLocalForm(vector.vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt lsize;
+ ierr = VecGetSize(l, &lsize);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar *ptr;
+ ierr = VecGetArray(l, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar value;
+
+ if ( index>=static_cast<unsigned int>(begin)
+ && index<static_cast<unsigned int>(end) )
+ {
+ //local entry
+ value=*(ptr+index-begin);
+ }
+ else
+ {
+ //ghost entry
+ unsigned ghostidx
+ = vector.ghost_indices.index_within_set(index);
+
+ Assert(ghostidx+end-begin<(unsigned int)lsize, ExcInternalError());
+ value=*(ptr+ghostidx+end-begin);
+
+
+ }
+
+
+ ierr = VecRestoreArray(l, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = VecGhostRestoreLocalForm(vector.vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ return value;
+ }
+
+
// first verify that the requested
// element is actually locally
// available
- int ierr;
+
#ifdef PETSC_USE_64BIT_INDICES
PetscInt
#else
int
#endif
begin, end;
- ierr = VecGetOwnershipRange (static_cast(vector),
- &begin, &end);
+ ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+
 AssertThrow ((index >= static_cast<unsigned int>(begin)) &&
 (index < static_cast<unsigned int>(end)),
ExcAccessToNonlocalElement (index, begin, end-1));
+ // old version which only work with
+ // VecGetArray()...
+#if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
+
// then access it
PetscScalar *ptr;
- ierr = VecGetArray (static_cast(vector), &ptr);
+ ierr = VecGetArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
const PetscScalar value = *(ptr+index-begin);
- ierr = VecRestoreArray (static_cast(vector), &ptr);
+ ierr = VecRestoreArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
return value;
+
+#else
+ //new version with VecGetValues()
+ PetscInt idx = index;
+ PetscScalar value;
+ ierr = VecGetValues(vector.vector, 1, &idx, &value);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ return value;
+#endif
+
}
else
// what? what other kind of vector
@@ -99,6 +175,7 @@ namespace PETScWrappers
VectorBase::VectorBase ()
:
+ ghosted(false),
last_action (LastAction::none)
{}
@@ -107,6 +184,8 @@ namespace PETScWrappers
VectorBase::VectorBase (const VectorBase &v)
:
Subscriptor (),
+ ghosted(v.ghosted),
+ ghost_indices(v.ghost_indices),
last_action (LastAction::none)
{
int ierr = VecDuplicate (v.vector, &vector);
@@ -1041,6 +1120,25 @@ namespace PETScWrappers
last_action = LastAction::insert;
}
+
+ void
+ VectorBase::update_ghost_values() const
+ {
+ // generate an error for not ghosted
+ // vectors
+ if (!ghosted)
+ throw ExcInternalError();
+
+ int ierr;
+
+ ierr = VecGhostUpdateBegin(vector, INSERT_VALUES, SCATTER_FORWARD);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+}
+
+
+
}
DEAL_II_NAMESPACE_CLOSE
diff --git a/deal.II/lac/source/sparsity_tools.cc b/deal.II/lac/source/sparsity_tools.cc
index d4e0e1aca2..7dc9f41f26 100644
--- a/deal.II/lac/source/sparsity_tools.cc
+++ b/deal.II/lac/source/sparsity_tools.cc
@@ -18,6 +18,13 @@
#include
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include
+#include
+#include
+#include
+#endif
+
#ifdef DEAL_II_USE_METIS
// This is sorta stupid. what we really would like to do here is this:
// extern "C" {
@@ -406,6 +413,149 @@ namespace SparsityTools
(next_free_number == sparsity.n_rows()),
ExcInternalError());
}
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ template <class CSP_t>
+ void distribute_sparsity_pattern(CSP_t & csp,
+ const std::vector<unsigned int> & rows_per_cpu,
+ const MPI_Comm & mpi_comm,
+ const IndexSet & myrange)
+ {
+ unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+ std::vector<unsigned int> start_index(rows_per_cpu.size()+1);
+ start_index[0]=0;
+ for (unsigned int i=0;i<rows_per_cpu.size();++i)
+ start_index[i+1]=start_index[i]+rows_per_cpu[i];
+
+ typedef std::map<unsigned int, std::vector<unsigned int> > map_vec_t;
+
+ map_vec_t send_data;
+
+ {
+ unsigned int dest_cpu=0;
+
+ unsigned int n_local_rel_rows = myrange.n_elements();
+ for (unsigned int row_idx=0;row_idx<n_local_rel_rows;++row_idx)
+ {
+ unsigned int row=myrange.nth_index_in_set(row_idx);
+
+ //calculate destination CPU
+ while (row>=start_index[dest_cpu+1])
+ ++dest_cpu;
+
+ //skip myself
+ if (dest_cpu==myid)
+ {
+ row_idx+=rows_per_cpu[myid]-1;
+ continue;
}
+ unsigned int rlen = csp.row_length(row);
+
+ //skip empty lines
+ if (!rlen)
+ continue;
+
+ //save entries
+ std::vector<unsigned int> & dst = send_data[dest_cpu];
+
+ dst.push_back(rlen); // number of entries
+ dst.push_back(row); // row index
+ for (unsigned int c=0; c<rlen; ++c)
+ {
+ //columns
+ unsigned int column = csp.column_number(row, c);
+ dst.push_back(column);
+ }
+ }
+ }
+
+ unsigned int num_receive=0;
+ {
+ std::vector<unsigned int> send_to;
+ send_to.reserve(send_data.size());
+ for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it)
+ send_to.push_back(it->first);
+
+ num_receive =
+ Utilities::System::
+ compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
+ }
+
+ std::vector requests(send_data.size());
+
+
+ // send data
+ {
+ unsigned int idx=0;
+ for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it, ++idx)
+ MPI_Isend(&(it->second[0]),
+ it->second.size(),
+ MPI_INT,
+ it->first,
+ 124,
+ mpi_comm,
+ &requests[idx]);
+ }
+
+ {
+ //receive
+ std::vector<unsigned int> recv_buf;
+ for (unsigned int index=0;index (SparsityType & csp, \
+const std::vector<unsigned int> & rows_per_cpu,\
+const MPI_Comm & mpi_comm,\
+const IndexSet & myrange)
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+SPARSITY_FUNCTIONS(CompressedSparsityPattern);
+SPARSITY_FUNCTIONS(CompressedSimpleSparsityPattern);
+#endif
+
+#undef SPARSITY_FUNCTIONS
+
DEAL_II_NAMESPACE_CLOSE