functionality is available. */
#undef DEAL_II_USE_MT_POSIX_NO_BARRIERS
+/* Defined if a MUMPS installation was found and is going to be used */
+#undef DEAL_II_USE_MUMPS
+
/* Defined if a PETSc installation was found and is going to be used */
#undef DEAL_II_USE_PETSC
/* Defined if a Trilinos installation was found and is going to be used */
#undef DEAL_II_USE_TRILINOS
-/* Defined if a MUMPS installation was found and is going to be used */
-#undef DEAL_II_USE_MUMPS
-
/* Define if vector iterators are just plain pointers */
#undef DEAL_II_VECTOR_ITERATOR_IS_POINTER
* compatibility; and (ii) DEAL_II_PETSC_VERSION_GTE is used to add
* functionality to the PETScWrappers that does not exist in previous
* versions of PETSc. Examples of usage can be found in
- * lac/source/petsc_matrix_base.h.
- *
- * Note: SLEPcWrappers do not need their own anological macros, since
- * SLEPc and PETSc must have identical version numbers anyways.
+ * lac/source/petsc_matrix_base.h. Note: SLEPcWrappers do not need
+ * their own analogous macros, since SLEPc and PETSc must have
+ * identical version numbers anyway.
*/
#define DEAL_II_PETSC_VERSION_LT(major,minor,subminor) \
((PETSC_VERSION_MAJOR * 10000 + \
(major)*10000 + (minor)*100 + (subminor))
#include <base/numbers.h>
+#include <base/types.h>
/**
* If the compiler supports the upcoming C++1x standard, allow us to refer
IndexSet get_view (const unsigned int begin,
const unsigned int end) const;
+
+ /**
+ * Removes all elements contained in @p
+ * other from this set. In other words,
+ * if $x$ is the current object and $o$
+ * the argument, then we compute $x
+ * \leftarrow x \backslash o$.
+ */
+ void subtract_set (const IndexSet & other);
+
+
+ /**
+ * Fills the given vector with all
+ * indices contained in this IndexSet.
+ */
+ void fill_index_vector(std::vector<unsigned int> & indices) const;
+
+
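For illustration only (not part of the patch), a minimal sketch of how the two new members interact; the sizes and ranges below are made up:

#include <base/index_set.h>
#include <vector>

void index_set_sketch ()
{
  dealii::IndexSet owned (100);
  owned.add_range (0, 60);                 // this set holds [0,60)

  dealii::IndexSet ghost (100);
  ghost.add_range (40, 80);                // this set holds [40,80)

  ghost.subtract_set (owned);              // ghost is now [60,80)

  std::vector<unsigned int> indices;
  ghost.fill_index_vector (indices);       // indices = {60, 61, ..., 79}
}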
/**
* Outputs a text representation of this
* IndexSet to the given stream. Used for
DEAL_II_NAMESPACE_OPEN
-/**
- * A namespace in which we define a few algorithms that can run in parallel
- * when deal.II is configured to use multiple threads.
- *
- * @ingroup threads
- * @author Wolfgang Bangerth, 2008, 2009
- */
namespace parallel
{
namespace internal
// $Id$
// Version: $Name$
//
-// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
#include <mpi.h>
+#include <base/utilities.h>
#endif
#include <string>
*
* Starts the timer at 0 sec.
*
+ * If @p sync_wall_time is true, the wall
+ * time is synchronized between all CPUs
+	 * using an MPI_Barrier() and a collective
+ * operation. Note that this only works
+ * if you stop() the timer before
+ * querying for the wall time. The time
+	 * for the MPI operations is not
+ * included in the timing but may slow
+ * down your program.
+ *
* This constructor is only available
* if the deal.II compiler is an MPI
* compiler.
*/
- Timer (MPI_Comm mpi_communicator);
+ Timer (MPI_Comm mpi_communicator,
+ bool sync_wall_time = false);
+
+
+ /**
+ * Structure to save collective data
+ * measured by this timer class. Queried
+ * by get_data() or printed with
+ * print_data() after calling stop().
+ */
+ struct TimeMinMaxAvg
+ {
+ double sum;
+ double min;
+ double max;
+ unsigned int min_index;
+ unsigned int max_index;
+ double avg;
+
+ /**
+ * Set the time values to @p
+ * val, and the MPI rank to
+ * the given @p rank.
+ */
+ void set(const double val,
+ const unsigned int rank)
+ {
+ sum = min = max = val;
+ min_index = max_index = rank;
+ }
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ /**
+ * Given two structures
+ * indicating timer
+ * information, do the
+ * reduction by choosing the
+ * one with the longer time
+ * interval. This is a max
+ * operation and therefore a
+ * reduction.
+ *
+ * Arguments are passed as
+ * void pointers to satisfy
+ * the MPI requirement of
+ * reduction operations.
+ */
+ static void max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * );
+#endif
+ };
+
+ /**
+ * Returns a reference to the data
+ * structure with global timing
+ * information. Filled after calling
+ * stop().
+ */
+ const TimeMinMaxAvg & get_data() const;
+
+ /**
+ * Prints the data to the given stream.
+ */
+ template <class STREAM>
+ void print_data(STREAM & stream) const;
+
+
#endif
/**
* running.
*/
MPI_Comm mpi_communicator;
+
+ /**
+ * Store whether the wall time is
+ * synchronized between machines.
+ */
+ bool sync_wall_time;
+
+ TimeMinMaxAvg mpi_data;
#endif
};
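A hedged usage sketch of the new constructor argument and of print_data(), assuming deal.II was configured with MPI support; the communicator and the work inside the timed section are placeholders:

#include <base/timer.h>
#include <iostream>

void timing_sketch ()
{
  // synchronize the wall time across all processes of the communicator
  dealii::Timer timer (MPI_COMM_WORLD, /*sync_wall_time=*/ true);

  // ... the code section to be measured ...

  timer.stop ();                 // performs the MPI reduction of wall times
  timer.print_data (std::cout);  // rank 0 prints max/min/avg over all ranks
}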
ConditionalOStream &stream,
const enum OutputFrequency output_frequency,
const enum OutputType output_type);
+
+
+
+
#endif
/**
/* ---------------- inline functions ----------------- */
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+const Timer::TimeMinMaxAvg & Timer::get_data() const
+{
+ return mpi_data;
+}
+
+template <class STREAM>
+void Timer::print_data(STREAM & stream) const
+{
+ unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+ if (my_id==0)
+ stream << mpi_data.max << " wall,"
+ << " max @" << mpi_data.max_index
+ << ", min=" << mpi_data.min << " @" << mpi_data.min_index
+ << ", avg=" << mpi_data.avg
+ << std::endl;
+}
+
+#endif
+
inline
void
TimerOutput::enter_section (const std::string §ion_name)
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
* MPI.
*/
bool job_supports_mpi ();
-
+
/**
* Return the number of MPI processes
* there exist in the given communicator
*/
unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Consider an unstructured
+ * communication pattern where
+ * every process in an MPI
+ * universe wants to send some
+ * data to a subset of the other
+ * processors. To do that, the
+ * other processors need to know
+	 * whom to expect messages
+ * from. This function computes
+ * this information.
+ *
+ * @param mpi_comm A communicator
+ * that describes the processors
+ * that are going to communicate
+ * with each other.
+ *
+ * @param destinations The list
+ * of processors the current
+ * process wants to send
+ * information to. This list need
+ * not be sorted in any way. If
+ * it contains duplicate entries
+ * that means that multiple
+ * messages are intended for a
+ * given destination.
+ *
+ * @return A list of processors
+ * that have indicated that they
+ * want to send something to the
+ * current processor. The
+ * resulting list is not
+ * sorted. It may contain
+ * duplicate entries if
+ * processors enter the same
+ * destination more than once in
+ * their destinations list.
+ */
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations);
+
+
+
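A short sketch of the intended call pattern, assuming MPI has already been initialized and the destinations list has been filled by the caller:

#include <base/utilities.h>
#include <vector>

std::vector<unsigned int>
who_sends_to_me (const MPI_Comm                  &mpi_comm,
                 const std::vector<unsigned int> &destinations)
{
  // ranks we send to go in, ranks that will send to us come out
  return dealii::Utilities::System::
    compute_point_to_point_communication_pattern (mpi_comm, destinations);
}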
/**
* Given a communicator, generate a new
* communicator that contains the same
*/
void
destroy_communicator (Epetra_Comm &communicator);
-
+
/**
* Return the number of MPI processes
* there exist in the given communicator
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
#include <base/index_set.h>
+#include <list>
#ifdef DEAL_II_USE_TRILINOS
# ifdef DEAL_II_COMPILER_SUPPORTS_MPI
i != ranges.end();
++i)
{
+ Assert(i->begin < i->end, ExcInternalError());
+
i->nth_index_in_set = next_index;
next_index += (i->end - i->begin);
}
}
+void
+IndexSet::subtract_set (const IndexSet & other)
+{
+ compress();
+ other.compress();
+ is_compressed = false;
+
+
+ // we save new ranges to be added to our
+  // IndexSet in a temporary list and add
+ // all of them in one go at the end. This
+ // is necessary because a growing ranges
+ // vector invalidates iterators.
+ std::list<Range> temp_list;
+
+ std::vector<Range>::iterator own_it = ranges.begin();
+ std::vector<Range>::iterator other_it = other.ranges.begin();
+
+ while (own_it != ranges.end() && other_it != other.ranges.end())
+ {
+ //advance own iterator until we get an
+ //overlap
+ if (own_it->end <= other_it->begin)
+ {
+ ++own_it;
+ continue;
+ }
+ //we are done with other_it, so advance
+ if (own_it->begin >= other_it->end)
+ {
+ ++other_it;
+ continue;
+ }
+
+ //Now own_it and other_it overlap.
+ //First save the part of own_it that is
+ //before other_it (if not empty).
+ if (own_it->begin < other_it->begin)
+ {
+ Range r(own_it->begin, other_it->begin);
+ r.nth_index_in_set = 0; //fix warning of unused variable
+ temp_list.push_back(r);
+ }
+ // change own_it to the sub range
+ // behind other_it. Do not delete
+ // own_it in any case. As removal would
+ // invalidate iterators, we just shrink
+ // the range to an empty one.
+ own_it->begin = other_it->end;
+ if (own_it->begin > own_it->end)
+ {
+ own_it->begin = own_it->end;
+ ++own_it;
+ }
+
+ // continue without advancing
+ // iterators, the right one will be
+ // advanced next.
+ }
+
+ // Now delete all empty ranges we might
+ // have created.
+ for (std::vector<Range>::iterator it = ranges.begin();
+ it != ranges.end(); )
+ {
+ if (it->begin >= it->end)
+ it = ranges.erase(it);
+ else
+ ++it;
+ }
+
+ // done, now add the temporary ranges
+ for (std::list<Range>::iterator it = temp_list.begin();
+ it != temp_list.end();
+ ++it)
+ add_range(it->begin, it->end);
+
+ compress();
+}
+
+
+void IndexSet::fill_index_vector(std::vector<unsigned int> & indices) const
+{
+ compress();
+
+ indices.clear();
+ indices.reserve(n_elements());
+
+ for (std::vector<Range>::iterator it = ranges.begin();
+ it != ranges.end();
+ ++it)
+ for (unsigned int i=it->begin; i<it->end; ++i)
+ indices.push_back (i);
+
+ Assert (indices.size() == n_elements(), ExcInternalError());
+}
+
#ifdef DEAL_II_USE_TRILINOS
#endif
else
{
- std::vector<int> indices;
- indices.reserve(n_elements());
- for (std::vector<Range>::iterator
- i = ranges.begin();
- i != ranges.end();
- ++i)
- for (unsigned int j=i->begin; j<i->end; ++j)
- indices.push_back (j);
- Assert (indices.size() == n_elements(), ExcInternalError());
+ std::vector<unsigned int> indices;
+ fill_index_vector(indices);
+
+ int * indices_ptr = reinterpret_cast<int*>(&indices[0]);
return Epetra_Map (-1,
n_elements(),
- &indices[0],
+ indices_ptr,
0,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
Epetra_MpiComm(communicator));
#endif
-
DEAL_II_NAMESPACE_CLOSE
#include <base/timer.h>
#include <base/exceptions.h>
+#include <base/utilities.h>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <algorithm>
+#include <stddef.h>
// these includes should probably be properly
// ./configure'd using the AC_HEADER_TIME macro:
cumulative_wall_time (0.)
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
, mpi_communicator (MPI_COMM_SELF)
+ , sync_wall_time (false)
#endif
{
start();
// in case we use an MPI compiler, use
// the communicator given from input
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-Timer::Timer(MPI_Comm mpi_communicator)
+Timer::Timer(MPI_Comm mpi_communicator,
+ bool sync_wall_time_)
:
cumulative_time (0.),
cumulative_wall_time (0.),
- mpi_communicator (mpi_communicator)
+ mpi_communicator (mpi_communicator),
+ sync_wall_time(sync_wall_time_)
{
start();
}
{
running = true;
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (sync_wall_time)
+ MPI_Barrier(mpi_communicator);
+#endif
+
struct timeval wall_timer;
gettimeofday(&wall_timer, NULL);
start_wall_time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec;
}
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+void Timer::TimeMinMaxAvg::max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * )
+{
+ const Timer::TimeMinMaxAvg * in_lhs = static_cast<const Timer::TimeMinMaxAvg*>(in_lhs_);
+ Timer::TimeMinMaxAvg * inout_rhs = static_cast<Timer::TimeMinMaxAvg*>(inout_rhs_);
+
+ Assert(*len==1, ExcInternalError());
+
+ inout_rhs->sum += in_lhs->sum;
+ if (inout_rhs->min>in_lhs->min)
+ {
+ inout_rhs->min = in_lhs->min;
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+ if (inout_rhs->max<in_lhs->max)
+ {
+ inout_rhs->max = in_lhs->max;
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+}
+
+
+#endif
double Timer::stop ()
{
struct timeval wall_timer;
gettimeofday(&wall_timer, NULL);
- cumulative_wall_time += wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
- - start_wall_time;
+ double time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
+ - start_wall_time;
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ if (sync_wall_time)
+ {
+ unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+
+ MPI_Op op;
+ int ierr = MPI_Op_create((MPI_User_function *)&Timer::TimeMinMaxAvg::max_reduce,
+ false, &op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ TimeMinMaxAvg in;
+ in.set(time, my_id);
+
+ MPI_Datatype type;
+ int lengths[]={3,2};
+ MPI_Aint displacements[]={0,offsetof(TimeMinMaxAvg, min_index)};
+ MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+ ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      ierr = MPI_Type_commit(&type);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Reduce ( &in, &this->mpi_data, 1, type, op, 0, mpi_communicator );
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_free (&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Op_free(&op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ this->mpi_data.avg = this->mpi_data.sum / dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+
+ cumulative_wall_time += this->mpi_data.max;
+ }
+ else
+ cumulative_wall_time += time;
+#else
+ cumulative_wall_time += time;
+#endif
}
return cumulative_time;
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
}
+ std::vector<unsigned int>
+ compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+ const std::vector<unsigned int> & destinations)
+ {
+ unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+ unsigned int n_procs = Utilities::System::get_n_mpi_processes(mpi_comm);
+
+ for (unsigned int i=0; i<destinations.size(); ++i)
+ {
+ Assert (destinations[i] < n_procs,
+ ExcIndexRange (destinations[i], 0, n_procs));
+ Assert (destinations[i] != myid,
+ ExcMessage ("There is no point in communicating with ourselves."));
+ }
+
+
+ // let all processors
+ // communicate the maximal
+ // number of destinations they
+ // have
+ unsigned int my_n_destinations = destinations.size();
+ unsigned int max_n_destinations = 0;
+
+ MPI_Allreduce (&my_n_destinations, &max_n_destinations, 1, MPI_UNSIGNED,
+ MPI_MAX, mpi_comm);
+
+ // now that we know the number
+ // of data packets every
+ // processor wants to send, set
+ // up a buffer with the maximal
+ // size and copy our
+ // destinations in there,
+ // padded with -1's
+ std::vector<unsigned int> my_destinations(max_n_destinations,
+ numbers::invalid_unsigned_int);
+ std::copy (destinations.begin(), destinations.end(),
+ my_destinations.begin());
+
+ // now exchange these (we could
+ // communicate less data if we
+ // used MPI_Allgatherv, but
+ // we'd have to communicate
+ // my_n_destinations to all
+ // processors in this case,
+ // which is more expensive than
+ // the reduction operation
+ // above in MPI_Allreduce)
+ std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
+ MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+ mpi_comm);
+
+ // now we know who is going to
+ // communicate with
+ // whom. collect who is going
+ // to communicate with us!
+ std::vector<unsigned int> origins;
+ for (unsigned int i=0; i<n_procs; ++i)
+ for (unsigned int j=0; j<max_n_destinations; ++j)
+ if (all_destinations[i*max_n_destinations + j] == myid)
+ origins.push_back (i);
+ else if (all_destinations[i*max_n_destinations + j] ==
+ numbers::invalid_unsigned_int)
+ break;
+
+ return origins;
+ }
+
#else
bool job_supports_mpi ()
duplicate_map (const Epetra_BlockMap &map,
const Epetra_Comm &comm)
{
- // assume that each processor stores a
- // contiguous range of elements in the
- // following constructor call
- Assert (map.LinearMap() == true,
- ExcNotImplemented());
- return
- Epetra_Map (map.NumGlobalElements(),
- map.NumMyElements(),
- map.IndexBase(),
- comm);
+ if (map.LinearMap() == true)
+ {
+ // each processor stores a
+ // contiguous range of
+ // elements in the
+ // following constructor
+ // call
+ return Epetra_Map (map.NumGlobalElements(),
+ map.NumMyElements(),
+ map.IndexBase(),
+ comm);
+ }
+ else
+ {
+ // the range is not
+ // contiguous
+ return Epetra_Map (map.NumGlobalElements(),
+ map.NumMyElements(),
+ map.MyGlobalElements (),
+ 0,
+ comm);
+ }
}
}
#! /bin/sh
-# From configure.in Revision: 20963 .
+# From configure.in Revision: 21091 .
# Guess values for system-dependent variables and create Makefiles.
# Generated by GNU Autoconf 2.63 for deal.II 6.3.pre.
#
USE_CONTRIB_HSL
HSL_INCLUDE_DIR
NEEDS_F77LIBS
+DEAL_II_DEFINE_DEAL_II_USE_MUMPS
+DEAL_II_BLACS_ARCH
+DEAL_II_BLACS_DIR
+DEAL_II_SCALAPACK_DIR
+DEAL_II_MUMPS_DIR
+USE_CONTRIB_MUMPS
DEAL_II_TRILINOS_STATIC
DEAL_II_TRILINOS_SHARED
DEAL_II_TRILINOS_LIBDIR
with_trilinos
with_trilinos_include
with_trilinos_libs
+with_mumps
+with_scalapack
+with_blacs
with_blas
with_zlib
with_netcdf
contributed one. The optional argument points to the
directory containing the boost subdirectory for
header files.
- --with-petsc=path/to/slepc
+ --with-petsc=path/to/petsc
Specify the path to the PETSc installation, of which
the include and library directories are subdirs; use
this if you want to override the PETSC_DIR
Specify the path to the Trilinos libraries; use this
if you want to override the TRILINOS_LIBDIR
environment variable.
+ --with-mumps=path/to/mumps
+ Specify the path to the MUMPS installation, for
+ which the include directory and lib directory are
+ subdirs; use this if you want to override the
+ MUMPS_DIR environment variable.
+ --with-scalapack=path/to/scalapack
+ Specify the path to the scalapack installation; use
+ this if you want to override the SCALAPACK_DIR
+ environment variable.
+ --with-blacs=path/to/blacs
+ Specify the path to the BLACS installation; use this
+ if you want to override the BLACS_DIR environment
+ variable.
--with-blas=blaslib Use the blas library blaslib. Make sure the path
to the libary is searched by ld, since it is
included by the argument -lblaslib. If no argument
$as_echo "---------------- configuring additional libs ----------------" >&6; }
-
{ $as_echo "$as_me:$LINENO: checking for PETSc library directory" >&5
$as_echo_n "checking for PETSc library directory... " >&6; }
+ { $as_echo "$as_me:$LINENO: checking for MUMPS library directory" >&5
+$as_echo_n "checking for MUMPS library directory... " >&6; }
+
+# Check whether --with-mumps was given.
+if test "${with_mumps+set}" = set; then
+ withval=$with_mumps; if test "x$withval" = "xno" ; then
+ { $as_echo "$as_me:$LINENO: result: explicitly disabled" >&5
+$as_echo "explicitly disabled" >&6; }
+ USE_CONTRIB_MUMPS=no
+ else
+ USE_CONTRIB_MUMPS=yes
+ DEAL_II_MUMPS_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_MUMPS_DIR" >&5
+$as_echo "$DEAL_II_MUMPS_DIR" >&6; }
+ if test ! -d $DEAL_II_MUMPS_DIR \
+ -o ! -d $DEAL_II_MUMPS_DIR/include \
+ -o ! -d $DEAL_II_MUMPS_DIR/lib \
+ ; then
+ { { $as_echo "$as_me:$LINENO: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&5
+$as_echo "$as_me: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+ fi
+
+else
+ USE_CONTRIB_MUMPS=no
+
+fi
+
+
+ if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+ { $as_echo "$as_me:$LINENO: checking for SCALAPACK library directory" >&5
+$as_echo_n "checking for SCALAPACK library directory... " >&6; }
+
+# Check whether --with-scalapack was given.
+if test "${with_scalapack+set}" = set; then
+ withval=$with_scalapack; DEAL_II_SCALAPACK_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_SCALAPACK_DIR" >&5
+$as_echo "$DEAL_II_SCALAPACK_DIR" >&6; }
+  if test ! -d $DEAL_II_SCALAPACK_DIR ; then
+    { { $as_echo "$as_me:$LINENO: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&5
+$as_echo "$as_me: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+ { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking for BLACS library directory" >&5
+$as_echo_n "checking for BLACS library directory... " >&6; }
+
+# Check whether --with-blacs was given.
+if test "${with_blacs+set}" = set; then
+ withval=$with_blacs; DEAL_II_BLACS_DIR="$withval"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_DIR" >&5
+$as_echo "$DEAL_II_BLACS_DIR" >&6; }
+ if test ! -d $DEAL_II_BLACS_DIR \
+ -o ! -d $DEAL_II_BLACS_DIR/LIB ; then
+ { { $as_echo "$as_me:$LINENO: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&5
+$as_echo "$as_me: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&2;}
+ { (exit 1); exit 1; }; }
+ fi
+
+else
+ { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&2;}
+ { (exit 1); exit 1; }; }
+
+fi
+
+
+ { $as_echo "$as_me:$LINENO: checking for BLACS library architecture" >&5
+$as_echo_n "checking for BLACS library architecture... " >&6; }
+ BLACS_COMM=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "COMMLIB = " \
+ | perl -pi -e 's/.*LIB =\s+//g;'`
+ BLACS_PLAT=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "PLAT = " \
+ | perl -pi -e 's/.*PLAT =\s+//g;'`
+ BLACS_DEBUG=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+ | grep "BLACSDBGLVL = " \
+ | perl -pi -e 's/.*DBGLVL =\s+//g;'`
+ DEAL_II_BLACS_ARCH="$BLACS_COMM-$BLACS_PLAT-$BLACS_DEBUG"
+ { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_ARCH" >&5
+$as_echo "$DEAL_II_BLACS_ARCH" >&6; }
+
+ fi
+
+ if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define DEAL_II_USE_MUMPS 1
+_ACEOF
+
+ DEAL_II_DEFINE_DEAL_II_USE_MUMPS=DEAL_II_USE_MUMPS
+ if test "x$with_mumps" = "x" ; then
+ with_mumps="yes"
+ fi
+ fi
+
+
+
+
+
+
if test "x$with_umfpack" != "x" -a "x$with_umfpack" != "xno" ; then
+
for ac_func in daxpy_ saxpy_ dgemv_ sgemv_ dgeev_ sgeev_ dgeevx_ sgeevx_
do
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
AC_SUBST(DEAL_II_MUMPS_DIR)
AC_SUBST(DEAL_II_SCALAPACK_DIR) dnl MUMPS dependency
AC_SUBST(DEAL_II_BLACS_DIR) dnl MUMPS dependency
-AC_SUBST(DEAL_II_BLACS_ARCH)
+AC_SUBST(DEAL_II_BLACS_ARCH)
AC_SUBST(DEAL_II_DEFINE_DEAL_II_USE_MUMPS)
dnl Make sure we configure for libraries used by other libraries. For
(major)*10000 + (minor)*100 + (subminor))
#include <base/numbers.h>
+#include <base/types.h>
/**
* If the compiler supports the upcoming C++1x standard, allow us to refer
* documentation of classes of deal.II. The glossary often only gives
* a microscopic view of a particular concept; if you struggle with
* the bigger picture, it may therefore also be worth to consult the
- * global overview of classes on the main/@ref index page.
+ * global overview of classes on the @ref index page.
*
* <dl>
*
* <dd>Mesh cells not refined any further in the hierarchy.</dd>
*
*
+ *
+ * <dt class="glossary">@anchor GlossArtificialCell <b>Artificial cells</b></dt>
+ * <dd>
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors (called @ref GlossGhostCell "ghost cells"), all coarse level
+ * cells, and all cells that are necessary to maintain the invariant
+ * that adjacent cells must differ by at most one refinement
+ * level. The cells stored on each process that are not owned by this
+ * process and that are not ghost cells are called "artificial cells",
+ * and for these cells the predicate
+ * <code>cell-@>is_artificial()</code> returns true. Artificial cells
+ * are guaranteed to exist in the globally distributed mesh but they
+ * may be further refined on other processors. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of artificial cells has no meaning for triangulations
+ * that store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class. </dd>
+ *
+ *
* <dt class="glossary">@anchor GlossBlockLA <b>Block (linear algebra)</b></dt>
* <dd>It is often convenient to treat a matrix or vector as a collection of
*
* The way out of a situation like this is to use one of the two following
* ways:
+ *
* - You tell the object that you want to compress what operation is
- * intended. The TrilinosWrappers::VectorBase::compress() can take such an
- * additional argument. Or,
- * - You do a fake addition or set operation on the object in question.
+ *   intended. The TrilinosWrappers::VectorBase::compress() function can
+ *   take such an additional argument.
+ * - You do a fake addition or set operation on the object in question. For
+ * example, you can add a zero to an element of the matrix or vector,
+ * which has no effect other than telling the object that the next
+ * compress operation should be in <code>Add</code> mode.
*
* Some of the objects are also indifferent and can figure out what to
* do without being told. The TrilinosWrappers::SparseMatrix can do that,
* flag.
*
*
+ * <dt class="glossary">@anchor distributed_paper
+ * <b>Distributed computing paper</b></dt>
+ *
+ * <dd>The "distributed computing paper" is a paper by W. Bangerth,
+ * C. Burstedde, T. Heister and M. Kronbichler titled "Algorithms and Data
+ * Structures for Massively Parallel Generic Finite Element Codes" that
+ * describes the implementation of parallel distributed computing in deal.II,
+ * i.e. computations where not only the linear system is split onto different
+ * machines as in, for example, step-18, but also the Triangulation and
+ * DoFHandler objects. In essence, it is a guide to the parallel::distributed
+ * namespace.
+ *
+ * The paper is currently in preparation.
+ * </dd>
+ *
+ *
* <dt class="glossary">@anchor GlossFaceOrientation <b>Face orientation</b></dt>
* <dd>In a triangulation, the normal vector to a face
* can be deduced from the face orientation by
* </dd>
*
*
+ * <dt class="glossary">@anchor GlossGhostCell <b>Ghost cells</b></dt>
+ * <dd>
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors, all coarse level cells, and all cells that are
+ * necessary to maintain the invariant that adjacent cells must differ
+ * by at most one refinement level. The cells stored on each process
+ * that are not owned by this process but that are adjacent to the
+ * ones owned by this process are called "ghost cells", and for these
+ * cells the predicate <code>cell-@>is_ghost()</code> returns
+ * true. Ghost cells are guaranteed to exist in the globally
+ * distributed mesh, i.e. these cells are actually owned by another
+ * process and are not further refined there. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of ghost cells has no meaning for triangulations that
+ * store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class. </dd>
+ *
+ *
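As an illustration of the predicates mentioned in the glossary entries above, a sketch (not part of the patch) of classifying the cells a process stores; the triangulation argument stands for a parallel::distributed::Triangulation and the function is hypothetical:

template <class TriaType>
void count_cell_kinds (const TriaType &triangulation)
{
  unsigned int n_owned = 0, n_ghost = 0, n_artificial = 0;

  typename TriaType::active_cell_iterator
    cell = triangulation.begin_active(),
    endc = triangulation.end();
  for (; cell != endc; ++cell)
    if (cell->is_ghost())
      ++n_ghost;            // owned by a neighboring process, readable here
    else if (cell->is_artificial())
      ++n_artificial;       // kept only for the coarse mesh and 2:1 invariant
    else
      ++n_owned;            // locally owned cell
}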
* <dt class="glossary">@anchor hp_paper <b>%hp paper</b></dt>
* <dd>The "hp paper" is a paper by W. Bangerth and O. Kayser-Herold, titled
* "Data Structures and Requirements for hp Finite Element Software", that
* element shape functions are defined.</dd>
*
*
- * <dt class="glossary">@anchor GlossShape <b>Shape functions</b></dt> <dd>The restriction of
- * the finite element basis functions to a single grid cell.</dd>
+ * <dt class="glossary">@anchor GlossShape <b>Shape functions</b></dt>
+ * <dd>The restriction of the finite element basis functions to a single
+ * grid cell.</dd>
*
*
* <dt class="glossary">@anchor GlossSubdomainId <b>Subdomain id</b></dt>
* coincides with the rank of the MPI process within the MPI
* communicator). Partitioning is typically done using the
* GridTools::partition() function, but any other method can also be used to
- * do this though most other ideas will likely lead to less well balanced
+ * do this, though most simple ideas will likely lead to less well balanced
* numbers of degrees of freedom on the various subdomains.
+ *
+ * On the other hand, for programs that are parallelized using MPI but
+ * where meshes are held distributed across several processors using
+ * the parallel::distributed::Triangulation and
+ * parallel::distributed::DoFHandler classes, the subdomain id of a
+ * cell is tied to the processor that owns the cell. In other words,
+ * querying the subdomain id of a cell tells you if the cell is owned
+ * by the current processor (i.e. if <code>cell-@>subdomain_id() ==
+ * triangulation.parallel::distributed::Triangulation::locally_owned_subdomain()</code>)
+ * or by another processor. In the parallel distributed case,
+ * subdomain ids are only assigned to cells that the current processor
+ * owns as well as the immediately adjacent @ref GlossGhostCell "ghost cells".
+ * Cells further away are held on each processor to ensure
+ * that every MPI process has access to the full coarse grid as well
+ * as to ensure the invariant that neighboring cells differ by at most
+ * one refinement level. These cells are called "artificial" (see
+ * @ref GlossArtificialCell "here") and have the special subdomain id value
+ * types::artificial_subdomain_id.
* </dd>
*
*
//-------------------------------------------------------------------------
/**
- * @defgroup threads Parallel computing with multiple processors
+ * @defgroup threads Parallel computing with multiple processors accessing shared memory
+ * @ingroup Parallel
*
* @brief A module discussing the use of parallelism on shared memory
* machines. See the detailed documentation and
--- /dev/null
+//-------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//-------------------------------------------------------------------------
+
+/**
+ * @defgroup Parallel Parallel computing
+ *
+ * @brief A module discussing the use of multiple processors.
+ *
+ * This module contains information on %parallel computing. It is
+ * subdivided into parts on @ref threads and on @ref distributed.
+ */
+
+
+/**
+ * A namespace in which we define classes and algorithms that deal
+ * with running in %parallel on shared memory machines when deal.II is
+ * configured to use multiple threads (see @ref threads), as well as
+ * running things in %parallel on %distributed memory machines (see @ref
+ * distributed).
+ *
+ * @ingroup threads
+ * @author Wolfgang Bangerth, 2008, 2009
+ */
+namespace parallel
+{
+}
<h3>base</h3>
<ol>
+ <li><p>New: The Timer class can now accumulate and average run times of
+ pieces of code across multiple MPI processes.
+ <br>
+ (Timo Heister 2010/06/07)
+ </p></li>
+
+ <li><p>New: The Utilities::System::compute_point_to_point_communication_pattern
+ function can be used to compute who wants to send messages to the
+ current processor in unstructured point-to-point MPI communications.
+ <br>
+ (WB 2010/06/07)
+ </p></li>
+
<li><p>New: The DataOutBase class (and all derived classes such as DataOut,
MatrixOut, etc) can now produce the XML-based version of the VTK file format
(the so-called VTU format). Furthermore, the
<h3>lac</h3>
<ol>
+ <li><p>New: The ConstraintMatrix class can now handle storing only
+ a subset of all constraints, for example only for degrees of
+ freedom that are relevant for the subdomain that is owned by one
+ process in an MPI universe.
+ <br>
+ (Timo Heister, Martin Kronbichler 2010/06/07)
+ </p></li>
+
+ <li><p>New: The PETScWrappers::MPI::Vector and TrilinosWrappers::MPI::Vector
+ classes can now handle ghost elements, i.e. elements that are not
+ owned by the current processor but are available for reading
+ anyway. The simplest form of ghosting would be to simply import
+      an entire vector to local memory, but the new functions allow us to
+      select only the elements we need, to support the case of computations
+ where importing all elements of even a single vector would
+ exceed available memory.
+ <br>
+ (Timo Heister 2010/06/07)
+ </p></li>
+
<li>
<p>
- New: A class SparseDirectMumps that provides an interface to
- the MUltifrontal Massively Parallel sparse direct Solver (MUMPS).
+ New: A class SparseDirectMumps that provides an interface to
+ the MUltifrontal Massively Parallel sparse direct Solver (MUMPS).
</p>
<br>
(Markus Buerg 2010/05/10)
want to use Trilinos with MPI on parallel machines, you also need to
flip the value of the <code>TPL_ENABLE_MPI</code> flag above.
</p>
+ <p>
+      Note: if the deal.II ./configure reports an error related to
+      HAVE_INTTYPES_H, edit &lt;trilinos&gt;/include/ml_config.h and comment out the
+      line
+ <code>
+ <pre>
+#define HAVE_INTTYPES_H
+ </pre>
+ </code>
+ </p>
<h3>Configuring for installed Trilinos packages</h3>
* indicates whether the template
* argument to this class is a block
* matrix (in fact whether the type is
- * derived from BlockMatrix<T>).
+ * derived from BlockMatrixBase<T>).
*/
static const bool value = (sizeof(check_for_block_matrix
((MatrixType*)0))
*@{
*/
+template <typename> class BlockVectorBase;
+
+
+/**
+ * A class that can be used to determine whether a given type is a block
+ * vector type or not. For example,
+ * @code
+ * IsBlockVector<Vector<double> >::value
+ * @endcode
+ * has the value false, whereas
+ * @code
+ * IsBlockVector<BlockVector<double> >::value
+ * @endcode
+ * is true. This is sometimes useful in template contexts where we may
+ * want to do things differently depending on whether a template type
+ * denotes a regular or a block vector type.
+ *
+ * @author Wolfgang Bangerth, 2010
+ */
+template <typename VectorType>
+struct IsBlockVector
+{
+ private:
+ struct yes_type { char c[1]; };
+ struct no_type { char c[2]; };
+
+ /**
+ * Overload returning true if the class
+ * is derived from BlockVectorBase,
+ * which is what block vectors do.
+ */
+ template <typename T>
+ static yes_type check_for_block_vector (const BlockVectorBase<T> *);
+
+ /**
+ * Catch all for all other potential
+ * vector types that are not block
+     * vectors.
+ */
+ static no_type check_for_block_vector (...);
+
+ public:
+ /**
+ * A statically computable value that
+ * indicates whether the template
+ * argument to this class is a block
+ * vector (in fact whether the type is
+ * derived from BlockVectorBase<T>).
+ */
+ static const bool value = (sizeof(check_for_block_vector
+ ((VectorType*)0))
+ ==
+ sizeof(yes_type));
+};
+
+
+// instantiation of the static member
+template <typename VectorType>
+const bool IsBlockVector<VectorType>::value;
+
+
+
+
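A sketch of how the compile-time constant can be used to branch on the vector kind; the function name is hypothetical:

template <typename VectorType>
const char *vector_kind ()
{
  // IsBlockVector<...>::value is a compile-time constant, so the
  // branch can be resolved by the compiler
  return (dealii::IsBlockVector<VectorType>::value
          ? "block vector"
          : "non-block vector");
}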
namespace internal
{
ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
/**
- * Reinit the ConstraintMatrix
- * object. This function is only relevant
- * in the distributed case, to supply a
- * different IndexSet. Otherwise this
- * routine is equivalent to calling
- * clear().
+ * Reinit the ConstraintMatrix object and
+ * supply an IndexSet with lines that may
+ * be constrained. This function is only
+ * relevant in the distributed case, to
+ * supply a different IndexSet. Otherwise
+ * this routine is equivalent to calling
+     * clear(). Normally an IndexSet with all
+     * locally active degrees of freedom
+     * should be supplied here.
*/
void reinit (const IndexSet & local_constraints = IndexSet());
+ /**
+ * Determines if we can store a
+ * constraint for the given @p
+ * line_index. This routine only matters
+ * in the distributed case and checks if
+ * the IndexSet allows storage of this
+ * line. Always returns true if not in
+ * the distributed case.
+ */
+ bool can_store_line(unsigned int line_index) const;
/**
* This function copies the content of @p
lines[lines_cache[calculate_line_index(*local_indices_begin)]];
for (unsigned int j=0; j<position.entries.size(); ++j)
{
- Assert (is_constrained(position.entries[j].first) == false,
+ Assert (!(!local_lines.size()
+ || local_lines.is_element(position.entries[j].first))
+ || is_constrained(position.entries[j].first) == false,
ExcMessage ("Tried to distribute to a fixed dof."));
global_vector(position.entries[j].first)
+= *local_vector_begin * position.entries[j].second;
return local_lines.index_within_set(line);
}
+inline bool
+ConstraintMatrix::can_store_line(unsigned int line_index) const
+{
+ return !local_lines.size() || local_lines.is_element(line_index);
+}
+
+
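A hedged sketch of the new distributed interface; relevant_dofs and line are placeholders that would be provided by the application:

#include <lac/constraint_matrix.h>
#include <base/index_set.h>

void constraint_sketch (const dealii::IndexSet &relevant_dofs,
                        const unsigned int      line)
{
  dealii::ConstraintMatrix constraints;
  constraints.reinit (relevant_dofs);        // store only the relevant lines

  if (constraints.can_store_line (line))     // false if outside the IndexSet
    constraints.add_line (line);
}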
DEAL_II_NAMESPACE_CLOSE
lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
for (unsigned int q=0; q<position_j.entries.size(); ++q)
{
- Assert (is_constrained(position_j.entries[q].first) == false,
+ Assert (!(!local_lines.size()
+ || local_lines.is_element(position_j.entries[q].first))
+ || is_constrained(position_j.entries[q].first) == false,
ExcMessage ("Tried to distribute to a fixed dof."));
global_vector(position_j.entries[q].first)
-= val * position_j.entries[q].second * matrix_entry;
// the entries of fixed dofs
for (unsigned int j=0; j<position->entries.size(); ++j)
{
- Assert (is_constrained(position->entries[j].first) == false,
+ Assert (!(!local_lines.size()
+ || local_lines.is_element(position->entries[j].first))
+ || is_constrained(position->entries[j].first) == false,
ExcMessage ("Tried to distribute to a fixed dof."));
global_vector(position->entries[j].first)
+= local_vector(i) * position->entries[j].second;
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
# include <lac/exceptions.h>
# include <lac/vector.h>
# include <lac/petsc_vector_base.h>
+# include <base/index_set.h>
DEAL_II_NAMESPACE_OPEN
// forward declaration
template <typename> class Vector;
+class IndexSet;
/*! @addtogroup PETScWrappers
const unsigned int n,
const unsigned int local_size);
+
/**
* Copy-constructor from deal.II
* vectors. Sets the dimension to that
const dealii::Vector<Number> &v,
const unsigned int local_size);
+
/**
* Copy-constructor the
* values from a PETSc wrapper vector
const VectorBase &v,
const unsigned int local_size);
+
+ /**
+ * Constructs a new parallel PETSc
+       * vector from an IndexSet. Note that
+       * @p local must be contiguous and
+       * the global size of the vector is
+       * determined by local.size(). The
+       * global indices in @p ghost are
+       * supplied as ghost indices that can
+       * also be read locally. Note that
+       * the @p ghost IndexSet may be empty
+       * and that any indices already
+       * contained in @p local are ignored
+       * during construction. That way you
+       * can, for example, construct the
+       * vector with the set of locally
+       * relevant degrees of freedom.
+ */
+ explicit Vector (const MPI_Comm &communicator,
+ const IndexSet & local,
+ const IndexSet & ghost = IndexSet(0));
+
+
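A sketch of the intended use, mirroring the pattern in the new ConstraintMatrix::distribute() specialization elsewhere in this patch; locally_owned and locally_relevant are assumed IndexSets and the function name is hypothetical:

#include <lac/petsc_parallel_vector.h>
#include <base/index_set.h>

void read_with_ghosts (const MPI_Comm                           &communicator,
                       const dealii::IndexSet                   &locally_owned,
                       const dealii::IndexSet                   &locally_relevant,
                       const dealii::PETScWrappers::MPI::Vector &distributed)
{
  // vector that owns 'locally_owned' and can additionally read the
  // (possibly off-processor) entries listed in 'locally_relevant'
  dealii::PETScWrappers::MPI::Vector ghosted (communicator,
                                              locally_owned,
                                              locally_relevant);
  ghosted = distributed;            // import the owned values
  ghosted.update_ghost_values ();   // make the ghost entries readable

  // ... ghosted(i) may now be read for any i in locally_relevant ...
}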
/**
* Copy the given vector. Resize the
* present vector if necessary. Also
*/
Vector & operator = (const Vector &v);
+
/**
* Copy the given sequential
* (non-distributed) vector
void reinit (const Vector &v,
const bool fast = false);
+ /**
+ * Reinit as a ghosted vector. See
+ * constructor with same signature
+ * for more details.
+ */
+ void reinit (const MPI_Comm &communicator,
+ const IndexSet & local,
+ const IndexSet & ghost = IndexSet(0));
+
+
/**
* Return a reference to the MPI
* communicator object in use with
virtual void create_vector (const unsigned int n,
const unsigned int local_size);
+
+
+ /**
+ * Create a vector of global length
+ * @p n, local size @p local_size and
+ * with the specified ghost
+ * indices. Note that you need to
+ * call update_ghost_values() before
+ * accessing those.
+ */
+ virtual void create_vector (const unsigned int n,
+ const unsigned int local_size,
+ const IndexSet & ghostnodes);
+
+
private:
/**
* Copy of the communicator object to
# include <utility>
# include <petscvec.h>
+# include <base/index_set.h>
DEAL_II_NAMESPACE_OPEN
void ratio (const VectorBase &a,
const VectorBase &b);
+ /**
+ * Updates the ghost values of this
+ * vector. This is necessary after any
+ * modification before reading ghost
+       * modification and before reading ghost
+ */
+ void update_ghost_values() const;
+
/**
* Print to a
* stream. @p precision denotes
*/
Vec vector;
+ /**
+ * Denotes if this vector has ghost
+ * indices associated with it. This
+ * means that at least one of the
+      * processes in a parallel program has
+ * at least one ghost index.
+ */
+ bool ghosted;
+
+ /**
+ * This vector contains the global
+ * indices of the ghost values. The
+ * location in this vector denotes the
+ * local numbering, which is used in
+ * PETSc.
+ */
+ IndexSet ghost_indices;
/**
* PETSc doesn't allow to mix additions
const unsigned int *indices,
const PetscScalar *values,
const bool add_values);
+
+
};
// $Id$
// Version: $Name$
//
-// Copyright (C) 2008, 2009 by the deal.II authors
+// Copyright (C) 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
#include <vector>
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include <mpi.h>
+#include <base/index_set.h>
+#endif
+
DEAL_II_NAMESPACE_OPEN
class SparsityPattern;
std::vector<unsigned int> &new_indices,
const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ /**
+     * Communicate rows in a compressed
+     * sparsity pattern over MPI. The
+     * sparsity pattern @p csp is modified
+     * in place. All entries in rows that
+     * belong to a different processor are
+     * sent to them and added there. The
+     * ownership is determined by the
+     * parameter @p rows_per_cpu. The
+     * IndexSet @p myrange should be the
+     * one used in the constructor of the
+     * CompressedSimpleSparsityPattern. All
+     * rows contained in @p myrange are
+     * checked in @p csp. This function
+     * needs to be used with
+     * PETScWrappers::MPI::SparseMatrix for it
+     * to work correctly.
+ */
+ template <class CSP_t>
+ void distribute_sparsity_pattern(CSP_t & csp,
+ const std::vector<unsigned int> & rows_per_cpu,
+ const MPI_Comm & mpi_comm,
+ const IndexSet & myrange);
+#endif
+
+
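A sketch of the intended call, assuming the caller has built a CompressedSimpleSparsityPattern for the rows in locally_relevant and knows the per-process row counts; the wrapper function is hypothetical:

#include <lac/sparsity_tools.h>
#include <lac/compressed_simple_sparsity_pattern.h>
#include <base/index_set.h>
#include <vector>

void exchange_sparsity (dealii::CompressedSimpleSparsityPattern &csp,
                        const std::vector<unsigned int>         &rows_per_cpu,
                        const MPI_Comm                          &mpi_comm,
                        const dealii::IndexSet                  &locally_relevant)
{
  // send rows owned by other processes to them and receive ours in return
  dealii::SparsityTools::distribute_sparsity_pattern (csp,
                                                      rows_per_cpu,
                                                      mpi_comm,
                                                      locally_relevant);
}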
/**
* Exception
*/
void
ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
{
+ //TODO: not implemented yet, we need to fix
+ //LocalRange() first to only include
+ //"owned" indices. For this we need to keep
+ //track of the owned indices, because
+ //Trilinos doesn't. Use same constructor
+ //interface as in PETSc with two IndexSets!
+ AssertThrow (vec.vector_partitioner().IsOneToOne(),
+ ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
+
+
typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
ConstraintLine index_comparison;
index_comparison.line = vec.local_range().first;
it->entries[i].second);
vec(it->line) = new_value;
}
+
+ vec.compress ();
}
vec(it->line) = new_value;
}
}
+
+ vec.compress ();
+}
+
+#endif
+
+#ifdef DEAL_II_USE_PETSC
+
+ // this is a specialization for a
+ // parallel (non-block) PETSc
+ // vector. The basic idea is to just work
+ // on the local range of the vector. But
+ // we need access to values that the
+ // local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
+{
+ typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+ ConstraintLine index_comparison;
+ index_comparison.line = vec.local_range().first;
+ const constraint_iterator begin_my_constraints =
+ std::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+ index_comparison.line = vec.local_range().second;
+ const constraint_iterator end_my_constraints
+ = std::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+ // all indices we need to read from
+ IndexSet my_indices (vec.size());
+
+ const std::pair<unsigned int, unsigned int>
+ local_range = vec.local_range();
+
+ my_indices.add_range (local_range.first, local_range.second);
+
+ std::set<unsigned int> individual_indices;
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ if ((it->entries[i].first < local_range.first)
+ ||
+ (it->entries[i].first >= local_range.second))
+ individual_indices.insert (it->entries[i].first);
+
+ my_indices.add_indices (individual_indices.begin(),
+ individual_indices.end());
+
+ IndexSet local_range_is (vec.size());
+ local_range_is.add_range(local_range.first, local_range.second);
+
+
+ // create a vector and import those indices
+ PETScWrappers::MPI::Vector ghost_vec(vec.get_mpi_communicator(),
+ local_range_is,
+ my_indices);
+ ghost_vec = vec;
+ ghost_vec.update_ghost_values();
+
+ // finally do the distribution on own
+ // constraints
+ for (constraint_iterator it = begin_my_constraints;
+ it != end_my_constraints; ++it)
+ {
+ // fill entry in line
+ // next_constraint.line by adding the
+ // different contributions
+ double new_value = it->inhomogeneity;
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (ghost_vec(it->entries[i].first) *
+ it->entries[i].second);
+ vec(it->line) = new_value;
+ }
+
+ // force every processor to write something
+ vec(local_range.first) = vec(local_range.first);
+
+ vec.compress ();
+}
+
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+{
+ AssertThrow (false, ExcNotImplemented());
}
#endif
+
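For completeness, a short sketch of calling the new specialization; the constraints and solution objects are assumed to have been set up elsewhere:

#include <lac/constraint_matrix.h>
#include <lac/petsc_parallel_vector.h>

void apply_constraints (const dealii::ConstraintMatrix     &constraints,
                        dealii::PETScWrappers::MPI::Vector &solution)
{
  // set the constrained entries of the parallel solution vector to the
  // values implied by their constraints
  constraints.distribute (solution);
}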
unsigned int ConstraintMatrix::n_constraints () const
{
return lines.size();
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
}
const unsigned int
local_row_end = local_row_start + local_rows_per_process[this_process];
+
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+ //old version to create the matrix, we
+ //can skip calculating the row length
+ //at least starting from 2.3.3 (tested,
+ //see below)
+
const unsigned int
local_col_end = local_col_start + local_columns_per_process[this_process];
&matrix);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else //PETSC_VERSION>=2.3.3
+ // new version to create the matrix. We
+ // do not set row length but set the
+ // correct SparsityPattern later.
+ int ierr;
+
+ ierr = MatCreate(communicator,&matrix);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetSizes(matrix,
+ local_rows_per_process[this_process],
+ local_columns_per_process[this_process],
+ sparsity_pattern.n_rows(),
+ sparsity_pattern.n_cols());
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = MatSetType(matrix,MATMPIAIJ);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+
+
// next preset the exact given matrix
// entries with zeros, if the user
// requested so. this doesn't avoid any
// now copy over the information
// from the sparsity pattern.
{
- unsigned int index=0;
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ * ptr = & colnums_in_window[0];
+
for (unsigned int i=local_row_start; i<local_row_end; ++i)
- for (unsigned int j=0; j<sparsity_pattern.row_length(i);
- ++j, ++index)
- colnums_in_window[index] = sparsity_pattern.column_number(i,j);
- Assert (index == colnums_in_window.size()-1, ExcInternalError());
+ {
+ typename SparsityType::row_iterator
+ row_start = sparsity_pattern.row_begin(i),
+ row_end = sparsity_pattern.row_end(i);
+
+ std::copy(row_start, row_end, ptr);
+ ptr += row_end - row_start;
}
+ }
+
// then call the petsc function
// that summarily allocates these
&colnums_in_window[0],
0);
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+ // this is only needed for old
+ // PETSc versions:
+
// for some reason, it does not
// seem to be possible to force
// actual allocation of actual
// set the dummy entries set above
// back to zero
*this = 0;
+#endif // version <=2.3.3
compress ();
#endif
- // Now we won't insert any
- // further entries, so PETSc can
- // internally optimize some data
- // structures.
+ // Tell PETSc that we are not
+ // planning on adding new entries
+ // to the matrix. Generate errors
+			// in debug mode.
#if DEAL_II_PETSC_VERSION_LT(3,0,0)
- const int ierr =
- MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+ int ierr;
+#ifdef DEBUG
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
#else
- const int ierr =
- MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+ ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
#endif
-
+#else
+ int ierr;
+#ifdef DEBUG
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+ ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#endif
}
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2006, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2006, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
+ Vector::Vector (const MPI_Comm &communicator,
+ const IndexSet & local,
+ const IndexSet & ghost)
+ :
+ communicator (communicator)
+ {
+ Assert(local.is_contiguous(), ExcNotImplemented());
+
+ IndexSet ghost_set = ghost;
+ ghost_set.subtract_set(local);
+
+    //possible optimization: figure out if
+ //there are ghost indices (collective
+ //operation!) and then create a
+ //non-ghosted vector.
+// Vector::create_vector (local.size(), local.n_elements());
+
+ Vector::create_vector(local.size(), local.n_elements(), ghost_set);
+ }
+
+
+
void
Vector::reinit (const MPI_Comm &comm,
const unsigned int n,
+ void
+ Vector::reinit (const MPI_Comm &comm,
+ const IndexSet & local,
+ const IndexSet & ghost)
+ {
+ communicator = comm;
+
+ Assert(local.is_contiguous(), ExcNotImplemented());
+
+ IndexSet ghost_set = ghost;
+ ghost_set.subtract_set(local);
+
+ create_vector(local.size(), local.n_elements(), ghost_set);
+ }
+
+
+
Vector &
Vector::operator = (const PETScWrappers::Vector &v)
{
ExcDimensionMismatch (size(), n));
}
+
+
+ void
+ Vector::create_vector (const unsigned int n,
+ const unsigned int local_size,
+ const IndexSet & ghostnodes)
+ {
+ Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
+ ghosted = true;
+ ghost_indices = ghostnodes;
+
+ //64bit indices won't work yet:
+ Assert (sizeof(unsigned int)==sizeof(PetscInt), ExcInternalError());
+
+
+ std::vector<unsigned int> ghostindices;
+ ghostnodes.fill_index_vector(ghostindices);
+
+ const PetscInt * ptr= (const PetscInt*)(&(ghostindices[0]));
+
+ int ierr
+ = VecCreateGhost(communicator,
+ local_size,
+ PETSC_DETERMINE,
+ ghostindices.size(),
+ ptr,
+ &vector);
+
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Assert (size() == n,
+ ExcDimensionMismatch (size(), n));
+
+#if DEBUG
+ // test ghost allocation in debug mode
+
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ begin, end;
+
+ ierr = VecGetOwnershipRange (vector, &begin, &end);
+
+ Assert(local_size==(unsigned int)(end-begin), ExcInternalError());
+
+ Vec l;
+ ierr = VecGhostGetLocalForm(vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt lsize;
+ ierr = VecGetSize(l, &lsize);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = VecGhostRestoreLocalForm(vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Assert( lsize==end-begin+(PetscInt)ghost_indices.n_elements() ,ExcInternalError());
+
+#endif
+
+
}
+
+
+}
+
}
DEAL_II_NAMESPACE_CLOSE
// $Id$
// Version: $Name$
//
-// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+// Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
Assert (index < vector.size(),
ExcIndexRange (index, 0, vector.size()));
- // this is clumsy: there is no simple
- // way in PETSc to read an element from
- // a vector, i.e. there is no function
- // VecGetValue or so. The only way is
- // to obtain a pointer to a contiguous
+ // old versions of PETSc appear to be
+ // missing the function VecGetValues(),
+ // so the workaround consists of
+ // obtaining a pointer to a contiguous
// representation of the vector and
- // read from it. Subsequently, the
- // vector representation has to be
- // restored. In addition, we can only
- // get access to the local part of the
- // vector, so we have to guard against
- // that
+ // read from it. In addition, we can
+ // only get access to the local part of
+ // the vector, so we have to guard
+ // against that
if (dynamic_cast<const PETScWrappers::Vector *>(&vector) != 0)
{
+ #if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
PetscScalar *ptr;
int ierr
- = VecGetArray (static_cast<const Vec &>(vector), &ptr);
+ = VecGetArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
const PetscScalar value = *(ptr+index);
- ierr = VecRestoreArray (static_cast<const Vec &>(vector), &ptr);
+ ierr = VecRestoreArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
-
return value;
+#else
+ PetscInt idx = index;
+ PetscScalar value;
+ int ierr = VecGetValues(vector.vector, 1, &idx, &value);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ return value;
+#endif
}
else if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&vector) != 0)
{
+ int ierr;
+
+ if (vector.ghosted)
+ {
+#ifdef PETSC_USE_64BIT_INDICES
+ PetscInt
+#else
+ int
+#endif
+ begin, end;
+ ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ Vec l;
+ ierr = VecGhostGetLocalForm(vector.vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscInt lsize;
+ ierr = VecGetSize(l, &lsize);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar *ptr;
+ ierr = VecGetArray(l, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ PetscScalar value;
+
+ if ( index>=static_cast<unsigned int>(begin)
+ && index<static_cast<unsigned int>(end) )
+ {
+ //local entry
+ value=*(ptr+index-begin);
+ }
+ else
+ {
+ //ghost entry
+ unsigned ghostidx
+ = vector.ghost_indices.index_within_set(index);
+
+ Assert(ghostidx+end-begin<(unsigned int)lsize, ExcInternalError());
+ value=*(ptr+ghostidx+end-begin);
+
+
+ }
+
+
+ ierr = VecRestoreArray(l, &ptr);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ ierr = VecGhostRestoreLocalForm(vector.vector, &l);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ return value;
+ }
+
+
// first verify that the requested
// element is actually locally
// available
- int ierr;
+
#ifdef PETSC_USE_64BIT_INDICES
PetscInt
#else
int
#endif
begin, end;
- ierr = VecGetOwnershipRange (static_cast<const Vec &>(vector),
- &begin, &end);
+ ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+
AssertThrow ((index >= static_cast<unsigned int>(begin)) &&
(index < static_cast<unsigned int>(end)),
ExcAccessToNonlocalElement (index, begin, end-1));
+ // old version which only work with
+ // VecGetArray()...
+#if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
+
// then access it
PetscScalar *ptr;
- ierr = VecGetArray (static_cast<const Vec &>(vector), &ptr);
+ ierr = VecGetArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
const PetscScalar value = *(ptr+index-begin);
- ierr = VecRestoreArray (static_cast<const Vec &>(vector), &ptr);
+ ierr = VecRestoreArray (vector.vector, &ptr);
AssertThrow (ierr == 0, ExcPETScError(ierr));
return value;
+
+#else
+ //new version with VecGetValues()
+ PetscInt idx = index;
+ PetscScalar value;
+ ierr = VecGetValues(vector.vector, 1, &idx, &value);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+ return value;
+#endif
+
}
else
// what? what other kind of vector
VectorBase::VectorBase ()
:
+ ghosted(false),
last_action (LastAction::none)
{}
VectorBase::VectorBase (const VectorBase &v)
:
Subscriptor (),
+ ghosted(v.ghosted),
+ ghost_indices(v.ghost_indices),
last_action (LastAction::none)
{
int ierr = VecDuplicate (v.vector, &vector);
last_action = LastAction::insert;
}
+
+ void
+ VectorBase::update_ghost_values() const
+ {
+    // generate an error for non-ghosted
+    // vectors
+ if (!ghosted)
+ throw ExcInternalError();
+
+ int ierr;
+
+ ierr = VecGhostUpdateBegin(vector, INSERT_VALUES, SCATTER_FORWARD);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+ ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
+ AssertThrow (ierr == 0, ExcPETScError(ierr));
+}
+
+
+
}
DEAL_II_NAMESPACE_CLOSE
#include <algorithm>
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include <base/utilities.h>
+#include <lac/compressed_sparsity_pattern.h>
+#include <lac/compressed_set_sparsity_pattern.h>
+#include <lac/compressed_simple_sparsity_pattern.h>
+#endif
+
#ifdef DEAL_II_USE_METIS
// This is sorta stupid. what we really would like to do here is this:
// extern "C" {
(next_free_number == sparsity.n_rows()),
ExcInternalError());
}
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ template <class CSP_t>
+ void distribute_sparsity_pattern(CSP_t & csp,
+ const std::vector<unsigned int> & rows_per_cpu,
+ const MPI_Comm & mpi_comm,
+ const IndexSet & myrange)
+ {
+ unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+ std::vector<unsigned int> start_index(rows_per_cpu.size()+1);
+ start_index[0]=0;
+ for (unsigned int i=0;i<rows_per_cpu.size();++i)
+ start_index[i+1]=start_index[i]+rows_per_cpu[i];
+
+ typedef std::map<unsigned int, std::vector<unsigned int> > map_vec_t;
+
+ map_vec_t send_data;
+
+ {
+ unsigned int dest_cpu=0;
+
+ unsigned int n_local_rel_rows = myrange.n_elements();
+ for (unsigned int row_idx=0;row_idx<n_local_rel_rows;++row_idx)
+ {
+ unsigned int row=myrange.nth_index_in_set(row_idx);
+
+ //calculate destination CPU
+ while (row>=start_index[dest_cpu+1])
+ ++dest_cpu;
+
+ //skip myself
+ if (dest_cpu==myid)
+ {
+ row_idx+=rows_per_cpu[myid]-1;
+ continue;
}
+ unsigned int rlen = csp.row_length(row);
+
+ //skip empty lines
+ if (!rlen)
+ continue;
+
+ //save entries
+ std::vector<unsigned int> & dst = send_data[dest_cpu];
+
+ dst.push_back(rlen); // number of entries
+ dst.push_back(row); // row index
+ for (unsigned int c=0; c<rlen; ++c)
+ {
+ //columns
+ unsigned int column = csp.column_number(row, c);
+ dst.push_back(column);
+ }
+ }
+
+ }
+
+ unsigned int num_receive=0;
+ {
+ std::vector<unsigned int> send_to;
+ send_to.reserve(send_data.size());
+ for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it)
+ send_to.push_back(it->first);
+
+ num_receive =
+ Utilities::System::
+ compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
+ }
+
+ std::vector<MPI_Request> requests(send_data.size());
+
+
+ // send data
+ {
+ unsigned int idx=0;
+ for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it, ++idx)
+ MPI_Isend(&(it->second[0]),
+ it->second.size(),
+ MPI_INT,
+ it->first,
+ 124,
+ mpi_comm,
+ &requests[idx]);
+ }
+
+ {
+ //receive
+ std::vector<unsigned int> recv_buf;
+ for (unsigned int index=0;index<num_receive;++index)
+ {
+ MPI_Status status;
+ int len;
+ MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+ Assert (status.MPI_TAG==124, ExcInternalError());
+
+ MPI_Get_count(&status, MPI_BYTE, &len);
+ Assert( len%sizeof(unsigned int)==0, ExcInternalError());
+
+ recv_buf.resize(len/sizeof(unsigned int));
+
+ MPI_Recv(&recv_buf[0], len, MPI_BYTE, status.MPI_SOURCE,
+ status.MPI_TAG, mpi_comm, &status);
+
+ unsigned int *ptr=&recv_buf[0];
+ unsigned int *end=&*(--recv_buf.end());
+ while (ptr<end)
+ {
+ unsigned int num=*(ptr++);
+ unsigned int row=*(ptr++);
+ for (unsigned int c=0;c<num;++c)
+ {
+ csp.add(row, *ptr);
+ ptr++;
+ }
+ }
+ Assert(ptr-1==end, ExcInternalError());
+
+ }
+ }
+
+ // complete all sends, so that we can
+ // safely destroy the buffers.
+ MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+
+ }
+#endif
+}
+
+
+//explicit instantiations
+
+#define SPARSITY_FUNCTIONS(SparsityType) \
+template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & csp, \
+const std::vector<unsigned int> & rows_per_cpu,\
+const MPI_Comm & mpi_comm,\
+const IndexSet & myrange)
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+SPARSITY_FUNCTIONS(CompressedSparsityPattern);
+SPARSITY_FUNCTIONS(CompressedSimpleSparsityPattern);
+#endif
+
+#undef SPARSITY_FUNCTIONS
+
DEAL_II_NAMESPACE_CLOSE