https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
Take over many of the changes in base/ and lac/ from branch_distributed_grids.
author     bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
           Mon, 7 Jun 2010 20:47:13 +0000 (20:47 +0000)
committer  bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
           Mon, 7 Jun 2010 20:47:13 +0000 (20:47 +0000)
git-svn-id: https://svn.dealii.org/trunk@21162 0785d39b-7218-0410-832d-ea1e28bc413d

27 files changed:
deal.II/base/include/base/config.h.in
deal.II/base/include/base/index_set.h
deal.II/base/include/base/parallel.h
deal.II/base/include/base/timer.h
deal.II/base/include/base/utilities.h
deal.II/base/source/index_set.cc
deal.II/base/source/timer.cc
deal.II/base/source/utilities.cc
deal.II/configure
deal.II/configure.in
deal.II/doc/doxygen/headers/glossary.h
deal.II/doc/doxygen/headers/multithreading.h
deal.II/doc/doxygen/headers/parallel.h [new file with mode: 0644]
deal.II/doc/news/changes.h
deal.II/doc/readme-petsc-trilinos.html
deal.II/lac/include/lac/block_matrix_base.h
deal.II/lac/include/lac/block_vector_base.h
deal.II/lac/include/lac/constraint_matrix.h
deal.II/lac/include/lac/constraint_matrix.templates.h
deal.II/lac/include/lac/petsc_parallel_vector.h
deal.II/lac/include/lac/petsc_vector_base.h
deal.II/lac/include/lac/sparsity_tools.h
deal.II/lac/source/constraint_matrix.cc
deal.II/lac/source/petsc_parallel_sparse_matrix.cc
deal.II/lac/source/petsc_parallel_vector.cc
deal.II/lac/source/petsc_vector_base.cc
deal.II/lac/source/sparsity_tools.cc

index c5bb06ecc9b55f2ada78358aee637816c38b7443..52a470e0b3a1f7012cfad9a4bec14e9524956d64 100644 (file)
    functionality is available. */
 #undef DEAL_II_USE_MT_POSIX_NO_BARRIERS
 
+/* Defined if a MUMPS installation was found and is going to be used */
+#undef DEAL_II_USE_MUMPS
+
 /* Defined if a PETSc installation was found and is going to be used */
 #undef DEAL_II_USE_PETSC
 
 /* Defined if a Trilinos installation was found and is going to be used */
 #undef DEAL_II_USE_TRILINOS
 
-/* Defined if a MUMPS installation was found and is going to be used */
-#undef DEAL_II_USE_MUMPS
-
 /* Define if vector iterators are just plain pointers */
 #undef DEAL_II_VECTOR_ITERATOR_IS_POINTER
 
  * compatibility; and (ii) DEAL_II_PETSC_VERSION_GTE is used to add
  * functionality to the PETScWrappers that does not exist in previous
  * versions of PETSc.  Examples of usage can be found in
- * lac/source/petsc_matrix_base.h.  
- *
- * Note: SLEPcWrappers do not need their own anological macros, since 
- * SLEPc and PETSc must have identical version numbers anyways.
+ * lac/source/petsc_matrix_base.h.  Note: SLEPcWrappers do not need
+ * their own analogous macros, since SLEPc and PETSc must have
+ * identical version numbers anyway.
  */
 #define DEAL_II_PETSC_VERSION_LT(major,minor,subminor) \
   ((PETSC_VERSION_MAJOR * 10000 + \
     (major)*10000 + (minor)*100 + (subminor))
 
 #include <base/numbers.h>
+#include <base/types.h>
 
 /**
  * If the compiler supports the upcoming C++1x standard, allow us to refer
index c5dad3458b1ce2b71f25b2d1896b5a70e730d501..19311fefb19d5657497215361ddfcfbe7a70a16a 100644 (file)
@@ -224,6 +224,24 @@ class IndexSet
     IndexSet get_view (const unsigned int begin,
                       const unsigned int end) const;
 
+
+                                    /**
+                                     * Removes all elements contained in @p
+                                     * other from this set. In other words,
+                                     * if $x$ is the current object and $o$
+                                     * the argument, then we compute $x
+                                     * \leftarrow x \backslash o$.
+                                     */
+    void subtract_set (const IndexSet & other);
+
+
+                                    /**
+                                     * Fills the given vector with all
+                                     * indices contained in this IndexSet.
+                                     */
+    void fill_index_vector(std::vector<unsigned int> & indices) const;
+
+
                                     /**
                                      * Outputs a text representation of this
                                      * IndexSet to the given stream. Used for
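For illustration, a minimal sketch of how the two new IndexSet methods declared above might be used (the set size and the ranges are invented; assumes IndexSet's existing constructor and add_range()):

#include <base/index_set.h>
#include <vector>

void subtract_and_enumerate ()
{
  dealii::IndexSet owned (10);
  owned.add_range (0, 6);                // half-open range [0,6)

  dealii::IndexSet ghost (10);
  ghost.add_range (4, 8);                // half-open range [4,8)

  owned.subtract_set (ghost);            // owned now holds [0,4)

  std::vector<unsigned int> indices;
  owned.fill_index_vector (indices);     // indices == {0,1,2,3}
}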
index 3c0aa8dc0b237561d383a219c770a6d383cae4d1..a2daa4ba4345dbc445306f2eacc297a3d5864ae0 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
-/**
- * A namespace in which we define a few algorithms that can run in parallel
- * when deal.II is configured to use multiple threads.
- *
- * @ingroup threads
- * @author Wolfgang Bangerth, 2008, 2009
- */
 namespace parallel
 {
   namespace internal
index 43462a7ee7418e8fb56b3ce07dfdd0a6300b886e..26775ee1a2e664fe6980a381156911dfe51b5c9f 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+//    Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -19,6 +19,7 @@
 
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
 #include <mpi.h>
+#include <base/utilities.h>
 #endif
 
 #include <string>
@@ -89,11 +90,89 @@ class Timer
                                      *
                                      * Starts the timer at 0 sec.
                                      *
+                                     * If @p sync_wall_time is true, the wall
+                                     * time is synchronized between all CPUs
+                                     * using an MPI_Barrier() and a collective
+                                     * operation. Note that this only works
+                                     * if you stop() the timer before
+                                     * querying for the wall time. The time
+                                     * for the MPI operations is not
+                                     * included in the timing but may slow
+                                     * down your program.
+                                     *
                                      * This constructor is only available
                                      * if the deal.II compiler is an MPI
                                      * compiler.
                                      */
-    Timer (MPI_Comm mpi_communicator);
+    Timer (MPI_Comm mpi_communicator,
+          bool sync_wall_time = false);
+
+
+                                    /**
+                                     * Structure to save collective data
+                                     * measured by this timer class. Queried
+                                     * by get_data() or printed with
+                                     * print_data() after calling stop().
+                                     */
+    struct TimeMinMaxAvg
+    {
+       double sum;
+       double min;
+       double max;
+       unsigned int min_index;
+       unsigned int max_index;
+       double avg;
+
+                                        /**
+                                         * Set the time values to @p
+                                         * val, and the MPI rank to
+                                         * the given @p rank.
+                                         */
+       void set(const double val,
+                const unsigned int rank)
+         {
+           sum = min = max = val;
+           min_index = max_index = rank;
+         }
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                                        /**
+                                         * Given two structures
+                                         * indicating timer
+                                         * information, do the
+                                         * reduction by choosing the
+                                         * one with the longer time
+                                         * interval. This is a max
+                                         * operation and therefore a
+                                         * reduction.
+                                         *
+                                         * Arguments are passed as
+                                         * void pointers to satisfy
+                                         * the MPI requirement of
+                                         * reduction operations.
+                                         */
+       static void max_reduce ( const void * in_lhs_,
+                                void * inout_rhs_,
+                                int * len,
+                                MPI_Datatype * );
+#endif
+    };
+
+                                    /**
+                                     * Returns a reference to the data
+                                     * structure with global timing
+                                     * information. Filled after calling
+                                     * stop().
+                                     */
+    const TimeMinMaxAvg & get_data() const;
+
+                                    /**
+                                     * Prints the data to the given stream.
+                                     */
+    template <class STREAM>
+    void print_data(STREAM & stream) const;
+
+
 #endif
 
                                     /**
@@ -200,6 +279,14 @@ class Timer
                                      * running.
                                      */
     MPI_Comm            mpi_communicator;
+
+                                    /**
+                                     * Store whether the wall time is
+                                     * synchronized between machines.
+                                     */
+    bool sync_wall_time;
+
+    TimeMinMaxAvg mpi_data;
 #endif
 };
 
@@ -333,6 +420,10 @@ class TimerOutput
                 ConditionalOStream        &stream,
                 const enum OutputFrequency output_frequency,
                 const enum OutputType      output_type);
+
+
+
+
 #endif
 
                                     /**
@@ -472,6 +563,27 @@ class TimerOutput
 
 /* ---------------- inline functions ----------------- */
 
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+inline
+const Timer::TimeMinMaxAvg & Timer::get_data() const
+{
+  return mpi_data;
+}
+
+template <class STREAM>
+void Timer::print_data(STREAM & stream) const
+{
+  unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+  if (my_id==0)
+    stream << mpi_data.max << " wall,"
+          << " max @" << mpi_data.max_index
+          << ", min=" << mpi_data.min << " @" << mpi_data.min_index
+          << ", avg=" << mpi_data.avg
+          << std::endl;
+}
+
+#endif
+
 inline
 void
 TimerOutput::enter_section (const std::string &section_name)
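For illustration, a sketch of how the synchronized Timer added above might be used (assumes an MPI-enabled build with an initialized MPI environment; the communicator and the work being timed are placeholders):

#include <base/timer.h>
#include <iostream>

void time_a_phase ()
{
  // Synchronize the wall time across all processes of the communicator.
  dealii::Timer timer (MPI_COMM_WORLD, /*sync_wall_time=*/ true);

  // ... the work to be timed ...

  timer.stop ();                  // collective: fills the min/max/avg data
  timer.print_data (std::cout);   // only rank 0 writes the summary line
}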
index a0c69f54f4adc545fa995098202cb5c387c8e771..ee7d56796c34a97db68711096527023a311eb51c 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -245,7 +245,7 @@ namespace Utilities
                                      * MPI.
                                      */
     bool job_supports_mpi ();
-    
+
                                     /**
                                      * Return the number of MPI processes
                                      * there exist in the given communicator
@@ -266,6 +266,51 @@ namespace Utilities
                                      */
     unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator);
 
+
+                                    /**
+                                     * Consider an unstructured
+                                     * communication pattern where
+                                     * every process in an MPI
+                                     * universe wants to send some
+                                     * data to a subset of the other
+                                     * processors. To do that, the
+                                     * other processors need to know
+                                     * who to expect messages
+                                     * from. This function computes
+                                     * this information.
+                                     *
+                                     * @param mpi_comm A communicator
+                                     * that describes the processors
+                                     * that are going to communicate
+                                     * with each other.
+                                     *
+                                     * @param destinations The list
+                                     * of processors the current
+                                     * process wants to send
+                                     * information to. This list need
+                                     * not be sorted in any way. If
+                                     * it contains duplicate entries
+                                     * that means that multiple
+                                     * messages are intended for a
+                                     * given destination.
+                                     *
+                                     * @return A list of processors
+                                     * that have indicated that they
+                                     * want to send something to the
+                                     * current processor. The
+                                     * resulting list is not
+                                     * sorted. It may contain
+                                     * duplicate entries if
+                                     * processors enter the same
+                                     * destination more than once in
+                                     * their destinations list.
+                                     */
+    std::vector<unsigned int>
+    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+                                                 const std::vector<unsigned int> & destinations);
+
+
+
                                     /**
                                      * Given a communicator, generate a new
                                      * communicator that contains the same
@@ -502,7 +547,7 @@ namespace Utilities
                                      */
     void
     destroy_communicator (Epetra_Comm &communicator);
-    
+
                                     /**
                                      * Return the number of MPI processes
                                      * there exist in the given communicator
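For illustration, a sketch of a call to the new function documented above (the destination ranks are invented; assumes an MPI-enabled build):

#include <base/utilities.h>
#include <vector>

std::vector<unsigned int>
find_senders (MPI_Comm mpi_comm)
{
  // Ranks this process wants to send messages to; the list must not
  // contain the process' own rank.
  std::vector<unsigned int> destinations;
  destinations.push_back (1);
  destinations.push_back (3);

  // Returns the ranks that in turn want to send something to us.
  return dealii::Utilities::System::
    compute_point_to_point_communication_pattern (mpi_comm, destinations);
}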
index ac5e5cf71f122926dfbc17684b9ad2fe0c406908..174c9ff59106fe629446a41c88470a2373412729 100644 (file)
@@ -2,7 +2,7 @@
 //      $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -13,6 +13,7 @@
 
 
 #include <base/index_set.h>
+#include <list>
 
 #ifdef DEAL_II_USE_TRILINOS
 #  ifdef DEAL_II_COMPILER_SUPPORTS_MPI
@@ -75,6 +76,8 @@ IndexSet::do_compress () const
        i != ranges.end();
        ++i)
     {
+      Assert(i->begin < i->end, ExcInternalError());
+
       i->nth_index_in_set = next_index;
       next_index += (i->end - i->begin);
     }
@@ -197,6 +200,103 @@ IndexSet::get_view (const unsigned int begin,
 }
 
 
+void
+IndexSet::subtract_set (const IndexSet & other)
+{
+  compress();
+  other.compress();
+  is_compressed = false;
+
+
+                                  // we save new ranges to be added to our
+                                  // IndexSet in a temporary list and add
+                                  // all of them in one go at the end. This
+                                  // is necessary because a growing ranges
+                                  // vector invalidates iterators.
+  std::list<Range> temp_list;
+
+  std::vector<Range>::iterator own_it = ranges.begin();
+  std::vector<Range>::iterator other_it = other.ranges.begin();
+
+  while (own_it != ranges.end() && other_it != other.ranges.end())
+    {
+                                      //advance own iterator until we get an
+                                      //overlap
+      if (own_it->end <= other_it->begin)
+       {
+         ++own_it;
+         continue;
+       }
+                                      //we are done with other_it, so advance
+      if (own_it->begin >= other_it->end)
+       {
+         ++other_it;
+         continue;
+       }
+
+                                      //Now own_it and other_it overlap.
+                                      //First save the part of own_it that is
+                                      //before other_it (if not empty).
+      if (own_it->begin < other_it->begin)
+       {
+         Range r(own_it->begin, other_it->begin);
+         r.nth_index_in_set = 0; //fix warning of unused variable
+         temp_list.push_back(r);
+       }
+                                      // change own_it to the sub range
+                                      // behind other_it. Do not delete
+                                      // own_it in any case. As removal would
+                                      // invalidate iterators, we just shrink
+                                      // the range to an empty one.
+      own_it->begin = other_it->end;
+      if (own_it->begin > own_it->end)
+       {
+         own_it->begin = own_it->end;
+         ++own_it;
+       }
+
+                                      // continue without advancing
+                                      // iterators, the right one will be
+                                      // advanced next.
+    }
+
+                                  // Now delete all empty ranges we might
+                                  // have created.
+  for (std::vector<Range>::iterator it = ranges.begin();
+       it != ranges.end(); )
+    {
+      if (it->begin >= it->end)
+       it = ranges.erase(it);
+      else
+       ++it;
+    }
+
+                                  // done, now add the temporary ranges
+  for (std::list<Range>::iterator it = temp_list.begin();
+       it != temp_list.end();
+       ++it)
+    add_range(it->begin, it->end);
+
+  compress();
+}
+
+
+void IndexSet::fill_index_vector(std::vector<unsigned int> & indices) const
+{
+  compress();
+
+  indices.clear();
+  indices.reserve(n_elements());
+
+  for (std::vector<Range>::iterator it = ranges.begin();
+       it != ranges.end();
+       ++it)
+    for (unsigned int i=it->begin; i<it->end; ++i)
+      indices.push_back (i);
+
+  Assert (indices.size() == n_elements(), ExcInternalError());
+}
+
 
 #ifdef DEAL_II_USE_TRILINOS
 
@@ -217,19 +317,14 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
 #endif
   else
     {
-      std::vector<int> indices;
-      indices.reserve(n_elements());
-      for (std::vector<Range>::iterator
-            i = ranges.begin();
-          i != ranges.end();
-          ++i)
-       for (unsigned int j=i->begin; j<i->end; ++j)
-         indices.push_back (j);
-      Assert (indices.size() == n_elements(), ExcInternalError());
+      std::vector<unsigned int> indices;
+      fill_index_vector(indices);
+
+      int * indices_ptr = reinterpret_cast<int*>(&indices[0]);
 
       return Epetra_Map (-1,
                         n_elements(),
-                        &indices[0],
+                        indices_ptr,
                         0,
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
                         Epetra_MpiComm(communicator));
@@ -243,5 +338,4 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
 
 #endif
 
-
 DEAL_II_NAMESPACE_CLOSE
index 86771313572922489abe7fb880b6333a1164ae2c..a41ce678f6f4c6f528174ff537ee1cb53850497b 100644 (file)
 
 #include <base/timer.h>
 #include <base/exceptions.h>
+#include <base/utilities.h>
 #include <sstream>
 #include <iostream>
 #include <iomanip>
 #include <algorithm>
+#include <stddef.h>
 
 // these includes should probably be properly
 // ./configure'd using the AC_HEADER_TIME macro:
@@ -46,6 +48,7 @@ Timer::Timer()
                cumulative_wall_time (0.)
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
                , mpi_communicator (MPI_COMM_SELF)
+               , sync_wall_time (false)
 #endif
 {
   start();
@@ -56,11 +59,13 @@ Timer::Timer()
                                   // in case we use an MPI compiler, use
                                   // the communicator given from input
 #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-Timer::Timer(MPI_Comm mpi_communicator)
+Timer::Timer(MPI_Comm mpi_communicator,
+            bool sync_wall_time_)
                 :
                 cumulative_time (0.),
                cumulative_wall_time (0.),
-               mpi_communicator (mpi_communicator)
+               mpi_communicator (mpi_communicator),
+               sync_wall_time(sync_wall_time_)
 {
   start();
 }
@@ -72,6 +77,11 @@ void Timer::start ()
 {
   running    = true;
 
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  if (sync_wall_time)
+    MPI_Barrier(mpi_communicator);
+#endif
+
   struct timeval wall_timer;
   gettimeofday(&wall_timer, NULL);
   start_wall_time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec;
@@ -86,6 +96,33 @@ void Timer::start ()
 }
 
 
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+void Timer::TimeMinMaxAvg::max_reduce ( const void * in_lhs_,
+                                       void * inout_rhs_,
+                                       int * len,
+                                       MPI_Datatype * )
+{
+  const Timer::TimeMinMaxAvg * in_lhs = static_cast<const Timer::TimeMinMaxAvg*>(in_lhs_);
+  Timer::TimeMinMaxAvg * inout_rhs = static_cast<Timer::TimeMinMaxAvg*>(inout_rhs_);
+
+  Assert(*len==1, ExcInternalError());
+
+  inout_rhs->sum += in_lhs->sum;
+  if (inout_rhs->min>in_lhs->min)
+    {
+      inout_rhs->min = in_lhs->min;
+      inout_rhs->min_index = in_lhs->min_index;
+    }
+  if (inout_rhs->max<in_lhs->max)
+    {
+      inout_rhs->max = in_lhs->max;
+      inout_rhs->max_index = in_lhs->max_index;
+    }
+}
+
+
+#endif
 
 double Timer::stop ()
 {
@@ -105,8 +142,50 @@ double Timer::stop ()
 
       struct timeval wall_timer;
       gettimeofday(&wall_timer, NULL);
-      cumulative_wall_time += wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
-       - start_wall_time;
+      double time = wall_timer.tv_sec + 1.e-6 * wall_timer.tv_usec
+                             - start_wall_time;
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      if (sync_wall_time)
+       {
+         unsigned int my_id = dealii::Utilities::System::get_this_mpi_process(mpi_communicator);
+
+         MPI_Op op;
+         int ierr = MPI_Op_create((MPI_User_function *)&Timer::TimeMinMaxAvg::max_reduce,
+                                  false, &op);
+         AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+         TimeMinMaxAvg in;
+         in.set(time, my_id);
+
+         MPI_Datatype type;
+         int lengths[]={3,2};
+         MPI_Aint displacements[]={0,offsetof(TimeMinMaxAvg, min_index)};
+         MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+         ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+         AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+         ierr = MPI_Type_commit(&type);
+
+         ierr = MPI_Reduce ( &in, &this->mpi_data, 1, type, op, 0, mpi_communicator );
+         AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+         ierr = MPI_Type_free (&type);
+         AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+         ierr = MPI_Op_free(&op);
+         AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+         this->mpi_data.avg = this->mpi_data.sum / dealii::Utilities::System::get_n_mpi_processes(mpi_communicator);
+
+         cumulative_wall_time += this->mpi_data.max;
+       }
+      else
+       cumulative_wall_time += time;
+#else
+      cumulative_wall_time += time;
+#endif
     }
   return cumulative_time;
 }
index e543ee3838477aaf26f525cd8f5c0dc2d9ea74d2..ab8e94d80746170d8b50c04d23f47688d371dbeb 100644 (file)
@@ -2,7 +2,7 @@
 //      $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2005, 2006, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2005, 2006, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -516,6 +516,74 @@ namespace Utilities
     }
 
 
+    std::vector<unsigned int>
+    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+                                                 const std::vector<unsigned int> & destinations)
+    {
+      unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+      unsigned int n_procs = Utilities::System::get_n_mpi_processes(mpi_comm);
+
+      for (unsigned int i=0; i<destinations.size(); ++i)
+       {
+         Assert (destinations[i] < n_procs,
+                 ExcIndexRange (destinations[i], 0, n_procs));
+         Assert (destinations[i] != myid,
+                 ExcMessage ("There is no point in communicating with ourselves."));
+       }
+
+
+                                      // let all processors
+                                      // communicate the maximal
+                                      // number of destinations they
+                                      // have
+      unsigned int my_n_destinations = destinations.size();
+      unsigned int max_n_destinations = 0;
+
+      MPI_Allreduce (&my_n_destinations, &max_n_destinations, 1, MPI_UNSIGNED,
+                   MPI_MAX, mpi_comm);
+
+                                      // now that we know the number
+                                      // of data packets every
+                                      // processor wants to send, set
+                                      // up a buffer with the maximal
+                                      // size and copy our
+                                      // destinations in there,
+                                      // padded with -1's
+      std::vector<unsigned int> my_destinations(max_n_destinations,
+                                               numbers::invalid_unsigned_int);
+      std::copy (destinations.begin(), destinations.end(),
+                my_destinations.begin());
+
+                                      // now exchange these (we could
+                                      // communicate less data if we
+                                      // used MPI_Allgatherv, but
+                                      // we'd have to communicate
+                                      // my_n_destinations to all
+                                      // processors in this case,
+                                      // which is more expensive than
+                                      // the reduction operation
+                                      // above in MPI_Allreduce)
+      std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
+      MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                    &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                    mpi_comm);
+
+                                      // now we know who is going to
+                                      // communicate with
+                                      // whom. collect who is going
+                                      // to communicate with us!
+      std::vector<unsigned int> origins;
+      for (unsigned int i=0; i<n_procs; ++i)
+       for (unsigned int j=0; j<max_n_destinations; ++j)
+         if (all_destinations[i*max_n_destinations + j] == myid)
+           origins.push_back (i);
+         else if (all_destinations[i*max_n_destinations + j] ==
+                  numbers::invalid_unsigned_int)
+           break;
+
+      return origins;
+    }
+
 #else
 
     bool job_supports_mpi ()
@@ -742,16 +810,28 @@ namespace Utilities
     duplicate_map (const Epetra_BlockMap &map,
                   const Epetra_Comm     &comm)
     {
-                                      // assume that each processor stores a
-                                      // contiguous range of elements in the
-                                      // following constructor call
-      Assert (map.LinearMap() == true,
-             ExcNotImplemented());
-      return
-       Epetra_Map (map.NumGlobalElements(),
-                   map.NumMyElements(),
-                   map.IndexBase(),
-                   comm);
+      if (map.LinearMap() == true)
+       {
+                                          // each processor stores a
+                                          // contiguous range of
+                                          // elements in the
+                                          // following constructor
+                                          // call
+         return Epetra_Map (map.NumGlobalElements(),
+                            map.NumMyElements(),
+                            map.IndexBase(),
+                            comm);
+       }
+      else
+       {
+                                          // the range is not
+                                          // contiguous
+         return Epetra_Map (map.NumGlobalElements(),
+                            map.NumMyElements(),
+                            map.MyGlobalElements (),
+                            0,
+                            comm);
+       }
     }
   }
 
index 3f6359b58807c33b012d09ec6ff12a9d734e475b..756e4db6f8dbaf21fe6c67a1ea7871f10ad71519 100755 (executable)
@@ -1,5 +1,5 @@
 #! /bin/sh
-# From configure.in Revision: 20963 .
+# From configure.in Revision: 21091 .
 # Guess values for system-dependent variables and create Makefiles.
 # Generated by GNU Autoconf 2.63 for deal.II 6.3.pre.
 #
@@ -659,6 +659,12 @@ TECPLOT_INCLUDE_DIR
 USE_CONTRIB_HSL
 HSL_INCLUDE_DIR
 NEEDS_F77LIBS
+DEAL_II_DEFINE_DEAL_II_USE_MUMPS
+DEAL_II_BLACS_ARCH
+DEAL_II_BLACS_DIR
+DEAL_II_SCALAPACK_DIR
+DEAL_II_MUMPS_DIR
+USE_CONTRIB_MUMPS
 DEAL_II_TRILINOS_STATIC
 DEAL_II_TRILINOS_SHARED
 DEAL_II_TRILINOS_LIBDIR
@@ -796,6 +802,9 @@ with_slepc
 with_trilinos
 with_trilinos_include
 with_trilinos_libs
+with_mumps
+with_scalapack
+with_blacs
 with_blas
 with_zlib
 with_netcdf
@@ -1469,7 +1478,7 @@ Optional Packages:
                           contributed one. The optional argument points to the
                           directory containing the boost subdirectory for
                           header files.
-  --with-petsc=path/to/slepc
+  --with-petsc=path/to/petsc
                           Specify the path to the PETSc installation, of which
                           the include and library directories are subdirs; use
                           this if you want to override the PETSC_DIR
@@ -1496,6 +1505,19 @@ Optional Packages:
                           Specify the path to the Trilinos libraries; use this
                           if you want to override the TRILINOS_LIBDIR
                           environment variable.
+  --with-mumps=path/to/mumps
+                          Specify the path to the MUMPS installation, for
+                          which the include directory and lib directory are
+                          subdirs; use this if you want to override the
+                          MUMPS_DIR environment variable.
+  --with-scalapack=path/to/scalapack
+                          Specify the path to the scalapack installation; use
+                          this if you want to override the SCALAPACK_DIR
+                          environment variable.
+  --with-blacs=path/to/blacs
+                          Specify the path to the BLACS installation; use this
+                          if you want to override the BLACS_DIR environment
+                          variable.
   --with-blas=blaslib     Use the blas library blaslib. Make sure the path
                          to the library is searched by ld, since it is
                          included by the argument -lblaslib. If no argument
@@ -13014,7 +13036,6 @@ $as_echo "" >&6; }
 $as_echo "---------------- configuring additional libs ----------------" >&6; }
 
 
-
     { $as_echo "$as_me:$LINENO: checking for PETSc library directory" >&5
 $as_echo_n "checking for PETSc library directory... " >&6; }
 
@@ -14011,6 +14032,117 @@ done
 
 
 
+  { $as_echo "$as_me:$LINENO: checking for MUMPS library directory" >&5
+$as_echo_n "checking for MUMPS library directory... " >&6; }
+
+# Check whether --with-mumps was given.
+if test "${with_mumps+set}" = set; then
+  withval=$with_mumps;      if test "x$withval" = "xno" ; then
+       { $as_echo "$as_me:$LINENO: result: explicitly disabled" >&5
+$as_echo "explicitly disabled" >&6; }
+       USE_CONTRIB_MUMPS=no
+     else
+       USE_CONTRIB_MUMPS=yes
+       DEAL_II_MUMPS_DIR="$withval"
+       { $as_echo "$as_me:$LINENO: result: $DEAL_II_MUMPS_DIR" >&5
+$as_echo "$DEAL_II_MUMPS_DIR" >&6; }
+              if test ! -d $DEAL_II_MUMPS_DIR         \
+            -o ! -d $DEAL_II_MUMPS_DIR/include \
+            -o ! -d $DEAL_II_MUMPS_DIR/lib     \
+          ; then
+         { { $as_echo "$as_me:$LINENO: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&5
+$as_echo "$as_me: error: Path to MUMPS specified with --with-mumps does not point to a complete MUMPS installation" >&2;}
+   { (exit 1); exit 1; }; }
+       fi
+     fi
+
+else
+       USE_CONTRIB_MUMPS=no
+
+fi
+
+
+        if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+            { $as_echo "$as_me:$LINENO: checking for SCALAPACK library directory" >&5
+$as_echo_n "checking for SCALAPACK library directory... " >&6; }
+
+# Check whether --with-scalapack was given.
+if test "${with_scalapack+set}" = set; then
+  withval=$with_scalapack;        DEAL_II_SCALAPACK_DIR="$withval"
+       { $as_echo "$as_me:$LINENO: result: $DEAL_II_SCALAPACK_DIR" >&5
+$as_echo "$DEAL_II_SCALAPACK_DIR" >&6; }
+              if test ! -d $DEAL_II_SCALAPACK_DIR ; then
+         { { $as_echo "$as_me:$LINENO: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&5
+$as_echo "$as_me: error: The path to SCALAPACK specified with --with-scalapack does not point to a complete SCALAPACK installation" >&2;}
+   { (exit 1); exit 1; }; }
+       fi
+
+else
+         { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to SCALAPACK must be specified with --with-scalapack" >&2;}
+   { (exit 1); exit 1; }; }
+
+fi
+
+
+            { $as_echo "$as_me:$LINENO: checking for BLACS library directory" >&5
+$as_echo_n "checking for BLACS library directory... " >&6; }
+
+# Check whether --with-blacs was given.
+if test "${with_blacs+set}" = set; then
+  withval=$with_blacs;        DEAL_II_BLACS_DIR="$withval"
+       { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_DIR" >&5
+$as_echo "$DEAL_II_BLACS_DIR" >&6; }
+              if test ! -d $DEAL_II_BLACS_DIR     \
+            -o ! -d $DEAL_II_BLACS_DIR/LIB ; then
+       { { $as_echo "$as_me:$LINENO: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&5
+$as_echo "$as_me: error: The path to BLACS specified with --with-blacs does not point to a complete BLACS installation" >&2;}
+   { (exit 1); exit 1; }; }
+       fi
+
+else
+         { { $as_echo "$as_me:$LINENO: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&5
+$as_echo "$as_me: error: If MUMPS is used, the path to BLACS must be specified with --with-blacs" >&2;}
+   { (exit 1); exit 1; }; }
+
+fi
+
+
+                    { $as_echo "$as_me:$LINENO: checking for BLACS library architecture" >&5
+$as_echo_n "checking for BLACS library architecture... " >&6; }
+    BLACS_COMM=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+              | grep "COMMLIB = " \
+              | perl -pi -e 's/.*LIB =\s+//g;'`
+    BLACS_PLAT=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+              | grep "PLAT = " \
+              | perl -pi -e 's/.*PLAT =\s+//g;'`
+    BLACS_DEBUG=`cat $DEAL_II_BLACS_DIR/Bmake.inc \
+              | grep "BLACSDBGLVL = " \
+              | perl -pi -e 's/.*DBGLVL =\s+//g;'`
+        DEAL_II_BLACS_ARCH="$BLACS_COMM-$BLACS_PLAT-$BLACS_DEBUG"
+    { $as_echo "$as_me:$LINENO: result: $DEAL_II_BLACS_ARCH" >&5
+$as_echo "$DEAL_II_BLACS_ARCH" >&6; }
+
+  fi
+
+    if test "$USE_CONTRIB_MUMPS" = "yes" ; then
+
+cat >>confdefs.h <<\_ACEOF
+#define DEAL_II_USE_MUMPS 1
+_ACEOF
+
+        DEAL_II_DEFINE_DEAL_II_USE_MUMPS=DEAL_II_USE_MUMPS
+        if test "x$with_mumps" = "x" ; then
+      with_mumps="yes"
+    fi
+  fi
+
+
+
+
+
+
 
 
 if test "x$with_umfpack" != "x" -a "x$with_umfpack" != "xno" ; then
@@ -16662,6 +16794,7 @@ fi
 
 
 
+
 for ac_func in daxpy_ saxpy_ dgemv_ sgemv_ dgeev_ sgeev_ dgeevx_ sgeevx_
 do
 as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
index f34464ffaa1e73a2c6ece1ec06864c4705d4ec30..8ff1210d026f5e9a26e86968626ae406a0a4132e 100644 (file)
@@ -433,7 +433,7 @@ AC_SUBST(USE_CONTRIB_MUMPS)
 AC_SUBST(DEAL_II_MUMPS_DIR)
 AC_SUBST(DEAL_II_SCALAPACK_DIR) dnl MUMPS dependency
 AC_SUBST(DEAL_II_BLACS_DIR)     dnl MUMPS dependency
-AC_SUBST(DEAL_II_BLACS_ARCH)  
+AC_SUBST(DEAL_II_BLACS_ARCH)
 AC_SUBST(DEAL_II_DEFINE_DEAL_II_USE_MUMPS)
 
 dnl Make sure we configure for libraries used by other libraries. For
@@ -727,6 +727,7 @@ AH_BOTTOM(
     (major)*10000 + (minor)*100 + (subminor))
 
 #include <base/numbers.h>
+#include <base/types.h>
 
 /**
  * If the compiler supports the upcoming C++1x standard, allow us to refer
index cea96384367c040451baeff7bdf9c0870060c214..dd2bc097ee623d5a7c9cd9a400920d01d1a5cda5 100644 (file)
@@ -18,7 +18,7 @@
  * documentation of classes of deal.II. The glossary often only gives
  * a microscopic view of a particular concept; if you struggle with
  * the bigger picture, it may therefore also be worth to consult the
- * global overview of classes on the main/@ref index page.
+ * global overview of classes on the @ref index page.
  *
  * <dl>
  *
  * <dd>Mesh cells not refined any further in the hierarchy.</dd>
  *
  *
+ *
+ * <dt class="glossary">@anchor GlossArtificialCell <b>Artificial cells</b></dt>
+ * <dd>
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors (called @ref GlossGhostCell "ghost cells"), all coarse level
+ * cells, and all cells that are necessary to maintain the invariant
+ * that adjacent cells must differ by at most one refinement
+ * level. The cells stored on each process that are not owned by this
+ * process and that are not ghost cells are called "artificial cells",
+ * and for these cells the predicate
+ * <code>cell-@>is_artificial()</code> returns true. Artificial cells
+ * are guaranteed to exist in the globally distributed mesh but they
+ * may be further refined on other processors. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of artificial cells has no meaning for triangulations
+ * that store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class.  </dd>
+ *
+ *
  * <dt class="glossary">@anchor GlossBlockLA <b>Block (linear algebra)</b></dt>
 
  * <dd>It is often convenient to treat a matrix or vector as a collection of
  *
  * The way out of a situation like this is to use one of the two following
  * ways:
+ *
  * - You tell the object that you want to compress what operation is
- *   intended. The TrilinosWrappers::VectorBase::compress() can take such an
- *   additional argument. Or,
- * - You do a fake addition or set operation on the object in question.
+ *   intended. The TrilinosWrappers::VectorBase::compress() can take
+ *   such an additional argument.
+ * - You do a fake addition or set operation on the object in question. For
+ *   example, you can add a zero to an element of the matrix or vector,
+ *   which has no effect other than telling the object that the next
+ *   compress operation should be in <code>Add</code> mode.
  *
  * Some of the objects are also indifferent and can figure out what to
  * do without being told. The TrilinosWrappers::SparseMatrix can do that,
  * flag.
  *
  *
+ * <dt class="glossary">@anchor distributed_paper
+ *                           <b>Distributed computing paper</b></dt>
+ *
+ * <dd>The "distributed computing paper" is a paper by W. Bangerth,
+ * C. Burstedde, T. Heister and M. Kronbichler titled "Algorithms and Data
+ * Structures for Massively Parallel Generic Finite Element Codes" that
+ * describes the implementation of parallel distributed computing in deal.II,
+ * i.e. computations where not only the linear system is split onto different
+ * machines as in, for example, step-18, but also the Triangulation and
+ * DoFHandler objects. In essence, it is a guide to the parallel::distributed
+ * namespace.
+ *
+ * The paper is currently in preparation.
+ * </dd>
+ *
+ *
  * <dt class="glossary">@anchor GlossFaceOrientation <b>Face orientation</b></dt>
  * <dd>In a triangulation, the normal vector to a face
  * can be deduced from the face orientation by
  * </dd>
  *
  *
+ * <dt class="glossary">@anchor GlossGhostCell <b>Ghost cells</b></dt>
+ * <dd>
+ * If a mesh is distributed across multiple MPI processes using the
+ * parallel::distributed::Triangulation class, each processor stores
+ * only the cells it owns, one layer of adjacent cells that are owned
+ * by other processors, all coarse level cells, and all cells that are
+ * necessary to maintain the invariant that adjacent cells must differ
+ * by at most one refinement level. The cells stored on each process
+ * that are not owned by this process but that are adjacent to the
+ * ones owned by this process are called "ghost cells", and for these
+ * cells the predicate <code>cell-@>is_ghost()</code> returns
+ * true. Ghost cells are guaranteed to exist in the globally
+ * distributed mesh, i.e. these cells are actually owned by another
+ * process and are not further refined there. See the
+ * @ref distributed_paper "Distributed Computing paper" for more
+ * information.
+ *
+ * The concept of ghost cells has no meaning for triangulations that
+ * store the entire mesh on each processor, i.e. the
+ * dealii::Triangulation class.  </dd>
+ *
+ *
  * <dt class="glossary">@anchor hp_paper <b>%hp paper</b></dt>
  * <dd>The "hp paper" is a paper by W. Bangerth and O. Kayser-Herold, titled
  * "Data Structures and Requirements for hp Finite Element Software", that
@@ -577,8 +642,9 @@ Article{JK10,
  * element shape functions are defined.</dd>
  *
  *
- * <dt class="glossary">@anchor GlossShape <b>Shape functions</b></dt> <dd>The restriction of
- * the finite element basis functions to a single grid cell.</dd>
+ * <dt class="glossary">@anchor GlossShape <b>Shape functions</b></dt>
+ * <dd>The restriction of the finite element basis functions to a single
+ * grid cell.</dd>
  *
  *
  * <dt class="glossary">@anchor GlossSubdomainId <b>Subdomain id</b></dt>
@@ -600,8 +666,26 @@ Article{JK10,
  * coincides with the rank of the MPI process within the MPI
  * communicator). Partitioning is typically done using the
  * GridTools::partition() function, but any other method can also be used to
- * do this though most other ideas will likely lead to less well balanced
+ * do this, though most simple ideas will likely lead to less well balanced
  * numbers of degrees of freedom on the various subdomains.
+ *
+ * On the other hand, for programs that are parallelized using MPI but
+ * where meshes are held distributed across several processors using
+ * the parallel::distributed::Triangulation and
+ * parallel::distributed::DoFHandler classes, the subdomain id of
+ * cells are tied to the processor that owns the cell. In other words,
+ * querying the subdomain id of a cell tells you if the cell is owned
+ * by the current processor (i.e. if <code>cell-@>subdomain_id() ==
+ * triangulation.parallel::distributed::Triangulation::locally_owned_subdomain()</code>)
+ * or by another processor. In the parallel distributed case,
+ * subdomain ids are only assigned to cells that the current processor
+ * owns as well as the immediately adjacent @ref GlossGhostCell "ghost cells".
+ * Cells further away are held on each processor to ensure
+ * that every MPI process has access to the full coarse grid as well
+ * as to ensure the invariant that neighboring cells differ by at most
+ * one refinement level. These cells are called "artificial" (see
+ * @ref GlossArtificialCell "here") and have the special subdomain id value
+ * types::artificial_subdomain_id.
  * </dd>
  *
  *
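To make the ghost/artificial cell terminology above concrete, a hypothetical loop over the cells a process stores might classify them as follows (sketch only; the function and counters are invented, and the header locations are assumed and may differ between versions):

#include <grid/tria.h>
#include <distributed/tria.h>   // assumed location of the distributed Triangulation

template <int dim>
void count_cell_kinds (const dealii::parallel::distributed::Triangulation<dim> &tria)
{
  unsigned int n_owned = 0, n_ghost = 0, n_artificial = 0;

  typename dealii::Triangulation<dim>::active_cell_iterator
    cell = tria.begin_active (),
    endc = tria.end ();
  for (; cell != endc; ++cell)
    if (cell->is_ghost ())
      ++n_ghost;                  // owned by a neighboring process
    else if (cell->is_artificial ())
      ++n_artificial;             // neither owned nor ghost
    else
      ++n_owned;                  // owned by this process
}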
index 4a3368bb64832bb135994a537b912a4f39bc03ec..895fee01161d5fa6b72899844580f4c8de44bc39 100644 (file)
@@ -12,7 +12,8 @@
 //-------------------------------------------------------------------------
 
 /**
- * @defgroup threads Parallel computing with multiple processors
+ * @defgroup threads Parallel computing with multiple processors accessing shared memory
+ * @ingroup Parallel
  *
  * @brief A module discussing the use of parallelism on shared memory
  * machines. See the detailed documentation and
diff --git a/deal.II/doc/doxygen/headers/parallel.h b/deal.II/doc/doxygen/headers/parallel.h
new file mode 100644 (file)
index 0000000..b61f9f5
--- /dev/null
@@ -0,0 +1,36 @@
+//-------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//-------------------------------------------------------------------------
+
+/**
+ * @defgroup Parallel Parallel computing
+ *
+ * @brief A module discussing the use of multiple processors.
+ *
+ * This module contains information on %parallel computing. It is
+ * subdivided into parts on @ref threads and on @ref distributed.
+ */
+
+
+/**
+ * A namespace in which we define classes and algorithms that deal
+ * with running in %parallel on shared memory machines when deal.II is
+ * configured to use multiple threads (see @ref threads), as well as
+ * running things in %parallel on %distributed memory machines (see @ref
+ * distributed).
+ *
+ * @ingroup threads
+ * @author Wolfgang Bangerth, 2008, 2009
+ */
+namespace parallel
+{
+}
index 9d0d4e17b2a62e361dc0360caaac013d45e37a4c..2dbe0009f3359d881945ad589b3365dcfa7234a7 100644 (file)
@@ -277,6 +277,19 @@ inconvenience this causes.
 <h3>base</h3>
 
 <ol>
+  <li><p>New: The Timer class can now accumulate and average run times of
+  pieces of code across multiple MPI processes.
+  <br>
+  (Timo Heister 2010/06/07)
+  </p></li>
+
+  <li><p>New: The Utilities::System::compute_point_to_point_communication_pattern
+  function can be used to compute who wants to send messages to the
+  current processor in unstructured point-to-point MPI communications.
+  <br>
+  (WB 2010/06/07)
+  </p></li>
+
   <li><p>New: The DataOutBase class (and all derived classes such as DataOut,
   MatrixOut, etc) can now produce the XML-based version of the VTK file format
   (the so-called VTU format). Furthermore, the
@@ -436,10 +449,30 @@ inconvenience this causes.
 <h3>lac</h3>
 
 <ol>
+  <li><p>New: The ConstraintMatrix class can now handle storing only
+  a subset of all constraints, for example only for degrees of
+  freedom that are relevant for the subdomain that is owned by one
+  process in an MPI universe.
+  <br>
+  (Timo Heister, Martin Kronbichler 2010/06/07)
+  </p></li>
+
+  <li><p>New: The PETScWrappers::MPI::Vector and TrilinosWrappers::MPI::Vector
+  classes can now handle ghost elements, i.e. elements that are not
+  owned by the current processor but are available for reading
+  anyway. The simplest form of ghosting would be to simply import
+  an entire vector to local memory, but the new functions allow
+  selecting only the elements we need, to support the case of computations
+  where importing all elements of even a single vector would
+  exceed available memory.
+  <br>
+  (Timo Heister 2010/06/07)
+  </p></li>
+
   <li>
     <p>
-    New: A class SparseDirectMumps that provides an interface to 
-    the MUltifrontal Massively Parallel sparse direct Solver (MUMPS). 
+    New: A class SparseDirectMumps that provides an interface to
+    the MUltifrontal Massively Parallel sparse direct Solver (MUMPS).
     </p>
   <br>
   (Markus Buerg 2010/05/10)
index 1484cf759ad92c341246ed97c06fd675dfee04dd..a426571f5958606d62b7fcf14d1d26eb35e0ab60 100644 (file)
@@ -200,6 +200,16 @@ make install
       want to use Trilinos with MPI on parallel machines, you also need to
       flip the value of the <code>TPL_ENABLE_MPI</code> flag above.
       </p>
+    <p>
+      Note: if the deal.II ./configure reports an error related to
+      HAVE_INTTYPES_H, edit <trilinos>/include/ml_config.h and comment out the
+      line
+       <code>
+       <pre>
+#define HAVE_INTTYPES_H
+       </pre>
+       </code>
+    </p>
 
       <h3>Configuring for installed Trilinos packages</h3>
 
index d336d1d94c69f436fab6006696f485014478d915..8c63c06eea6da6b3cde66c9a854e84c50f181ee4 100644 (file)
@@ -97,7 +97,7 @@ struct IsBlockMatrix
                                      * indicates whether the template
                                      * argument to this class is a block
                                      * matrix (in fact whether the type is
-                                     * derived from BlockMatrix<T>).
+                                     * derived from BlockMatrixBase<T>).
                                      */
     static const bool value = (sizeof(check_for_block_matrix
                                      ((MatrixType*)0))
index 632b8d1e6fa2ea2dcd31d9b62052cfe562e84c16..4bd235721b803b9580fdd7c23149213ce95ffe11 100644 (file)
@@ -33,6 +33,69 @@ DEAL_II_NAMESPACE_OPEN
  *@{
  */
 
+template <typename> class BlockVectorBase;
+
+
+/**
+ * A class that can be used to determine whether a given type is a block
+ * vector type or not. For example,
+ * @code
+ *   IsBlockVector<Vector<double> >::value
+ * @endcode
+ * has the value false, whereas
+ * @code
+ *   IsBlockVector<BlockVector<double> >::value
+ * @endcode
+ * is true. This is sometimes useful in template contexts where we may
+ * want to do things differently depending on whether a template type
+ * denotes a regular or a block vector type.
+ *
+ * @author Wolfgang Bangerth, 2010
+ */
+template <typename VectorType>
+struct IsBlockVector
+{
+  private:
+    struct yes_type { char c[1]; };
+    struct no_type  { char c[2]; };
+
+                                    /**
+                                     * Overload returning true if the class
+                                     * is derived from BlockVectorBase,
+                                     * which is what block vectors do.
+                                     */
+    template <typename T>
+    static yes_type check_for_block_vector (const BlockVectorBase<T> *);
+
+                                    /**
+                                     * Catch all for all other potential
+                                     * vector types that are not block
+                                     * matrices.
+                                     * vectors.
+    static no_type check_for_block_vector (...);
+
+  public:
+                                    /**
+                                     * A statically computable value that
+                                     * indicates whether the template
+                                     * argument to this class is a block
+                                     * vector (in fact whether the type is
+                                     * derived from BlockVectorBase<T>).
+                                     */
+    static const bool value = (sizeof(check_for_block_vector
+                                     ((VectorType*)0))
+                              ==
+                              sizeof(yes_type));
+};
+
+
+// instantiation of the static member
+template <typename VectorType>
+const bool IsBlockVector<VectorType>::value;
+
+
+
+
 namespace internal
 {
 
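For reference, the new trait could be queried along these lines (illustrative only; the helper function is made up):

#include <lac/block_vector.h>
#include <lac/vector.h>

// Returns whether VectorType is a block vector type, as determined at
// compile time by the IsBlockVector trait introduced above.
template <class VectorType>
bool is_block_vector_type ()
{
  return dealii::IsBlockVector<VectorType>::value;
}

// is_block_vector_type<dealii::Vector<double> >()      evaluates to false
// is_block_vector_type<dealii::BlockVector<double> >() evaluates to true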
index 9e745bb0cb4b25ee85cca8e70f6854497c190203..dd7a32f0c950da186d6ecb106988c818d510db6c 100644 (file)
@@ -344,15 +344,28 @@ class ConstraintMatrix : public Subscriptor
     ConstraintMatrix (const ConstraintMatrix &constraint_matrix);
 
                                     /**
-                                     * Reinit the ConstraintMatrix
-                                     * object. This function is only relevant
-                                     * in the distributed case, to supply a
-                                     * different IndexSet. Otherwise this
-                                     * routine is equivalent to calling
-                                     * clear().
+                                     * Reinit the ConstraintMatrix object and
+                                     * supply an IndexSet with lines that may
+                                     * be constrained. This function is only
+                                     * relevant in the distributed case, to
+                                     * supply a different IndexSet. Otherwise
+                                     * this routine is equivalent to calling
+                                     * clear(). Normally an IndexSet with all
+                                     * locally_active_dofs should be supplied
+                                     * here.
                                      */
     void reinit (const IndexSet & local_constraints = IndexSet());
 
+                                    /**
+                                     * Determines if we can store a
+                                     * constraint for the given @p
+                                     * line_index. This routine only matters
+                                     * in the distributed case and checks if
+                                     * the IndexSet allows storage of this
+                                     * line. Always returns true if not in
+                                     * the distributed case.
+                                     */
+    bool can_store_line(unsigned int line_index) const;
 
                                     /**
                                      * This function copies the content of @p
@@ -2093,7 +2106,9 @@ void ConstraintMatrix::
            lines[lines_cache[calculate_line_index(*local_indices_begin)]];
          for (unsigned int j=0; j<position.entries.size(); ++j)
            {
-             Assert (is_constrained(position.entries[j].first) == false,
+             Assert (!(!local_lines.size()
+                       || local_lines.is_element(position.entries[j].first))
+                     || is_constrained(position.entries[j].first) == false,
                      ExcMessage ("Tried to distribute to a fixed dof."));
              global_vector(position.entries[j].first)
                += *local_vector_begin * position.entries[j].second;
@@ -2150,6 +2165,13 @@ ConstraintMatrix::calculate_line_index (const unsigned int line) const
   return local_lines.index_within_set(line);
 }
 
+inline bool
+ConstraintMatrix::can_store_line(unsigned int line_index) const
+{
+  return !local_lines.size() || local_lines.is_element(line_index);
+}
+
+
 
 DEAL_II_NAMESPACE_CLOSE
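For illustration, a sketch of how the IndexSet-based storage and the new can_store_line() check described above fit together (the sizes and indices are invented):

#include <base/index_set.h>
#include <lac/constraint_matrix.h>

void setup_local_constraints ()
{
  // Pretend the locally active degrees of freedom are [100,200).
  dealii::IndexSet locally_active (1000);
  locally_active.add_range (100, 200);

  dealii::ConstraintMatrix constraints;
  constraints.reinit (locally_active);

  // Only lines inside the IndexSet can be stored.
  if (constraints.can_store_line (150))
    constraints.add_line (150);
  // can_store_line (500) would return false here.
}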
 
index 2486511204e4978ea92ec21c4cca44e2f3ea21eb..c1cb2362a643446ba65c359c8af4ca45e7eec15d 100644 (file)
@@ -777,7 +777,9 @@ distribute_local_to_global (const Vector<double>            &local_vector,
                  lines[lines_cache[calculate_line_index(local_dof_indices[j])]];
                for (unsigned int q=0; q<position_j.entries.size(); ++q)
                  {
-                   Assert (is_constrained(position_j.entries[q].first) == false,
+                   Assert (!(!local_lines.size()
+                             || local_lines.is_element(position_j.entries[q].first))
+                           || is_constrained(position_j.entries[q].first) == false,
                            ExcMessage ("Tried to distribute to a fixed dof."));
                    global_vector(position_j.entries[q].first)
                      -= val * position_j.entries[q].second * matrix_entry;
@@ -789,7 +791,9 @@ distribute_local_to_global (const Vector<double>            &local_vector,
                                   // the entries of fixed dofs
        for (unsigned int j=0; j<position->entries.size(); ++j)
          {
-           Assert (is_constrained(position->entries[j].first) == false,
+           Assert (!(!local_lines.size()
+                     || local_lines.is_element(position->entries[j].first))
+                   || is_constrained(position->entries[j].first) == false,
                    ExcMessage ("Tried to distribute to a fixed dof."));
            global_vector(position->entries[j].first)
              += local_vector(i) * position->entries[j].second;
index d05859da3c58d08953fa8ba50189fa4a1ce1ebd2..337f94429e5183ff2e19ed1e793a014f2221aecc 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2004, 2005, 2006, 2007, 2009 by the deal.II authors
+//    Copyright (C) 2004, 2005, 2006, 2007, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -22,6 +22,7 @@
 #  include <lac/exceptions.h>
 #  include <lac/vector.h>
 #  include <lac/petsc_vector_base.h>
+#  include <base/index_set.h>
 
 DEAL_II_NAMESPACE_OPEN
 
@@ -29,6 +30,7 @@ DEAL_II_NAMESPACE_OPEN
 
 // forward declaration
 template <typename> class Vector;
+class IndexSet;
 
 
 /*! @addtogroup PETScWrappers
@@ -186,6 +188,7 @@ namespace PETScWrappers
                          const unsigned int  n,
                          const unsigned int  local_size);
     
+       
                                          /**
                                           * Copy-constructor from deal.II
                                           * vectors. Sets the dimension to that
@@ -206,6 +209,7 @@ namespace PETScWrappers
                          const dealii::Vector<Number> &v,
                          const unsigned int      local_size);
 
+       
                                          /**
                                           * Copy-constructor the
                                           * values from a PETSc wrapper vector
@@ -224,6 +228,29 @@ namespace PETScWrappers
                          const VectorBase   &v,
                          const unsigned int  local_size);
 
+
+                                         /**
+                                         * Constructs a new parallel PETSc
+                                         * vector from an IndexSet. Note that
+                                         * @p local must be contiguous and
+                                         * that the global size of the vector
+                                         * is determined by local.size(). The
+                                         * global indices in @p ghost are
+                                         * supplied as ghost indices that can
+                                         * also be read locally. Note that
+                                         * the @p ghost IndexSet may be empty
+                                         * and that any indices already
+                                         * contained in @p local are ignored
+                                         * during construction. That way you
+                                         * can, for example, simply pass the
+                                         * set of locally relevant DoFs.
+                                         */
+       explicit Vector (const MPI_Comm     &communicator,
+                         const IndexSet &  local,
+                         const IndexSet & ghost = IndexSet(0));
+       
+       
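
As a hedged illustration, not taken from this patch, the new IndexSet-based interface might be used as follows; the communicator, sizes, and index ranges are placeholders provided by the surrounding program.

#include <base/index_set.h>
#include <lac/petsc_parallel_vector.h>

using namespace dealii;

void example_ghosted_vector (PETScWrappers::MPI::Vector &vec,
                             const MPI_Comm             &mpi_communicator,
                             const unsigned int          n_dofs,
                             const unsigned int          owned_begin,
                             const unsigned int          owned_end)
{
  IndexSet locally_owned (n_dofs);
  locally_owned.add_range (owned_begin, owned_end);   // must be contiguous

  IndexSet locally_relevant (n_dofs);                 // owned plus ghost indices
  // ... add the indices this processor needs to read ...

                                   // indices already contained in
                                   // locally_owned are ignored for the ghost
                                   // part; the constructor with the same
                                   // signature could be used instead of
                                   // reinit()
  vec.reinit (mpi_communicator, locally_owned, locally_relevant);
}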
                                          /**
                                           * Copy the given vector. Resize the
                                           * present vector if necessary. Also
@@ -232,6 +259,7 @@ namespace PETScWrappers
                                           */
         Vector & operator = (const Vector &v);
 
+       
                                          /**
                                          * Copy the given sequential
                                          * (non-distributed) vector
@@ -346,6 +374,16 @@ namespace PETScWrappers
         void reinit (const Vector &v,
                      const bool    fast = false);
 
+                                         /**
+                                         * Reinit as a ghosted vector. See the
+                                         * constructor with the same signature
+                                         * for more details.
+                                         */
+       void reinit (const MPI_Comm     &communicator,
+                     const IndexSet &  local,
+                     const IndexSet & ghost = IndexSet(0));
+
+       
                                          /**
                                           * Return a reference to the MPI
                                           * communicator object in use with
@@ -366,6 +404,21 @@ namespace PETScWrappers
         virtual void create_vector (const unsigned int n,
                                     const unsigned int local_size);
 
+
+
+                                        /**
+                                         * Create a vector of global length
+                                         * @p n, local size @p local_size and
+                                         * with the specified ghost
+                                         * indices. Note that you need to
+                                         * call update_ghost_values() before
+                                         * accessing the ghost entries.
+                                         */
+       virtual void create_vector (const unsigned int  n,
+                                   const unsigned int  local_size,
+                                   const IndexSet & ghostnodes);
+       
+
       private:
                                          /**
                                           * Copy of the communicator object to
index 30a2036778a87b4ec5a82a28bb57deaeb1164321..919b2fccadeb3deca531caffa3a6cef6ad9fa30b 100644 (file)
@@ -25,6 +25,7 @@
 #  include <utility>
 
 #  include <petscvec.h>
+#  include <base/index_set.h>
 
 DEAL_II_NAMESPACE_OPEN
 
@@ -656,6 +657,14 @@ namespace PETScWrappers
       void ratio (const VectorBase &a,
                   const VectorBase &b);
 
+                                       /**
+                                       * Updates the ghost values of this
+                                       * vector. This is necessary after any
+                                       * modification and before reading the
+                                       * ghost values.
+                                       */
+      void update_ghost_values() const;
+
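+
A minimal usage sketch, assuming a vector `ghosted_vec` that was created with ghost indices as described in petsc_parallel_vector.h, and placeholder indices `owned_index` and `ghost_index`:

  ghosted_vec(owned_index) = 1.0;                  // write a locally owned entry
  ghosted_vec.compress();                          // finish pending insertions
  ghosted_vec.update_ghost_values();               // copy owned values to the ghosts
  const PetscScalar g = ghosted_vec(ghost_index);  // reading a ghost entry is now safe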
                                        /**
                                         * Print to a
                                         * stream. @p precision denotes
@@ -728,6 +737,23 @@ namespace PETScWrappers
                                         */
       Vec vector;
 
+                                      /**
+                                       * Denotes if this vector has ghost
+                                       * indices associated with it. This
+                                       * means that at least one of the
+                                       * processes in a parallel program has
+                                       * at least one ghost index.
+                                       */
+      bool ghosted;
+
+                                       /**
+                                       * This index set contains the global
+                                       * indices of the ghost values. The
+                                       * position within this index set
+                                       * denotes the local numbering used by
+                                       * PETSc.
+                                       */
+      IndexSet ghost_indices;
 
                                        /**
                                         * PETSc doesn't allow to mix additions
@@ -783,6 +809,8 @@ namespace PETScWrappers
                                 const unsigned int *indices,
                                 const PetscScalar  *values,
                                 const bool add_values);
+
+
   };
 
 
index 70f0a7ea4685a06fd5b04b95efbf25f5b88af024..442ac45bf70589d2e9ee4176f95aa6430aa8b447 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2008, 2009 by the deal.II authors
+//    Copyright (C) 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
 
 #include <vector>
 
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include <mpi.h>
+#include <base/index_set.h>
+#endif
+
 DEAL_II_NAMESPACE_OPEN
 
 class SparsityPattern;
@@ -190,6 +195,33 @@ namespace SparsityTools
                         std::vector<unsigned int> &new_indices,
                         const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
 
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                                  /**
+                                   * Communicate rows in a compressed
+                                   * sparsity pattern over MPI. The sparsity
+                                   * pattern @p csp is modified in place. All
+                                   * entries in rows that belong to a
+                                   * different processor are sent to it and
+                                   * added there. Ownership is determined by
+                                   * the parameter @p rows_per_cpu. The
+                                   * IndexSet @p myrange should be the one
+                                   * used in the constructor of the
+                                   * CompressedSimpleSparsityPattern. All
+                                   * rows contained in @p myrange are
+                                   * checked in @p csp. This function needs
+                                   * to be used with
+                                   * PETScWrappers::MPI::SparseMatrix for it
+                                   * to work correctly.
+                                   */
+  template <class CSP_t>
+  void distribute_sparsity_pattern(CSP_t & csp,
+                                  const std::vector<unsigned int> & rows_per_cpu,
+                                  const MPI_Comm & mpi_comm,
+                                  const IndexSet & myrange);
+#endif
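
A hedged usage sketch, not part of this patch; the communicator, DoF count, row distribution, and the IndexSet of locally relevant rows are placeholders, and the CompressedSimpleSparsityPattern constructor taking an IndexSet is the one referred to in the comment above.

#include <base/index_set.h>
#include <lac/compressed_simple_sparsity_pattern.h>
#include <lac/sparsity_tools.h>

#include <vector>

using namespace dealii;

void example_distribute_pattern (const MPI_Comm                  &mpi_communicator,
                                 const unsigned int               n_dofs,
                                 const std::vector<unsigned int> &rows_per_cpu,
                                 const IndexSet                  &locally_relevant_rows)
{
                                   // the same IndexSet is passed to the
                                   // pattern and to the distribution call
  CompressedSimpleSparsityPattern csp (n_dofs, n_dofs, locally_relevant_rows);

  // ... add couplings for the locally relevant rows ...

                                   // rows owned by other processors are sent
                                   // to them; rows we own are received
  SparsityTools::distribute_sparsity_pattern (csp,
                                              rows_per_cpu,
                                              mpi_communicator,
                                              locally_relevant_rows);

                                   // csp is now ready to initialize a
                                   // PETScWrappers::MPI::SparseMatrix
}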
+
+  
                                   /**
                                    * Exception
                                    */
index 9c438495254413ad0c907fb9ad379369e5bfc09f..14bcc926615cb7901f9571e65ecc23d14b1a2ee7 100644 (file)
@@ -1944,6 +1944,16 @@ template<>
 void
 ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
 {
+                                  //TODO: not implemented yet, we need to fix
+                                  //LocalRange() first to only include
+                                  //"owned" indices. For this we need to keep
+                                  //track of the owned indices, because
+                                  //Trilinos doesn't. Use same constructor
+                                  //interface as in PETSc with two IndexSets!
+  AssertThrow (vec.vector_partitioner().IsOneToOne(),
+              ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
+
+  
   typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
   ConstraintLine index_comparison;
   index_comparison.line = vec.local_range().first;
@@ -2011,6 +2021,8 @@ ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
                       it->entries[i].second);
       vec(it->line) = new_value;
     }
+
+  vec.compress ();
 }
 
 
@@ -2103,11 +2115,99 @@ ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
          vec(it->line) = new_value;
        }
     }
+
+  vec.compress ();
+}
+
+#endif
+
+#ifdef DEAL_II_USE_PETSC
+
+                                  // this is a specialization for a
+                                  // parallel (non-block) PETSc
+                                  // vector. The basic idea is to just work
+                                  // on the local range of the vector. But
+                                  // we need access to values that the
+                                  // local nodes are constrained to.
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
+{
+  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+  ConstraintLine index_comparison;
+  index_comparison.line = vec.local_range().first;
+  const constraint_iterator begin_my_constraints =
+    std::lower_bound (lines.begin(),lines.end(),index_comparison);
+
+  index_comparison.line = vec.local_range().second;
+  const constraint_iterator end_my_constraints
+    = std::lower_bound(lines.begin(),lines.end(),index_comparison);
+
+                                  // all indices we need to read from
+  IndexSet my_indices (vec.size());
+
+  const std::pair<unsigned int, unsigned int>
+    local_range = vec.local_range();
+
+  my_indices.add_range (local_range.first, local_range.second);
+
+  std::set<unsigned int> individual_indices;
+  for (constraint_iterator it = begin_my_constraints;
+       it != end_my_constraints; ++it)
+    for (unsigned int i=0; i<it->entries.size(); ++i)
+      if ((it->entries[i].first < local_range.first)
+         ||
+         (it->entries[i].first >= local_range.second))
+       individual_indices.insert (it->entries[i].first);
+
+  my_indices.add_indices (individual_indices.begin(),
+                         individual_indices.end());
+
+  IndexSet local_range_is (vec.size());
+  local_range_is.add_range(local_range.first, local_range.second);
+
+  
+                                  // create a vector and import those indices
+  PETScWrappers::MPI::Vector ghost_vec(vec.get_mpi_communicator(),
+                                      local_range_is,
+                                      my_indices);
+  ghost_vec = vec;
+  ghost_vec.update_ghost_values();
+
+                                  // finally do the distribution on own
+                                  // constraints
+  for (constraint_iterator it = begin_my_constraints;
+       it != end_my_constraints; ++it)
+    {
+                                      // fill entry in line
+                                      // next_constraint.line by adding the
+                                      // different contributions
+      double new_value = it->inhomogeneity;
+      for (unsigned int i=0; i<it->entries.size(); ++i)
+       new_value += (ghost_vec(it->entries[i].first) *
+                      it->entries[i].second);
+      vec(it->line) = new_value;
+    }
+
+                                  // force every processor to write something
+  vec(local_range.first) = vec(local_range.first);
+
+  vec.compress ();
+}
+
+
+template<>
+void
+ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
+{
+  AssertThrow (false, ExcNotImplemented());
 }
 
 #endif
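
For context, a brief hedged sketch of where the new specialization would typically be called after a parallel solve; `mpi_communicator`, `n_dofs`, `n_local_dofs`, and the `constraints` object are placeholders.

  PETScWrappers::MPI::Vector solution (mpi_communicator, n_dofs, n_local_dofs);
  // ... assemble and solve into solution ...

                                   // fix up constrained entries; internally a
                                   // ghosted copy is created so that
                                   // constraints referring to off-processor
                                   // values can be evaluated
  constraints.distribute (solution);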
 
 
+
 unsigned int ConstraintMatrix::n_constraints () const
 {
   return lines.size();
index 02328ba8536dcfd074dcfcd9f3672e322e0a2087..ac4163e11d7b62e0ee334cae1607e2d68c9c471d 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2004, 2005, 2006, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2004, 2005, 2006, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -302,6 +302,13 @@ namespace PETScWrappers
         }
       const unsigned int
         local_row_end = local_row_start + local_rows_per_process[this_process];
+
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+                                      //old way of creating the matrix; we
+                                      //can skip calculating the row lengths
+                                      //at least starting from 2.3.3 (tested,
+                                      //see below)
+
       const unsigned int
         local_col_end = local_col_start + local_columns_per_process[this_process];
 
@@ -345,6 +352,27 @@ namespace PETScWrappers
                           &matrix);
       AssertThrow (ierr == 0, ExcPETScError(ierr));
 
+#else //PETSC_VERSION>=2.3.3
+                                      // new way of creating the matrix. We
+                                      // do not set the row lengths here but
+                                      // set the correct sparsity pattern
+                                      // later.
+      int ierr;
+      
+      ierr = MatCreate(communicator,&matrix);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+      
+      ierr = MatSetSizes(matrix,
+                        local_rows_per_process[this_process],
+                        local_columns_per_process[this_process],
+                        sparsity_pattern.n_rows(),
+                        sparsity_pattern.n_cols());
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+      
+      ierr = MatSetType(matrix,MATMPIAIJ);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+      
+      
                                        // next preset the exact given matrix
                                        // entries with zeros, if the user
                                        // requested so. this doesn't avoid any
@@ -422,13 +450,24 @@ namespace PETScWrappers
                                            // now copy over the information
                                            // from the sparsity pattern.
           {
-            unsigned int index=0;
+#ifdef PETSC_USE_64BIT_INDICES
+           PetscInt
+#else
+           int
+#endif
+             * ptr = & colnums_in_window[0];
+             
             for (unsigned int i=local_row_start; i<local_row_end; ++i)
-              for (unsigned int j=0; j<sparsity_pattern.row_length(i);
-                   ++j, ++index)
-              colnums_in_window[index] = sparsity_pattern.column_number(i,j);
-            Assert (index == colnums_in_window.size()-1, ExcInternalError());
+             {
+               typename SparsityType::row_iterator
+                 row_start = sparsity_pattern.row_begin(i),
+                 row_end = sparsity_pattern.row_end(i);
+
+               std::copy(row_start, row_end, ptr);
+               ptr += row_end - row_start;
           }
+         }
+
 
                                            // then call the petsc function
                                            // that summarily allocates these
@@ -438,6 +477,10 @@ namespace PETScWrappers
                                         &colnums_in_window[0],
                                         0);
 
+#if DEAL_II_PETSC_VERSION_LT(2,3,3)
+                                          // this is only needed for old
+                                          // PETSc versions:
+         
                                            // for some reason, it does not
                                            // seem to be possible to force
                                            // actual allocation of actual
@@ -479,23 +522,34 @@ namespace PETScWrappers
                                            // set the dummy entries set above
                                            // back to zero
           *this = 0;
+#endif // version <=2.3.3
           compress ();
 
 #endif
 
-                                          // Now we won't insert any
-                                          // further entries, so PETSc can
-                                          // internally optimize some data
-                                          // structures.
+                                          // Tell PETSc that we are not
+                                          // planning on adding new entries
+                                          // to the matrix. Generate errors
+                                          // in debug mode.
 #if DEAL_II_PETSC_VERSION_LT(3,0,0)
-          const int ierr =
-           MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+          int ierr;
+#ifdef DEBUG
+         ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR);
+         AssertThrow (ierr == 0, ExcPETScError(ierr));
 #else
-          const int ierr =
-           MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+         ierr = MatSetOption (matrix, MAT_NO_NEW_NONZERO_LOCATIONS);
+         AssertThrow (ierr == 0, ExcPETScError(ierr));
 #endif
-
+#else
+          int ierr;
+#ifdef DEBUG
+         ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
          AssertThrow (ierr == 0, ExcPETScError(ierr));
+#else
+         ierr = MatSetOption (matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+         AssertThrow (ierr == 0, ExcPETScError(ierr));
+#endif
+#endif
         }
     }
 
index 53506ad0315a7d7c4b0d7370cb4f819bc6873df1..78701fc1c37f1d77b79ef944771946f65faae334 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2004, 2006, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2004, 2006, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -65,6 +65,28 @@ namespace PETScWrappers
 
   
 
+    Vector::Vector (const MPI_Comm     &communicator,
+                   const IndexSet &  local,
+                   const IndexSet & ghost)
+                    :
+                    communicator (communicator)
+    {
+      Assert(local.is_contiguous(), ExcNotImplemented());
+      
+      IndexSet ghost_set = ghost;
+      ghost_set.subtract_set(local);
+      
+                                      //possible optimization: figure out if
+                                      //there are ghost indices (collective
+                                      //operation!) and then create a
+                                      //non-ghosted vector.
+//      Vector::create_vector (local.size(), local.n_elements());
+      
+      Vector::create_vector(local.size(), local.n_elements(), ghost_set);    
+    }
+    
+
+
     void
     Vector::reinit (const MPI_Comm    &comm,
                     const unsigned int n,
@@ -116,6 +138,23 @@ namespace PETScWrappers
 
 
 
+    void
+    Vector::reinit (const MPI_Comm     &comm,
+                   const IndexSet &  local,
+                   const IndexSet & ghost)
+    {
+      communicator = comm;
+
+      Assert(local.is_contiguous(), ExcNotImplemented());
+      
+      IndexSet ghost_set = ghost;
+      ghost_set.subtract_set(local);
+
+      create_vector(local.size(), local.n_elements(), ghost_set); 
+    }
+
+       
+
     Vector &
     Vector::operator = (const PETScWrappers::Vector &v)
     {
@@ -169,8 +208,75 @@ namespace PETScWrappers
              ExcDimensionMismatch (size(), n));
     }
 
+
+    
+    void
+    Vector::create_vector (const unsigned int  n,
+                           const unsigned int  local_size,
+                          const IndexSet & ghostnodes)
+    {
+      Assert (local_size <= n, ExcIndexRange (local_size, 0, n));
+      ghosted = true;
+      ghost_indices = ghostnodes;
+      
+                                      //64bit indices won't work yet:
+      Assert (sizeof(unsigned int)==sizeof(PetscInt), ExcInternalError());
+
+      
+      std::vector<unsigned int> ghostindices;
+      ghostnodes.fill_index_vector(ghostindices);
+      
+      const PetscInt * ptr= (const PetscInt*)(&(ghostindices[0]));
+
+      int ierr
+       = VecCreateGhost(communicator,
+                        local_size,
+                        PETSC_DETERMINE,
+                        ghostindices.size(),
+                        ptr,
+                        &vector);
+      
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      Assert (size() == n,
+             ExcDimensionMismatch (size(), n));
+
+#if DEBUG
+                                      // test ghost allocation in debug mode
+
+#ifdef PETSC_USE_64BIT_INDICES
+      PetscInt
+#else
+       int
+#endif
+       begin, end;
+
+      ierr = VecGetOwnershipRange (vector, &begin, &end);
+
+      Assert(local_size==(unsigned int)(end-begin), ExcInternalError());
+
+      Vec l;
+      ierr = VecGhostGetLocalForm(vector, &l);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+      PetscInt lsize;
+      ierr = VecGetSize(l, &lsize);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+             
+      ierr = VecGhostRestoreLocalForm(vector, &l);
+      AssertThrow (ierr == 0, ExcPETScError(ierr));
+      
+      Assert( lsize==end-begin+(PetscInt)ghost_indices.n_elements() ,ExcInternalError());
+
+#endif
+
+      
   }
 
+    
+
+}
+
 }
 
 DEAL_II_NAMESPACE_CLOSE
index ff60b8fa12baae42a7d62974a3e814381c5074ed..53c1df3e70c547b7838ea5b0d2309003f251b00f 100644 (file)
@@ -2,7 +2,7 @@
 //    $Id$
 //    Version: $Name$
 //
-//    Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+//    Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -31,63 +31,139 @@ namespace PETScWrappers
       Assert (index < vector.size(),
               ExcIndexRange (index, 0, vector.size()));
 
-                                       // this is clumsy: there is no simple
-                                       // way in PETSc to read an element from
-                                       // a vector, i.e. there is no function
-                                       // VecGetValue or so. The only way is
-                                       // to obtain a pointer to a contiguous
+                                      // old versions of PETSc appear to be
+                                       // missing the function VecGetValues(),
+                                       // so the workaround consists of
+                                       // obtaining a pointer to a contiguous
                                        // representation of the vector and
-                                       // read from it. Subsequently, the
-                                       // vector representation has to be
-                                       // restored. In addition, we can only
-                                       // get access to the local part of the
-                                       // vector, so we have to guard against
-                                       // that
+                                       // reading from it. In addition, we can
+                                       // only get access to the local part of
+                                       // the vector, so we have to guard
+                                       // against that
       if (dynamic_cast<const PETScWrappers::Vector *>(&vector) != 0)
         {
+ #if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
           PetscScalar *ptr;
           int ierr
-            = VecGetArray (static_cast<const Vec &>(vector), &ptr);
+            = VecGetArray (vector.vector, &ptr);
           AssertThrow (ierr == 0, ExcPETScError(ierr));
 
           const PetscScalar value = *(ptr+index);
 
-          ierr = VecRestoreArray (static_cast<const Vec &>(vector), &ptr);
+          ierr = VecRestoreArray (vector.vector, &ptr);
           AssertThrow (ierr == 0, ExcPETScError(ierr));
-
           return value;
+#else
+         PetscInt idx = index;
+         PetscScalar value;
+         int ierr = VecGetValues(vector.vector, 1, &idx, &value);
+         AssertThrow (ierr == 0, ExcPETScError(ierr));
+         return value;
+#endif
         }
       else if (dynamic_cast<const PETScWrappers::MPI::Vector *>(&vector) != 0)
         {
+         int ierr;
+
+         if (vector.ghosted)
+           {
+#ifdef PETSC_USE_64BIT_INDICES
+             PetscInt
+#else
+               int
+#endif
+               begin, end;
+             ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
+             AssertThrow (ierr == 0, ExcPETScError(ierr)); 
+
+             Vec l;
+             ierr = VecGhostGetLocalForm(vector.vector, &l);
+             AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+             PetscInt lsize;
+             ierr = VecGetSize(l, &lsize);
+             AssertThrow (ierr == 0, ExcPETScError(ierr));
+             PetscScalar *ptr;
+             ierr = VecGetArray(l, &ptr);
+             AssertThrow (ierr == 0, ExcPETScError(ierr));
+             PetscScalar value;
+
+             if ( index>=static_cast<unsigned int>(begin)
+                 && index<static_cast<unsigned int>(end) )
+               {
+                                                  //local entry
+                 value=*(ptr+index-begin);
+               }
+             else
+               {
+                                                  //ghost entry
+                 unsigned ghostidx
+                   = vector.ghost_indices.index_within_set(index);
+                 
+                 Assert(ghostidx+end-begin<(unsigned int)lsize, ExcInternalError());
+                 value=*(ptr+ghostidx+end-begin);
+                 
+                 
+               }
+             
+             
+             ierr = VecRestoreArray(l, &ptr);
+             AssertThrow (ierr == 0, ExcPETScError(ierr));
+             
+             ierr = VecGhostRestoreLocalForm(vector.vector, &l);
+             AssertThrow (ierr == 0, ExcPETScError(ierr));
+
+             return value;
+           }
+
+
                                            // first verify that the requested
                                            // element is actually locally
                                            // available
-          int ierr;
+          
 #ifdef PETSC_USE_64BIT_INDICES
          PetscInt
 #else
          int
 #endif
            begin, end;
-          ierr = VecGetOwnershipRange (static_cast<const Vec &>(vector),
-                                       &begin, &end);
+          ierr = VecGetOwnershipRange (vector.vector, &begin, &end);
           AssertThrow (ierr == 0, ExcPETScError(ierr));
 
+
+         
           AssertThrow ((index >= static_cast<unsigned int>(begin)) &&
                        (index < static_cast<unsigned int>(end)),
                        ExcAccessToNonlocalElement (index, begin, end-1));
 
+                                          // old version which only works with
+                                          // VecGetArray()...
+#if (PETSC_VERSION_MAJOR <= 2) && (PETSC_VERSION_MINOR < 3)
+         
                                            // then access it
           PetscScalar *ptr;
-          ierr = VecGetArray (static_cast<const Vec &>(vector), &ptr);
+          ierr = VecGetArray (vector.vector, &ptr);
           AssertThrow (ierr == 0, ExcPETScError(ierr));
 
           const PetscScalar value = *(ptr+index-begin);
 
-          ierr = VecRestoreArray (static_cast<const Vec &>(vector), &ptr);
+          ierr = VecRestoreArray (vector.vector, &ptr);
           AssertThrow (ierr == 0, ExcPETScError(ierr));
 
           return value;
+         
+#else
+                                          //new version with VecGetValues()
+         PetscInt idx = index;
+         PetscScalar value;
+         ierr = VecGetValues(vector.vector, 1, &idx, &value);
+         AssertThrow (ierr == 0, ExcPETScError(ierr));
+         
+         return value;
+#endif
+         
         }
       else
                                          // what? what other kind of vector
@@ -99,6 +175,7 @@ namespace PETScWrappers
 
   VectorBase::VectorBase ()
                   :
+                 ghosted(false),
                   last_action (LastAction::none)
   {}
 
@@ -107,6 +184,8 @@ namespace PETScWrappers
   VectorBase::VectorBase (const VectorBase &v)
                   :
                  Subscriptor (),
+                 ghosted(v.ghosted),
+                 ghost_indices(v.ghost_indices),
                   last_action (LastAction::none)
   {
     int ierr = VecDuplicate (v.vector, &vector);
@@ -1041,6 +1120,25 @@ namespace PETScWrappers
     last_action = LastAction::insert;
   }
 
+
+  void 
+  VectorBase::update_ghost_values() const
+  {
+                                    // generate an error for non-ghosted
+                                    // vectors
+    if (!ghosted)
+       throw ExcInternalError();
+
+    int ierr;
+
+    ierr = VecGhostUpdateBegin(vector, INSERT_VALUES, SCATTER_FORWARD);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+    ierr = VecGhostUpdateEnd(vector, INSERT_VALUES, SCATTER_FORWARD);
+    AssertThrow (ierr == 0, ExcPETScError(ierr));
+}
+
+
+  
 }
 
 DEAL_II_NAMESPACE_CLOSE
index d4e0e1aca25c152351c53c2ff00c7d9c3e27cb8c..7dc9f41f2656f20423c6f9b1fafe11082a7a421f 100644 (file)
 
 #include <algorithm>
 
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#include <base/utilities.h>
+#include <lac/compressed_sparsity_pattern.h>
+#include <lac/compressed_set_sparsity_pattern.h>
+#include <lac/compressed_simple_sparsity_pattern.h>
+#endif
+
 #ifdef DEAL_II_USE_METIS
 // This is sorta stupid. what we really would like to do here is this:
 //   extern "C" {
@@ -406,6 +413,149 @@ namespace SparsityTools
            (next_free_number == sparsity.n_rows()),
            ExcInternalError());
   }
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+  template <class CSP_t>
+  void distribute_sparsity_pattern(CSP_t & csp,
+                                  const std::vector<unsigned int> & rows_per_cpu,
+                                  const MPI_Comm & mpi_comm,
+                                  const IndexSet & myrange)
+  {
+    unsigned int myid = Utilities::System::get_this_mpi_process(mpi_comm);
+    std::vector<unsigned int> start_index(rows_per_cpu.size()+1);
+    start_index[0]=0;
+    for (unsigned int i=0;i<rows_per_cpu.size();++i)
+      start_index[i+1]=start_index[i]+rows_per_cpu[i];
+
+    typedef std::map<unsigned int, std::vector<unsigned int> > map_vec_t;
+
+    map_vec_t send_data;
+
+    {
+      unsigned int dest_cpu=0;
+
+      unsigned int n_local_rel_rows = myrange.n_elements();
+      for (unsigned int row_idx=0;row_idx<n_local_rel_rows;++row_idx)
+       {
+         unsigned int row=myrange.nth_index_in_set(row_idx);
+
+                                          //calculate destination CPU
+         while (row>=start_index[dest_cpu+1])
+           ++dest_cpu;
+
+                                          //skip myself
+         if (dest_cpu==myid)
+           {
+             row_idx+=rows_per_cpu[myid]-1;
+             continue;
 }
 
+         unsigned int rlen = csp.row_length(row);
+
+                                          //skip empty lines
+         if (!rlen)
+           continue;
+
+                                          //save entries
+         std::vector<unsigned int> & dst = send_data[dest_cpu];
+
+         dst.push_back(rlen); // number of entries
+         dst.push_back(row); // row index
+         for (unsigned int c=0; c<rlen; ++c)
+           {
+                                              //columns
+             unsigned int column = csp.column_number(row, c);
+             dst.push_back(column);
+           }
+       }
+
+    }
+
+    unsigned int num_receive=0;
+    {
+      std::vector<unsigned int> send_to;
+      send_to.reserve(send_data.size());
+      for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it)
+         send_to.push_back(it->first);
+
+      num_receive =
+       Utilities::System::
+       compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
+    }
+
+    std::vector<MPI_Request> requests(send_data.size());
+
+
+                                    // send data
+    {
+      unsigned int idx=0;
+      for (map_vec_t::iterator it=send_data.begin();it!=send_data.end();++it, ++idx)
+       MPI_Isend(&(it->second[0]),
+                 it->second.size(),
+                 MPI_INT,
+                 it->first,
+                 124,
+                 mpi_comm,
+                 &requests[idx]);
+    }
+
+    {
+                                      //receive
+      std::vector<unsigned int> recv_buf;
+      for (unsigned int index=0;index<num_receive;++index)
+       {
+         MPI_Status status;
+         int len;
+         MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
+         Assert (status.MPI_TAG==124, ExcInternalError());
+
+         MPI_Get_count(&status, MPI_BYTE, &len);
+         Assert( len%sizeof(unsigned int)==0, ExcInternalError());
+
+         recv_buf.resize(len/sizeof(unsigned int));
+
+         MPI_Recv(&recv_buf[0], len, MPI_BYTE, status.MPI_SOURCE,
+                  status.MPI_TAG, mpi_comm, &status);
+
+         unsigned int *ptr=&recv_buf[0];
+         unsigned int *end=&*(--recv_buf.end());
+         while (ptr<end)
+           {
+             unsigned int num=*(ptr++);
+             unsigned int row=*(ptr++);
+             for (unsigned int c=0;c<num;++c)
+               {
+                 csp.add(row, *ptr);
+                 ptr++;
+               }
+           }
+         Assert(ptr-1==end, ExcInternalError());
+
+       }
+    }
+
+                                    // complete all sends, so that we can
+                                    // safely destroy the buffers.
+    MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
+
+  }
+#endif
+}
+
+
+//explicit instantiations
+
+#define SPARSITY_FUNCTIONS(SparsityType) \
+template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & csp, \
+const std::vector<unsigned int> & rows_per_cpu,\
+const MPI_Comm & mpi_comm,\
+const IndexSet & myrange)
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+SPARSITY_FUNCTIONS(CompressedSparsityPattern);
+SPARSITY_FUNCTIONS(CompressedSimpleSparsityPattern);
+#endif
+
+#undef SPARSITY_FUNCTIONS
+
 DEAL_II_NAMESPACE_CLOSE

In the beginning the Universe was created. This has made a lot of people very angry and has been widely regarded as a bad move.

Douglas Adams


Typeset in Trocchi and Trocchi Bold Sans Serif.