https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
Add distributed vector class. Reorganize MPI utilities in deal.II/base/mpi.h. Improve...
author kormann <kormann@0785d39b-7218-0410-832d-ea1e28bc413d>
Wed, 25 Jan 2012 12:35:17 +0000 (12:35 +0000)
committer kormann <kormann@0785d39b-7218-0410-832d-ea1e28bc413d>
Wed, 25 Jan 2012 12:35:17 +0000 (12:35 +0000)
git-svn-id: https://svn.dealii.org/trunk@24924 0785d39b-7218-0410-832d-ea1e28bc413d

96 files changed:
deal.II/aclocal.m4
deal.II/common/dealiitemplates.pm.in
deal.II/common/template-arguments.in
deal.II/configure
deal.II/configure.in
deal.II/doc/news/changes.h
deal.II/examples/step-45/doc/builds-on
deal.II/examples/step-45/step-45.cc
deal.II/include/deal.II/base/config.h.in
deal.II/include/deal.II/base/mpi.h [new file with mode: 0644]
deal.II/include/deal.II/base/partitioner.h [new file with mode: 0644]
deal.II/include/deal.II/base/tensor.h
deal.II/include/deal.II/base/tensor_base.h
deal.II/include/deal.II/base/types.h
deal.II/include/deal.II/base/utilities.h
deal.II/include/deal.II/fe/fe_values.h
deal.II/include/deal.II/lac/parallel_block_vector.h [new file with mode: 0644]
deal.II/include/deal.II/lac/parallel_vector.h [new file with mode: 0644]
deal.II/include/deal.II/lac/parallel_vector.templates.h [new file with mode: 0644]
deal.II/include/deal.II/lac/trilinos_precondition.h
deal.II/include/deal.II/lac/trilinos_sparse_matrix.h
deal.II/include/deal.II/lac/vector.h
deal.II/include/deal.II/multigrid/mg_transfer.templates.h
deal.II/include/deal.II/numerics/vectors.templates.h
deal.II/source/base/mpi.cc [new file with mode: 0644]
deal.II/source/base/partitioner.cc [new file with mode: 0644]
deal.II/source/base/utilities.cc
deal.II/source/distributed/solution_transfer.cc
deal.II/source/distributed/solution_transfer.inst.in
deal.II/source/dofs/dof_accessor.cc
deal.II/source/fe/fe_values.cc
deal.II/source/fe/mapping_q1.cc
deal.II/source/grid/grid_refinement.cc
deal.II/source/lac/constraint_matrix.cc
deal.II/source/lac/operator.cc
deal.II/source/lac/parallel_vector.cc [new file with mode: 0644]
deal.II/source/lac/parallel_vector.inst.in [new file with mode: 0644]
deal.II/source/lac/vector_memory.cc
deal.II/source/multigrid/mg_transfer_prebuilt.cc
deal.II/source/numerics/data_out.cc
deal.II/source/numerics/fe_field_function.cc
deal.II/source/numerics/mesh_worker_vector_selector.cc
deal.II/source/numerics/operator.cc
deal.II/source/numerics/point_value_history.cc
tests/mpi/parallel_partitioner_01.cc [new file with mode: 0644]
tests/mpi/parallel_partitioner_01/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_01/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_02.cc [new file with mode: 0644]
tests/mpi/parallel_partitioner_02/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_02/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_03.cc [new file with mode: 0644]
tests/mpi/parallel_partitioner_03/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_03/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_04.cc [new file with mode: 0644]
tests/mpi/parallel_partitioner_04/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_partitioner_04/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_01.cc [new file with mode: 0644]
tests/mpi/parallel_vector_01/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_01/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_02.cc [new file with mode: 0644]
tests/mpi/parallel_vector_02/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_02/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_03.cc [new file with mode: 0644]
tests/mpi/parallel_vector_03/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_03/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_04.cc [new file with mode: 0644]
tests/mpi/parallel_vector_04/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_04/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_05.cc [new file with mode: 0644]
tests/mpi/parallel_vector_05/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_05/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_06.cc [new file with mode: 0644]
tests/mpi/parallel_vector_06/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_06/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_07.cc [new file with mode: 0644]
tests/mpi/parallel_vector_07/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_07/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_08.cc [new file with mode: 0644]
tests/mpi/parallel_vector_08/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_08/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_09.cc [new file with mode: 0644]
tests/mpi/parallel_vector_09/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_09/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_10.cc [new file with mode: 0644]
tests/mpi/parallel_vector_10/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_10/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_11.cc [new file with mode: 0644]
tests/mpi/parallel_vector_11/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/parallel_vector_11/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_ghost_01.cc
tests/mpi/trilinos_matvec_01.cc [new file with mode: 0644]
tests/mpi/trilinos_matvec_01/ncpu_1/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_matvec_01/ncpu_2/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_matvec_02.cc [new file with mode: 0644]
tests/mpi/trilinos_matvec_02/ncpu_1/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_matvec_02/ncpu_2/cmp/generic [new file with mode: 0644]

index f5a45c65b0657839a680606b6b9db6b4a9662b5a..1772eb23857cfe1411e8211035b8a5b44c7ae30d 100644 (file)
@@ -4922,6 +4922,106 @@ AC_DEFUN(DEAL_II_CHECK_ADVANCE_WARNING, dnl
 
 
 
+dnl -------------------------------------------------------------
+dnl Check whether the compiler allows the use of arithmetic operations
+dnl +-*/ on vectorized data types, or whether we need to use intrinsics
+dnl like _mm_add_pd for addition and so on. +-*/ is preferred because
+dnl it allows the compiler to choose further optimizations such as
+dnl fused multiply-add, whereas _mm_add_pd pins down the exact
+dnl assembler instruction.
+dnl
+dnl Usage: DEAL_II_CHECK_VECTOR_ARITHMETICS
+dnl
+dnl -------------------------------------------------------------
+AC_DEFUN(DEAL_II_CHECK_VECTOR_ARITHMETICS, dnl
+
+[
+  AC_MSG_CHECKING(whether compiler supports vector arithmetics)
+  AC_LANG(C++)
+  CXXFLAGS="$CXXFLAGSG"
+  AC_TRY_COMPILE(
+    [
+#include <emmintrin.h>
+    ],
+    [
+        __m128d a, b;
+        a = _mm_set_sd (1.0);
+       b = _mm_set1_pd (2.1);
+       __m128d c = a + b;
+       __m128d d = b - c;
+       __m128d e = c * a + d;
+       __m128d f = e/a;
+       (void)f;
+    ],
+    [
+       AC_MSG_RESULT(yes)
+        AC_DEFINE(DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS, 1,
+                  [Defined if the compiler can use arithmetic operations on
+                 vectorized data types])
+    ],
+    [
+        AC_MSG_RESULT(no)
+    ])
+])
+
+
+
+dnl -------------------------------------------------------------
+dnl Check for the existence of a 'force inline' attribute. This can be
+dnl used to make the compiler inline certain functions even at low
+dnl optimization levels. We use it for vectorized data types, where we
+dnl want inlining also in debug code. If we cannot find such an
+dnl attribute, we just use 'inline'.
+dnl
+dnl Usage: DEAL_II_CHECK_ALWAYS_INLINE
+dnl
+dnl -------------------------------------------------------------
+AC_DEFUN(DEAL_II_CHECK_ALWAYS_INLINE, dnl
+
+[
+  if test "$GXX" = "yes" ; then
+     dnl force inline for gcc compiler
+     TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
+  else
+    case "$GXX_VERSION" in
+      clang*)
+        dnl force inline for clang compiler
+       TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
+       ;;
+
+      *)
+       dnl for all other compilers, try with __forceinline
+       TEMP_ALWAYS_INLINE=__forceinline
+       ;;
+    esac
+  fi
+  AC_MSG_CHECKING(for forced inlining)
+  AC_LANG(C++)
+  CXXFLAGS="$CXXFLAGSG"
+  AC_TRY_COMPILE(
+    [
+        $TEMP_ALWAYS_INLINE
+       void f() {};
+    ],
+    [
+       f();
+    ],
+    [
+       AC_MSG_RESULT(yes)
+        AC_DEFINE_UNQUOTED(DEAL_II_ALWAYS_INLINE, $TEMP_ALWAYS_INLINE,
+                  [Forces the compiler to always inline functions, also in
+                  debug mode])
+    ],
+    [
+        AC_MSG_RESULT(no)
+       AC_DEFINE(DEAL_II_ALWAYS_INLINE, inline,
+                  [Forces the compiler to always inline functions, also in
+                  debug mode])
+    ])
+])
+
+
+
 dnl -------------------------------------------------------------
 dnl
 dnl Usage: DEAL_II_CHECK_MIN_VECTOR_CAPACITY
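
For illustration only (not part of the patch): the two checks added above end up as the preprocessor symbols DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS and DEAL_II_ALWAYS_INLINE in config.h. The sketch below shows how code for vectorized data types would typically consume them; the PackedDouble2 wrapper is a made-up example, not a deal.II class.

#include <emmintrin.h>
#include <deal.II/base/config.h>   // assumed to provide the two macros after configure has run

// A minimal SSE2 wrapper illustrating why both checks matter.
struct PackedDouble2
{
  __m128d data;

  // forced inlining keeps this a single instruction even in debug builds
  DEAL_II_ALWAYS_INLINE
  PackedDouble2 operator+ (const PackedDouble2 &other) const
  {
    PackedDouble2 result;
#ifdef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
    // preferred: plain operators let the compiler apply further
    // optimizations such as fused multiply-add
    result.data = data + other.data;
#else
    // fallback: the intrinsic pins down the exact SSE2 instruction
    result.data = _mm_add_pd (data, other.data);
#endif
    return result;
  }
};
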
index a733b3c448053ec2538f209964d0910b15785e63..80b7a15838065810e33f9cb81c63cab5398eb66b 100644 (file)
@@ -12,7 +12,7 @@ BEGIN {
     our @EXPORT = qw(&multisubst
                      @dimensions @codim1_dimensions @dofhandlers @dofhandler_templates
                      @real_scalars @complex_scalars @all_scalars
-                     @deal_vector_templates @deal_vectors @deal_real_vectors @deal_complex_vectors
+                     @deal_vector_templates @deal_vectors @deal_real_vectors @deal_complex_vectors @deal_parallel_vectors
                      @trilinos_vectors @petsc_vectors
                      @sequential_vectors @parallel_vectors
                      @sparsity_patterns);
@@ -119,8 +119,9 @@ our @deal_vectors = (@deal_real_vectors, @deal_complex_vectors);
 ### Array of all sequential vectors, which includes deal.II vectors.
 our @sequential_vectors = @deal_real_vectors;
 
-### Array of all parallel vectors. None of those yet.
-our @parallel_vectors;
+### Array of all parallel vectors.
+our @deal_parallel_vectors = map { $t = $_ ; map { "parallel::distributed::$_<$t >" } @deal_vector_templates } @real_scalars;
+our @parallel_vectors = (@deal_parallel_vectors);
 
 ######################################################################
 # Configured arrays start here.
index ac5ffd56818bd58965af07eab537f90b929c7560..2e10b8c7bee4d488a21d8af72ee09f7a69d8e0b6 100644 (file)
@@ -17,6 +17,14 @@ SERIAL_VECTORS := { Vector<double>;
                     BlockVector<float>;
                     BlockVector<long double>;
 
+                   parallel::distributed::Vector<double>;
+                   parallel::distributed::Vector<float> ;
+                   parallel::distributed::Vector<long double>;
+
+                    parallel::distributed::BlockVector<double>;
+                    parallel::distributed::BlockVector<float> ;
+                    parallel::distributed::BlockVector<long double>;
+
                    @DEAL_II_EXPAND_TRILINOS_VECTOR@;
                    @DEAL_II_EXPAND_TRILINOS_MPI_VECTOR@;
                    @DEAL_II_EXPAND_PETSC_VECTOR@;
index e36ff27349c0331cd75d53b050d91a3b09eafbd8..83c4903981767b3908353edf27a4d98228019ea7 100755 (executable)
@@ -1,5 +1,5 @@
 #! /bin/sh
-# From configure.in Revision: 24724 .
+# From configure.in Revision: 24746 .
 # Guess values for system-dependent variables and create Makefiles.
 # Generated by GNU Autoconf 2.68 for deal.II 7.2.pre.
 #
@@ -8383,6 +8383,116 @@ rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
 
 
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether compiler supports vector arithmetics" >&5
+$as_echo_n "checking whether compiler supports vector arithmetics... " >&6; }
+  ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+  CXXFLAGS="$CXXFLAGSG"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#include <emmintrin.h>
+
+int
+main ()
+{
+
+        __m128d a, b;
+        a = _mm_set_sd (1.0);
+       b = _mm_set1_pd (2.1);
+       __m128d c = a + b;
+       __m128d d = b - c;
+       __m128d e = c * a + d;
+       __m128d f = e/a;
+       (void)f;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+$as_echo "#define DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS 1" >>confdefs.h
+
+
+else
+
+        { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
+
+  if test "$GXX" = "yes" ; then
+          TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
+  else
+    case "$GXX_VERSION" in
+      clang*)
+               TEMP_ALWAYS_INLINE='__inline __attribute__((__always_inline__))'
+       ;;
+
+      *)
+               TEMP_ALWAYS_INLINE=__forceinline
+       ;;
+    esac
+  fi
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for forced inlining" >&5
+$as_echo_n "checking for forced inlining... " >&6; }
+  ac_ext=cpp
+ac_cpp='$CXXCPP $CPPFLAGS'
+ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+
+  CXXFLAGS="$CXXFLAGSG"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+        $TEMP_ALWAYS_INLINE
+       void f() {};
+
+int
+main ()
+{
+
+       f();
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_cxx_try_compile "$LINENO"; then :
+
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+
+cat >>confdefs.h <<_ACEOF
+#define DEAL_II_ALWAYS_INLINE $TEMP_ALWAYS_INLINE
+_ACEOF
+
+
+else
+
+        { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+
+$as_echo "#define DEAL_II_ALWAYS_INLINE inline" >>confdefs.h
+
+
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+
   ac_ext=cpp
 ac_cpp='$CXXCPP $CPPFLAGS'
 ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
index af6240b628524425569ca496b7945c18b69d89de..e3ae8efc96de131b481f5b6c49ada3b5d9444d36 100644 (file)
@@ -245,6 +245,8 @@ DEAL_II_HAVE_BUILTIN_EXPECT
 DEAL_II_HAVE_VERBOSE_TERMINATE
 DEAL_II_HAVE_GLIBC_STACKTRACE
 DEAL_II_HAVE_DEMANGLER
+DEAL_II_CHECK_VECTOR_ARITHMETICS
+DEAL_II_CHECK_ALWAYS_INLINE
 DEAL_II_CHECK_MIN_VECTOR_CAPACITY
 DEAL_II_CHECK_ABORT
 DEAL_II_CHECK_GETRUSAGE
index 709653abf4052639dfccd56e6b8371a6fa1d7db8..f8e55547bb3f630b07ebf881b71ae3acb0bb442d 100644 (file)
@@ -34,6 +34,14 @@ inconvenience this causes.
 <h3>General</h3>
 
 <ol>
+<li> New: There is now a distributed deal.II vector class
+parallel::distributed::Vector<Number> that can be used with MPI. The
+vector is based on a contiguous locally owned range and allows easy
+access to ghost entries from other processors. The vector interface is
+very similar to the non-distributed class Vector<Number>.
+<br>
+(Katharina Kormann, Martin Kronbichler, 2012/01/25)
+
 <li> Fixed: The Intel compiler complains that it can't copy Trilinos vector
 reference objects, preventing the compiling of step-32. This is now fixed.
 <br>
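
For illustration only (not part of the patch), here is a minimal sketch of how the class announced in the news entry above might be used. The news entry only states the general design, namely a contiguous locally owned range plus easy access to ghost entries and an interface similar to Vector<Number>; the constructor signature and the compress()/update_ghost_values() calls below are assumptions inferred from the new partitioner and the added tests, not something this hunk confirms.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/parallel_vector.h>   // new header, listed in the file summary above

void vector_sketch (const MPI_Comm communicator)
{
  const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes (communicator);
  const unsigned int my_id   = dealii::Utilities::MPI::this_mpi_process (communicator);

  // every processor owns a contiguous block of four entries ...
  dealii::IndexSet owned (4*n_procs), ghosts (4*n_procs);
  owned.add_range (4*my_id, 4*my_id+4);
  // ... and, in addition, reads the first entry of the next processor
  if (n_procs > 1)
    ghosts.add_index ((4*my_id+4) % (4*n_procs));

  // assumed constructor: locally owned range, ghost set, communicator
  dealii::parallel::distributed::Vector<double> v (owned, ghosts, communicator);

  v(4*my_id) = my_id;          // write to an owned entry, as in Vector<Number>
  v.compress ();               // assumed: exchange data written to non-owned entries
  v.update_ghost_values ();    // assumed: make ghost entries readable locally
}
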
index 5965545848a4c181d2f6e7aa6be42543d310903a..17402734c787ca2681ca4585d5db3399ad0e9c49 100644 (file)
@@ -1 +1 @@
-step-4 step-6
+step-6
index 56494a2423c1d10e084cfe60461fb71e8bb7accf..da24f888722e6883185596d6f8dbd7f432567d0a 100644 (file)
@@ -433,8 +433,7 @@ namespace Step45
                                   // To solve the linear system of equations
                                   // $Au=b$ we use the CG solver with an
                                   // SSOR-preconditioner. This is, again,
-                                  // copied almost verbatim from step-4, with
-                                  // the exception of the preconditioner. As in
+                                  // copied almost verbatim from step-6. As in
                                   // step-6, we need to make sure that
                                   // constrained degrees of freedom get their
                                   // correct values after solving by calling
index 72a9cda704eace1206480e63dae371a0e354368e..a7c85a5704d94a59b384b1cd647997f069d8af7e 100644 (file)
@@ -46,6 +46,9 @@
    */
 #undef DEAL_II_ABORT_NOTHROW_EXCEPTION
 
+/* Forces the compiler to always inline functions, also in debug mode */
+#undef DEAL_II_ALWAYS_INLINE
+
 /* Flag indicating whether there is a bug in the compiler that leads to bogus
    warnings for inline class members in anonymous namespaces */
 #undef DEAL_II_ANON_NAMESPACE_BOGUS_WARNING
 /* Defined if the compiler supports including <mpi.h> */
 #undef DEAL_II_COMPILER_SUPPORTS_MPI
 
+/* Defined if the compiler can use arithmetic operations on vectorized data
+   types */
+#undef DEAL_II_COMPILER_USE_VECTOR_ARITHMETICS
+
 /* Defined if the compiler has a bug in deducing the type of pointers to const
    member functions. */
 #undef DEAL_II_CONST_MEMBER_DEDUCTION_BUG
diff --git a/deal.II/include/deal.II/base/mpi.h b/deal.II/include/deal.II/base/mpi.h
new file mode 100644 (file)
index 0000000..fbb42e4
--- /dev/null
@@ -0,0 +1,421 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011, 2012 by deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__mpi_h
+#define __deal2__mpi_h
+
+#include <deal.II/base/config.h>
+#include <vector>
+
+#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC)
+#  include <mpi.h>
+#else
+                                // without MPI, we would still like to use
+                                // some constructs with MPI data
+                                // types. Therefore, create some dummies
+typedef int MPI_Comm;
+const int MPI_COMM_SELF = 0;
+typedef int MPI_Datatype;
+typedef int MPI_Op;
+namespace MPI
+{
+  static const unsigned int UNSIGNED = 0;
+  static const unsigned int LONG_DOUBLE = 0;
+  static const unsigned int LONG_DOUBLE_COMPLEX = 0;
+  static const unsigned int MAX = 0;
+  static const unsigned int MIN = 0;
+  static const unsigned int SUM = 0;
+}
+#endif
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Utilities
+{
+                                   /**
+                                    * A namespace for utility functions that
+                                    * abstract certain operations using the
+                                   * Message Passing Interface (MPI) or
+                                   * provide fallback operations in
+                                   * case deal.II is configured not to use
+                                   * MPI at all.
+                                    *
+                                    * @ingroup utilities
+                                    */
+  namespace MPI
+  {
+                                    /**
+                                     * Return the number of MPI processes
+                                     * there exist in the given communicator
+                                     * that exist in the given communicator
+                                     * it returns 1.
+                                     */
+    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);
+
+                                    /**
+                                     * Return the number of the present MPI
+                                     * process in the space of processes
+                                     * described by the given
+                                     * communicator. This will be a unique
+                                     * value for each process between zero
+                                     * and (less than) the number of all
+                                     * processes (given by
+                                     * n_mpi_processes()).
+                                     */
+    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);
+
+                                    /**
+                                     * Consider an unstructured
+                                     * communication pattern where
+                                     * every process in an MPI
+                                     * universe wants to send some
+                                     * data to a subset of the other
+                                     * processors. To do that, the
+                                     * other processors need to know
+                                     * who to expect messages
+                                     * from. This function computes
+                                     * this information.
+                                     *
+                                     * @param mpi_comm A communicator
+                                     * that describes the processors
+                                     * that are going to communicate
+                                     * with each other.
+                                     *
+                                     * @param destinations The list
+                                     * of processors the current
+                                     * process wants to send
+                                     * information to. This list need
+                                     * not be sorted in any way. If
+                                     * it contains duplicate entries
+                                     * that means that multiple
+                                     * messages are intended for a
+                                     * given destination.
+                                     *
+                                     * @return A list of processors
+                                     * that have indicated that they
+                                     * want to send something to the
+                                     * current processor. The
+                                     * resulting list is not
+                                     * sorted. It may contain
+                                     * duplicate entries if
+                                     * processors enter the same
+                                     * destination more than once in
+                                     * their destinations list.
+                                     */
+    std::vector<unsigned int>
+    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+                                                 const std::vector<unsigned int> & destinations);
+
+                                    /**
+                                     * Given a communicator, generate a new
+                                     * communicator that contains the same
+                                     * set of processors but that has a
+                                     * different, unique identifier.
+                                     *
+                                     * This functionality can be used to
+                                     * ensure that different objects, such as
+                                     * distributed matrices, each have unique
+                                     * communicators over which they can
+                                     * interact without interfering with each
+                                     * other.
+                                     *
+                                     * When no longer needed, the
+                                     * communicator created here needs to
+                                     * be destroyed using
+                                     * <code>MPI_Comm_free</code>.
+                                     */
+    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+
+                                    /**
+                                     * Return the sum over all processors of the value @p t. This function
+                                     * is collective over all processors given in the communicator. If
+                                     * deal.II is not configured for use of MPI, this function simply
+                                     * returns the value of @p t. This function corresponds to the
+                                     * <code>MPI_Allreduce</code> function, i.e. all processors receive
+                                     * the result of this operation.
+                                     *
+                                     * @note This function is only implemented for certain template
+                                     * arguments <code>T</code>, namely <code>float, double, int,
+                                     * unsigned int</code>.
+                                     */
+    template <typename T>
+    T sum (const T &t,
+          const MPI_Comm &mpi_communicator);
+
+                                    /**
+                                     * Like the previous function,
+                                     * but take the sums over the
+                                     * elements of an array
+                                     * of length N. In other words,
+                                     * the i-th element of the
+                                     * results array is the sum over
+                                     * the i-th entries of the input
+                                     * arrays from each processor.
+                                     */
+    template <typename T, unsigned int N>
+    inline
+    void sum (const T (&values)[N],
+             const MPI_Comm &mpi_communicator,
+             T (&sums)[N]);
+
+                                    /**
+                                     * Return the maximum over all processors of the value @p t. This function
+                                     * is collective over all processors given in the communicator. If
+                                     * deal.II is not configured for use of MPI, this function simply
+                                     * returns the value of @p t. This function corresponds to the
+                                     * <code>MPI_Allreduce</code> function, i.e. all processors receive
+                                     * the result of this operation.
+                                     *
+                                     * @note This function is only implemented for certain template
+                                     * arguments <code>T</code>, namely <code>float, double, int,
+                                     * unsigned int</code>.
+                                     */
+    template <typename T>
+    T max (const T &t,
+          const MPI_Comm &mpi_communicator);
+
+                                    /**
+                                     * Like the previous function,
+                                     * but take the maxima over the
+                                     * elements of an array
+                                     * of length N. In other words,
+                                     * the i-th element of the
+                                     * results array is the maximum of
+                                     * the i-th entries of the input
+                                     * arrays from each processor.
+                                     */
+    template <typename T, unsigned int N>
+    inline
+    void max (const T (&values)[N],
+             const MPI_Comm &mpi_communicator,
+             T (&maxima)[N]);
+
+                                    /**
+                                     * Data structure to store the result of
+                                     * min_max_avg().
+                                     */
+    struct MinMaxAvg
+    {
+       double sum;
+       double min;
+       double max;
+       unsigned int min_index;
+       unsigned int max_index;
+       double avg;
+    };
+
+                                    /**
+                                     * Returns sum, average, minimum,
+                                     * maximum, processor id of minimum and
+                                     * maximum as a collective operation on
+                                     * the given MPI communicator @p
+                                     * mpi_communicator. Each processor's
+                                     * value is given in @p my_value and
+                                     * the result is returned as a MinMaxAvg
+                                     * object that is available on all
+                                     * machines.
+                                     */
+    MinMaxAvg
+    min_max_avg (const double my_value,
+                const MPI_Comm &mpi_communicator);
+
+
+
+                                    /**
+                                     * A class that is used to initialize the
+                                     * MPI system at the beginning of a
+                                     * program and to shut it down again at
+                                     * the end.
+                                     *
+                                     * If a program uses MPI one would
+                                     * typically just create an object of
+                                     * this type at the beginning of
+                                     * <code>main()</code>. The constructor
+                                     * of this class then runs
+                                     * <code>MPI_Init()</code> with the given
+                                     * arguments. At the end of the program,
+                                     * the compiler will invoke the
+                                     * the destructor of this object is
+                                     * invoked, which in turn calls
+                                     * <code>MPI_Finalize</code>
+                                     *
+                                     * This class is used in step-32, for example.
+                                     */
+    class MPI_InitFinalize
+    {
+      public:
+                                        /**
+                                         * Constructor. Takes the arguments
+                                         * from the command line (in case of
+                                         * MPI, the number of processes is
+                                         * specified there), and sets up a
+                                         * respective communicator by calling
+                                         * <tt>MPI_Init()</tt>. This
+                                         * constructor can only be called once
+                                         * in a program, since MPI cannot be
+                                         * initialized twice.
+                                         */
+       MPI_InitFinalize (int    &argc,
+                         char** &argv);
+
+                                        /**
+                                         * Destructor. Calls
+                                         * <tt>MPI_Finalize()</tt> in
+                                         * case this class owns the MPI
+                                         * process.
+                                         */
+       ~MPI_InitFinalize();
+
+      private:
+                                        /**
+                                         * This flag tells the class
+                                         * whether it owns the MPI
+                                         * process (i.e., it has been
+                                         * constructed using the
+                                         * argc/argv input, or it has
+                                         * been copied). In the former
+                                         * case, the command
+                                         * <tt>MPI_Finalize()</tt> will
+                                         * be called at destruction.
+                                         */
+       const bool owns_mpi;
+    };
+
+    namespace internal
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                                      /**
+                                       * Return the corresponding MPI data
+                                       * type id for the argument given.
+                                       */
+      inline MPI_Datatype mpi_type_id (const int *)
+      {
+       return MPI_INT;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const long int *)
+      {
+       return MPI_LONG;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const unsigned int *)
+      {
+       return MPI_UNSIGNED;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const unsigned long int *)
+      {
+       return MPI_UNSIGNED_LONG;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const float *)
+      {
+       return MPI_FLOAT;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const double *)
+      {
+       return MPI_DOUBLE;
+      }
+
+
+      inline MPI_Datatype mpi_type_id (const long double *)
+      {
+       return MPI_LONG_DOUBLE;
+      }
+#endif
+    }
+
+
+    template <typename T>
+    inline
+    T sum (const T &t,
+          const MPI_Comm &mpi_communicator)
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      T sum;
+      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
+                    &sum, 1, internal::mpi_type_id(&t), MPI_SUM,
+                    mpi_communicator);
+      return sum;
+#else
+      (void)mpi_communicator;
+      return t;
+#endif
+    }
+
+
+    template <typename T, unsigned int N>
+    inline
+    void sum (const T (&values)[N],
+             const MPI_Comm &mpi_communicator,
+             T (&sums)[N])
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&values[0])),
+                    &sums[0], N, internal::mpi_type_id(values), MPI_SUM,
+                    mpi_communicator);
+#else
+      (void)mpi_communicator;
+      for (unsigned int i=0; i<N; ++i)
+       sums[i] = values[i];
+#endif
+    }
+
+
+    template <typename T>
+    inline
+    T max (const T &t,
+          const MPI_Comm &mpi_communicator)
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      T sum;
+      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
+                    &sum, 1, internal::mpi_type_id(&t), MPI_MAX,
+                    mpi_communicator);
+      return sum;
+#else
+      (void)mpi_communicator;
+      return t;
+#endif
+    }
+
+
+    template <typename T, unsigned int N>
+    inline
+    void max (const T (&values)[N],
+             const MPI_Comm &mpi_communicator,
+             T (&maxima)[N])
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&values[0])),
+                    &maxima[0], N, internal::mpi_type_id(values), MPI_MAX,
+                    mpi_communicator);
+#else
+      (void)mpi_communicator;
+      for (unsigned int i=0; i<N; ++i)
+       maxima[i] = values[i];
+#endif
+    }
+  } // end of namespace MPI
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
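
For illustration only (not part of the patch), a short sketch that uses only functions and types declared in the new header above. It assumes deal.II has been configured with MPI support, so that MPI_COMM_WORLD and the real MPI types are available.

#include <deal.II/base/mpi.h>
#include <iostream>
#include <vector>

int main (int argc, char **argv)
{
  // MPI_Init()/MPI_Finalize() are tied to the lifetime of this object
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

  const MPI_Comm comm = MPI_COMM_WORLD;
  const unsigned int my_id = dealii::Utilities::MPI::this_mpi_process (comm);

  // collective reductions: every processor receives the result
  const double global_sum = dealii::Utilities::MPI::sum (1.5*my_id, comm);
  const double global_max = dealii::Utilities::MPI::max (1.5*my_id, comm);
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg (1.5*my_id, comm);

  // who is going to send to me, given whom I intend to send to?
  std::vector<unsigned int> destinations;    // ranks this process wants to message
  const std::vector<unsigned int> sources =
    dealii::Utilities::MPI::compute_point_to_point_communication_pattern (comm, destinations);

  if (my_id == 0)
    std::cout << "sum=" << global_sum << " max=" << global_max
              << " avg=" << stats.avg << " senders=" << sources.size()
              << std::endl;
}
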
diff --git a/deal.II/include/deal.II/base/partitioner.h b/deal.II/include/deal.II/base/partitioner.h
new file mode 100644 (file)
index 0000000..c07d50e
--- /dev/null
@@ -0,0 +1,549 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011, 2012 by deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__partitioner_h
+#define __deal2__partitioner_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/types.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+  /**
+   * This class defines a model for the partitioning of a vector (or,
+   * in fact, any linear data structure) among processors using
+   * MPI.
+   *
+   * The partitioner stores the global vector size and the locally
+   * owned range as a half-open interval [@p lower, @p
+   * upper). Furthermore, it includes a structure for the
+   * point-to-point communication patterns. It allows the inclusion of
+   * ghost indices (i.e. indices that a current processor needs to
+   * have access to, but are owned by another process) through an
+   * IndexSet. In addition, it also stores the other processors' ghost
+   * indices belonging to the current processor, which are the indices
+   * from which other processors might require information. In a
+   * sense, these import indices form the dual of the ghost
+   * indices. This information is gathered once when constructing the
+   * partitioner, which obviates subsequent global communication steps
+   * when exchanging data.
+   *
+   * The partitioner includes a mechanism for converting global to
+   * local and local to global indices. The storage convention for the
+   * partitioner is as follows. The local range is associated with
+   * local indices [0,@p local_size), and ghost indices are stored
+   * consecutively in [@p local_size, @p local_size + @p
+   * n_ghost_indices). The ghost indices are sorted according to their
+   * global index.
+   *
+   *
+   * @author Katharina Kormann, Martin Kronbichler, 2010, 2011
+   */
+  class Partitioner
+  {
+  public:
+                               /**
+                                * Empty Constructor.
+                                */
+    Partitioner ();
+
+                               /**
+                                * Constructor with size argument. Creates an
+                                * MPI_COMM_SELF structure where there is no
+                                * real parallel layout.
+                                */
+    Partitioner (const unsigned int size);
+
+                               /**
+                                * Constructor with index set arguments. This
+                                *  constructor creates a distributed layout
+                                *  based on a given communicator, an
+                                *  IndexSet describing the locally owned
+                                *  range and another one for describing ghost
+                                *  indices that are owned by other
+                                *  processors, but we need to have read or
+                                *  write access to.
+                                */
+    Partitioner (const IndexSet &locally_owned_indices,
+                const IndexSet &ghost_indices_in,
+                const MPI_Comm  communicator_in);
+
+                               /**
+                                * Constructor with one index set argument. This
+                                * constructor creates a distributed layout
+                                * based on a given communicator, and an IndexSet
+                                * describing the locally owned range. It
+                                * allows to set the ghost indices at a later
+                                * time. Apart from this, it is similar to the
+                                * other constructor with two index sets.
+                                */
+    Partitioner (const IndexSet &locally_owned_indices,
+                const MPI_Comm  communicator_in);
+
+                               /**
+                                * Sets the locally owned indices. Used in the
+                                * constructor.
+                                */
+    void set_owned_indices (const IndexSet &locally_owned_indices);
+
+                               /**
+                                * Allows to set the ghost indices after the
+                                * constructor has been called.
+                                */
+    void set_ghost_indices (const IndexSet &ghost_indices);
+
+                               /**
+                                * Returns the global size.
+                                */
+    types::global_dof_index size() const;
+
+                               /**
+                                * Returns the local size, i.e.
+                                * local_range().second minus
+                                * local_range().first.
+                                */
+    unsigned int local_size() const;
+
+                               /**
+                                * Returns an IndexSet representation of the
+                                * local range. This class only supports
+                                * contiguous local ranges, so the IndexSet
+                                * actually only consists of one single range
+                                * of data, and is equivalent to the result of
+                                * local_range().
+                                */
+    const IndexSet & locally_owned_range() const;
+
+                               /**
+                                * Returns the local range. The returned pair
+                                * consists of the index of the first element
+                                * and the index of the element one past the
+                                * last locally owned one.
+                                */
+    std::pair<types::global_dof_index,types::global_dof_index>
+    local_range() const;
+
+                               /**
+                                * Returns true if the given global index is
+                                * in the local range of this processor.
+                                */
+    bool in_local_range (const types::global_dof_index global_index) const;
+
+                               /**
+                                * Returns the local index corresponding to
+                                * the given global index. If the given global
+                                * index is neither locally owned nor a ghost,
+                                * an exception is thrown.
+                                *
+                                * Note that the local index for locally owned
+                                * indices is between 0 and local_size()-1,
+                                * and the local index for ghosts is between
+                                * local_size() and
+                                * local_size()+n_ghost_indices()-1.
+                                */
+    unsigned int
+    global_to_local (const types::global_dof_index global_index) const;
+
+                               /**
+                                * Returns the global index corresponding to
+                                * the given local index.
+                                *
+                                * Note that the local index for locally owned
+                                * indices is between 0 and local_size()-1,
+                                * and the local index for ghosts is between
+                                * local_size() and
+                                * local_size()+n_ghost_indices()-1.
+                                */
+    types::global_dof_index
+    local_to_global (const unsigned int local_index) const;
+
+                               /**
+                                * Returns whether the given global index is a
+                                * ghost index on the present
+                                * processor. Returns false for indices that
+                                * are owned locally and for indices not
+                                * present at all.
+                                */
+    bool is_ghost_entry (const types::global_dof_index global_index) const;
+
+                               /**
+                                * Returns an IndexSet representation of all
+                                * ghost indices.
+                                */
+    const IndexSet& ghost_indices() const;
+
+                               /**
+                                * Returns the number of ghost indices. Same
+                                * as ghost_indices().n_elements(), but cached
+                                * for simpler access.
+                                */
+    unsigned int n_ghost_indices() const;
+
+                               /**
+                                * Returns a list of processors (first entry)
+                                * and the number of degrees of freedom for
+                                * the individual processor on the ghost
+                                * elements present (second entry).
+                                */
+    const std::vector<std::pair<unsigned int,unsigned int> >&
+    ghost_targets() const;
+
+                               /**
+                                * The set of (local) indices that we are
+                                * importing during compress(), i.e., others'
+                                * ghosts that belong to the local
+                                * range. Similar structure as in an IndexSet,
+                                * but tailored to be iterated over, and some
+                                * indices may be duplicates.
+                                */
+    const std::vector<std::pair<unsigned int, unsigned int> >&
+    import_indices() const;
+
+                               /**
+                                * Number of import indices, i.e., indices
+                                * that are ghosts on other processors and we
+                                * will receive data from.
+                                */
+    unsigned int n_import_indices() const;
+
+                               /**
+                                * Returns a list of processors (first entry)
+                                * and the number of degrees of freedom for
+                                * all the processors that data is obtained
+                                * from (second entry), i.e., locally owned
+                                * indices that are ghosts on other
+                                * processors.
+                                */
+    const std::vector<std::pair<unsigned int,unsigned int> >&
+    import_targets() const;
+
+                               /**
+                                * Returns the MPI ID of the calling
+                                * processor. Cached to have simple access.
+                                */
+    unsigned int this_mpi_process () const;
+
+                               /**
+                                * Returns the total number of MPI processors
+                                * participating in the given
+                                * partitioner. Cached to have simple access.
+                                */
+    unsigned int n_mpi_processes () const;
+
+                               /**
+                                * Returns the MPI communicator underlying the
+                                * partitioner object.
+                                */
+    MPI_Comm get_communicator() const;
+
+                               /**
+                                * Computes the memory consumption of this
+                                * structure.
+                                */
+    std::size_t memory_consumption() const;
+
+                                       /**
+                                       * Exception
+                                       */
+    DeclException2 (ExcIndexNotPresent,
+                   unsigned int, unsigned int,
+                   << "Global index " << arg1
+                   << " neither owned nor ghost on proc " << arg2);
+
+  private:
+                               /**
+                                * The global size of the vector over all
+                                * processors
+                                */
+    const types::global_dof_index global_size;
+
+                               /**
+                                * The range of the vector that is stored
+                                * locally.
+                                */
+    IndexSet locally_owned_range_data;
+
+                               /**
+                                * The range of the vector that is stored
+                                * locally. Extracted from locally_owned_range
+                                * for performance reasons.
+                                */
+    std::pair<types::global_dof_index,types::global_dof_index> local_range_data;
+
+                               /**
+                                * The set of indices to which we need to have
+                                * read access but that are not locally owned
+                                */
+    IndexSet ghost_indices_data;
+
+                               /**
+                                * Caches the number of ghost indices. It
+                                * would be expensive to use @p
+                                * ghost_indices.n_elements() to compute this.
+                                */
+    unsigned int n_ghost_indices_data;
+
+                               /**
+                                * Contains information about which processors my
+                                * ghost indices belong to and how many of those
+                                * indices there are.
+                                */
+    std::vector<std::pair<unsigned int,unsigned int> > ghost_targets_data;
+
+                               /**
+                                * The set of (local) indices that we are
+                                * importing during compress(), i.e., others'
+                                * ghosts that belong to the local
+                                * range. Similar structure as in an IndexSet,
+                                * but tailored to be iterated over, and some
+                                * indices may be duplicates.
+                                */
+    std::vector<std::pair<unsigned int, unsigned int> > import_indices_data;
+
+                               /**
+                                * Caches the number of import indices. It
+                                * would be expensive to compute it by
+                                * iterating over the import indices and
+                                * accumulating their lengths.
+                                */
+    unsigned int n_import_indices_data;
+
+                               /**
+                                * The set of processors that send us their
+                                * ghost data, and the length of each data field
+                                */
+    std::vector<std::pair<unsigned int,unsigned int> > import_targets_data;
+
+                               /**
+                                * The ID of the current processor in the MPI
+                                * network
+                                */
+    unsigned int my_pid;
+
+                               /**
+                                * The total number of processors active in
+                                * the problem
+                                */
+    unsigned int n_procs;
+
+                               /**
+                                * The MPI communicator involved in the
+                                * problem
+                                */
+    const MPI_Comm communicator;
+  };
+
+
+
+/*----------------------- Inline functions ----------------------------------*/
+
+#ifndef DOXYGEN
+
+    inline
+    types::global_dof_index Partitioner::size() const
+    {
+      return global_size;
+    }
+
+
+
+    inline
+    const IndexSet& Partitioner::locally_owned_range() const
+    {
+      return locally_owned_range_data;
+    }
+
+
+
+    inline
+    std::pair<types::global_dof_index,types::global_dof_index>
+    Partitioner::local_range() const
+    {
+      return local_range_data;
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::local_size () const
+    {
+      return local_range_data.second - local_range_data.first;
+    }
+
+
+
+    inline
+    bool
+    Partitioner::in_local_range (const types::global_dof_index global_index) const
+    {
+      return (local_range_data.first <= global_index &&
+             global_index < local_range_data.second);
+    }
+
+
+
+    inline
+    bool
+    Partitioner::is_ghost_entry (const types::global_dof_index global_index) const
+    {
+                               // if the index is in the local range, it is
+                               // trivially not a ghost
+      if (in_local_range(global_index) == true)
+       return false;
+      else
+       return ghost_indices().is_element(global_index);
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::global_to_local (const types::global_dof_index global_index) const
+    {
+      Assert(in_local_range(global_index) || is_ghost_entry (global_index),
+            ExcIndexNotPresent(global_index, my_pid));
+      if (in_local_range(global_index))
+       return global_index - local_range_data.first;
+      else if (is_ghost_entry (global_index))
+       return (local_size() +
+               ghost_indices_data.index_within_set (global_index));
+      else
+                               // should only end up here in
+                               // optimized mode, when we use this
+                               // large number to trigger a segfault
+                               // when using this method for array
+                               // access
+       return numbers::invalid_unsigned_int;
+    }
+
+
+
+    inline
+    types::global_dof_index
+    Partitioner::local_to_global (const unsigned int local_index) const
+    {
+      AssertIndexRange (local_index, local_size() + n_ghost_indices_data);
+      if (local_index < local_size())
+       return local_range_data.first + local_index;
+      else
+       return ghost_indices_data.nth_index_in_set (local_index-local_size());
+    }
+
+
+
+    inline
+    const IndexSet&  Partitioner::ghost_indices() const
+    {
+      return ghost_indices_data;
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::n_ghost_indices() const
+    {
+      return n_ghost_indices_data;
+    }
+
+
+
+    inline
+    const std::vector<std::pair<unsigned int,unsigned int> >&
+    Partitioner::ghost_targets() const
+    {
+      return ghost_targets_data;
+    }
+
+
+    inline
+    const std::vector<std::pair<unsigned int, unsigned int> >&
+    Partitioner::import_indices() const
+    {
+      return import_indices_data;
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::n_import_indices() const
+    {
+      return n_import_indices_data;
+    }
+
+
+
+    inline
+    const std::vector<std::pair<unsigned int,unsigned int> >&
+    Partitioner::import_targets() const
+    {
+      return import_targets_data;
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::this_mpi_process() const
+    {
+                               // return the id from the variable stored in
+                               // this class instead of
+                               // Utilities::MPI::this_mpi_process() in order
+                               // to make this query also work when MPI is
+                               // not initialized.
+      return my_pid;
+    }
+
+
+
+    inline
+    unsigned int
+    Partitioner::n_mpi_processes() const
+    {
+                               // return the number of MPI processes from the
+                               // variable stored in this class instead of
+                               // Utilities::MPI::n_mpi_processes() in order
+                               // to make this query also work when MPI is
+                               // not initialized.
+      return n_procs;
+    }
+
+
+
+    inline
+    MPI_Comm
+    Partitioner::get_communicator() const
+    {
+      return communicator;
+    }
+
+#endif  // ifndef DOXYGEN
+
+  } // end of namespace MPI
+
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
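
The inline accessors above fix the local numbering convention: locally owned indices come first and map to [0, local_size()), and the ghost indices follow contiguously in the order given by the ghost IndexSet. The following sketch is illustrative only (not part of this patch) and assumes the Partitioner constructor taking the locally owned range, the ghost index set and an MPI communicator that is declared earlier in this file:

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/partitioner.h>

  void partitioner_sketch (const MPI_Comm communicator)
  {
    IndexSet locally_owned (100);
    locally_owned.add_range (10, 20);      // this process owns [10,20)
    IndexSet ghosts (100);
    ghosts.add_index (42);                 // one entry owned by another process

    Utilities::MPI::Partitioner part (locally_owned, ghosts, communicator);

    // locally owned indices map to [0, local_size())
    const unsigned int loc = part.global_to_local (15);              // == 5
    // ghost indices follow contiguously after the owned range
    const unsigned int ghost_loc = part.global_to_local (42);        // == part.local_size()
    // the mapping is invertible
    const types::global_dof_index glob = part.local_to_global (loc); // == 15
  }
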
index ee1a176bc3d40dfa82a1db2c5d12a15f895754df..a0280769c1c1b29ec38657ebef59f1b4ca562145 100644 (file)
@@ -113,6 +113,18 @@ class Tensor
                                      */
     Tensor (const array_type &initializer);
 
+                                    /**
+                                     * Conversion constructor from a
+                                     * tensor of tensors.
+                                     */
+    Tensor (const Tensor<1,dim,Tensor<rank_-1,dim,Number> > &tensor_in);
+
+                                    /**
+                                     * Conversion operator to tensor of
+                                     * tensors.
+                                     */
+    operator Tensor<1,dim,Tensor<rank_-1,dim,Number> > () const;
+
                                     /**
                                      * Read-Write access operator.
                                      */
@@ -325,13 +337,33 @@ Tensor<rank_,dim,Number>::Tensor (const array_type &initializer)
 
 
 
+template <int rank_, int dim, typename Number>
+inline
+Tensor<rank_,dim,Number>::Tensor
+(const Tensor<1,dim,Tensor<rank_-1,dim,Number> > &tensor_in)
+{
+  for (unsigned int i=0; i<dim; ++i)
+    subtensor[i] = tensor_in[i];
+}
+
+
+
+template <int rank_, int dim, typename Number>
+inline
+Tensor<rank_,dim,Number>::operator
+  Tensor<1,dim,Tensor<rank_-1,dim,Number> > () const
+{
+  return Tensor<1,dim,Tensor<rank_-1,dim,Number> > (subtensor);
+}
+
+
+
 template <int rank_, int dim, typename Number>
 inline
 typename Tensor<rank_,dim,Number>::value_type&
 Tensor<rank_,dim,Number>::operator[] (const unsigned int i)
 {
   Assert (i<dim, ExcIndexRange(i, 0, dim));
-
   return subtensor[i];
 }
 
@@ -671,10 +703,12 @@ void contract (Tensor<1,dim,Number>       &dest,
               const Tensor<2,dim,Number> &src1,
               const Tensor<1,dim,Number> &src2)
 {
-  dest.clear ();
   for (unsigned int i=0; i<dim; ++i)
-    for (unsigned int j=0; j<dim; ++j)
-      dest[i] += src1[i][j] * src2[j];
+    {
+      dest[i] = src1[i][0] * src2[0];
+      for (unsigned int j=1; j<dim; ++j)
+       dest[i] += src1[i][j] * src2[j];
+    }
 }
 
 
@@ -699,10 +733,13 @@ Tensor<1,dim,Number>
 operator * (const Tensor<2,dim,Number> &src1,
             const Tensor<1,dim,Number> &src2)
 {
-  Tensor<1,dim,Number> dest;
+  Tensor<1,dim,Number> dest (false);
   for (unsigned int i=0; i<dim; ++i)
-    for (unsigned int j=0; j<dim; ++j)
-      dest[i] += src1[i][j] * src2[j];
+    {
+      dest[i] = src1[i][0] * src2[0];
+      for (unsigned int j=1; j<dim; ++j)
+       dest[i] += src1[i][j] * src2[j];
+    }
   return dest;
 }
 
@@ -720,10 +757,12 @@ void contract (Tensor<1,dim,Number>       &dest,
               const Tensor<1,dim,Number> &src1,
               const Tensor<2,dim,Number> &src2)
 {
-  dest.clear ();
   for (unsigned int i=0; i<dim; ++i)
-    for (unsigned int j=0; j<dim; ++j)
-      dest[i] += src1[j] * src2[j][i];
+    {
+      dest[i] = src1[0] * src2[0][i];
+      for (unsigned int j=1; j<dim; ++j)
+       dest[i] += src1[j] * src2[j][i];
+    }
 }
 
 
@@ -749,10 +788,13 @@ Tensor<1,dim,Number>
 operator * (const Tensor<1,dim,Number> &src1,
             const Tensor<2,dim,Number> &src2)
 {
-  Tensor<1,dim,Number> dest;
+  Tensor<1,dim,Number> dest (false);
   for (unsigned int i=0; i<dim; ++i)
-    for (unsigned int j=0; j<dim; ++j)
-      dest[i] += src1[j] * src2[j][i];
+    {
+      dest[i] = src1[0] * src2[0][i];
+      for (unsigned int j=1; j<dim; ++j)
+       dest[i] += src1[j] * src2[j][i];
+    }
   return dest;
 }
 
@@ -770,14 +812,17 @@ void contract (Tensor<2,dim,Number>       &dest,
               const Tensor<2,dim,Number> &src1,
               const Tensor<2,dim,Number> &src2)
 {
-  dest.clear ();
   for (unsigned int i=0; i<dim; ++i)
     for (unsigned int j=0; j<dim; ++j)
-      for (unsigned int k=0; k<dim; ++k)
-       dest[i][j] += src1[i][k] * src2[k][j];
+      {
+       dest[i][j] = src1[i][0] * src2[0][j];
+       for (unsigned int k=1; k<dim; ++k)
+         dest[i][j] += src1[i][k] * src2[k][j];
+      }
 }
 
 
+
 /**
  * Multiplication operator performing a contraction of the last index
  * of the first argument and the first index of the second
@@ -1611,7 +1656,7 @@ inline
 Tensor<2,dim,Number>
 invert (const Tensor<2,dim,Number> &t)
 {
-  Tensor<2,dim,Number> return_tensor;
+  Number return_tensor [dim][dim];
   switch (dim)
     {
       case 1:
@@ -1641,13 +1686,13 @@ invert (const Tensor<2,dim,Number> &t)
                    t07 = 1.0/(t4*t[2][2]-t6*t[2][1]-t8*t[2][2]+
                               t00*t[2][1]+t01*t[1][2]-t04*t[1][1]);
        return_tensor[0][0] = (t[1][1]*t[2][2]-t[1][2]*t[2][1])*t07;
-       return_tensor[0][1] = -(t[0][1]*t[2][2]-t[0][2]*t[2][1])*t07;
-       return_tensor[0][2] = -(-t[0][1]*t[1][2]+t[0][2]*t[1][1])*t07;
-       return_tensor[1][0] = -(t[1][0]*t[2][2]-t[1][2]*t[2][0])*t07;
+       return_tensor[0][1] = (t[0][2]*t[2][1]-t[0][1]*t[2][2])*t07;
+       return_tensor[0][2] = (t[0][1]*t[1][2]-t[0][2]*t[1][1])*t07;
+       return_tensor[1][0] = (t[1][2]*t[2][0]-t[1][0]*t[2][2])*t07;
        return_tensor[1][1] = (t[0][0]*t[2][2]-t04)*t07;
-       return_tensor[1][2] = -(t6-t00)*t07;
-       return_tensor[2][0] = -(-t[1][0]*t[2][1]+t[1][1]*t[2][0])*t07;
-       return_tensor[2][1] = -(t[0][0]*t[2][1]-t01)*t07;
+       return_tensor[1][2] = (t00-t6)*t07;
+       return_tensor[2][0] = (t[1][0]*t[2][1]-t[1][1]*t[2][0])*t07;
+       return_tensor[2][1] = (t01-t[0][0]*t[2][1])*t07;
        return_tensor[2][2] = (t4-t8)*t07;
 
        break;
@@ -1660,7 +1705,7 @@ invert (const Tensor<2,dim,Number> &t)
       default:
            AssertThrow (false, ExcNotImplemented());
     }
-  return return_tensor;
+  return Tensor<2,dim,Number>(return_tensor);
 }
 
 
@@ -1680,15 +1725,17 @@ inline
 Tensor<2,dim,Number>
 transpose (const Tensor<2,dim,Number> &t)
 {
-  Tensor<2,dim,Number> tt = t;
+  Number tt[dim][dim];
   for (unsigned int i=0; i<dim; ++i)
-    for (unsigned int j=i+1; j<dim; ++j)
-      {
-        const Number x = tt[i][j];
-        tt[i][j] = tt[j][i];
-        tt[j][i] = x;
-      };
-  return tt;
+    {
+      tt[i][i] = t[i][i];
+      for (unsigned int j=i+1; j<dim; ++j)
+       {
+         tt[i][j] = t[j][i];
+         tt[j][i] = t[i][j];
+       };
+    }
+  return Tensor<2,dim,Number>(tt);
 }
 
 #ifndef DOXYGEN
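
The rewritten contraction loops above change only the initialization: instead of clearing the destination and accumulating, they assign the j=0 term directly and add the remaining terms, so the result is still the usual matrix-vector contraction dest[i] = sum_j src1[i][j] * src2[j]. A small illustration (not part of the patch) of the two equivalent entry points:

  #include <deal.II/base/tensor.h>

  void contraction_sketch ()
  {
    Tensor<2,2> A;
    A[0][0] = 1.;  A[0][1] = 2.;
    A[1][0] = 3.;  A[1][1] = 4.;

    Tensor<1,2> x;
    x[0] = 1.;  x[1] = 1.;

    const Tensor<1,2> y = A * x;   // y[0] == 3, y[1] == 7
    Tensor<1,2> z;
    contract (z, A, x);            // same result as A * x
  }
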
index c91cf650f7cc4851418d9818176171f51baf38cc..23346316d428598c16524076c2d1959e362653f3 100644 (file)
@@ -1125,8 +1125,8 @@ Tensor<1,dim,Number>::operator * (const Tensor<1,dim,Number> &p) const
              values[2] * p.values[2]);
       break;
     default:
-      Number q=0;
-      for (unsigned int i=0; i<dim; ++i)
+      Number q = values[0] * p.values[0];
+      for (unsigned int i=1; i<dim; ++i)
        q += values[i] * p.values[i];
       return q;
     }
@@ -1156,7 +1156,7 @@ template <int dim, typename Number>
 inline
 Tensor<1,dim,Number> Tensor<1,dim,Number>::operator - () const
 {
-  Tensor<1,dim,Number> result;
+  Tensor<1,dim,Number> result (false);
   for (unsigned int i=0; i<dim; ++i)
     result.values[i] = -values[i];
   return result;
@@ -1179,8 +1179,8 @@ inline
 typename Tensor<1,dim,Number>::real_type
 Tensor<1,dim,Number>::norm_square () const
 {
-  real_type s = 0;
-  for (unsigned int i=0; i<dim; ++i)
+  real_type s = numbers::NumberTraits<Number>::abs_square(values[0]);
+  for (unsigned int i=1; i<dim; ++i)
     s += numbers::NumberTraits<Number>::abs_square(values[i]);
 
   return s;
@@ -1308,7 +1308,7 @@ Tensor<1,dim,Number>
 operator * (const Tensor<1,dim,Number> &t,
            const Number                factor)
 {
-  Tensor<1,dim,Number> tt;
+  Tensor<1,dim,Number> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] * factor;
   return tt;
@@ -1327,7 +1327,7 @@ Tensor<1,dim,Number>
 operator * (const Number                factor,
            const Tensor<1,dim,Number> &t)
 {
-  Tensor<1,dim,Number> tt;
+  Tensor<1,dim,Number> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] * factor;
   return tt;
@@ -1346,7 +1346,7 @@ Tensor<1,dim,Number>
 operator / (const Tensor<1,dim,Number> &t,
            const Number                factor)
 {
-  Tensor<1,dim,Number> tt;
+  Tensor<1,dim,Number> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] / factor;
   return tt;
@@ -1365,7 +1365,7 @@ Tensor<1,dim>
 operator * (const Tensor<1,dim> &t,
            const double         factor)
 {
-  Tensor<1,dim> tt;
+  Tensor<1,dim> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] * factor;
   return tt;
@@ -1384,7 +1384,7 @@ Tensor<1,dim>
 operator * (const double         factor,
            const Tensor<1,dim> &t)
 {
-  Tensor<1,dim> tt;
+  Tensor<1,dim> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] * factor;
   return tt;
@@ -1403,7 +1403,7 @@ Tensor<1,dim>
 operator / (const Tensor<1,dim> &t,
            const double         factor)
 {
-  Tensor<1,dim> tt;
+  Tensor<1,dim> tt (false);
   for (unsigned int d=0; d<dim; ++d)
     tt[d] = t[d] / factor;
   return tt;
index 0894d520c8638375260676f6a3544a60e4bfc7f3..ad51855a31e1174d9ce6f3716e12d5f279081ec2 100644 (file)
@@ -1,7 +1,7 @@
 //---------------------------------------------------------------------------
 //    $Id$
 //
-//    Copyright (C) 2009 by the deal.II authors
+//    Copyright (C) 2009, 2012 by the deal.II authors
 //
 //    This file is subject to QPL and may not be  distributed
 //    without copyright and license information. Please refer
@@ -65,6 +65,18 @@ namespace types
                                    * more information.
                                    */
   const unsigned int artificial_subdomain_id = static_cast<subdomain_id_t>(-2);
+
+                                  /**
+                                   * The type used to denote global dof
+                                   * indices.
+                                   */
+  typedef unsigned int global_dof_index;
+
+                                  /**
+                                   * An invalid value for indices of degrees
+                                   * of freedom.
+                                   */
+  const global_dof_index invalid_dof_index = static_cast<global_dof_index>(-1);
 }
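
A short, hypothetical use of the new typedef and its sentinel value: an index that has not yet been assigned can be initialized to invalid_dof_index and tested against it later.

  #include <deal.II/base/types.h>

  void dof_index_sketch ()
  {
    types::global_dof_index dof_index = types::invalid_dof_index;
    // ... the index may or may not be assigned by some lookup ...
    if (dof_index == types::invalid_dof_index)
      {
        // handle the "no such index" case
      }
  }
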
 
 
index dd35fde65af7027d8c485acb4657a7dfe2fc28ec..30cb6728bd9262493f468531ba0d184ec33af692 100644 (file)
 
 #include <deal.II/base/config.h>
 #include <deal.II/base/exceptions.h>
+#include <deal.II/base/mpi.h>
 
 #include <vector>
 #include <utility>
 #include <functional>
 #include <string>
 
-#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC)
-#include <mpi.h>
-#else
-  typedef int MPI_Comm;
-#endif
-
 #ifdef DEAL_II_USE_TRILINOS
 #  include <Epetra_Comm.h>
 #  include <Epetra_Map.h>
@@ -276,270 +271,6 @@ namespace Utilities
   std::vector<unsigned int>
   invert_permutation (const std::vector<unsigned int> &permutation);
 
-                                   /**
-                                    * A namespace for utility functions that
-                                    * abstract certain operations using the
-                                   * Message Passing Interface (MPI) or
-                                   * provide fallback operations in
-                                   * case deal.II is configured not to use
-                                   * MPI at all.
-                                    *
-                                    * @ingroup utilities
-                                    */
-  namespace MPI
-  {
-                                    /**
-                                     * Return the number of MPI processes
-                                     * there exist in the given communicator
-                                     * object. If this is a sequential job,
-                                     * it returns 1.
-                                     */
-    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);
-
-                                    /**
-                                     * Return the number of the present MPI
-                                     * process in the space of processes
-                                     * described by the given
-                                     * communicator. This will be a unique
-                                     * value for each process between zero
-                                     * and (less than) the number of all
-                                     * processes (given by
-                                     * get_n_mpi_processes()).
-                                     */
-    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);
-
-                                    /**
-                                     * Consider an unstructured
-                                     * communication pattern where
-                                     * every process in an MPI
-                                     * universe wants to send some
-                                     * data to a subset of the other
-                                     * processors. To do that, the
-                                     * other processors need to know
-                                     * who to expect messages
-                                     * from. This function computes
-                                     * this information.
-                                     *
-                                     * @param mpi_comm A communicator
-                                     * that describes the processors
-                                     * that are going to communicate
-                                     * with each other.
-                                     *
-                                     * @param destinations The list
-                                     * of processors the current
-                                     * process wants to send
-                                     * information to. This list need
-                                     * not be sorted in any way. If
-                                     * it contains duplicate entries
-                                     * that means that multiple
-                                     * messages are intended for a
-                                     * given destination.
-                                     *
-                                     * @return A list of processors
-                                     * that have indicated that they
-                                     * want to send something to the
-                                     * current processor. The
-                                     * resulting list is not
-                                     * sorted. It may contain
-                                     * duplicate entries if
-                                     * processors enter the same
-                                     * destination more than once in
-                                     * their destinations list.
-                                     */
-    std::vector<unsigned int>
-    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
-                                                 const std::vector<unsigned int> & destinations);
-
-                                    /**
-                                     * Given a communicator, generate a new
-                                     * communicator that contains the same
-                                     * set of processors but that has a
-                                     * different, unique identifier.
-                                     *
-                                     * This functionality can be used to
-                                     * ensure that different objects, such as
-                                     * distributed matrices, each have unique
-                                     * communicators over which they can
-                                     * interact without interfering with each
-                                     * other.
-                                     *
-                                     * When no longer needed, the
-                                     * communicator created here needs to
-                                     * be destroyed using
-                                     * <code>MPI_Comm_free</code>.
-                                     */
-    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
-
-    /**
-     * Return the sum over all processors of the value @p t. This function
-     * is collective over all processors given in the communicator. If
-     * deal.II is not configured for use of MPI, this function simply
-     * returns the value of @p t. This function corresponds to the
-     * <code>MPI_Allreduce</code> function, i.e. all processors receive
-     * the result of this operation.
-     *
-     * @note This function is only implemented for certain template
-     * arguments <code>T</code>, namely <code>float, double, int,
-     * unsigned int</code>.
-     */
-    template <typename T>
-    T sum (const T &t,
-          const MPI_Comm &mpi_communicator);
-
-                                    /**
-                                     * Like the previous function,
-                                     * but take the sums over the
-                                     * elements of an array
-                                     * of length N. In other words,
-                                     * the i-th element of the
-                                     * results array is the sum over
-                                     * the i-th entries of the input
-                                     * arrays from each processor.
-                                     */
-    template <typename T, unsigned int N>
-    inline
-    void sum (const T (&values)[N],
-             const MPI_Comm &mpi_communicator,
-             T (&sums)[N]);
-
-                                     /**
-                                      * Like the previous function,
-                                      * but take the sums over the
-                                      * elements of a std::vector. In other words,
-                                      * the i-th element of the
-                                      * results array is the sum over
-                                      * the i-th entries of the input
-                                      * arrays from each processor.
-                                      */
-    template <typename T>
-    inline
-    void sum (const std::vector<T> &values,
-              const MPI_Comm &mpi_communicator,
-              std::vector<T> &sums);
-    
-    /**
-     * Return the maximum over all processors of the value @p t. This function
-     * is collective over all processors given in the communicator. If
-     * deal.II is not configured for use of MPI, this function simply
-     * returns the value of @p t. This function corresponds to the
-     * <code>MPI_Allreduce</code> function, i.e. all processors receive
-     * the result of this operation.
-     *
-     * @note This function is only implemented for certain template
-     * arguments <code>T</code>, namely <code>float, double, int,
-     * unsigned int</code>.
-     */
-    template <typename T>
-    T max (const T &t,
-          const MPI_Comm &mpi_communicator);
-
-                                    /**
-                                     * Like the previous function,
-                                     * but take the maxima over the
-                                     * elements of an array
-                                     * of length N. In other words,
-                                     * the i-th element of the
-                                     * results array is the maximum of
-                                     * the i-th entries of the input
-                                     * arrays from each processor.
-                                     */
-    template <typename T, unsigned int N>
-    inline
-    void max (const T (&values)[N],
-             const MPI_Comm &mpi_communicator,
-             T (&maxima)[N]);
-
-                                    /**
-                                     * Data structure to store the result of
-                                     * min_max_avg().
-                                     */
-    struct MinMaxAvg
-    {
-       double sum;
-       double min;
-       double max;
-       unsigned int min_index;
-       unsigned int max_index;
-       double avg;
-    };
-
-                                    /**
-                                     * Returns sum, average, minimum,
-                                     * maximum, processor id of minimum and
-                                     * maximum as a collective operation of
-                                     * on the given MPI communicator @param
-                                     * mpi_communicator . Each processor's
-                                     * value is given in @param my_value and
-                                     * the result will be returned in @p
-                                     * result . The result is available on all
-                                     * machines.
-                                     */
-    MinMaxAvg
-    min_max_avg (const double my_value,
-                const MPI_Comm &mpi_communicator);
-
-
-
-                                    /**
-                                     * A class that is used to initialize the
-                                     * MPI system at the beginning of a
-                                     * program and to shut it down again at
-                                     * the end.
-                                     *
-                                     * If a program uses MPI one would
-                                     * typically just create an object of
-                                     * this type at the beginning of
-                                     * <code>main()</code>. The constructor
-                                     * of this class then runs
-                                     * <code>MPI_Init()</code> with the given
-                                     * arguments. At the end of the program,
-                                     * the compiler will invoke the
-                                     * destructor of this object which in
-                                     * turns calls <code>MPI_Finalize</code>
-                                     * to shut down the MPI system.
-                                     *
-                                     * This class is used in step-32, for example.
-                                     */
-    class MPI_InitFinalize
-    {
-      public:
-                                        /**
-                                         * Constructor. Takes the arguments
-                                         * from the command line (in case of
-                                         * MPI, the number of processes is
-                                         * specified there), and sets up a
-                                         * respective communicator by calling
-                                         * <tt>MPI_Init()</tt>. This
-                                         * constructor can only be called once
-                                         * in a program, since MPI cannot be
-                                         * initialized twice.
-                                         */
-       MPI_InitFinalize (int    &argc,
-                         char** &argv);
-
-                                        /**
-                                         * Destructor. Calls
-                                         * <tt>MPI_Finalize()</tt> in
-                                         * case this class owns the MPI
-                                         * process.
-                                         */
-       ~MPI_InitFinalize();
-
-      private:
-                                        /**
-                                         * This flag tells the class
-                                         * whether it owns the MPI
-                                         * process (i.e., it has been
-                                         * constructed using the
-                                         * argc/argv input, or it has
-                                         * been copied). In the former
-                                         * case, the command
-                                         * <tt>MPI_Finalize()</tt> will
-                                         * be called at destruction.
-                                         */
-       const bool owns_mpi;
-    };
-  }
                                    /**
                                     * A namespace for utility functions that
                                     * probe system properties.
@@ -1014,143 +745,6 @@ namespace Utilities
          len = half;
       }
   }
-
-
-  namespace MPI
-  {
-    namespace internal
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      /**
-       * Return the corresponding MPI data type id for the argument given.
-       */
-      inline MPI_Datatype mpi_type_id (const int *)
-      {
-       return MPI_INT;
-      }
-
-
-      inline MPI_Datatype mpi_type_id (const long int *)
-      {
-       return MPI_LONG;
-      }
-
-
-      inline MPI_Datatype mpi_type_id (const unsigned int *)
-      {
-       return MPI_UNSIGNED;
-      }
-
-
-      inline MPI_Datatype mpi_type_id (const unsigned long int *)
-      {
-       return MPI_UNSIGNED_LONG;
-      }
-
-
-      inline MPI_Datatype mpi_type_id (const float *)
-      {
-       return MPI_FLOAT;
-      }
-
-
-      inline MPI_Datatype mpi_type_id (const double *)
-      {
-       return MPI_DOUBLE;
-      }
-#endif
-    }
-
-
-    template <typename T>
-    inline
-    T sum (const T &t,
-          const MPI_Comm &mpi_communicator)
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      T sum;
-      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
-                    &sum, 1, internal::mpi_type_id(&t), MPI_SUM,
-                    mpi_communicator);
-      return sum;
-#else
-      (void)mpi_communicator;
-      return t;
-#endif
-    }
-
-
-    template <typename T, unsigned int N>
-    inline
-    void sum (const T (&values)[N],
-             const MPI_Comm &mpi_communicator,
-             T (&sums)[N])
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&values[0])),
-                    &sums[0], N, internal::mpi_type_id(values), MPI_SUM,
-                    mpi_communicator);
-#else
-      (void)mpi_communicator;
-      for (unsigned int i=0; i<N; ++i)
-       sums[i] = values[i];
-#endif
-    }
-
-
-    template <typename T>
-    inline
-    void sum (const std::vector<T> &values,
-              const MPI_Comm       &mpi_communicator,
-              std::vector<T>       &sums)
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      sums.resize (values.size());
-      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&values[0])),
-                     &sums[0], values.size(), internal::mpi_type_id((T*)0), MPI_SUM,
-                     mpi_communicator);
-#else
-      (void)mpi_communicator;
-      sums = values;
-#endif
-    }
-
-
-    template <typename T>
-    inline
-    T max (const T &t,
-          const MPI_Comm &mpi_communicator)
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      T sum;
-      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
-                    &sum, 1, internal::mpi_type_id(&t), MPI_MAX,
-                    mpi_communicator);
-      return sum;
-#else
-      (void)mpi_communicator;
-      return t;
-#endif
-    }
-
-
-    template <typename T, unsigned int N>
-    inline
-    void max (const T (&values)[N],
-             const MPI_Comm &mpi_communicator,
-             T (&maxima)[N])
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&values[0])),
-                    &maxima[0], N, internal::mpi_type_id(values), MPI_MAX,
-                    mpi_communicator);
-#else
-      (void)mpi_communicator;
-      for (unsigned int i=0; i<N; ++i)
-       maxima[i] = values[i];
-#endif
-    }
-  }
 }
 
 
index a73384f29fbff6ad83146ae835a85b5689b79214..a7cac2ec3fdcba2b4fe633d5ec64b13593da3917 100644 (file)
@@ -1643,7 +1643,7 @@ class FEValuesData
  * <h3>UpdateFlags</h3>
  *
  * The UpdateFlags object handed to the constructor is used to
- * determine, which of the data fields to compute. This way, it is
+ * determine which of the data fields to compute. This way, it is
  * possible to avoid expensive computations of useless derivatives.
  * In the beginning, these flags are processed through the functions
  * Mapping::update_once(), Mapping::update_each(),
diff --git a/deal.II/include/deal.II/lac/parallel_block_vector.h b/deal.II/include/deal.II/lac/parallel_block_vector.h
new file mode 100644 (file)
index 0000000..d0a144b
--- /dev/null
@@ -0,0 +1,549 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__parallel_block_vector_h
+#define __deal2__parallel_block_vector_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/exceptions.h>
+#include <deal.II/lac/block_indices.h>
+#include <deal.II/lac/block_vector_base.h>
+
+#include <cstdio>
+#include <vector>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+// TODO: global reduction operations (operator *, {l1,l2,lp,linfty}norm, mean
+// value) should use one MPI communication with several Number values, not use
+// the parallel::distributed::Vector operation directly.
+
+namespace parallel
+{
+  namespace distributed
+  {
+
+/*! @addtogroup Vectors
+ *@{
+ */
+
+
+/**
+ * An implementation of block vectors based on distributed deal.II
+ * vectors. While the base class provides most of the interface, this
+ * class handles the actual allocation of vectors and provides functions that
+ * are specific to the underlying vector type.
+ *
+ * @note Instantiations for this template are provided for <tt>@<float@> and
+ * @<double@></tt>; others can be generated in application programs (see the
+ * section on @ref Instantiations in the manual).
+ *
+ * @see @ref GlossBlockLA "Block (linear algebra)"
+ * @author Katharina Kormann, Martin Kronbichler, 2011
+ */
+    template <typename Number>
+    class BlockVector : public BlockVectorBase<Vector<Number> >
+    {
+    public:
+                                     /**
+                                      * Typedef the base class for simpler
+                                      * access to its own typedefs.
+                                      */
+      typedef BlockVectorBase<Vector<Number> > BaseClass;
+
+                                     /**
+                                      * Typedef the type of the underlying
+                                      * vector.
+                                      */
+      typedef typename BaseClass::BlockType  BlockType;
+
+                                    /**
+                                     * Import the typedefs from the base
+                                     * class.
+                                     */
+      typedef typename BaseClass::value_type      value_type;
+      typedef typename BaseClass::real_type       real_type;
+      typedef typename BaseClass::pointer         pointer;
+      typedef typename BaseClass::const_pointer   const_pointer;
+      typedef typename BaseClass::reference       reference;
+      typedef typename BaseClass::const_reference const_reference;
+      typedef typename BaseClass::size_type       size_type;
+      typedef typename BaseClass::iterator        iterator;
+      typedef typename BaseClass::const_iterator  const_iterator;
+
+                                    /**
+                                     *  Constructor. There are three
+                                     *  ways to use this
+                                     *  constructor. First, without
+                                     *  any arguments, it generates
+                                     *  an object with no
+                                     *  blocks. Given one argument,
+                                     *  it initializes <tt>num_blocks</tt>
+                                     *  blocks, but these blocks have
+                                     *  size zero. The third variant
+                                     *  finally initializes all
+                                     *  blocks to the same size
+                                     *  <tt>block_size</tt>.
+                                     *
+                                     *  Confer the other constructor
+                                     *  further down if you intend to
+                                     *  use blocks of different
+                                     *  sizes.
+                                     */
+      explicit BlockVector (const unsigned int num_blocks = 0,
+                           const unsigned int block_size = 0);
+
+                                    /**
+                                     * Copy constructor. The dimension is set
+                                     * to that of V; all components are copied
+                                     * from V.
+                                     */
+      BlockVector (const BlockVector<Number>& V);
+
+
+#ifndef DEAL_II_EXPLICIT_CONSTRUCTOR_BUG
+                                    /**
+                                     * Copy constructor taking a BlockVector of
+                                     * another data type. This will fail if
+                                     * there is no conversion path from
+                                     * <tt>OtherNumber</tt> to <tt>Number</tt>. Note that
+                                     * you may lose accuracy when copying
+                                     * to a BlockVector with data elements with
+                                     * less accuracy.
+                                     *
+                                     * Older versions of gcc did not honor
+                                     * the @p explicit keyword on template
+                                     * constructors. In such cases, it is
+                                     * easy to accidentally write code that
+                                     * can be very inefficient, since the
+                                     * compiler starts performing hidden
+                                     * conversions. To avoid this, this
+                                     * function is disabled if we have
+                                     * detected a broken compiler during
+                                     * configuration.
+                                     */
+      template <typename OtherNumber>
+      explicit
+      BlockVector (const BlockVector<OtherNumber> &v);
+#endif
+
+                                     /**
+                                      * Constructor. Set the number of
+                                      * blocks to
+                                      * <tt>block_sizes.size()</tt> and
+                                      * initialize each block with
+                                      * <tt>block_sizes[i]</tt> zero
+                                      * elements.
+                                      */
+      BlockVector (const std::vector<unsigned int> &block_sizes);
+
+                                     /**
+                                     * Destructor. Clears memory
+                                     */
+      ~BlockVector ();
+
+                                    /**
+                                     * Copy operator: fill all components of
+                                     * the vector with the given scalar
+                                     * value.
+                                     */
+      BlockVector & operator = (const value_type s);
+
+                                    /**
+                                     * Copy operator for arguments of the
+                                     * same type. Resize the
+                                     * present vector if necessary.
+                                     */
+      BlockVector &
+      operator= (const BlockVector &V);
+
+                                    /**
+                                     * Copy operator for template arguments
+                                     * of different types. Resize the
+                                     * present vector if necessary.
+                                     */
+      template <class Number2>
+      BlockVector &
+      operator= (const BlockVector<Number2> &V);
+
+                                    /**
+                                     * Copy a regular vector into a
+                                     * block vector.
+                                     */
+      BlockVector &
+      operator= (const Vector<Number> &V);
+
+                                     /**
+                                     * Reinitialize the BlockVector to
+                                     * contain <tt>num_blocks</tt> blocks of
+                                     * size <tt>block_size</tt> each.
+                                     *
+                                     * If the second argument is left
+                                     * at its default value, then the
+                                     * block vector allocates the
+                                     * specified number of blocks but
+                                     * leaves them at zero size. You
+                                     * then need to later
+                                     * reinitialize the individual
+                                     * blocks, and call
+                                     * collect_sizes() to update the
+                                     * block system's knowledge of
+                                     * its individual block's sizes.
+                                     *
+                                     * If <tt>fast==false</tt>, the vector
+                                     * is filled with zeros.
+                                     */
+      void reinit (const unsigned int num_blocks,
+                  const unsigned int block_size = 0,
+                  const bool fast = false);
+
+                                    /**
+                                     * Reinitialize the BlockVector such that
+                                     * it contains
+                                     * <tt>block_sizes.size()</tt>
+                                     * blocks. Each block is reinitialized to
+                                     * dimension <tt>block_sizes[i]</tt>.
+                                     *
+                                     * If the number of blocks is the
+                                     * same as before this function
+                                     * was called, all vectors remain
+                                     * the same and reinit() is
+                                     * called for each vector.
+                                     *
+                                     * If <tt>fast==false</tt>, the vector
+                                     * is filled with zeros.
+                                     *
+                                     * Note that you must call this
+                                     * function (or one of the other
+                                     * reinit() functions), rather
+                                     * than calling the reinit()
+                                     * functions of an individual
+                                     * block, to allow the block
+                                     * vector to update its caches of
+                                     * vector sizes. If you call
+                                     * reinit() on one of the
+                                     * blocks, then subsequent
+                                     * actions on this object may
+                                     * yield unpredictable results
+                                     * since they may be routed to
+                                     * the wrong block.
+                                     */
+      void reinit (const std::vector<unsigned int> &N,
+                  const bool                       fast=false);
+
+                                    /**
+                                     * Change the dimension to that
+                                     * of the vector <tt>V</tt>. The same
+                                     * applies as for the other
+                                     * reinit() function.
+                                     *
+                                     * The elements of <tt>V</tt> are not
+                                     * copied, i.e.  this function is
+                                     * the same as calling <tt>reinit
+                                     * (V.size(), fast)</tt>.
+                                     *
+                                     * Note that you must call this
+                                     * function (or one of the other
+                                     * reinit() functions), rather
+                                     * than calling the reinit()
+                                     * functions of an individual
+                                     * block, to allow the block
+                                     * vector to update its caches of
+                                     * vector sizes. If you call
+                                     * reinit() on one of the
+                                     * blocks, then subsequent
+                                     * actions on this object may
+                                     * yield unpredictable results
+                                     * since they may be routed to
+                                     * the wrong block.
+                                     */
+      template <typename Number2>
+      void reinit (const BlockVector<Number2> &V,
+                  const bool                 fast=false);
+
+                                    /**
+                                     * Scale each element of the
+                                     * vector by the given factor.
+                                     *
+                                     * This function is deprecated
+                                     * and will be removed in a
+                                     * future version. Use
+                                     * <tt>operator *=</tt> and
+                                     * <tt>operator /=</tt> instead.
+                                     *
+                                     * @deprecated Use <tt>operator*=</tt>
+                                     * instead.
+                                     */
+      void scale (const value_type factor);
+
+                                    /**
+                                     * Multiply each element of this
+                                     * vector by the corresponding
+                                     * element of <tt>v</tt>.
+                                     */
+      template <class BlockVector2>
+      void scale (const BlockVector2 &v);
+
+                                    /**
+                                     * Swap the contents of this
+                                     * vector and the other vector
+                                     * <tt>v</tt>. One could do this
+                                     * operation with a temporary
+                                     * variable and copying over the
+                                     * data elements, but this
+                                     * function is significantly more
+                                     * efficient since it only swaps
+                                     * the pointers to the data of
+                                     * the two vectors and therefore
+                                     * does not need to allocate
+                                     * temporary storage and move
+                                     * data around.
+                                     *
+                                     * Limitation: right now this
+                                     * function only works if both
+                                     * vectors have the same number
+                                     * of blocks. If needed, the
+                                     * numbers of blocks should be
+                                     * exchanged, too.
+                                     *
+                                     * This function is analogous to
+                                     * the swap() function of all C++
+                                     * standard containers. Also,
+                                     * there is a global function
+                                     * swap(u,v) that simply calls
+                                     * <tt>u.swap(v)</tt>, again in analogy
+                                     * to standard functions.
+                                     */
+      void swap (BlockVector<Number> &v);
+
+                                    /** @addtogroup Exceptions
+                                     * @{ */
+
+                                    /**
+                                     * Exception
+                                     */
+      DeclException0 (ExcIteratorRangeDoesNotMatchVectorSize);
+                                    //@}
+    };
+
+/*@}*/
+
+#ifndef DOXYGEN
+/*----------------------- Inline functions ----------------------------------*/
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number>::BlockVector (const unsigned int n_blocks,
+                                     const unsigned int block_size)
+    {
+      reinit (n_blocks, block_size);
+    }
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number>::BlockVector (const std::vector<unsigned int> &n)
+    {
+      reinit (n, false);
+    }
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number>::BlockVector (const BlockVector<Number>& v)
+      :
+      BlockVectorBase<Vector<Number> > ()
+    {
+      this->components.resize (v.n_blocks());
+      this->block_indices = v.block_indices;
+
+      for (unsigned int i=0; i<this->n_blocks(); ++i)
+       this->components[i] = v.components[i];
+    }
+
+
+#ifndef DEAL_II_EXPLICIT_CONSTRUCTOR_BUG
+
+    template <typename Number>
+    template <typename OtherNumber>
+    inline
+    BlockVector<Number>::BlockVector (const BlockVector<OtherNumber>& v)
+    {
+      reinit (v, true);
+      *this = v;
+    }
+
+#endif
+
+
+
+    template <typename Number>
+    inline
+    void BlockVector<Number>::reinit (const unsigned int n_bl,
+                                     const unsigned int bl_sz,
+                                     const bool         fast)
+    {
+      std::vector<unsigned int> n(n_bl, bl_sz);
+      reinit(n, fast);
+    }
+
+
+    template <typename Number>
+    inline
+    void BlockVector<Number>::reinit (const std::vector<unsigned int> &n,
+                                     const bool                       fast)
+    {
+      this->block_indices.reinit (n);
+      if (this->components.size() != this->n_blocks())
+       this->components.resize(this->n_blocks());
+
+      for (unsigned int i=0; i<this->n_blocks(); ++i)
+       this->components[i].reinit(n[i], fast);
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    void BlockVector<Number>::reinit (const BlockVector<Number2>& v,
+                                     const bool fast)
+    {
+      this->block_indices = v.get_block_indices();
+      if (this->components.size() != this->n_blocks())
+       this->components.resize(this->n_blocks());
+
+      for (unsigned int i=0;i<this->n_blocks();++i)
+       this->block(i).reinit(v.block(i), fast);
+    }
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number>::~BlockVector ()
+    {}
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number> &
+    BlockVector<Number>::operator = (const value_type s)
+    {
+
+      Assert (numbers::is_finite(s), ExcNumberNotFinite());
+
+      BaseClass::operator = (s);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number> &
+    BlockVector<Number>::operator = (const BlockVector &v)
+    {
+      reinit (v, true);
+      BaseClass::operator = (v);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    BlockVector<Number> &
+    BlockVector<Number>::operator = (const Vector<Number> &v)
+    {
+      BaseClass::operator = (v);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    BlockVector<Number> &
+    BlockVector<Number>::operator = (const BlockVector<Number2> &v)
+    {
+      reinit (v, true);
+      BaseClass::operator = (v);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    void BlockVector<Number>::swap (BlockVector<Number> &v)
+    {
+      Assert (this->n_blocks() == v.n_blocks(),
+             ExcDimensionMismatch(this->n_blocks(), v.n_blocks()));
+
+      for (unsigned int i=0; i<this->n_blocks(); ++i)
+       dealii::swap (this->components[i], v.components[i]);
+      dealii::swap (this->block_indices, v.block_indices);
+    }
+
+
+
+    template <typename Number>
+    void BlockVector<Number>::scale (const value_type factor)
+    {
+
+      Assert (numbers::is_finite(factor), ExcNumberNotFinite());
+
+      for (unsigned int i=0; i<this->n_blocks();++i)
+       this->components[i].scale(factor);
+    }
+
+
+
+    template <typename Number>
+    template <class BlockVector2>
+    void BlockVector<Number>::scale (const BlockVector2 &v)
+    {
+      BaseClass::scale (v);
+    }
+
+#endif // DOXYGEN
+
+  } // end of namespace distributed
+
+} // end of namespace parallel
+
+/**
+ * Global function that overloads the default implementation
+ * of the C++ standard library, which uses a temporary object. The
+ * function simply exchanges the data of the two vectors.
+ *
+ * @relates BlockVector
+ * @author Katharina Kormann, Martin Kronbichler, 2011
+ */
+template <typename Number>
+inline
+void swap (parallel::distributed::BlockVector<Number> &u,
+          parallel::distributed::BlockVector<Number> &v)
+{
+  u.swap (v);
+}
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
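
A brief usage sketch of the class declared above (illustrative only; the entry and block accessors come from the BlockVectorBase base class, and no parallel layout is set up here):

  #include <deal.II/lac/parallel_block_vector.h>

  void block_vector_sketch ()
  {
    // two blocks, initially of size zero
    parallel::distributed::BlockVector<double> v (2);

    std::vector<unsigned int> block_sizes (2);
    block_sizes[0] = 10;
    block_sizes[1] = 20;
    v.reinit (block_sizes);                   // blocks now have 10 and 20 entries

    v = 1.0;                                  // set every entry to one
    v.block(1)(5) = 3.0;                      // write an entry of the second block

    parallel::distributed::BlockVector<double> w (v);  // copy constructor
    swap (v, w);                              // swaps data pointers, see above
  }
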
diff --git a/deal.II/include/deal.II/lac/parallel_vector.h b/deal.II/include/deal.II/lac/parallel_vector.h
new file mode 100644 (file)
index 0000000..1fdb206
--- /dev/null
@@ -0,0 +1,1856 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011, 2012 by deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__parallel_vector_h
+#define __deal2__parallel_vector_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/template_constraints.h>
+#include <deal.II/base/types.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/thread_management.h>
+#include <deal.II/lac/vector_view.h>
+
+#include <cstring>
+
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace parallel
+{
+  namespace distributed
+  {
+
+/*! @addtogroup Vectors
+ *@{
+ */
+
+
+                                    /**
+                                     * Implementation of a parallel vector class. The design of this class is
+                                     * similar to the standard dealii::Vector<Number> class in deal.II, with the
+                                     * exception that storage is distributed with MPI.
+                                     *
+                                     * The vector is designed for the following scheme of parallel partitioning:
+                                     * - The indices held by individual processes (locally owned part) in the
+                                     *   MPI parallelization form a contiguous range
+                                     *   <code>[my_first_index,my_last_index)</code>.
+                                     * - Ghost indices residing on arbitrary positions of other processors are
+                                     *   allowed. It is in general more efficient if ghost indices are
+                                     *   clustered, since they are stored as a set of intervals. The
+                                     *   communication pattern of the ghost indices is determined when calling
+                                     *   the function <code>reinit (locally_owned, ghost_indices,
+                                     *   communicator)</code>, and retained until the partitioning is changed
+                                     *   again. This allows for efficient parallel communication of indices. In
+                                     *   particular, it stores the communication pattern, rather than having
+                                     *   to compute it again for every communication.
+                                     * - Besides the usual global access operator () it is also possible to
+                                     *   access vector entries in the local index space with the function @p
+                                     *   local_element(). Locally owned indices are placed first, [0,
+                                     *   local_size()), and then all ghost indices follow after them
+                                     *   contiguously, [local_size(), local_size()+n_ghost_indices()).
+                                     *
+                                     * Functions related to parallel functionality:
+                                     * - The function <code>compress()</code> goes through the data associated
+                                     *   with ghost indices and communicates it to the owner process, which can
+                                     *   then add or set it at the correct position. This can be used e.g. after
+                                     *   having run an assembly routine that also writes into ghost entries of
+                                     *   this vector.
+                                     * - The <code>update_ghost_values()</code> function imports the data from
+                                     *   the owning processor to the ghost indices in order to provide read
+                                     *   access to the data associated with ghosts.
+                                     * - It is possible to split the above functions into two phases, where the
+                                     *   first initiates the communication and the second one finishes it. These
+                                     *   functions can be used to overlap communication with computations in
+                                     *   other parts of the code; see the usage sketch below.
+                                     * - Of course, reduction operations (like norms) make use of collective
+                                     *   all-to-all MPI communications.
+                                     *
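+                                     * As a minimal usage sketch (the index sets, the number of processes,
+                                     * and the variable <code>my_rank</code>, denoting the rank of the
+                                     * calling process, are illustrative assumptions and not taken from an
+                                     * actual test):
+                                     * @code
+                                     *   // 10 processes, each owning 10 consecutive entries of a vector of
+                                     *   // global size 100; every process additionally reads entry 0
+                                     *   IndexSet locally_owned (100), ghost_set (100);
+                                     *   locally_owned.add_range (10*my_rank, 10*(my_rank+1));
+                                     *   ghost_set.add_index (0);
+                                     *
+                                     *   Vector<double> v (locally_owned, ghost_set, MPI_COMM_WORLD);
+                                     *   for (unsigned int i=v.local_range().first;
+                                     *        i<v.local_range().second; ++i)
+                                     *     v(i) = i;                  // write locally owned entries
+                                     *   v.update_ghost_values ();    // make ghost entries readable
+                                     *   const double first = v(0);   // valid also on non-owning processes
+                                     * @endcode
+                                     *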
+                                     * @author Katharina Kormann, Martin Kronbichler, 2010, 2011
+                                     */
+    template <typename Number>
+    class Vector : public Subscriptor
+    {
+      public:
+                                        /**
+                                         * Declare standard types used in all
+                                         * containers. These types parallel those in
+                                         * the <tt>C++</tt> standard library's
+                                         * <tt>vector<...></tt> class.
+                                         */
+       typedef Number                                            value_type;
+       typedef value_type                                       *pointer;
+       typedef const value_type                                 *const_pointer;
+       typedef value_type                                       *iterator;
+       typedef const value_type                                 *const_iterator;
+       typedef value_type                                       &reference;
+       typedef const value_type                                 &const_reference;
+       typedef size_t                                            size_type;
+       typedef typename numbers::NumberTraits<Number>::real_type real_type;
+
+                                        /**
+                                         * @name 1: Basic Object-handling
+                                         */
+                                        //@{
+                                        /**
+                                         * Empty constructor.
+                                         */
+       Vector ();
+
+                                        /**
+                                         * Copy constructor. Uses the parallel
+                                         * partitioning of @p in_vector.
+                                         */
+       Vector (const Vector<Number> &in_vector);
+
+                                        /**
+                                         * Constructs a parallel vector of the given
+                                         * global size without any actual parallel
+                                         * distribution.
+                                         */
+       Vector (const unsigned int size);
+
+                                        /**
+                                         * Constructs a parallel vector. The local
+                                         * range is specified by @p local_range
+                                         * (note that this must be a contiguous
+                                         * interval; multiple intervals are not
+                                         * possible). The IndexSet @p ghost_indices
+                                         * specifies ghost indices, i.e., indices
+                                         * which one might need to read data from or
+                                         * accumulate data into. It is allowed that
+                                         * the set of ghost indices also contains the
+                                         * local range, but it does not need to.
+                                         *
+                                         * This function involves global
+                                         * communication, so it should only be called
+                                         * once for a given layout. Use the
+                                         * constructor with Vector<Number> argument to
+                                         * create additional vectors with the same
+                                         * parallel layout.
+                                         */
+       Vector (const IndexSet &local_range,
+               const IndexSet &ghost_indices,
+               const MPI_Comm  communicator);
+
+                                        /**
+                                         * Create the vector based on the parallel
+                                         * partitioning described in @p
+                                         * partitioner. The input argument is a shared
+                                         * pointer, which stores the partitioner data
+                                         * only once and shares it among several
+                                         * vectors with the same layout.
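+                                         *
+                                         * A sketch of sharing one partitioner
+                                         * object between two vectors (this assumes
+                                         * the Partitioner can be built from the
+                                         * same index sets and communicator as the
+                                         * IndexSet-based constructor above):
+                                         * @code
+                                         *   std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner>
+                                         *     part (new Utilities::MPI::Partitioner (locally_owned,
+                                         *                                            ghost_set,
+                                         *                                            MPI_COMM_WORLD));
+                                         *   Vector<double> v (part);
+                                         *   Vector<double> w (part);  // shares the partitioner data
+                                         * @endcode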
+                                         */
+       Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+                                        /**
+                                         * Destructor.
+                                         */
+       ~Vector ();
+
+                                        /**
+                                         * Sets the global size of the vector to @p
+                                         * size without any actual parallel
+                                         * distribution.
+                                         */
+       void reinit (const unsigned int size,
+                    const bool         fast = false);
+
+                                        /**
+                                         * Uses the parallel layout of the input
+                                         * vector @p in_vector and allocates memory
+                                         * for this vector. Recommended initialization
+                                         * function when several vectors with the same
+                                         * layout should be created.
+                                         *
+                                         * If the flag @p fast is set to false, the
+                                         * memory will be initialized with zero,
+                                         * otherwise the memory will be untouched (and
+                                         * the user must make sure to fill it with
+                                         * reasonable data before using it).
+                                         */
+       template <typename Number2>
+       void reinit(const Vector<Number2> &in_vector,
+                   const bool             fast = false);
+
+                                        /**
+                                         * Initialize the vector. The local range is
+                                         * specified by @p local_range (note
+                                         * that this must be a contiguous interval;
+                                         * multiple intervals are not possible). The
+                                         * IndexSet @p ghost_indices specifies ghost
+                                         * indices, i.e., indices which one might need
+                                         * to read data from or accumulate data
+                                         * into. It is allowed that the set of ghost
+                                         * indices also contains the local range, but
+                                         * it does not need to.
+                                         *
+                                         * This function involves global
+                                         * communication, so it should only be called
+                                         * once for a given layout. Use the @p reinit
+                                         * function with Vector<Number> argument to
+                                         * create additional vectors with the same
+                                         * parallel layout.
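+                                         *
+                                         * A possible pattern (sketch only; the
+                                         * index sets and communicator are
+                                         * placeholders):
+                                         * @code
+                                         *   Vector<double> reference;
+                                         *   reference.reinit (locally_owned, ghost_set, MPI_COMM_WORLD);
+                                         *   Vector<double> tmp;
+                                         *   tmp.reinit (reference);  // same layout, no global communication
+                                         * @endcode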
+                                         */
+       void reinit (const IndexSet &local_range,
+                    const IndexSet &ghost_indices,
+                    const MPI_Comm  communicator);
+
+                                        /**
+                                         * Initialize the vector to the parallel
+                                         * partitioning described in @p
+                                         * partitioner. The input argument is a shared
+                                         * pointer, which stores the partitioner data
+                                         * only once and shares it among several
+                                         * vectors with the same layout.
+                                         */
+       void reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+
+                                        /**
+                                         * Swap the contents of this
+                                         * vector and the other vector
+                                         * @p v. One could do this
+                                         * operation with a temporary
+                                         * variable and copying over the
+                                         * data elements, but this
+                                         * function is significantly more
+                                         * efficient since it only swaps
+                                         * the pointers to the data of
+                                         * the two vectors and therefore
+                                         * does not need to allocate
+                                         * temporary storage and move
+                                         * data around.
+                                         *
+                                         * This function is analogous to
+                                         * the @p swap function of all C++
+                                         * standard containers. Also,
+                                         * there is a global function
+                                         * <tt>swap(u,v)</tt> that simply calls
+                                         * <tt>u.swap(v)</tt>, again in analogy
+                                         * to standard functions.
+                                         */
+       void swap (Vector<Number> &v);
+
+                                        /**
+                                         * Assigns the vector to the parallel
+                                         * partitioning of the input vector @p
+                                         * in_vector, and copies all the data.
+                                         */
+       Vector<Number> &
+       operator = (const Vector<Number>  &in_vector);
+
+                                        /**
+                                         * Assigns the vector to the parallel
+                                         * partitioning of the input vector @p
+                                         * in_vector, and copies all the data.
+                                         */
+       template <typename Number2>
+       Vector<Number> &
+       operator = (const Vector<Number2> &in_vector);
+
+                                        /**
+                                         * This method copies the local range from
+                                         * another vector with the same local range,
+                                         * but possibly different layout of ghost
+                                         * indices.
+                                         */
+       void copy_from (const Vector<Number> &in_vector,
+                       const bool            call_update_ghost_values = false);
+
+                                        /**
+                                         * Sets all elements of the vector to the
+                                         * scalar @p s. If the scalar is zero, the
+                                         * ghost elements are set to zero as well;
+                                         * otherwise, they remain unchanged.
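+                                         *
+                                         * For example, <code>v = 0.;</code> also
+                                         * clears the ghost entries, whereas
+                                         * <code>v = 1.;</code> leaves them
+                                         * untouched.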
+                                         */
+       Vector<Number>& operator = (const Number s);
+
+                                        /**
+                                         * This function copies the data that has
+                                         * accumulated in the data buffer for ghost
+                                         * indices to the owning processor. If the
+                                         * optional argument @p add_ghost_data is set
+                                         * to true, the data is added into the
+                                         * respective positions of the owning
+                                         * processor; otherwise, the new data
+                                         * overwrites the old content at the owning
+                                         * processor. In that case, data coming from
+                                         * different processors for the same target
+                                         * entry should be identical. However, no
+                                         * checking is performed, so it is the user's
+                                         * responsibility to ensure consistency of
+                                         * data.
+                                         *
+                                         * For the meaning of this argument, see the
+                                         * entry on
+                                         * @ref GlossCompress "Compressing distributed vectors and matrices"
+                                         * in the glossary.
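+                                         *
+                                         * A typical usage sketch (assuming an
+                                         * assembly loop that also wrote into ghost
+                                         * entries of a vector @p v):
+                                         * @code
+                                         *   // ... assemble local contributions into v ...
+                                         *   v.compress (true);  // accumulate ghost contributions at the owners
+                                         * @endcode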
+                                         */
+       void compress (const bool add_ghost_data = true);
+
+                                        /**
+                                         * Fills the data field for ghost indices with
+                                         * the values stored in the respective
+                                         * positions of the owning processor. This
+                                         * function is needed before reading from
+                                         * ghosts. The function is @p const even
+                                         * though ghost data is changed. This is
+                                         * needed to allow functions with a @p const
+                                         * vector to perform the data exchange without
+                                         * creating temporaries.
+                                         */
+       void update_ghost_values () const;
+
+                                        /**
+                                         * Initiates communication for the @p
+                                         * compress() function with non-blocking
+                                         * communication. This function does not wait
+                                         * for the transfer to finish, in order to
+                                         * allow for other computations during the
+                                         * time it takes until all data arrives.
+                                         *
+                                         * Before the data is actually exchanged, the
+                                         * function must be followed by a call to @p
+                                         * compress_finish().
+                                         *
+                                         * In case this function is called for more
+                                         * than one vector before @p
+                                         * compress_finish() is invoked, it is
+                                         * mandatory to specify a unique
+                                         * communication channel to each such call, in
+                                         * order to avoid several messages with the
+                                         * same ID that will corrupt this operation.
+                                         */
+       void compress_start (const unsigned int communication_channel = 0);
+
+                                        /**
+                                         * For all requests that have been initiated
+                                         * in compress_start, wait for the
+                                         * communication to finish. Once it is
+                                         * finished, add or set the data (depending on
+                                         * whether @p add_ghost_data is @p true or @p
+                                         * false) to the respective positions in the
+                                         * owning processor, and clear the contents in
+                                         * the ghost data fields. The meaning of
+                                         * this argument is the same as in compress().
+                                         *
+                                         * Must follow a call to the @p compress_start
+                                         * function.
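+                                         *
+                                         * A sketch of overlapping communication
+                                         * with computation; here,
+                                         * <code>do_other_work()</code> stands for
+                                         * user code that does not touch the vector:
+                                         * @code
+                                         *   v.compress_start ();
+                                         *   do_other_work ();
+                                         *   v.compress_finish (true);  // ghost data has now been added at the owners
+                                         * @endcode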
+                                         */
+       void compress_finish (const bool add_ghost_data = true);
+
+
+                                        /**
+                                         * Initiates communication for the @p
+                                         * update_ghost_values() function with non-blocking
+                                         * communication. This function does not wait
+                                         * for the transfer to finish, in order to
+                                         * allow for other computations during the
+                                         * time it takes until all data arrives.
+                                         *
+                                         * Before the data is actually exchanged, the
+                                         * function must be followed by a call to @p
+                                         * update_ghost_values_finish().
+                                         *
+                                         * In case this function is called for more
+                                         * than one vector before @p
+                                         * update_ghost_values_finish() is invoked, it is
+                                         * mandatory to specify a unique communication
+                                         * channel to each such call, in order to
+                                         * avoid several messages with the same ID
+                                         * that will corrupt this operation.
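+                                         *
+                                         * A sketch with two vectors @p u and @p v
+                                         * (hypothetical names, for illustration
+                                         * only) using distinct channels:
+                                         * @code
+                                         *   u.update_ghost_values_start (0);
+                                         *   v.update_ghost_values_start (1);  // channel differs from u's
+                                         *   u.update_ghost_values_finish ();
+                                         *   v.update_ghost_values_finish ();
+                                         * @endcode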
+                                         */
+       void update_ghost_values_start (const unsigned int communication_channel = 0) const;
+
+
+                                        /**
+                                         * For all requests that have been started in
+                                         * update_ghost_values_start, wait for the communication
+                                         * to finish.
+                                         *
+                                         * Must follow a call to the @p
+                                         * update_ghost_values_start function before reading
+                                         * data from ghost indices.
+                                         */
+       void update_ghost_values_finish () const;
+
+                                        /**
+                                         * This method zeros the entries on ghost
+                                         * dofs, but does not touch locally owned
+                                         * DoFs.
+                                         */
+       void zero_out_ghosts ();
+
+                                        /**
+                                         * Return whether the vector contains only
+                                         * elements with value zero. This function
+                                         * is mainly intended for internal consistency
+                                         * checks and should seldom be used outside
+                                         * of debug mode, since it is rather
+                                         * expensive.
+                                         */
+       bool all_zero () const;
+
+                                        /**
+                                         * Return @p true if the vector has no
+                                         * negative entries, i.e. all entries are
+                                         * zero or positive. This function is
+                                         * used, for example, to check whether
+                                         * refinement indicators are really all
+                                         * positive (or zero).
+                                         *
+                                         * The function obviously only makes
+                                         * sense if the template argument of this
+                                         * class is a real type. If it is a
+                                         * complex type, then an exception is
+                                         * thrown.
+                                         */
+       bool is_non_negative () const;
+
+                                        /**
+                                         * Checks for equality of the two vectors.
+                                         */
+       template <typename Number2>
+       bool operator == (const Vector<Number2> &v) const;
+
+                                        /**
+                                         * Checks for inequality of the two vectors.
+                                         */
+       template <typename Number2>
+       bool operator != (const Vector<Number2> &v) const;
+
+                                        /**
+                                         * Perform the inner product of two vectors.
+                                         */
+       template <typename Number2>
+       Number operator * (const Vector<Number2> &V) const;
+
+                                        /**
+                                         * Computes the square of the l<sub>2</sub>
+                                         * norm of the vector (i.e., the sum of the
+                                         * squares of all entries among all
+                                         * processors).
+                                         */
+       real_type norm_sqr () const;
+
+                                        /**
+                                         * Computes the mean value of all the entries
+                                         * in the vector.
+                                         */
+       Number mean_value () const;
+
+                                        /**
+                                         * Returns the l<sub>1</sub> norm of the
+                                         * vector (i.e., the sum of the absolute
+                                         * values of all entries among all
+                                         * processors).
+                                         */
+       real_type l1_norm () const;
+
+                                        /**
+                                         * Returns the l<sub>2</sub> norm of the
+                                         * vector (i.e., square root of the sum of the
+                                         * squares of all entries among all
+                                         * processors).
+                                         */
+       real_type l2_norm () const;
+
+                                        /**
+                                         * Returns the l<sub>p</sub> norm with real @p
+                                         * p of the vector (i.e., the pth root of the
+                                         * sum of the pth powers of the absolute
+                                         * values of all entries among all processors).
+                                         */
+       real_type lp_norm (const real_type p) const;
+
+                                        /**
+                                         * Returns the maximum norm of the vector
+                                         * (i.e., the maximum absolute value of all
+                                         * entries among all processors).
+                                         */
+       real_type linfty_norm () const;
+
+                                        /**
+                                         * Returns the global size of the vector,
+                                         * equal to the sum of the number of locally
+                                         * owned indices among all the processors.
+                                         */
+       types::global_dof_index size () const;
+
+                                        /**
+                                         * Returns the local size of the vector, i.e.,
+                                         * the number of indices owned locally.
+                                         */
+       unsigned int local_size() const;
+
+                                        /**
+                                         * Returns the half-open interval that
+                                         * specifies the locally owned range of the
+                                         * vector. Note that <code>local_size() ==
+                                         * local_range().second -
+                                         * local_range().first</code>.
+                                         */
+       std::pair<types::global_dof_index, types::global_dof_index> local_range () const;
+
+                                        /**
+                                         * Returns true if the given global index is
+                                         * in the local range of this processor.
+                                         */
+       bool in_local_range (const types::global_dof_index global_index) const;
+
+                                        /**
+                                         * Returns the number of ghost elements
+                                         * present on the vector.
+                                         */
+       unsigned int n_ghost_entries () const;
+
+                                        /**
+                                         * Returns whether the given global index is a
+                                         * ghost index on the present
+                                         * processor. Returns false for indices that
+                                         * are owned locally and for indices not
+                                         * present at all.
+                                         */
+       bool is_ghost_entry (const types::global_dof_index global_index) const;
+
+                                        /**
+                                         * Make the @p Vector class a bit like
+                                         * the <tt>vector<></tt> class of the C++
+                                         * standard library by returning
+                                         * iterators to the start and end of the
+                                         * locally owned elements of this vector.
+                                         */
+       iterator begin ();
+
+                                        /**
+                                         * Return constant iterator to the start of
+                                         * the vector.
+                                         */
+       const_iterator begin () const;
+
+                                        /**
+                                         * Return an iterator pointing to the
+                                         * element past the end of the array of
+                                         * locally owned entries.
+                                         */
+       iterator end ();
+
+                                        /**
+                                         * Return a constant iterator pointing to
+                                         * the element past the end of the array
+                                         * of the locally owned entries.
+                                         */
+       const_iterator end () const;
+                                        //@}
+
+
+                                        /**
+                                         * @name 2: Data-Access
+                                         */
+                                        //@{
+
+                                        /**
+                                         * Read access to the data in the position
+                                         * corresponding to @p global_index. The index
+                                         * must be either in the local range of the
+                                         * vector or be specified as a ghost index at
+                                         * construction.
+                                         */
+       Number operator() (const types::global_dof_index global_index) const;
+
+                                        /**
+                                         * Read and write access to the data in the
+                                         * position corresponding to @p
+                                         * global_index. The index must be either in
+                                         * the local range of the vector or be
+                                         * specified as a ghost index at construction.
+                                         */
+       Number& operator() (const types::global_dof_index global_index);
+
+                                        /**
+                                         * Read access to the data field specified by
+                                         * @p local_index. Locally owned indices can
+                                         * be accessed with indices
+                                         * <code>[0,local_size)</code>, and ghost
+                                         * indices with indices
+                                         * <code>[local_size,local_size+
+                                         * n_ghost_entries)</code>.
+                                         */
+       Number local_element (const unsigned int local_index) const;
+
+                                        /**
+                                         * Read and write access to the data field
+                                         * specified by @p local_index. Locally owned
+                                         * indices can be accessed with indices
+                                         * <code>[0,local_size)</code>, and ghost
+                                         * indices with indices
+                                         * <code>[local_size,local_size+n_ghost_entries)</code>.
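+                                         *
+                                         * For example, the locally owned part of a
+                                         * vector @p v can be zeroed and its ghost
+                                         * part summed up as follows (sketch only):
+                                         * @code
+                                         *   for (unsigned int i=0; i<v.local_size(); ++i)
+                                         *     v.local_element(i) = 0.;
+                                         *   double ghost_sum = 0.;
+                                         *   for (unsigned int i=v.local_size();
+                                         *        i<v.local_size()+v.n_ghost_entries(); ++i)
+                                         *     ghost_sum += v.local_element(i);
+                                         * @endcode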
+                                         */
+       Number& local_element (const unsigned int local_index);
+                                        //@}
+
+
+                                        /**
+                                         * @name 3: Modification of vectors
+                                         */
+                                        //@{
+
+                                        /**
+                                         * Add the given vector to the present
+                                         * one.
+                                         */
+       Vector<Number> & operator += (const Vector<Number> &V);
+
+                                        /**
+                                         * Subtract the given vector from the
+                                         * present one.
+                                         */
+       Vector<Number> & operator -= (const Vector<Number> &V);
+
+                                        /**
+                                         * A collective add operation:
+                                         * This function adds a whole
+                                         * set of values stored in @p
+                                         * values to the vector
+                                         * components specified by @p
+                                         * indices.
+                                         */
+       template <typename OtherNumber>
+       void add (const std::vector<unsigned int> &indices,
+                 const std::vector<OtherNumber>  &values);
+
+                                        /**
+                                         * This is a second collective
+                                         * add operation. As a
+                                         * difference, this function
+                                         * takes a deal.II vector of
+                                         * values.
+                                         */
+       template <typename OtherNumber>
+       void add (const std::vector<unsigned int>     &indices,
+                 const ::dealii::Vector<OtherNumber> &values);
+
+                                        /**
+                                         * Take an address where
+                                         * <tt>n_elements</tt> are stored
+                                         * contiguously and add them into
+                                         * the vector. Handles all cases
+                                         * which are not covered by the
+                                         * other two <tt>add()</tt>
+                                         * functions above.
+                                         */
+       template <typename OtherNumber>
+       void add (const unsigned int  n_elements,
+                 const unsigned int *indices,
+                 const OtherNumber  *values);
+
+                                        /**
+                                         * Addition of @p s to all
+                                         * components. Note that @p s is a
+                                         * scalar and not a vector.
+                                         */
+       void add (const Number s);
+
+                                        /**
+                                         * Simple vector addition, equal to the
+                                         * <tt>operator +=</tt>.
+                                         */
+       void add (const Vector<Number> &V);
+
+                                        /**
+                                         * Simple addition of a multiple of a
+                                         * vector, i.e. <tt>*this += a*V</tt>.
+                                         */
+       void add (const Number a, const Vector<Number> &V);
+
+                                        /**
+                                         * Multiple addition of scaled vectors,
+                                         * i.e. <tt>*this += a*V+b*W</tt>.
+                                         */
+       void add (const Number a, const Vector<Number> &V,
+                 const Number b, const Vector<Number> &W);
+
+                                        /**
+                                         * Scaling and simple vector addition,
+                                         * i.e.
+                                         * <tt>*this = s*(*this)+V</tt>.
+                                         */
+       void sadd (const Number          s,
+                  const Vector<Number> &V);
+
+                                        /**
+                                         * Scaling and simple addition, i.e.
+                                         * <tt>*this = s*(*this)+a*V</tt>.
+                                         */
+       void sadd (const Number          s,
+                  const Number          a,
+                  const Vector<Number> &V);
+
+                                        /**
+                                         * Scaling and multiple addition, i.e.
+                                         * <tt>*this = s*(*this)+a*V + b*W</tt>.
+                                         */
+       void sadd (const Number          s,
+                  const Number          a,
+                  const Vector<Number> &V,
+                  const Number          b,
+                  const Vector<Number> &W);
+
+                                        /**
+                                         * Scaling and multiple addition.
+                                         * <tt>*this = s*(*this)+a*V + b*W + c*X</tt>.
+                                         */
+       void sadd (const Number          s,
+                  const Number          a,
+                  const Vector<Number> &V,
+                  const Number          b,
+                  const Vector<Number> &W,
+                  const Number          c,
+                  const Vector<Number> &X);
+
+                                        /**
+                                         * Scale each element of the
+                                         * vector by the given factor.
+                                         *
+                                         * This function is deprecated
+                                         * and will be removed in a
+                                         * future version. Use
+                                         * <tt>operator *=</tt> and
+                                         * <tt>operator /=</tt> instead.
+                                         */
+       void scale (const Number factor);
+
+
+                                        /**
+                                         * Scale each element of the
+                                         * vector by a constant
+                                         * value.
+                                         */
+       Vector<Number> & operator *= (const Number factor);
+
+                                        /**
+                                         * Scale each element of the
+                                         * vector by the inverse of the
+                                         * given value.
+                                         */
+       Vector<Number> & operator /= (const Number factor);
+
+                                        /**
+                                         * Scale each element of this
+                                         * vector by the corresponding
+                                         * element in the argument. This
+                                         * function is mostly meant to
+                                         * simulate multiplication (and
+                                         * immediate re-assignment) by a
+                                         * diagonal scaling matrix.
+                                         */
+       void scale (const Vector<Number> &scaling_factors);
+
+                                        /**
+                                         * Scale each element of this
+                                         * vector by the corresponding
+                                         * element in the argument. This
+                                         * function is mostly meant to
+                                         * simulate multiplication (and
+                                         * immediate re-assignment) by a
+                                         * diagonal scaling matrix.
+                                         */
+       template <typename Number2>
+       void scale (const Vector<Number2> &scaling_factors);
+
+                                        /**
+                                         * Assignment <tt>*this = a*u</tt>.
+                                         */
+       void equ (const Number a, const Vector<Number>& u);
+
+                                        /**
+                                         * Assignment <tt>*this = a*u</tt>.
+                                         */
+       template <typename Number2>
+       void equ (const Number a, const Vector<Number2>& u);
+
+                                        /**
+                                         * Assignment <tt>*this = a*u + b*v</tt>.
+                                         */
+       void equ (const Number a, const Vector<Number>& u,
+                 const Number b, const Vector<Number>& v);
+
+                                        /**
+                                         * Assignment <tt>*this = a*u + b*v + c*w</tt>.
+                                         */
+       void equ (const Number a, const Vector<Number>& u,
+                 const Number b, const Vector<Number>& v,
+                 const Number c, const Vector<Number>& w);
+
+                                        /**
+                                         * Compute the elementwise ratio of the
+                                         * two given vectors, that is let
+                                         * <tt>this[i] = a[i]/b[i]</tt>. This is
+                                         * useful for example if you want to
+                                         * compute the cellwise ratio of true to
+                                         * estimated error.
+                                         *
+                                         * This vector is appropriately
+                                         * scaled to hold the result.
+                                         *
+                                         * If any of the <tt>b[i]</tt> is
+                                         * zero, the result is
+                                         * undefined. No attempt is made
+                                         * to catch such situations.
+                                         */
+       void ratio (const Vector<Number> &a,
+                   const Vector<Number> &b);
+                                        //@}
+
+
+                                        /**
+                                         * @name 4: Mixed stuff
+                                         */
+                                        //@{
+                                        /**
+                                         * Prints the vector to the output stream @p
+                                         * out.
+                                         */
+       void print (std::ostream       &out,
+                   const unsigned int  precision  = 3,
+                   const bool          scientific = true,
+                   const bool          across     = true) const;
+
+                                        /**
+                                         * Returns the memory consumption of this
+                                         * class in bytes.
+                                         */
+       std::size_t memory_consumption () const;
+                                        //@}
+
+      private:
+                                        /**
+                                         * Shared pointer to store the parallel
+                                         * partitioning information. This information
+                                         * can be shared between several vectors that
+                                         * have the same partitioning.
+                                         */
+       std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+                                        /**
+                                         * The size that is currently allocated in the
+                                         * val array.
+                                         */
+       unsigned int    allocated_size;
+
+                                        /**
+                                         * Pointer to the array of
+                                         * local elements of this vector.
+                                         */
+       Number         *val;
+
+                                        /**
+                                         * Temporary storage that holds the data that
+                                         * is sent to this processor in @p compress()
+                                         * or sent from this processor in @p
+                                         * update_ghost_values.
+                                         */
+       mutable Number *import_data;
+
+                                        /**
+                                         * Provide this class with all functionality
+                                         * of ::dealii::Vector<Number> by creating a
+                                         * VectorView object.
+                                         */
+       VectorView<Number> vector_view;
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                                        /**
+                                         * A vector that collects all requests from @p
+                                         * compress() operations. This class uses
+                                         * persistent MPI communication requests, i.e.,
+                                         * the communication channels are retained across
+                                         * successive calls to a given function. This
+                                         * reduces the overhead involved with setting
+                                         * up the MPI machinery, but it does not
+                                         * remove the need for a receive operation to
+                                         * be posted before the data can actually be
+                                         * sent.
+                                         */
+       std::vector<MPI_Request>   compress_requests;
+
+                                        /**
+                                         * A vector that collects all requests from @p
+                                         * update_ghost_values() operations. This class uses
+                                         * persistent MPI communication requests.
+                                         */
+       mutable std::vector<MPI_Request>   update_ghost_values_requests;
+#endif
+
+                                        /**
+                                         * A lock that makes sure that
+                                         * the @p compress and @p
+                                         * update_ghost_values functions
+                                         * give reasonable results also
+                                         * when used with several
+                                         * threads.
+                                         */
+       mutable Threads::ThreadMutex mutex;
+
+                                        /**
+                                         * A helper function that clears the
+                                         * compress_requests and update_ghost_values_requests
+                                         * fields. Used in reinit functions.
+                                         */
+       void clear_mpi_requests ();
+
+                                        /**
+                                         * A helper function that is used to resize
+                                         * the val array.
+                                         */
+       void resize_val (const unsigned int new_allocated_size);
+
+                                        /*
+                                         * Make all other vector types
+                                         * friends.
+                                         */
+       template <typename Number2> friend class Vector;
+    };
+
+/*@}*/
+
+
+/*----------------------- Inline functions ----------------------------------*/
+
+#ifndef DOXYGEN
+
+    template <typename Number>
+    inline
+    Vector<Number>::Vector ()
+                   :
+                   partitioner (new Utilities::MPI::Partitioner()),
+                   allocated_size (0),
+                   val (0),
+                   import_data (0),
+                   vector_view (0, static_cast<Number*>(0))
+    {}
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>::Vector (const Vector<Number> &v)
+                   :
+                   allocated_size (0),
+                   val (0),
+                   import_data (0),
+                   vector_view (0, static_cast<Number*>(0))
+    {
+      reinit (v, true);
+      vector_view = v.vector_view;
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>::Vector (const IndexSet &local_range,
+                           const IndexSet &ghost_indices,
+                           const MPI_Comm  communicator)
+                   :
+                   allocated_size (0),
+                   val (0),
+                   import_data (0),
+                   vector_view (0, static_cast<Number*>(0))
+    {
+      reinit (local_range, ghost_indices, communicator);
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>::Vector (const unsigned int size)
+                   :
+                   allocated_size (0),
+                   val (0),
+                   import_data (0),
+                   vector_view (0, static_cast<Number*>(0))
+    {
+      reinit (size, false);
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>::
+    Vector (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
+                   :
+                   allocated_size (0),
+                   val (0),
+                   import_data (0),
+                   vector_view (0, static_cast<Number*>(0))
+    {
+      reinit (partitioner);
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>::~Vector ()
+    {
+      if (val != 0)
+       delete[] val;
+      val = 0;
+
+      if (import_data != 0)
+       delete[] import_data;
+      import_data = 0;
+
+      clear_mpi_requests();
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>&
+    Vector<Number>::operator = (const Vector<Number>& c)
+    {
+      Assert (c.partitioner.get() != 0, ExcNotInitialized());
+
+                                      // check whether the two vectors use the same
+                                      // parallel partitioner. if not, check if all
+                                      // local ranges are the same (that way, we can
+                                      // exchange data between different parallel
+                                      // layouts)
+      if (partitioner.get() == 0)
+       reinit (c, true);
+      else if (partitioner.get() != c.partitioner.get())
+       {
+         unsigned int local_ranges_different_loc = (local_range() !=
+                                                    c.local_range());
+         if(Utilities::MPI::max(local_ranges_different_loc,
+                                partitioner->get_communicator()) != 0)
+           reinit (c, true);
+       }
+      vector_view = c.vector_view;
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    Vector<Number>&
+    Vector<Number>::operator = (const Vector<Number2> &c)
+    {
+      Assert (c.partitioner.get() != 0, ExcNotInitialized());
+
+                                      // check whether the two vectors use the same
+                                      // parallel partitioner. if not, check if all
+                                      // local ranges are the same (that way, we can
+                                      // exchange data between different parallel
+                                      // layouts)
+      if (partitioner.get() == 0)
+       reinit (c, true);
+      else if (partitioner.get() != c.partitioner.get())
+       {
+         unsigned int local_ranges_different_loc = (local_range() !=
+                                                    c.local_range());
+         if(Utilities::MPI::max(local_ranges_different_loc,
+                                partitioner->get_communicator()) != 0)
+           reinit (c, true);
+       }
+      vector_view.reinit (partitioner->local_size(), val);
+      if (partitioner->local_size() > 0)
+       vector_view.equ (1., c.vector_view);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::compress (const bool add_ghost_data)
+    {
+      compress_start ();
+      compress_finish (add_ghost_data);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::update_ghost_values () const
+    {
+      update_ghost_values_start ();
+      update_ghost_values_finish ();
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::zero_out_ghosts ()
+    {
+      std::fill_n (&val[partitioner->local_size()],
+                  partitioner->n_ghost_indices(),
+                  Number());
+    }
+
+
+
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::all_zero () const
+    {
+                                      // use int instead of bool
+      int local_result = (partitioner->local_size()>0 ?
+                         -vector_view.all_zero () : -1);
+      return - Utilities::MPI::max(local_result,partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::is_non_negative () const
+    {
+                                      // use int instead of bool
+      int local_result = (partitioner->local_size()>0 ?
+                         -vector_view.is_non_negative () : -1);
+      return  - Utilities::MPI::max(local_result,partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    bool
+    Vector<Number>::operator == (const Vector<Number2> &v) const
+    {
+      AssertDimension (local_size(), v.local_size());
+
+                                      // MPI does not support bools, so use unsigned
+                                      // int instead. Two vectors are equal if the
+                                      // check for non-equal fails on all processors
+      unsigned int local_result = (partitioner->local_size()>0 ?
+                                  vector_view.template operator !=
+                                  <Number2>(v.vector_view)
+                                  : 0 );
+      unsigned int result = Utilities::MPI::max(local_result,
+                                               partitioner->get_communicator());
+      return result==0;
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    bool
+    Vector<Number>::operator != (const Vector<Number2> &v) const
+    {
+      return !(operator == (v));
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    Number
+    Vector<Number>::operator * (const Vector<Number2> &V) const
+    {
+      Number local_result = (partitioner->local_size()>0 ?
+                            vector_view.operator* (V.vector_view)
+                            : 0);
+      return Utilities::MPI::sum (local_result, partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::norm_sqr () const
+    {
+                                      // on some processors, the size might be zero,
+                                      // which is not allowed by the base
+                                      // class. Therefore, insert a check here
+      Number local_result = (partitioner->local_size()>0 ?
+                            vector_view.norm_sqr()
+                            : 0);
+      return Utilities::MPI::sum(local_result,partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    inline
+    Number
+    Vector<Number>::mean_value () const
+    {
+      Number local_result =
+       (partitioner->local_size()>0 ?
+        vector_view.mean_value()
+        : 0)
+       *((real_type)partitioner->local_size()/(real_type)partitioner->size());
+      return Utilities::MPI::sum (local_result, partitioner->get_communicator());
+    }
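
(The weighting by local_size()/size() above is what turns per-process means into the global mean: each process contributes its local mean scaled by its share of the entries, so the MPI sum reproduces

    \bar v = \frac{1}{N}\sum_{i=0}^{N-1} v_i
           = \sum_p \frac{n_p}{N}\,\Bigl(\frac{1}{n_p}\sum_{i \in \mathrm{owned}(p)} v_i\Bigr),

where N is the global size and n_p the number of locally owned entries on process p.)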
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::l1_norm () const
+    {
+      Number local_result = (partitioner->local_size()>0 ?
+                            vector_view.l1_norm()
+                            : 0);
+      return Utilities::MPI::sum(local_result, partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::l2_norm () const
+    {
+      return std::sqrt(norm_sqr());
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::lp_norm (const real_type p) const
+    {
+      Number local_result = (partitioner->local_size()>0 ?
+                            std::pow(vector_view.lp_norm(p),p)
+                            : 0);
+      return std::pow(Utilities::MPI::sum(local_result,
+                                         partitioner->get_communicator()), 1.0/p);
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::real_type
+    Vector<Number>::linfty_norm () const
+    {
+      Number local_result = (partitioner->local_size()>0 ?
+                            vector_view.linfty_norm()
+                            : 0);
+      return Utilities::MPI::max (local_result, partitioner->get_communicator());
+    }
+
+
+
+    template <typename Number>
+    inline
+    types::global_dof_index Vector<Number>::size () const
+    {
+      return partitioner->size();
+    }
+
+
+
+    template <typename Number>
+    inline
+    unsigned int Vector<Number>::local_size () const
+    {
+      return partitioner->local_size();
+    }
+
+
+
+    template <typename Number>
+    inline
+    std::pair<types::global_dof_index, types::global_dof_index>
+    Vector<Number>::local_range () const
+    {
+      return partitioner->local_range();
+    }
+
+
+
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::in_local_range
+    (const types::global_dof_index global_index) const
+    {
+      return partitioner->in_local_range (global_index);
+    }
+
+
+
+    template <typename Number>
+    inline
+    unsigned int
+    Vector<Number>::n_ghost_entries () const
+    {
+      return partitioner->n_ghost_indices();
+    }
+
+
+
+    template <typename Number>
+    inline
+    bool
+    Vector<Number>::is_ghost_entry (const types::global_dof_index global_index) const
+    {
+      return partitioner->is_ghost_entry (global_index);
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::iterator
+    Vector<Number>::begin ()
+    {
+      return vector_view.begin();
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::const_iterator
+    Vector<Number>::begin () const
+    {
+      return vector_view.begin();
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::iterator
+    Vector<Number>::end ()
+    {
+      return vector_view.end();
+    }
+
+
+
+    template <typename Number>
+    inline
+    typename Vector<Number>::const_iterator
+    Vector<Number>::end () const
+    {
+      return vector_view.end();
+    }
+
+
+
+    template <typename Number>
+    inline
+    Number
+    Vector<Number>::operator() (const types::global_dof_index global_index) const
+    {
+      return val[partitioner->global_to_local(global_index)];
+    }
+
+
+
+    template <typename Number>
+    inline
+    Number&
+    Vector<Number>::operator() (const types::global_dof_index global_index)
+    {
+      return val[partitioner->global_to_local (global_index)];
+    }
+
+
+
+    template <typename Number>
+    inline
+    Number
+    Vector<Number>::local_element (const unsigned int local_index) const
+    {
+      AssertIndexRange (local_index,
+                       partitioner->local_size()+
+                       partitioner->n_ghost_indices());
+      return val[local_index];
+    }
+
+
+
+    template <typename Number>
+    inline
+    Number&
+    Vector<Number>::local_element (const unsigned int local_index)
+    {
+      AssertIndexRange (local_index,
+                       partitioner->local_size()+
+                       partitioner->n_ghost_indices());
+      return val[local_index];
+    }
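
A minimal sketch of the two addressing schemes (vec and my_global_index are assumptions; the index must be locally owned or a ghost on this process):

      // global addressing: the partitioner maps the global index to a
      // position in the local array (owned entries first, then ghosts)
      vec(my_global_index) = 1.;

      // local addressing: [0, local_size()) are the locally owned entries,
      // [local_size(), local_size()+n_ghost_entries()) are the ghosts
      for (unsigned int i=0; i<vec.local_size(); ++i)
        vec.local_element(i) *= 2.;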
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>&
+    Vector<Number>::operator = (const Number s)
+    {
+                                      // if we call Vector::operator=0, we want to
+                                      // zero out all the entries plus ghosts.
+      vector_view.dealii::template Vector<Number>::operator= (s);
+      if (s==Number())
+       zero_out_ghosts();
+
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number> &
+    Vector<Number>::operator += (const Vector<Number> &v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view += v.vector_view;
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number> &
+    Vector<Number>::operator -= (const Vector<Number> &v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view -= v.vector_view;
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    template <typename OtherNumber>
+    inline
+    void
+    Vector<Number>::add (const std::vector<unsigned int> &indices,
+                        const std::vector<OtherNumber>  &values)
+    {
+      AssertDimension (indices.size(), values.size());
+      add (indices.size(), &indices[0], &values[0]);
+    }
+
+
+
+    template <typename Number>
+    template <typename OtherNumber>
+    inline
+    void
+    Vector<Number>::add (const std::vector<unsigned int>    &indices,
+                        const ::dealii::Vector<OtherNumber>&values)
+    {
+      AssertDimension (indices.size(), values.size());
+      add (indices.size(), &indices[0], values.begin());
+    }
+
+
+
+    template <typename Number>
+    template <typename OtherNumber>
+    inline
+    void
+    Vector<Number>::add (const unsigned int  n_indices,
+                        const unsigned int *indices,
+                        const OtherNumber  *values)
+    {
+      for (unsigned int i=0; i<n_indices; ++i)
+       {
+         Assert (numbers::is_finite(values[i]),
+                 ExcMessage("The given value is not finite but either infinite or Not A Number (NaN)"));
+         this->operator()(indices[i]) += values[i];
+       }
+    }
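
As a usage sketch (the global indices 42 and 43 are made up, and vec is assumed to be a vector that can address them), contributions are scattered in and then accumulated on the owning processes:

      std::vector<unsigned int> indices (2);
      std::vector<double>       values  (2);
      indices[0] = 42;  values[0] = 1.0;
      indices[1] = 43;  values[1] = 0.5;
      vec.add (indices, values);

      // sum contributions written into ghost entries into their owners;
      // the flag is forwarded to compress_finish() as add_ghost_data
      vec.compress (true);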
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::add (const Number a)
+    {
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.add (a);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::add (const Vector<Number>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.add (v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::add (const Number a,
+                        const Vector<Number>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.add (a, v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::add (const Number a,
+                        const Vector<Number>& v,
+                        const Number b,
+                        const Vector<Number>& w)
+    {
+      AssertDimension (local_size(), v.local_size());
+      AssertDimension (local_size(), w.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.add (a, v.vector_view, b, w.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::sadd (const Number x,
+                         const Vector<Number>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.sadd (x, v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::sadd (const Number x,
+                         const Number a,
+                         const Vector<Number>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.sadd (x, a, v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::sadd (const Number x,
+                         const Number a,
+                         const Vector<Number>& v,
+                         const Number b,
+                         const Vector<Number>& w)
+    {
+      AssertDimension (local_size(), v.local_size());
+      AssertDimension (local_size(), w.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.sadd (x, a, v.vector_view, b, w.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::sadd (const Number s,
+                         const Number a,
+                         const Vector<Number>& v,
+                         const Number b,
+                         const Vector<Number>& w,
+                         const Number c,
+                         const Vector<Number>& x)
+    {
+      AssertDimension (local_size(), v.local_size());
+      AssertDimension (local_size(), w.local_size());
+      AssertDimension (local_size(), x.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.sadd (s, a, v.vector_view, b, w.vector_view,
+                         c, x.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::scale (const Number factor)
+    {
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.scale (factor);
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>&
+    Vector<Number>::operator *= (const Number factor)
+    {
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.operator *= (factor);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    Vector<Number>&
+    Vector<Number>::operator /= (const Number factor)
+    {
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.operator /= (factor);
+      return *this;
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::scale (const Vector<Number> &scaling_factors)
+    {
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.scale (scaling_factors.vector_view);
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    void
+    Vector<Number>::scale (const Vector<Number2> &scaling_factors)
+    {
+      vector_view.template scale<Number2> (scaling_factors.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::equ (const Number a,
+                        const Vector<Number>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.equ (a, v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    inline
+    void
+    Vector<Number>::equ (const Number a,
+                        const Vector<Number2>& v)
+    {
+      AssertDimension (local_size(), v.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.equ (a, v.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::equ (const Number a,
+                        const Vector<Number>& v,
+                        const Number b,
+                        const Vector<Number>& w)
+    {
+      AssertDimension (local_size(), v.local_size());
+      AssertDimension (local_size(), w.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.equ (a, v.vector_view, b, w.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::equ (const Number a,
+                        const Vector<Number>& v,
+                        const Number b,
+                        const Vector<Number>& w,
+                        const Number c,
+                        const Vector<Number>& x)
+    {
+      AssertDimension (local_size(), v.local_size());
+      AssertDimension (local_size(), w.local_size());
+      AssertDimension (local_size(), x.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.equ (a, v.vector_view, b, w.vector_view,
+                        c, x.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::ratio (const Vector<Number>& a,
+                          const Vector<Number>& b)
+    {
+      AssertDimension (local_size(), a.local_size());
+      AssertDimension (local_size(), b.local_size());
+                                      // dealii::Vector does not allow empty fields
+                                      // but this might happen on some processors
+                                      // for parallel implementation
+      if(local_size()>0)
+       vector_view.ratio (a.vector_view, b.vector_view);
+    }
+
+
+
+    template <typename Number>
+    inline
+    void
+    Vector<Number>::swap (Vector<Number> &v)
+    {
+      std::swap (allocated_size,         v.allocated_size);
+      std::swap (val,                    v.val);
+      std::swap (import_data,            v.import_data);
+      std::swap (vector_view,            v.vector_view);
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      std::swap (compress_requests,      v.compress_requests);
+      std::swap (update_ghost_values_requests, v.update_ghost_values_requests);
+#endif
+    }
+
+#endif  // ifndef DOXYGEN
+
+  } // end of namespace distributed
+
+} // end of namespace parallel
+
+
+
+/**
+ * Global function @p swap which overloads the default implementation
+ * of the C++ standard library which uses a temporary object. The
+ * function simply exchanges the data of the two vectors.
+ *
+ * @relates Vector
+ * @author Katharina Kormann, Martin Kronbichler, 2011
+ */
+template <typename Number>
+inline
+void swap (parallel::distributed::Vector<Number> &u,
+          parallel::distributed::Vector<Number> &v)
+{
+  u.swap (v);
+}
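
Since the member function exchanges the data arrays but not the partitioner, this overload amounts to a constant-time swap of two vectors that share the same parallel layout; a minimal sketch:

  parallel::distributed::Vector<double> u, v;
  // ... reinit u and v with the same partitioner and fill them ...
  swap (u, v);    // found via argument-dependent lookup; no data is copied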
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
diff --git a/deal.II/include/deal.II/lac/parallel_vector.templates.h b/deal.II/include/deal.II/lac/parallel_vector.templates.h
new file mode 100644 (file)
index 0000000..05a5323
--- /dev/null
@@ -0,0 +1,560 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+#ifndef __deal2__parallel_vector_templates_h
+#define __deal2__parallel_vector_templates_h
+
+
+#include <deal.II/base/config.h>
+#include <deal.II/lac/parallel_vector.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace parallel
+{
+  namespace distributed
+  {
+
+    template <typename Number>
+    void
+    Vector<Number>::clear_mpi_requests ()
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      for (unsigned int j=0;j<compress_requests.size();j++)
+       MPI_Request_free(&compress_requests[j]);
+      compress_requests.clear();
+      for (unsigned int j=0;j<update_ghost_values_requests.size();j++)
+       MPI_Request_free(&update_ghost_values_requests[j]);
+      update_ghost_values_requests.clear();
+#endif
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::resize_val (const unsigned int new_alloc_size)
+    {
+      if (new_alloc_size > allocated_size)
+       {
+         Assert (((allocated_size > 0 && val != 0) ||
+                  val == 0), ExcInternalError());
+         if (val != 0)
+           delete [] val;
+         val = new Number[new_alloc_size];
+         allocated_size = new_alloc_size;
+       }
+      else if (new_alloc_size == 0)
+       {
+         if (val != 0)
+           delete [] val;
+         val = 0;
+         allocated_size = 0;
+       }
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::reinit (const unsigned int size,
+                           const bool         fast)
+    {
+      clear_mpi_requests();
+                               // check whether we need to reallocate
+      resize_val (size);
+
+                               // reset vector view
+      vector_view.reinit (size, val);
+
+                               // delete previous content in import data
+      if (import_data != 0)
+       delete[] import_data;
+      import_data = 0;
+
+                               // set partitioner to serial version
+      partitioner.reset (new Utilities::MPI::Partitioner (size));
+
+                               // set entries to zero if so requested
+      if (fast == false)
+       this->operator = (Number());
+    }
+
+
+
+    template <typename Number>
+    template <typename Number2>
+    void
+    Vector<Number>::reinit (const Vector<Number2> &v,
+                           const bool             fast)
+    {
+      clear_mpi_requests();
+      Assert (v.partitioner.get() != 0, ExcNotInitialized());
+
+                               // check whether the partitioners are
+                               // different (check only if they are allocated
+                               // differently, not if the actual data is
+                               // different)
+      if (partitioner.get() != v.partitioner.get())
+       {
+         partitioner = v.partitioner;
+         const unsigned int new_allocated_size = partitioner->local_size() +
+           partitioner->n_ghost_indices();
+         resize_val (new_allocated_size);
+         vector_view.reinit (partitioner->local_size(), val);
+       }
+      else
+       Assert (vector_view.size() == partitioner->local_size(),
+               ExcInternalError());
+
+      if (fast == false)
+       this->operator= (Number());
+
+      if (import_data != 0)
+       {
+         delete [] import_data;
+
+                               // do not reallocate import_data directly, but
+                               // only upon request. It is only used as
+                               // temporary storage for compress() and
+                               // update_ghost_values, and we might have vectors where
+                               // we never call these methods and hence do
+                               // not need to have the storage.
+         import_data = 0;
+       }
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::reinit (const IndexSet &locally_owned_indices,
+                           const IndexSet &ghost_indices,
+                           const MPI_Comm  communicator)
+    {
+                               // set up parallel partitioner with index sets
+                               // and communicator
+      std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> new_partitioner
+       (new Utilities::MPI::Partitioner (locally_owned_indices,
+                                         ghost_indices, communicator));
+      reinit (new_partitioner);
+    }
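
A minimal setup sketch, assuming two MPI processes, a made-up global size of 100 split into two contiguous halves, and one ghost entry taken from the neighboring process:

      const unsigned int my_rank =
        Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);

      IndexSet locally_owned (100);
      locally_owned.add_range (my_rank*50, (my_rank+1)*50);

      IndexSet ghosts (100);
      ghosts.add_index (my_rank == 0 ? 50 : 49);

      parallel::distributed::Vector<double> vec;
      vec.reinit (locally_owned, ghosts, MPI_COMM_WORLD);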
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::reinit (const std_cxx1x::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in)
+    {
+      clear_mpi_requests();
+      partitioner = partitioner_in;
+
+                               // set vector size and allocate memory
+      const unsigned int new_allocated_size = partitioner->local_size() +
+       partitioner->n_ghost_indices();
+      resize_val (new_allocated_size);
+      vector_view.reinit (partitioner->local_size(), val);
+
+                               // initialize to zero
+      this->operator= (Number());
+
+      if (import_data != 0)
+       {
+         delete [] import_data;
+
+                               // do not reallocate import_data directly, but
+                               // only upon request. It is only used as
+                               // temporary storage for compress() and
+                               // update_ghost_values, and we might have vectors where
+                               // we never call these methods and hence do
+                               // not need to have the storage.
+         import_data = 0;
+       }
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::copy_from (const Vector<Number> &c,
+                              const bool            call_update_ghost_values)
+    {
+      AssertDimension (local_range().first, c.local_range().first);
+      AssertDimension (local_range().second, c.local_range().second);
+      vector_view = c.vector_view;
+      if (call_update_ghost_values == true)
+       update_ghost_values();
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::compress_start (const unsigned int counter)
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      const Utilities::MPI::Partitioner &part = *partitioner;
+
+                               // nothing to do when we neither have import
+                               // nor ghost indices.
+      if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+       return;
+
+                               // make this function thread safe
+      Threads::ThreadMutex::ScopedLock lock (mutex);
+
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets  = part.ghost_targets().size();
+
+                               // Need to send and receive the data. Use
+                               // non-blocking communication, where it is
+                               // generally less overhead to first initiate
+                               // the receive and then actually send the data
+      if (compress_requests.size() == 0)
+       {
+                               // set channels in different range from
+                               // update_ghost_values channels
+         const unsigned int channel = counter + 400;
+         unsigned int current_index_start = 0;
+         compress_requests.resize (n_import_targets + n_ghost_targets);
+
+                               // allocate import_data in case it is not set
+                               // up yet
+         if (import_data == 0)
+           import_data = new Number[part.n_import_indices()];
+         for (unsigned int i=0; i<n_import_targets; i++)
+           {
+             MPI_Recv_init (&import_data[current_index_start],
+                            part.import_targets()[i].second*sizeof(Number),
+                            MPI_BYTE,
+                            part.import_targets()[i].first,
+                            part.import_targets()[i].first +
+                            part.n_mpi_processes()*channel,
+                            part.get_communicator(),
+                            &compress_requests[i]);
+             current_index_start += part.import_targets()[i].second;
+           }
+         AssertDimension(current_index_start, part.n_import_indices());
+
+         Assert (part.local_size() == vector_view.size(), ExcInternalError());
+         current_index_start = part.local_size();
+         for (unsigned int i=0; i<n_ghost_targets; i++)
+           {
+             MPI_Send_init (&this->val[current_index_start],
+                            part.ghost_targets()[i].second*sizeof(Number),
+                            MPI_BYTE,
+                            part.ghost_targets()[i].first,
+                            part.this_mpi_process() +
+                            part.n_mpi_processes()*channel,
+                            part.get_communicator(),
+                            &compress_requests[n_import_targets+i]);
+             current_index_start += part.ghost_targets()[i].second;
+           }
+         AssertDimension (current_index_start,
+                          part.local_size()+part.n_ghost_indices());
+       }
+
+      AssertDimension(n_import_targets + n_ghost_targets,
+                     compress_requests.size());
+      if (compress_requests.size() > 0)
+       {
+         int ierr;
+         ierr = MPI_Startall(compress_requests.size(),&compress_requests[0]);
+         Assert (ierr == MPI_SUCCESS, ExcInternalError());
+       }
+#else
+      (void)counter;
+#endif
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::compress_finish (const bool add_ghost_data)
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      const Utilities::MPI::Partitioner &part = *partitioner;
+
+                               // nothing to do when we neither have import
+                               // nor ghost indices.
+      if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+       return;
+
+                               // make this function thread safe
+      Threads::ThreadMutex::ScopedLock lock (mutex);
+
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets  = part.ghost_targets().size();
+
+      AssertDimension (n_ghost_targets+n_import_targets,
+                      compress_requests.size());
+
+                               // first wait for the receive to complete
+      if (n_import_targets > 0)
+       {
+         int ierr;
+         ierr = MPI_Waitall (n_import_targets, &compress_requests[0],
+                             MPI_STATUSES_IGNORE);
+         Assert (ierr == MPI_SUCCESS, ExcInternalError());
+
+         Number * read_position = import_data;
+         std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+           my_imports = part.import_indices().begin();
+
+                               // If add_ghost_data is set, add the imported
+                               // data to the local values. If not, set the
+                               // vector entries.
+         if (add_ghost_data == true)
+           for ( ; my_imports!=part.import_indices().end(); ++my_imports)
+             for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+               local_element(j) += *read_position++;
+         else
+           for ( ; my_imports!=part.import_indices().end(); ++my_imports)
+             for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+               local_element(j) = *read_position++;
+         AssertDimension(read_position-import_data,part.n_import_indices());
+       }
+
+      if (n_ghost_targets > 0)
+       {
+         int ierr;
+         ierr = MPI_Waitall (n_ghost_targets,
+                             &compress_requests[n_import_targets],
+                             MPI_STATUSES_IGNORE);
+         Assert (ierr == MPI_SUCCESS, ExcInternalError());
+         zero_out_ghosts ();
+       }
+      else
+       AssertDimension (part.n_ghost_indices(), 0);
+#else
+      (void)add_ghost_data;
+#endif
+    }
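
Taken together, compress_start() and compress_finish() implement the usual assembly pattern; a sketch of the intended call sequence (channel 0 and the interleaved local work are placeholders):

      // during assembly, contributions may have been written into ghost rows
      vec.compress_start (0);        // start the persistent sends/receives
      // ... unrelated local work can be overlapped here ...
      vec.compress_finish (true);    // wait, add the imported data to the
                                     // locally owned entries, and zero out
                                     // the ghost range again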
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::update_ghost_values_start (const unsigned int counter) const
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      const Utilities::MPI::Partitioner &part = *partitioner;
+
+                               // nothing to do when we neither have import
+                               // nor ghost indices.
+      if (part.n_ghost_indices()==0 && part.n_import_indices()==0)
+       return;
+
+                               // make this function thread safe
+      Threads::ThreadMutex::ScopedLock lock (mutex);
+
+      const unsigned int n_import_targets = part.import_targets().size();
+      const unsigned int n_ghost_targets = part.ghost_targets().size();
+
+                               // Need to send and receive the data. Use
+                               // non-blocking communication, where it is
+                               // generally less overhead to first initiate
+                               // the receive and then actually send the data
+      if (update_ghost_values_requests.size() == 0)
+       {
+         Assert (part.local_size() == vector_view.size(),
+                 ExcInternalError());
+         unsigned int current_index_start = part.local_size();
+         update_ghost_values_requests.resize (n_import_targets+n_ghost_targets);
+         for (unsigned int i=0; i<n_ghost_targets; i++)
+           {
+                               // allow writing into ghost indices even
+                               // though we are in a const function
+             MPI_Recv_init (const_cast<Number*>(&val[current_index_start]),
+                            part.ghost_targets()[i].second*sizeof(Number),
+                            MPI_BYTE,
+                            part.ghost_targets()[i].first,
+                            part.ghost_targets()[i].first +
+                            counter*part.n_mpi_processes(),
+                            part.get_communicator(),
+                            &update_ghost_values_requests[i]);
+             current_index_start += part.ghost_targets()[i].second;
+           }
+         AssertDimension (current_index_start,
+                          part.local_size()+part.n_ghost_indices());
+
+                               // allocate import_data in case it is not set
+                               // up yet
+         if (import_data == 0 && part.n_import_indices() > 0)
+           import_data = new Number[part.n_import_indices()];
+         current_index_start = 0;
+         for (unsigned int i=0; i<n_import_targets; i++)
+           {
+             MPI_Send_init (&import_data[current_index_start],
+                            part.import_targets()[i].second*sizeof(Number),
+                            MPI_BYTE, part.import_targets()[i].first,
+                            part.this_mpi_process() +
+                            part.n_mpi_processes()*counter,
+                            part.get_communicator(),
+                            &update_ghost_values_requests[n_ghost_targets+i]);
+             current_index_start += part.import_targets()[i].second;
+           }
+         AssertDimension (current_index_start, part.n_import_indices());
+       }
+
+                               // copy the data that is actually to be sent
+                               // to the import_data field
+      if (part.n_import_indices() > 0)
+       {
+         Assert (import_data != 0, ExcInternalError());
+         Number * write_position = import_data;
+         std::vector<std::pair<unsigned int, unsigned int> >::const_iterator
+           my_imports = part.import_indices().begin();
+         for ( ; my_imports!=part.import_indices().end(); ++my_imports)
+           for (unsigned int j=my_imports->first; j<my_imports->second; j++)
+             *write_position++ = local_element(j);
+       }
+
+      AssertDimension (n_import_targets+n_ghost_targets,
+                      update_ghost_values_requests.size());
+      if (update_ghost_values_requests.size() > 0)
+       {
+         int ierr;
+         ierr = MPI_Startall(update_ghost_values_requests.size(),
+                             &update_ghost_values_requests[0]);
+         Assert (ierr == MPI_SUCCESS, ExcInternalError());
+       }
+#else
+      (void)counter;
+#endif
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::update_ghost_values_finish () const
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                               // wait for both sends and receives to
+                               // complete, even though only receives are
+                               // really necessary. this gives (much) better
+                               // performance
+      AssertDimension (partitioner->ghost_targets().size() +
+                      partitioner->import_targets().size(),
+                      update_ghost_values_requests.size());
+      if (update_ghost_values_requests.size() > 0)
+       {
+                               // make this function thread safe
+         Threads::ThreadMutex::ScopedLock lock (mutex);
+
+         int ierr;
+         ierr = MPI_Waitall (update_ghost_values_requests.size(),
+                             &update_ghost_values_requests[0],
+                             MPI_STATUSES_IGNORE);
+         Assert (ierr == MPI_SUCCESS, ExcInternalError());
+       }
+#endif
+    }
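
This pair is the mirror image of compress(): owners send their values to the processes that store them as ghosts. A minimal sketch (compute_value() and some_ghost_index are assumptions):

      // owners update their entries ...
      for (unsigned int i=0; i<vec.local_size(); ++i)
        vec.local_element(i) = compute_value (i);

      // ... then make the ghost copies consistent again
      vec.update_ghost_values ();

      if (vec.is_ghost_entry (some_ghost_index))
        {
          const double g = vec (some_ghost_index);   // now safe to read
          // ... use g ...
        }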
+
+
+
+    template <typename Number>
+    std::size_t
+    Vector<Number>::memory_consumption () const
+    {
+      std::size_t memory = sizeof(*this);
+      memory += sizeof (Number) * static_cast<std::size_t>(allocated_size);
+
+                               // if the partitioner is shared between more
+                               // processors, just count a fraction of that
+                               // memory, since we're not actually using more
+                               // memory for it.
+      if (partitioner.use_count() > 0)
+       memory += partitioner->memory_consumption()/partitioner.use_count()+1;
+      if (import_data != 0)
+       memory += (static_cast<std::size_t>(partitioner->n_import_indices())*
+                  sizeof(Number));
+      return memory;
+    }
+
+
+
+    template <typename Number>
+    void
+    Vector<Number>::print (std::ostream      &out,
+                          const unsigned int precision,
+                          const bool         scientific,
+                          const bool         across) const
+    {
+      Assert (partitioner.get() !=0, ExcInternalError());
+      AssertThrow (out, ExcIO());
+      std::ios::fmtflags old_flags = out.flags();
+      unsigned int old_precision = out.precision (precision);
+
+      out.precision (precision);
+      if (scientific)
+       out.setf (std::ios::scientific, std::ios::floatfield);
+      else
+       out.setf (std::ios::fixed, std::ios::floatfield);
+
+                               // to make the vector write out all the
+                               // information in order, use as many barriers
+                               // as there are processors and start writing
+                               // when it's our turn
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      for (unsigned int i=0; i<partitioner->this_mpi_process(); i++)
+       MPI_Barrier (partitioner->get_communicator());
+#endif
+
+      out << "Process #" << partitioner->this_mpi_process() << std::endl
+         << "Local range: [" << partitioner->local_range().first << "/"
+         << partitioner->local_range().second << "], global size: "
+         << partitioner->size() << std::endl
+         << "Vector data:" << std::endl;
+      if (across)
+       for (unsigned int i=0; i<partitioner->local_size(); ++i)
+         out << local_element(i) << ' ';
+      else
+       for (unsigned int i=0; i<partitioner->local_size(); ++i)
+         out << local_element(i) << std::endl;
+      out << std::endl;
+      out << "Ghost entries (global index / value):" << std::endl;
+      if (across)
+       for (unsigned int i=0; i<partitioner->n_ghost_indices(); ++i)
+         out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
+             << '/' << local_element(partitioner->local_size()+i) << ") ";
+      else
+       for (unsigned int i=0; i<partitioner->n_ghost_indices(); ++i)
+         out << '(' << partitioner->ghost_indices().nth_index_in_set(i)
+             << '/' << local_element(partitioner->local_size()+i) << ")"
+             << std::endl;
+      out << std::endl << std::flush;
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      MPI_Barrier (partitioner->get_communicator());
+
+      for (unsigned int i=partitioner->this_mpi_process()+1;
+          i<partitioner->n_mpi_processes(); i++)
+       MPI_Barrier (partitioner->get_communicator());
+#endif
+
+      AssertThrow (out, ExcIO());
+                               // reset output format
+      out.flags (old_flags);
+      out.precision(old_precision);
+    }
+
+  } // end of namespace distributed
+
+} // end of namespace parallel
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
index 9d2d89dc41edd33b69942a6466d7617412bcf01f..51d51e16490e383c23b31b1edab53cff7b4c1c1d 100644 (file)
@@ -21,6 +21,7 @@
 #  include <deal.II/base/std_cxx1x/shared_ptr.h>
 
 #  include <deal.II/lac/trilinos_vector_base.h>
+#  include <deal.II/lac/parallel_vector.h>
 
 #  ifdef DEAL_II_COMPILER_SUPPORTS_MPI
 #    include <Epetra_MpiComm.h>
@@ -124,6 +125,15 @@ namespace TrilinosWrappers
       void vmult (dealii::Vector<double>       &dst,
                  const dealii::Vector<double> &src) const;
 
+                                      /**
+                                       * Apply the preconditioner on deal.II
+                                       * parallel data structures instead of
+                                       * the ones provided in the Trilinos
+                                       * wrapper class.
+                                       */
+      void vmult (dealii::parallel::distributed::Vector<double>       &dst,
+                 const dealii::parallel::distributed::Vector<double> &src) const;
+
                                        /**
                                        * Exception.
                                        */
@@ -1507,12 +1517,36 @@ namespace TrilinosWrappers
   void PreconditionBase::vmult (dealii::Vector<double>       &dst,
                                const dealii::Vector<double> &src) const
   {
-    Epetra_Vector LHS (View, preconditioner->OperatorDomainMap(),
-                      dst.begin());
-    Epetra_Vector RHS (View, preconditioner->OperatorRangeMap(),
-                      const_cast<double*>(src.begin()));
+    AssertDimension (dst.size(),
+                    preconditioner->OperatorDomainMap().NumMyElements());
+    AssertDimension (src.size(),
+                    preconditioner->OperatorRangeMap().NumMyElements());
+    Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
+                           dst.begin());
+    Epetra_Vector tril_src (View, preconditioner->OperatorRangeMap(),
+                           const_cast<double*>(src.begin()));
+
+    const int ierr = preconditioner->ApplyInverse (tril_src, tril_dst);
+    AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+  }
 
-    const int ierr = preconditioner->ApplyInverse (RHS, LHS);
+
+
+  inline
+  void
+  PreconditionBase::vmult (parallel::distributed::Vector<double>       &dst,
+                          const parallel::distributed::Vector<double> &src) const
+  {
+    AssertDimension (dst.local_size(),
+                    preconditioner->OperatorDomainMap().NumMyElements());
+    AssertDimension (src.local_size(),
+                    preconditioner->OperatorRangeMap().NumMyElements());
+    Epetra_Vector tril_dst (View, preconditioner->OperatorDomainMap(),
+                           dst.begin());
+    Epetra_Vector tril_src (View, preconditioner->OperatorRangeMap(),
+                           const_cast<double*>(src.begin()));
+
+    const int ierr = preconditioner->ApplyInverse (tril_src, tril_dst);
     AssertThrow (ierr == 0, ExcTrilinosError(ierr));
   }
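
With this overload, a Trilinos preconditioner can be applied directly to deal.II's distributed vectors; a minimal sketch, assuming prec is an already initialized TrilinosWrappers preconditioner (e.g. PreconditionAMG) and both vectors match its parallel layout:

  parallel::distributed::Vector<double> dst, src;
  // ... reinit both vectors to the preconditioner's row map and fill src ...
  prec.vmult (dst, src);   // wraps the local arrays into Epetra_Vector views
                           // and calls ApplyInverse() on them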
 
index 97f3270e4b06153d52ee66ea48b482506f154402..0f347e8f3c090416e142fb48bf596b3368cfb364 100644 (file)
@@ -23,6 +23,7 @@
 #  include <deal.II/lac/full_matrix.h>
 #  include <deal.II/lac/exceptions.h>
 #  include <deal.II/lac/trilinos_vector_base.h>
+#  include <deal.II/lac/parallel_vector.h>
 
 #  include <vector>
 #  include <cmath>
@@ -32,6 +33,7 @@
 #  include <Epetra_FECrsMatrix.h>
 #  include <Epetra_Map.h>
 #  include <Epetra_CrsGraph.h>
+#  include <Epetra_Vector.h>
 #  ifdef DEAL_II_COMPILER_SUPPORTS_MPI
 #    include <Epetra_MpiComm.h>
 #    include "mpi.h"
@@ -1658,6 +1660,14 @@ namespace TrilinosWrappers
       void vmult (VectorBase       &dst,
                   const VectorBase &src) const;
 
+                                       /**
+                                        * Same as before, but working with
+                                        * deal.II's own distributed vector
+                                        * class.
+                                        */
+      void vmult (parallel::distributed::Vector<TrilinosScalar>       &dst,
+                  const parallel::distributed::Vector<TrilinosScalar> &src) const;
+
                                        /**
                                         * Matrix-vector multiplication:
                                         * let <i>dst =
@@ -1693,6 +1703,14 @@ namespace TrilinosWrappers
       void Tvmult (VectorBase       &dst,
                   const VectorBase &src) const;
 
+                                       /**
+                                        * Same as before, but working with
+                                        * deal.II's own distributed vector
+                                        * class.
+                                        */
+      void Tvmult (parallel::distributed::Vector<TrilinosScalar>       &dst,
+                  const parallel::distributed::Vector<TrilinosScalar> &src) const;
+
                                        /**
                                         * Adding Matrix-vector
                                         * multiplication. Add
@@ -3215,6 +3233,27 @@ namespace TrilinosWrappers
 
 
 
+  inline
+  void
+  SparseMatrix::vmult (parallel::distributed::Vector<TrilinosScalar>       &dst,
+                      const parallel::distributed::Vector<TrilinosScalar> &src) const
+  {
+    Assert (&src != &dst, ExcSourceEqualsDestination());
+    Assert (matrix->Filled(), ExcMatrixNotCompressed());
+
+    AssertDimension (dst.local_size(), matrix->RangeMap().NumMyElements());
+    AssertDimension (src.local_size(), matrix->DomainMap().NumMyElements());
+
+    Epetra_Vector tril_dst (View, matrix->RangeMap(), dst.begin());
+    Epetra_Vector tril_src (View, matrix->DomainMap(),
+                           const_cast<double*>(src.begin()));
+
+    const int ierr = matrix->Multiply (false, tril_src, tril_dst);
+    Assert (ierr == 0, ExcTrilinosError(ierr));
+  }
+
+
+
   inline
   void
   SparseMatrix::Tvmult (VectorBase       &dst,
@@ -3235,6 +3274,27 @@ namespace TrilinosWrappers
 
 
 
+  inline
+  void
+  SparseMatrix::Tvmult (parallel::distributed::Vector<TrilinosScalar>      &dst,
+                       const parallel::distributed::Vector<TrilinosScalar>&src) const
+  {
+    Assert (&src != &dst, ExcSourceEqualsDestination());
+    Assert (matrix->Filled(), ExcMatrixNotCompressed());
+
+    AssertDimension (dst.local_size(), matrix->DomainMap().NumMyElements());
+    AssertDimension (src.local_size(), matrix->RangeMap().NumMyElements());
+
+    Epetra_Vector tril_dst (View, matrix->DomainMap(), dst.begin());
+    Epetra_Vector tril_src (View, matrix->RangeMap(),
+                           const_cast<double*>(src.begin()));
+
+    const int ierr = matrix->Multiply (true, tril_src, tril_dst);
+    Assert (ierr == 0, ExcTrilinosError(ierr));
+  }
+
+
+
   inline
   void
   SparseMatrix::vmult_add (VectorBase       &dst,
index 0b0cc02cb3a4027cbc799bf70a61708d71e278ab..cd4b0180f478821c3e53ea6dfbca592a068753e2 100644 (file)
@@ -1195,6 +1195,11 @@ inline
 Vector<Number> &
 Vector<Number>::operator = (const Vector<Number>& v)
 {
+                               // if v is the same vector as *this, there is
+                               // nothing to do
+  if (PointerComparison::equal(this, &v) == true)
+    return *this;
+
   if (v.vec_size != vec_size)
     reinit (v.vec_size, true);
   if (vec_size>internal::Vector::minimum_parallel_grain_size)
index 094d00ae426564358bd0b29631ec125a94b3238a..071e973796f320ded9d23374a5b4da4851ec5b4c 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
+
+namespace
+{
+                                    /**
+                                     * Adjust vectors on all levels to
+                                     * correct size.  Here, we just
+                                     * count the numbers of degrees
+                                     * of freedom on each level and
+                                     * @p reinit each level vector
+                                     * to this length.
+                                      * For compatibility with the
+                                      * next function, the
+                                      * target_component argument is
+                                      * accepted here but not used.
+                                     */
+  template <int dim, typename number, int spacedim>
+  void
+  reinit_vector (const dealii::MGDoFHandler<dim,spacedim> &mg_dof,
+                std::vector<unsigned int> ,
+                MGLevelObject<dealii::Vector<number> > &v)
+  {
+    for (unsigned int level=v.get_minlevel();
+        level<=v.get_maxlevel();++level)
+      {
+       unsigned int n = mg_dof.n_dofs (level);
+       v[level].reinit(n);
+      }
+
+  }
+
+
+                                    /**
+                                     * Adjust vectors on all levels to
+                                     * correct size.  Here, we just
+                                     * count the numbers of degrees
+                                     * of freedom on each level and
+                                     * @p reinit each level vector
+                                     * to this length. The target_component
+                                      * is handed to MGTools::count_dofs_per_block.
+                                      * See there for the documentation.
+                                     */
+  template <int dim, typename number, int spacedim>
+  void
+  reinit_vector (const dealii::MGDoFHandler<dim,spacedim> &mg_dof,
+                std::vector<unsigned int> target_component,
+                MGLevelObject<BlockVector<number> > &v)
+  {
+    const unsigned int n_blocks = mg_dof.get_fe().n_blocks();
+    if (target_component.size()==0)
+      {
+        target_component.resize(n_blocks);
+        for (unsigned int i=0;i<n_blocks;++i)
+          target_component[i] = i;
+      }
+    Assert(target_component.size()==n_blocks,
+          ExcDimensionMismatch(target_component.size(),n_blocks));
+    const unsigned int max_block
+      = *std::max_element (target_component.begin(),
+                          target_component.end());
+    const unsigned int n_target_blocks = max_block + 1;
+
+    std::vector<std::vector<unsigned int> >
+      ndofs(mg_dof.get_tria().n_levels(),
+           std::vector<unsigned int>(n_target_blocks));
+    MGTools::count_dofs_per_block (mg_dof, ndofs, target_component);
+
+    for (unsigned int level=v.get_minlevel();
+        level<=v.get_maxlevel();++level)
+      {
+       v[level].reinit(n_target_blocks);
+       for (unsigned int b=0; b<n_target_blocks; ++b)
+         v[level].block(b).reinit(ndofs[level][b]);
+       v[level].collect_sizes();
+      }
+  }
+}
+
+
+
 /* --------------------- MGTransferPrebuilt -------------- */
 
 
index 38d5b2b31198287c62c1143266bfec0a904e5f98..ff66a2d8c35457e7a03ca108005dd39b4f4abdf1 100644 (file)
@@ -17,6 +17,8 @@
 #include <deal.II/base/quadrature.h>
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
diff --git a/deal.II/source/base/mpi.cc b/deal.II/source/base/mpi.cc
new file mode 100644 (file)
index 0000000..978a06a
--- /dev/null
@@ -0,0 +1,381 @@
+//---------------------------------------------------------------------------
+//      $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2005, 2006, 2008, 2009, 2010, 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/exceptions.h>
+
+#include <cstddef>
+
+#ifdef DEAL_II_USE_TRILINOS
+#  ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#    include <Epetra_MpiComm.h>
+#    include <deal.II/lac/vector_memory.h>
+#    include <deal.II/lac/trilinos_vector.h>
+#    include <deal.II/lac/trilinos_block_vector.h>
+#  endif
+#endif
+
+DEAL_II_NAMESPACE_OPEN
+
+
+namespace Utilities
+{
+
+  namespace MPI
+  {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+                                // Unfortunately, we have to work
+                                // around an oddity in the way PETSc
+                                // and some gcc versions interact. If
+                                // we use PETSc's MPI dummy
+                                // implementation, it expands the
+                                // calls to the two MPI functions
+                                // basically as ``(n_jobs=1, 0)'',
+                                // i.e. it assigns the number one to
+                                // the variable holding the number of
+                                // jobs, and then uses the comma
+                                // operator to let the entire
+                                // expression have the value zero. The
+                                // latter is important, since
+                                // ``MPI_Comm_size'' returns an error
+                                // code that we may want to check (we
+                                // don't here, but one could in
+                                // principle), and the trick with the
+                                // comma operator makes sure that both
+                                // the number of jobs is correctly
+                                // assigned, and the return value is
+                                // zero. Unfortunately, if some recent
+                                // versions of gcc detect that the
+                                // comma expression just stands by
+                                // itself, i.e. the result is not
+                                // assigned to another variable, then
+                                // they warn ``right-hand operand of
+                                // comma has no effect''. This
+                                // unwanted side effect can be
+                                // suppressed by casting the result of
+                                // the entire expression to type
+                                // ``void'' -- not beautiful, but
+                                // helps calming down unwarranted
+                                // compiler warnings...
+    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
+    {
+      int n_jobs=1;
+      (void) MPI_Comm_size (mpi_communicator, &n_jobs);
+
+      return n_jobs;
+    }
+
+
+    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
+    {
+      int rank=0;
+      (void) MPI_Comm_rank (mpi_communicator, &rank);
+
+      return rank;
+    }
+
+
+    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
+    {
+      MPI_Comm new_communicator;
+      MPI_Comm_dup (mpi_communicator, &new_communicator);
+      return new_communicator;
+    }
+
+
+    std::vector<unsigned int>
+    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
+                                                 const std::vector<unsigned int> & destinations)
+    {
+      unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
+      unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
+
+      for (unsigned int i=0; i<destinations.size(); ++i)
+       {
+         Assert (destinations[i] < n_procs,
+                 ExcIndexRange (destinations[i], 0, n_procs));
+         Assert (destinations[i] != myid,
+                 ExcMessage ("There is no point in communicating with ourselves."));
+       }
+
+
+                                      // let all processors
+                                      // communicate the maximal
+                                      // number of destinations they
+                                      // have
+      const unsigned int max_n_destinations
+       = Utilities::MPI::max (destinations.size(), mpi_comm);
+
+                                      // now that we know the number
+                                      // of data packets every
+                                      // processor wants to send, set
+                                      // up a buffer with the maximal
+                                      // size and copy our
+                                      // destinations in there,
+                                      // padded with -1's
+      std::vector<unsigned int> my_destinations(max_n_destinations,
+                                               numbers::invalid_unsigned_int);
+      std::copy (destinations.begin(), destinations.end(),
+                my_destinations.begin());
+
+                                      // now exchange these (we could
+                                      // communicate less data if we
+                                      // used MPI_Allgatherv, but
+                                      // we'd have to communicate
+                                      // my_n_destinations to all
+                                      // processors in this case,
+                                      // which is more expensive than
+                                      // the reduction operation
+                                      // above in MPI_Allreduce)
+      std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
+      MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                    &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
+                    mpi_comm);
+
+                                      // now we know who is going to
+                                      // communicate with
+                                      // whom. collect who is going
+                                      // to communicate with us!
+      std::vector<unsigned int> origins;
+      for (unsigned int i=0; i<n_procs; ++i)
+       for (unsigned int j=0; j<max_n_destinations; ++j)
+         if (all_destinations[i*max_n_destinations + j] == myid)
+           origins.push_back (i);
+         else if (all_destinations[i*max_n_destinations + j] ==
+                  numbers::invalid_unsigned_int)
+           break;
+
+      return origins;
+    }
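The function above is new in this file; a minimal usage sketch (not part of the patch, with the helper name ring_test made up for illustration) lets every process announce its right neighbor as a destination and read off from the returned list which processes will send to it:

    #include <deal.II/base/mpi.h>

    #include <vector>

    void ring_test (MPI_Comm comm)
    {
      const unsigned int my_id   = dealii::Utilities::MPI::this_mpi_process (comm);
      const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes (comm);

      std::vector<unsigned int> destinations;
      if (n_procs > 1)
        destinations.push_back ((my_id + 1) % n_procs);   // send to right neighbor

      const std::vector<unsigned int> origins =
        dealii::Utilities::MPI::compute_point_to_point_communication_pattern (comm, destinations);

      // with more than one process, origins should contain exactly one entry,
      // namely the left neighbor (my_id + n_procs - 1) % n_procs
    }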
+
+
+    namespace
+    {
+                                      // custom MPI_Op for
+                                      // min_max_avg
+      void max_reduce ( const void * in_lhs_,
+                       void * inout_rhs_,
+                       int * len,
+                       MPI_Datatype * )
+      {
+       const MinMaxAvg * in_lhs = static_cast<const MinMaxAvg*>(in_lhs_);
+       MinMaxAvg * inout_rhs = static_cast<MinMaxAvg*>(inout_rhs_);
+
+       Assert(*len==1, ExcInternalError());
+
+       inout_rhs->sum += in_lhs->sum;
+       if (inout_rhs->min>in_lhs->min)
+         {
+           inout_rhs->min = in_lhs->min;
+           inout_rhs->min_index = in_lhs->min_index;
+         }
+       else if (inout_rhs->min == in_lhs->min)
+         { // choose lower cpu index when tied to make operator commutative
+           if (inout_rhs->min_index > in_lhs->min_index)
+             inout_rhs->min_index = in_lhs->min_index;
+         }
+
+       if (inout_rhs->max < in_lhs->max)
+         {
+           inout_rhs->max = in_lhs->max;
+           inout_rhs->max_index = in_lhs->max_index;
+         }
+       else if (inout_rhs->max == in_lhs->max)
+         { // choose lower cpu index when tied to make operator commutative
+           if (inout_rhs->max_index > in_lhs->max_index)
+             inout_rhs->max_index = in_lhs->max_index;
+         }
+      }
+    }
+
+
+
+    MinMaxAvg
+    min_max_avg(const double my_value,
+               const MPI_Comm &mpi_communicator)
+    {
+      MinMaxAvg result;
+
+      const unsigned int my_id
+       = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
+      const unsigned int numproc
+       = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+      MPI_Op op;
+      int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      MinMaxAvg in;
+      in.sum = in.min = in.max = my_value;
+      in.min_index = in.max_index = my_id;
+
+      MPI_Datatype type;
+      int lengths[]={3,2};
+      MPI_Aint displacements[]={0,offsetof(MinMaxAvg, min_index)};
+      MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+      ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      ierr = MPI_Type_commit(&type);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      ierr = MPI_Type_free (&type);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      ierr = MPI_Op_free(&op);
+      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+      result.avg = result.sum / numproc;
+
+      return result;
+    }
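A minimal usage sketch for min_max_avg() (not part of the patch; report_timing is a made-up name): reduce a per-process timing over all ranks and print the statistics once:

    #include <deal.II/base/mpi.h>

    #include <iostream>

    void report_timing (const double local_seconds, MPI_Comm comm)
    {
      const dealii::Utilities::MPI::MinMaxAvg stats =
        dealii::Utilities::MPI::min_max_avg (local_seconds, comm);

      if (dealii::Utilities::MPI::this_mpi_process (comm) == 0)
        std::cout << "min " << stats.min << " (rank " << stats.min_index << "), "
                  << "max " << stats.max << " (rank " << stats.max_index << "), "
                  << "avg " << stats.avg << std::endl;
    }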
+
+#else
+
+    unsigned int n_mpi_processes (const MPI_Comm &)
+    {
+      return 1;
+    }
+
+
+
+    unsigned int this_mpi_process (const MPI_Comm &)
+    {
+      return 0;
+    }
+
+
+    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
+    {
+      return mpi_communicator;
+    }
+
+
+
+
+    MinMaxAvg
+    min_max_avg(const double my_value,
+               const MPI_Comm &)
+    {
+      MinMaxAvg result;
+
+      result.sum = my_value;
+      result.avg = my_value;
+      result.min = my_value;
+      result.max = my_value;
+      result.min_index = 0;
+      result.max_index = 0;
+
+      return result;
+    }
+
+#endif
+
+
+
+
+    MPI_InitFinalize::MPI_InitFinalize (int    &argc,
+                                       char** &argv)
+                   :
+                   owns_mpi (true)
+    {
+      static bool constructor_has_already_run = false;
+      Assert (constructor_has_already_run == false,
+             ExcMessage ("You can only create a single object of this class "
+                         "in a program since it initializes the MPI system."));
+
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      int MPI_has_been_started = 0;
+      MPI_Initialized(&MPI_has_been_started);
+      AssertThrow (MPI_has_been_started == 0,
+                  ExcMessage ("MPI error. You can only start MPI once!"));
+
+      int mpi_err;
+      mpi_err = MPI_Init (&argc, &argv);
+      AssertThrow (mpi_err == 0,
+                  ExcMessage ("MPI could not be initialized."));
+#else
+                                      // make sure the compiler doesn't warn
+                                      // about these variables
+      (void)argc;
+      (void)argv;
+#endif
+
+      constructor_has_already_run = true;
+    }
+
+
+    MPI_InitFinalize::~MPI_InitFinalize()
+    {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+
+#  if defined(DEAL_II_USE_TRILINOS) && !defined(__APPLE__)
+                                      // make memory pool release all
+                                      // vectors that are no longer
+                                      // used at this point. this is
+                                      // relevant because the static
+                                      // object destructors run for
+                                      // these vectors at the end of
+                                      // the program would run after
+                                      // MPI_Finalize is called,
+                                      // leading to errors
+                                      //
+                                      // TODO: On Mac OS X, shared libs can
+                                      // only depend on other libs listed
+                                      // later on the command line. This
+                                      // means that libbase can't depend on
+                                      // liblac, and we can't destroy the
+                                      // memory pool here as long as we have
+                                      // separate libraries. Consequently,
+                                      // the #ifdef above. Deal will then
+                                      // just continue to seg fault upon
+                                      // completion of main()
+      GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
+       ::release_unused_memory ();
+      GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
+       ::release_unused_memory ();
+#  endif
+
+      int mpi_err = 0;
+
+      int MPI_has_been_started = 0;
+      MPI_Initialized(&MPI_has_been_started);
+      if (Utilities::System::program_uses_mpi() == true && owns_mpi == true &&
+         MPI_has_been_started != 0)
+       {
+         if (std::uncaught_exception())
+           {
+             std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
+                       << this_mpi_process(MPI_COMM_WORLD)
+                       << ". Skipping MPI_Finalize() to avoid a deadlock."
+                       << std::endl;
+           }
+         else
+           mpi_err = MPI_Finalize();
+       }
+
+
+      AssertThrow (mpi_err == 0,
+                  ExcMessage ("An error occurred while calling MPI_Finalize()"));
+#endif
+    }
+
+
+  } // end of namespace MPI
+
+} // end of namespace Utilities
+
+DEAL_II_NAMESPACE_CLOSE
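For orientation, a minimal sketch of how the MPI_InitFinalize class defined in this new file is meant to be used (not part of the patch): a single stack object in main() ties MPI_Init/MPI_Finalize to scope, so MPI is finalized even on early returns:

    #include <deal.II/base/mpi.h>

    int main (int argc, char **argv)
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

      // ... build the triangulation, assemble and solve ...

      return 0;   // the destructor of mpi_initialization calls MPI_Finalize()
    }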
diff --git a/deal.II/source/base/partitioner.cc b/deal.II/source/base/partitioner.cc
new file mode 100644 (file)
index 0000000..d8bab35
--- /dev/null
@@ -0,0 +1,341 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <deal.II/base/partitioner.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace Utilities
+{
+  namespace MPI
+  {
+  Partitioner::Partitioner ()
+    :
+    global_size (0),
+    local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, 0)),
+    n_ghost_indices_data (0),
+    n_import_indices_data (0),
+    my_pid (0),
+    n_procs (1),
+    communicator (MPI_COMM_SELF)
+  {}
+
+
+
+  Partitioner::Partitioner (const unsigned int size)
+    :
+    global_size (size),
+    local_range_data (std::pair<types::global_dof_index, types::global_dof_index> (0, size)),
+    n_ghost_indices_data (0),
+    n_import_indices_data (0),
+    my_pid (0),
+    n_procs (1),
+    communicator (MPI_COMM_SELF)
+  {}
+
+
+
+  Partitioner::Partitioner (const IndexSet &locally_owned_indices,
+                           const IndexSet &ghost_indices_in,
+                           const MPI_Comm  communicator_in)
+    :
+    global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
+    n_ghost_indices_data (0),
+    n_import_indices_data (0),
+    my_pid (0),
+    n_procs (1),
+    communicator (communicator_in)
+  {
+    set_owned_indices (locally_owned_indices);
+    set_ghost_indices (ghost_indices_in);
+  }
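A minimal construction sketch for this constructor (not part of the patch; make_partitioner and the sizes are made up): split 100 indices evenly over the processes and ghost the first index owned by the right neighbor:

    #include <deal.II/base/index_set.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/base/partitioner.h>

    void make_partitioner (MPI_Comm comm)
    {
      const unsigned int my_id   = dealii::Utilities::MPI::this_mpi_process (comm);
      const unsigned int n_procs = dealii::Utilities::MPI::n_mpi_processes (comm);

      const unsigned int size        = 100;
      const unsigned int local_begin = my_id * size / n_procs;
      const unsigned int local_end   = (my_id + 1) * size / n_procs;

      dealii::IndexSet locally_owned (size);
      locally_owned.add_range (local_begin, local_end);

      dealii::IndexSet ghosts (size);
      if (my_id + 1 < n_procs)
        ghosts.add_index (local_end);        // owned by the right neighbor

      dealii::Utilities::MPI::Partitioner partitioner (locally_owned, ghosts, comm);
    }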
+
+
+
+  Partitioner::Partitioner (const IndexSet &locally_owned_indices,
+                           const MPI_Comm  communicator_in)
+    :
+    global_size (static_cast<types::global_dof_index>(locally_owned_indices.size())),
+    n_ghost_indices_data (0),
+    n_import_indices_data (0),
+    my_pid (0),
+    n_procs (1),
+    communicator (communicator_in)
+  {
+    set_owned_indices (locally_owned_indices);
+  }
+
+
+
+  void
+  Partitioner::set_owned_indices (const IndexSet &locally_owned_indices)
+  {
+    if (Utilities::System::job_supports_mpi() == true)
+      {
+       my_pid = Utilities::MPI::this_mpi_process(communicator);
+       n_procs = Utilities::MPI::n_mpi_processes(communicator);
+      }
+    else
+      {
+       my_pid = 0;
+       n_procs = 1;
+      }
+
+                               // set the local range
+    Assert (locally_owned_indices.is_contiguous() == 1,
+           ExcMessage ("The index set specified in locally_owned_indices "
+                       "is not contiguous."));
+    locally_owned_indices.compress();
+    if (locally_owned_indices.n_elements()>0)
+      local_range_data = std::pair<types::global_dof_index, types::global_dof_index>
+       (locally_owned_indices.nth_index_in_set(0),
+        locally_owned_indices.nth_index_in_set(0)+
+        locally_owned_indices.n_elements());
+    locally_owned_range_data.set_size (locally_owned_indices.size());
+    locally_owned_range_data.add_range (local_range_data.first,local_range_data.second);
+    locally_owned_range_data.compress();
+
+    ghost_indices_data.set_size (locally_owned_indices.size());
+  }
+
+
+
+  void
+  Partitioner::set_ghost_indices (const IndexSet &ghost_indices_in)
+  {
+                               // Set ghost indices from input. To be sure
+                               // that no entries from the locally owned
+                               // range are present, subtract the locally
+                               // owned indices in any case.
+    Assert (ghost_indices_in.n_elements() == 0 ||
+           ghost_indices_in.size() == locally_owned_range_data.size(),
+           ExcDimensionMismatch (ghost_indices_in.size(),
+                                 locally_owned_range_data.size()));
+    ghost_indices_data = ghost_indices_in;
+    ghost_indices_data.subtract_set (locally_owned_range_data);
+    ghost_indices_data.compress();
+    n_ghost_indices_data = ghost_indices_data.n_elements();
+
+                               // In the rest of this function, we determine
+                               // the point-to-point communication pattern of
+                               // the partitioner. We build a list with both
+                               // the processors the ghost indices actually
+                               // belong to, and the indices that are locally
+                               // held but are ghost indices of other
+                               // processors. This then allows us to import
+                               // and export data very easily.
+
+                               // find out the end index for each processor
+                               // and communicate it (this implies the start
+                               // index for the next processor)
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+    if (n_procs < 2)
+      {
+       Assert (ghost_indices_data.n_elements() == 0, ExcInternalError());
+       Assert (n_import_indices_data == 0, ExcInternalError());
+       Assert (n_ghost_indices_data  == 0, ExcInternalError());
+       return;
+      }
+
+    std::vector<types::global_dof_index> first_index (n_procs+1);
+    first_index[0] = 0;
+    MPI_Allgather(&local_range_data.second, sizeof(types::global_dof_index),
+                 MPI_BYTE, &first_index[1], sizeof(types::global_dof_index),
+                 MPI_BYTE, communicator);
+    first_index[n_procs] = global_size;
+
+                               // fix case when there are some processors
+                               // without any locally owned indices: then
+                               // there might be a zero in some entries
+    if (global_size > 0)
+      {
+       unsigned int first_proc_with_nonzero_dofs = 0;
+       for (unsigned int i=0; i<n_procs; ++i)
+         if (first_index[i+1]>0)
+           {
+             first_proc_with_nonzero_dofs = i;
+             break;
+           }
+       for (unsigned int i=first_proc_with_nonzero_dofs+1; i<n_procs; ++i)
+         if (first_index[i] == 0)
+           first_index[i] = first_index[i-1];
+
+                               // correct if our processor has a wrong local
+                               // range
+       if (first_index[my_pid] != local_range_data.first)
+         {
+           Assert(local_range_data.first == local_range_data.second,
+                  ExcInternalError());
+           local_range_data.first = local_range_data.second = first_index[my_pid];
+         }
+      }
+
+                               // Allocate memory for data that will be
+                               // exported
+    std::vector<types::global_dof_index> expanded_ghost_indices (n_ghost_indices_data);
+    unsigned int n_ghost_targets = 0;
+    if (n_ghost_indices_data > 0)
+      {
+                               // Create first a vector of ghost_targets from
+                               // the list of ghost indices and then push
+                               // back new values. When we are done, copy the
+                               // data to that field of the partitioner. This
+                               // way, the variable ghost_targets will have
+                               // exactly the size we need, whereas the
+                               // vector filled with push_back might actually
+                               // be too long.
+       unsigned int current_proc = 0;
+       ghost_indices_data.fill_index_vector (expanded_ghost_indices);
+       unsigned int current_index = expanded_ghost_indices[0];
+       while(current_index >= first_index[current_proc+1])
+         current_proc++;
+       std::vector<std::pair<unsigned int,unsigned int> > ghost_targets_temp
+         (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
+       n_ghost_targets++;
+
+       for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
+         {
+           current_index = expanded_ghost_indices[iterator];
+           while(current_index >= first_index[current_proc+1])
+             current_proc++;
+           AssertIndexRange (current_proc, n_procs);
+           if( ghost_targets_temp[n_ghost_targets-1].first < current_proc)
+             {
+               ghost_targets_temp[n_ghost_targets-1].second =
+                 iterator - ghost_targets_temp[n_ghost_targets-1].second;
+               ghost_targets_temp.push_back(std::pair<unsigned int,
+                                            unsigned int>(current_proc,iterator));
+               n_ghost_targets++;
+             }
+         }
+       ghost_targets_temp[n_ghost_targets-1].second =
+         n_ghost_indices_data - ghost_targets_temp[n_ghost_targets-1].second;
+       ghost_targets_data = ghost_targets_temp;
+      }
+                               // find the processes that want to import to
+                               // me
+    {
+      std::vector<int> send_buffer (n_procs, 0);
+      std::vector<int> receive_buffer (n_procs, 0);
+     for (unsigned int i=0; i<n_ghost_targets; i++)
+       send_buffer[ghost_targets_data[i].first] = ghost_targets_data[i].second;
+
+      MPI_Alltoall (&send_buffer[0], 1, MPI_INT, &receive_buffer[0], 1,
+                   MPI_INT, communicator);
+
+                               // allocate memory for import data
+      std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
+      n_import_indices_data = 0;
+      for (unsigned int i=0; i<n_procs; i++)
+       if (receive_buffer[i] > 0)
+         {
+           n_import_indices_data += receive_buffer[i];
+           import_targets_temp.push_back(std::pair<unsigned int,
+                                         unsigned int> (i, receive_buffer[i]));
+         }
+      import_targets_data = import_targets_temp;
+    }
+
+                               // send and receive indices for import
+                               // data. non-blocking receives and blocking
+                               // sends
+    std::vector<types::global_dof_index> expanded_import_indices (n_import_indices_data);
+    {
+      unsigned int current_index_start = 0;
+      std::vector<MPI_Request> import_requests (import_targets_data.size());
+      for (unsigned int i=0; i<import_targets_data.size(); i++)
+       {
+         MPI_Irecv (&expanded_import_indices[current_index_start],
+                    import_targets_data[i].second*sizeof(types::global_dof_index),
+                    MPI_BYTE,
+                    import_targets_data[i].first, import_targets_data[i].first,
+                    communicator, &import_requests[i]);
+         current_index_start += import_targets_data[i].second;
+       }
+      AssertDimension (current_index_start, n_import_indices_data);
+
+                               // use blocking send
+      current_index_start = 0;
+      for (unsigned int i=0; i<n_ghost_targets; i++)
+       {
+         MPI_Send (&expanded_ghost_indices[current_index_start],
+                   ghost_targets_data[i].second*sizeof(types::global_dof_index),
+                   MPI_BYTE, ghost_targets_data[i].first, my_pid,
+                   communicator);
+         current_index_start += ghost_targets_data[i].second;
+       }
+      AssertDimension (current_index_start, n_ghost_indices_data);
+
+      MPI_Waitall (import_requests.size(), &import_requests[0],
+                  MPI_STATUSES_IGNORE);
+
+                               // transform import indices to local index
+                               // space and compress contiguous indices in
+                               // form of ranges
+      {
+       unsigned int last_index = numbers::invalid_unsigned_int-1;
+       std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
+       for (unsigned int i=0;i<n_import_indices_data;i++)
+         {
+           Assert (expanded_import_indices[i] >= local_range_data.first &&
+                   expanded_import_indices[i] < local_range_data.second,
+                   ExcIndexRange(expanded_import_indices[i], local_range_data.first,
+                                 local_range_data.second));
+           unsigned int new_index = (expanded_import_indices[i] -
+                                     local_range_data.first);
+           if (new_index == last_index+1)
+             compressed_import_indices.back().second++;
+           else
+             {
+               compressed_import_indices.push_back
+                 (std::pair<unsigned int,unsigned int>(new_index,new_index+1));
+             }
+           last_index = new_index;
+         }
+       import_indices_data = compressed_import_indices;
+
+                               // sanity check
+#ifdef DEBUG
+       const unsigned int n_local_dofs = local_range_data.second-local_range_data.first;
+       for (unsigned int i=0; i<import_indices_data.size(); ++i)
+         {
+           AssertIndexRange (import_indices_data[i].first, n_local_dofs);
+           AssertIndexRange (import_indices_data[i].second-1, n_local_dofs);
+         }
+#endif
+      }
+    }
+#endif
+  }
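As a hedged worked example of what set_ghost_indices() computes (not part of the patch): take a global size of 100 on two processes, where process 0 owns [0,50), process 1 owns [50,100), and process 0 declares global index 50 as a ghost (the situation produced by the construction sketch after the constructors above). Then on process 0, ghost_targets_data becomes {(1,1)} (one ghost index owned by process 1) and import_targets_data stays empty; on process 1, ghost_targets_data stays empty, import_targets_data becomes {(0,1)}, and import_indices_data holds the single local range [0,1), i.e. the position of global index 50 within process 1's locally owned range.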
+
+
+
+  std::size_t
+  Partitioner::memory_consumption() const
+  {
+    std::size_t memory = (3*sizeof(types::global_dof_index)+4*sizeof(unsigned int)+
+                         sizeof(MPI_Comm));
+    memory += MemoryConsumption::memory_consumption(ghost_targets_data);
+    memory += MemoryConsumption::memory_consumption(import_targets_data);
+    memory += MemoryConsumption::memory_consumption(import_indices_data);
+    memory += MemoryConsumption::memory_consumption(ghost_indices_data);
+    return memory;
+  }
+
+  } // end of namespace MPI
+
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
index eec4ec59ccb6fba7fc8d0aa789973dd2a55a34d2..301246ca049b58e801ac6b5dfb7beb73a063ad9e 100644 (file)
@@ -34,6 +34,7 @@
 #  include <limits.h>
 #endif
 
+
 #ifdef DEAL_II_USE_TRILINOS
 #  ifdef DEAL_II_COMPILER_SUPPORTS_MPI
 #    include <Epetra_MpiComm.h>
@@ -456,350 +457,6 @@ namespace Utilities
   }
 
 
-  namespace MPI
-  {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-                                // Unfortunately, we have to work
-                                // around an oddity in the way PETSc
-                                // and some gcc versions interact. If
-                                // we use PETSc's MPI dummy
-                                // implementation, it expands the
-                                // calls to the two MPI functions
-                                // basically as ``(n_jobs=1, 0)'',
-                                // i.e. it assigns the number one to
-                                // the variable holding the number of
-                                // jobs, and then uses the comma
-                                // operator to let the entire
-                                // expression have the value zero. The
-                                // latter is important, since
-                                // ``MPI_Comm_size'' returns an error
-                                // code that we may want to check (we
-                                // don't here, but one could in
-                                // principle), and the trick with the
-                                // comma operator makes sure that both
-                                // the number of jobs is correctly
-                                // assigned, and the return value is
-                                // zero. Unfortunately, if some recent
-                                // versions of gcc detect that the
-                                // comma expression just stands by
-                                // itself, i.e. the result is not
-                                // assigned to another variable, then
-                                // they warn ``right-hand operand of
-                                // comma has no effect''. This
-                                // unwanted side effect can be
-                                // suppressed by casting the result of
-                                // the entire expression to type
-                                // ``void'' -- not beautiful, but
-                                // helps calming down unwarranted
-                                // compiler warnings...
-    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
-    {
-      int n_jobs=1;
-      (void) MPI_Comm_size (mpi_communicator, &n_jobs);
-
-      return n_jobs;
-    }
-
-
-    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
-    {
-      int rank=0;
-      (void) MPI_Comm_rank (mpi_communicator, &rank);
-
-      return rank;
-    }
-
-
-    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
-    {
-      MPI_Comm new_communicator;
-      MPI_Comm_dup (mpi_communicator, &new_communicator);
-      return new_communicator;
-    }
-
-
-    std::vector<unsigned int>
-    compute_point_to_point_communication_pattern (const MPI_Comm & mpi_comm,
-                                                 const std::vector<unsigned int> & destinations)
-    {
-      unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
-      unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
-
-      for (unsigned int i=0; i<destinations.size(); ++i)
-       {
-         Assert (destinations[i] < n_procs,
-                 ExcIndexRange (destinations[i], 0, n_procs));
-         Assert (destinations[i] != myid,
-                 ExcMessage ("There is no point in communicating with ourselves."));
-       }
-
-
-                                      // let all processors
-                                      // communicate the maximal
-                                      // number of destinations they
-                                      // have
-      const unsigned int max_n_destinations
-       = Utilities::MPI::max (destinations.size(), mpi_comm);
-
-                                      // now that we know the number
-                                      // of data packets every
-                                      // processor wants to send, set
-                                      // up a buffer with the maximal
-                                      // size and copy our
-                                      // destinations in there,
-                                      // padded with -1's
-      std::vector<unsigned int> my_destinations(max_n_destinations,
-                                               numbers::invalid_unsigned_int);
-      std::copy (destinations.begin(), destinations.end(),
-                my_destinations.begin());
-
-                                      // now exchange these (we could
-                                      // communicate less data if we
-                                      // used MPI_Allgatherv, but
-                                      // we'd have to communicate
-                                      // my_n_destinations to all
-                                      // processors in this case,
-                                      // which is more expensive than
-                                      // the reduction operation
-                                      // above in MPI_Allreduce)
-      std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
-      MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
-                    &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
-                    mpi_comm);
-
-                                      // now we know who is going to
-                                      // communicate with
-                                      // whom. collect who is going
-                                      // to communicate with us!
-      std::vector<unsigned int> origins;
-      for (unsigned int i=0; i<n_procs; ++i)
-       for (unsigned int j=0; j<max_n_destinations; ++j)
-         if (all_destinations[i*max_n_destinations + j] == myid)
-           origins.push_back (i);
-         else if (all_destinations[i*max_n_destinations + j] ==
-                  numbers::invalid_unsigned_int)
-           break;
-
-      return origins;
-    }
-
-
-    namespace
-    {
-                                      // custom MIP_Op for
-                                      // calculate_collective_mpi_min_max_avg
-      void max_reduce ( const void * in_lhs_,
-                       void * inout_rhs_,
-                       int * len,
-                       MPI_Datatype * )
-      {
-       const MinMaxAvg * in_lhs = static_cast<const MinMaxAvg*>(in_lhs_);
-       MinMaxAvg * inout_rhs = static_cast<MinMaxAvg*>(inout_rhs_);
-
-       Assert(*len==1, ExcInternalError());
-
-       inout_rhs->sum += in_lhs->sum;
-       if (inout_rhs->min>in_lhs->min)
-         {
-           inout_rhs->min = in_lhs->min;
-           inout_rhs->min_index = in_lhs->min_index;
-         }
-       else if (inout_rhs->min == in_lhs->min)
-         { // choose lower cpu index when tied to make operator cumutative
-           if (inout_rhs->min_index > in_lhs->min_index)
-             inout_rhs->min_index = in_lhs->min_index;
-         }
-
-       if (inout_rhs->max < in_lhs->max)
-         {
-         inout_rhs->max = in_lhs->max;
-         inout_rhs->max_index = in_lhs->max_index;
-         }
-       else if (inout_rhs->max == in_lhs->max)
-         { // choose lower cpu index when tied to make operator cumutative
-           if (inout_rhs->max_index > in_lhs->max_index)
-             inout_rhs->max_index = in_lhs->max_index;
-         }
-      }
-    }
-
-
-
-    MinMaxAvg
-    min_max_avg(const double my_value,
-               const MPI_Comm &mpi_communicator)
-    {
-      MinMaxAvg result;
-
-      const unsigned int my_id
-       = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
-      const unsigned int numproc
-       = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
-
-      MPI_Op op;
-      int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
-      MinMaxAvg in;
-      in.sum = in.min = in.max = my_value;
-      in.min_index = in.max_index = my_id;
-
-      MPI_Datatype type;
-      int lengths[]={3,2};
-      MPI_Aint displacements[]={0,offsetof(MinMaxAvg, min_index)};
-      MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
-
-      ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
-      ierr = MPI_Type_commit(&type);
-      ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
-      ierr = MPI_Type_free (&type);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
-      ierr = MPI_Op_free(&op);
-      AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
-      result.avg = result.sum / numproc;
-
-      return result;
-    }
-
-#else
-
-    unsigned int n_mpi_processes (const MPI_Comm &)
-    {
-      return 1;
-    }
-
-
-
-    unsigned int this_mpi_process (const MPI_Comm &)
-    {
-      return 0;
-    }
-
-
-    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
-    {
-      return mpi_communicator;
-    }
-
-
-
-
-    MinMaxAvg
-    min_max_avg(const double my_value,
-               const MPI_Comm &)
-    {
-      MinMaxAvg result;
-
-      result.sum = my_value;
-      result.avg = my_value;
-      result.min = my_value;
-      result.max = my_value;
-      result.min_index = 0;
-      result.max_index = 0;
-
-      return result;
-    }
-
-#endif
-
-
-
-
-    MPI_InitFinalize::MPI_InitFinalize (int    &argc,
-                                       char** &argv)
-                   :
-                   owns_mpi (true)
-    {
-      static bool constructor_has_already_run = false;
-      Assert (constructor_has_already_run == false,
-             ExcMessage ("You can only create a single object of this class "
-                         "in a program since it initializes the MPI system."));
-
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-      int MPI_has_been_started = 0;
-      MPI_Initialized(&MPI_has_been_started);
-      AssertThrow (MPI_has_been_started == 0,
-                  ExcMessage ("MPI error. You can only start MPI once!"));
-
-      int mpi_err;
-      mpi_err = MPI_Init (&argc, &argv);
-      AssertThrow (mpi_err == 0,
-                  ExcMessage ("MPI could not be initialized."));
-#else
-                                      // make sure the compiler doesn't warn
-                                      // about these variables
-      (void)argc;
-      (void)argv;
-#endif
-
-      constructor_has_already_run = true;
-    }
-
-
-    MPI_InitFinalize::~MPI_InitFinalize()
-    {
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
-
-#  if defined(DEAL_II_USE_TRILINOS) && !defined(__APPLE__)
-                                      // make memory pool release all
-                                      // vectors that are no longer
-                                      // used at this point. this is
-                                      // relevant because the static
-                                      // object destructors run for
-                                      // these vectors at the end of
-                                      // the program would run after
-                                      // MPI_Finalize is called,
-                                      // leading to errors
-                                      //
-                                      // TODO: On Mac OS X, shared libs can
-                                      // only depend on other libs listed
-                                      // later on the command line. This
-                                      // means that libbase can't depend on
-                                      // liblac, and we can't destroy the
-                                      // memory pool here as long as we have
-                                      // separate libraries. Consequently,
-                                      // the #ifdef above. Deal will then
-                                      // just continue to seg fault upon
-                                      // completion of main()
-      GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
-       ::release_unused_memory ();
-      GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
-       ::release_unused_memory ();
-#  endif
-
-      int mpi_err = 0;
-
-      int MPI_has_been_started = 0;
-      MPI_Initialized(&MPI_has_been_started);
-      if (Utilities::System::program_uses_mpi() == true && owns_mpi == true &&
-         MPI_has_been_started != 0)
-       {
-         if (std::uncaught_exception())
-           {
-             std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
-                       << this_mpi_process(MPI_COMM_WORLD)
-                       << ". Skipping MPI_Finalize() to avoid a deadlock."
-                       << std::endl;
-           }
-         else
-           mpi_err = MPI_Finalize();
-       }
-
-
-      AssertThrow (mpi_err == 0,
-                  ExcMessage ("An error occurred while calling MPI_Finalize()"));
-#endif
-    }
-
-
-  }
-
 
   namespace System
   {
index faf2c655f6ba4b88da2aae2b456dd8efe8580872..fef12dc30334abea38b610484e1956cfc2f77c75 100644 (file)
@@ -1,5 +1,5 @@
 //---------------------------------------------------------------------------
-//    $Id$
+//    $Id: solution_transfer.cc 23752 2011-05-30 00:16:00Z bangerth $
 //    Version: $Name$
 //
 //    Copyright (C) 2009, 2010, 2011 by the deal.II authors
 #ifdef DEAL_II_USE_P4EST
 
 #include <deal.II/lac/vector.h>
-#include <deal.II/lac/petsc_vector.h>
-#include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
+#include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
 #include <deal.II/lac/trilinos_block_vector.h>
@@ -247,7 +248,7 @@ namespace parallel
       typename DH::cell_iterator cell(&dof_handler->get_tria(), cell_->level(), cell_->index(), dof_handler);
 
       const unsigned int dofs_per_cell=cell->get_fe().dofs_per_cell;
-      Vector<double> dofvalues(dofs_per_cell);
+      ::dealii::Vector<double> dofvalues(dofs_per_cell);
       for (typename std::vector<const VECTOR*>::iterator it=input_vectors.begin();
           it !=input_vectors.end();
           ++it)
@@ -271,7 +272,7 @@ namespace parallel
        cell(&dof_handler->get_tria(), cell_->level(), cell_->index(), dof_handler);
 
       const unsigned int dofs_per_cell=cell->get_fe().dofs_per_cell;
-      Vector<double> dofvalues(dofs_per_cell);
+      ::dealii::Vector<double> dofvalues(dofs_per_cell);
       const double *data_store = reinterpret_cast<const double *>(data);
 
       for (typename std::vector<VECTOR*>::iterator it = all_out.begin();
index f622eed77341d58d2366080015def1fc16b4dd05..746ca2552e1df8063183307b2e7dee20bd8d73ba 100644 (file)
@@ -19,7 +19,11 @@ for (deal_II_dimension : DIMENSIONS)
     namespace distributed
     \{
 #if deal_II_dimension > 1
-    template class SolutionTransfer<deal_II_dimension,Vector<double>, DoFHandler<deal_II_dimension> >;
+    template class SolutionTransfer<deal_II_dimension,::dealii::Vector<double>, DoFHandler<deal_II_dimension> >;
+    template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::Vector<double>, DoFHandler<deal_II_dimension> >;
+    template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::Vector<float>, DoFHandler<deal_II_dimension> >;
+    template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::BlockVector<double>, DoFHandler<deal_II_dimension> >;
+    template class SolutionTransfer<deal_II_dimension,::dealii::parallel::distributed::BlockVector<float>, DoFHandler<deal_II_dimension> >;
 
 
 #ifdef DEAL_II_USE_PETSC
index 999984dcd7c64d2503634f17048ac51fd4386ee4..81dcf5e156990812de9e08a3faff185c9b30e1ac 100644 (file)
@@ -14,6 +14,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index c8bd490e438bc7d80b04411552756679d87e5dfe..79182e569b4ec146e45025a61954b8ba08b15aaa 100644 (file)
@@ -16,6 +16,8 @@
 #include <deal.II/base/quadrature.h>
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
@@ -3263,7 +3265,7 @@ maybe_invalidate_previous_present_cell (const typename Triangulation<dim,spacedi
   if (present_cell.get() != 0)
   {
     if (&cell->get_triangulation() !=
-        &static_cast<typename Triangulation<dim,spacedim>::cell_iterator>(*present_cell)
+        &present_cell->operator typename Triangulation<dim,spacedim>::cell_iterator()
        ->get_triangulation())
       {
        // the triangulations for the previous cell and the current cell
index 457da40bfd2e16c86cc07303489577df2da70667..d726a890c88fab770e96413568629270aa57cd04 100644 (file)
@@ -438,7 +438,7 @@ MappingQ1<dim,spacedim>::update_each (const UpdateFlags in) const
                                       // is a Piola transformation, which
                                       // requires the determinant of the
                                       // Jacobi matrix of the transformation.
-                                      // Therefore these values have to
+                                      // Therefore these values have to be
                                       // updated for each cell.
       if (out & update_contravariant_transformation)
        out |= update_JxW_values;
@@ -635,52 +635,65 @@ MappingQ1<dim,spacedim>::compute_fill (const typename Triangulation<dim,spacedim
                                    // first compute quadrature points
   if (update_flags & update_quadrature_points)
     {
-      Assert (quadrature_points.size() == n_q_points,
-             ExcDimensionMismatch(quadrature_points.size(), n_q_points));
-      std::fill(quadrature_points.begin(), quadrature_points.end(),
-               Point<spacedim>());
+      AssertDimension (quadrature_points.size(), n_q_points);
 
       for (unsigned int point=0; point<n_q_points; ++point)
-       for (unsigned int k=0; k<data.n_shape_functions; ++k)
-         quadrature_points[point]
-           += data.shape(point+data_set,k) * data.mapping_support_points[k];
+       {
+         const double * shape = &data.shape(point+data_set,0);
+         Point<spacedim> result = (shape[0] *
+                                   data.mapping_support_points[0]);
+         for (unsigned int k=1; k<data.n_shape_functions; ++k)
+           for (unsigned int i=0; i<spacedim; ++i)
+             result[i] += shape[k] * data.mapping_support_points[k][i];
+         quadrature_points[point] = result;
+       }
     }
 
                                    // then Jacobians
   if (update_flags & update_contravariant_transformation)
     {
-      Assert (data.contravariant.size() == n_q_points,
-             ExcDimensionMismatch(data.contravariant.size(), n_q_points));
+      AssertDimension (data.contravariant.size(), n_q_points);
 
                                   // if the current cell is just a
                                   // translation of the previous one, no
                                   // need to recompute jacobians...
       if (cell_similarity != CellSimilarity::translation)
        {
-         std::fill(data.contravariant.begin(), data.contravariant.end(),
-                   Tensor<2,spacedim>());
+         Assert (data.n_shape_functions > 0, ExcInternalError());
+         const Tensor<1,spacedim> *supp_pts =
+           &data.mapping_support_points[0];
          for (unsigned int point=0; point<n_q_points; ++point)
-           for (unsigned int k=0; k<data.n_shape_functions; ++k)
-             {
-                                  // some compilers seem to have problems
-                                  // to detected that the two innermost
-                                  // loops just use the data of the same
-                                  // tensors, so get a reference to them by
-                                  // hand
-               const Tensor<1,dim> &data_derv = data.derivative(point+data_set, k);
-               const Tensor<1,spacedim> &supp_pts = data.mapping_support_points[k];
+           {
+             const Tensor<1,dim> * data_derv =
+               &data.derivative(point+data_set, 0);
+
+             double result [spacedim][dim];
 
+                               // peel away part of sum to avoid zeroing the
+                               // entries and adding for the first time
+             for (unsigned int i=0; i<spacedim; ++i)
+               for (unsigned int j=0; j<dim; ++j)
+                 result[i][j] = data_derv[0][j] * supp_pts[0][i];
+             for (unsigned int k=1; k<data.n_shape_functions; ++k)
                for (unsigned int i=0; i<spacedim; ++i)
                  for (unsigned int j=0; j<dim; ++j)
-                   data.contravariant[point][i][j] += data_derv[j] * supp_pts[i];
-             }
+                   result[i][j] += data_derv[k][j] * supp_pts[k][i];
+
+                               // write result into contravariant data. for
+                               // j=dim in the case dim<spacedim, there will
+                               // never be any nonzero data that arrives in
+                               // here, so it is ok anyway because it was
+                               // initialized to zero at the initialization
+             for (unsigned int i=0; i<spacedim; ++i)
+               for (unsigned int j=0; j<dim; ++j)
+                 data.contravariant[point][i][j] = result[i][j];
+           }
        }
     }
 
   if (update_flags & update_covariant_transformation)
     {
-      Assert (data.covariant.size() == n_q_points,
-             ExcDimensionMismatch(data.covariant.size(), n_q_points));
+      AssertDimension (data.covariant.size(), n_q_points);
       if (cell_similarity != CellSimilarity::translation)
        {
          if (dim == spacedim)
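The rewritten loops in the hunk above use a small peel-off trick: the k=0 term initializes the accumulator, so the result array never needs to be zeroed first. A hedged generic sketch of the same pattern (not part of the patch; weighted_sum is a made-up name):

    double weighted_sum (const double *shape, const double *values, const unsigned int n)
    {
      double result = shape[0] * values[0];     // peeled first term initializes the sum
      for (unsigned int k = 1; k < n; ++k)
        result += shape[k] * values[k];
      return result;
    }

This assumes n > 0, just as the patched code asserts data.n_shape_functions > 0.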
@@ -773,8 +786,7 @@ MappingQ1<dim,spacedim>::fill_fe_values (
   if (update_flags & (update_normal_vectors
                      | update_JxW_values))
     {
-      Assert (JxW_values.size() == n_q_points,
-             ExcDimensionMismatch(JxW_values.size(), n_q_points));
+      AssertDimension (JxW_values.size(), n_q_points);
 
       Assert( !(update_flags & update_normal_vectors ) ||
              (normal_vectors.size() == n_q_points),
@@ -865,8 +877,7 @@ MappingQ1<dim,spacedim>::fill_fe_values (
                                   // vector given by reference
   if (update_flags & update_jacobians)
     {
-      Assert (jacobians.size() == n_q_points,
-             ExcDimensionMismatch(jacobians.size(), n_q_points));
+      AssertDimension (jacobians.size(), n_q_points);
       if (cell_similarity != CellSimilarity::translation)
        for (unsigned int point=0; point<n_q_points; ++point)
          jacobians[point] = data.contravariant[point];
@@ -877,32 +888,45 @@ MappingQ1<dim,spacedim>::fill_fe_values (
                                   // cells, not faces.
   if (update_flags & update_jacobian_grads)
     {
-      Assert (jacobian_grads.size() == n_q_points,
-             ExcDimensionMismatch(jacobian_grads.size(), n_q_points));
+      AssertDimension (jacobian_grads.size(), n_q_points);
 
       if (cell_similarity != CellSimilarity::translation)
        {
-         std::fill(jacobian_grads.begin(),
-                   jacobian_grads.end(),
-                   Tensor<3,spacedim>());
-
+         const unsigned int data_set = DataSetDescriptor::cell();
          for (unsigned int point=0; point<n_q_points; ++point)
-           for (unsigned int k=0; k<data.n_shape_functions; ++k)
-             for (unsigned int i=0; i<dim; ++i)
+           {
+             const Tensor<2,dim> * second =
+               &data.second_derivative(point+data_set, 0);
+             double result [spacedim][dim][dim];
+             for (unsigned int i=0; i<spacedim; ++i)
+               for (unsigned int j=0; j<dim; ++j)
+                 for (unsigned int l=0; l<dim; ++l)
+                   result[i][j][l] = (second[0][j][l] *
+                                      data.mapping_support_points[0][i]);
+             for (unsigned int k=1; k<data.n_shape_functions; ++k)
+               for (unsigned int i=0; i<spacedim; ++i)
+                 for (unsigned int j=0; j<dim; ++j)
+                   for (unsigned int l=0; l<dim; ++l)
+                     result[i][j][l]
+                       += (second[k][j][l]
+                           *
+                           data.mapping_support_points[k][i]);
+
+                               // never touch any data for j=dim in case
+                               // dim<spacedim, so it will always be zero as
+                               // it was initialized
+             for (unsigned int i=0; i<spacedim; ++i)
                for (unsigned int j=0; j<dim; ++j)
                  for (unsigned int l=0; l<dim; ++l)
-                   jacobian_grads[point][i][j][l]
-                     += (data.second_derivative(point+DataSetDescriptor::cell (), k)[j][l]
-                         *
-                         data.mapping_support_points[k][i]);
+                   jacobian_grads[point][i][j][l] = result[i][j][l];
+           }
        }
     }
                                   // copy values from InternalData to vector
                                   // given by reference
   if (update_flags & update_inverse_jacobians)
     {
-      Assert (inverse_jacobians.size() == n_q_points,
-             ExcDimensionMismatch(inverse_jacobians.size(), n_q_points));
+      AssertDimension (inverse_jacobians.size(), n_q_points);
       if (cell_similarity != CellSimilarity::translation)
        for (unsigned int point=0; point<n_q_points; ++point)
          inverse_jacobians[point] = transpose(data.covariant[point]);
@@ -935,14 +959,11 @@ namespace internal
 
       if (update_flags & update_boundary_forms)
        {
-         Assert (boundary_forms.size()==n_q_points,
-                 ExcDimensionMismatch(boundary_forms.size(), n_q_points));
+         AssertDimension (boundary_forms.size(), n_q_points);
          if (update_flags & update_normal_vectors)
-           Assert (normal_vectors.size()==n_q_points,
-                   ExcDimensionMismatch(normal_vectors.size(), n_q_points));
+           AssertDimension (normal_vectors.size(), n_q_points);
          if (update_flags & update_JxW_values)
-           Assert (JxW_values.size() == n_q_points,
-                   ExcDimensionMismatch(JxW_values.size(), n_q_points));
+           AssertDimension (JxW_values.size(), n_q_points);
 
                                           // map the unit tangentials to the
                                           // real cell. checking for d!=dim-1
@@ -1013,8 +1034,7 @@ namespace internal
                                               // use the same method used in
                                               // fill_fe_values for cells
                                               // above
-             Assert (data.contravariant.size() == n_q_points,
-                     ExcInternalError());
+             AssertDimension (data.contravariant.size(), n_q_points);
              for (unsigned int point=0; point<n_q_points; ++point)
                {
                  Tensor<1,spacedim> cell_normal;
@@ -1421,7 +1441,7 @@ MappingQ1<dim,spacedim>::
 transform_unit_to_real_cell_internal (const InternalData &data) const
 {
   const unsigned int n_mapping_points=data.mapping_support_points.size();
-  Assert(data.shape_values.size()==n_mapping_points, ExcInternalError());
+  AssertDimension (data.shape_values.size(), n_mapping_points);
 
                                   // use now the InternalData to
                                   // compute the point in real space.
@@ -1488,10 +1508,10 @@ transform_real_to_unit_cell_internal
 {
   const unsigned int n_shapes=mdata.shape_values.size();
   Assert(n_shapes!=0, ExcInternalError());
-  Assert(mdata.shape_derivatives.size()==n_shapes, ExcInternalError());
+  AssertDimension (mdata.shape_derivatives.size(), n_shapes);
 
   std::vector<Point<spacedim> > &points=mdata.mapping_support_points;
-  Assert(points.size()==n_shapes, ExcInternalError());
+  AssertDimension (points.size(), n_shapes);
 
 
                                   // Newton iteration to solve
index abde64548104ad63adfd2285ecdd2290db1f1180..b484d8ad7b0e22ac00ad0a3f5e448e69c476bc76 100644 (file)
@@ -505,7 +505,7 @@ template <int dim, class Vector, int spacedim>
 void
 GridRefinement::refine_and_coarsen_optimize (Triangulation<dim,spacedim> &tria,
                                             const Vector       &criteria,
-                                            const unsigned int  order=2)
+                                            const unsigned int  order)
 {
   Assert (criteria.size() == tria.n_active_cells(),
          ExcDimensionMismatch(criteria.size(), tria.n_active_cells()));
index cdf3a599120e0c033a91602c78bc7d0ada195bfc..06849b32fc032ee199d17de491154d6a77c0223c 100644 (file)
@@ -22,6 +22,8 @@
 #include <deal.II/lac/block_sparse_matrix.h>
 #include <deal.II/lac/sparse_matrix_ez.h>
 #include <deal.II/lac/block_sparse_matrix_ez.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/petsc_sparse_matrix.h>
index f4ba50e19f58b25a5bd13a9a133e9dd05ad18339..359ebfbd108befd62fccac77630a2b7e74fd41d4 100644 (file)
@@ -21,6 +21,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
diff --git a/deal.II/source/lac/parallel_vector.cc b/deal.II/source/lac/parallel_vector.cc
new file mode 100644 (file)
index 0000000..045088f
--- /dev/null
@@ -0,0 +1,23 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_vector.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+#include "parallel_vector.inst"
+
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/deal.II/source/lac/parallel_vector.inst.in b/deal.II/source/lac/parallel_vector.inst.in
new file mode 100644 (file)
index 0000000..534d690
--- /dev/null
@@ -0,0 +1,36 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+for (SCALAR : REAL_SCALARS)
+{
+  namespace parallel
+  \{
+    namespace distributed
+    \{
+      template class Vector<SCALAR>;
+    \}
+  \}
+}
+
+for (S1, S2 : REAL_SCALARS)
+{
+  namespace parallel
+  \{
+    namespace distributed
+    \{
+      template void Vector<S1>::reinit<S2> (const Vector<S2>&,
+                                            const bool);
+    \}
+  \}
+}
index 34437d08cfac3034461f82cdcfe68d10d81998a7..a1cf69a5da18f73e9851ae5b405e6274603dcc84 100644 (file)
@@ -15,6 +15,8 @@
 #include <deal.II/lac/vector_memory.h>
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/petsc_parallel_vector.h>
index 1c7bebce246eb54d3e9bbc230730add3bb96d7ff..a14396780c2815f486fe4b1225e51ad1ce572ec8 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
-namespace
-{
-                                    /**
-                                     * Adjust vectors on all levels to
-                                     * correct size.  Here, we just
-                                     * count the numbers of degrees
-                                     * of freedom on each level and
-                                     * @p reinit each level vector
-                                     * to this length.
-                                      * For compatibility reasons with
-                                      * the next function
-                                      * the target_component is added
-                                      * here but is not used.
-                                     */
-  template <int dim, typename number, int spacedim>
-  void
-  reinit_vector (const dealii::MGDoFHandler<dim,spacedim> &mg_dof,
-                std::vector<unsigned int> ,
-                MGLevelObject<dealii::Vector<number> > &v)
-  {
-    for (unsigned int level=v.get_minlevel();
-        level<=v.get_maxlevel();++level)
-      {
-       unsigned int n = mg_dof.n_dofs (level);
-       v[level].reinit(n);
-      }
-
-  }
-
-
-                                    /**
-                                     * Adjust vectors on all levels to
-                                     * correct size.  Here, we just
-                                     * count the numbers of degrees
-                                     * of freedom on each level and
-                                     * @p reinit each level vector
-                                     * to this length. The target_component
-                                      * is handed to MGTools::count_dofs_per_block.
-                                      * See for documentation there.
-                                     */
-  template <int dim, typename number, int spacedim>
-  void
-  reinit_vector (const dealii::MGDoFHandler<dim,spacedim> &mg_dof,
-                std::vector<unsigned int> target_component,
-                MGLevelObject<BlockVector<number> > &v)
-  {
-    const unsigned int n_blocks = mg_dof.get_fe().n_blocks();
-    if (target_component.size()==0)
-      {
-        target_component.resize(n_blocks);
-        for (unsigned int i=0;i<n_blocks;++i)
-          target_component[i] = i;
-      }
-    Assert(target_component.size()==n_blocks,
-          ExcDimensionMismatch(target_component.size(),n_blocks));
-    const unsigned int max_block
-      = *std::max_element (target_component.begin(),
-                          target_component.end());
-    const unsigned int n_target_blocks = max_block + 1;
-
-    std::vector<std::vector<unsigned int> >
-      ndofs(mg_dof.get_tria().n_levels(),
-           std::vector<unsigned int>(n_target_blocks));
-    MGTools::count_dofs_per_block (mg_dof, ndofs, target_component);
-
-    for (unsigned int level=v.get_minlevel();
-        level<=v.get_maxlevel();++level)
-      {
-       v[level].reinit(n_target_blocks);
-       for (unsigned int b=0; b<n_target_blocks; ++b)
-         v[level].block(b).reinit(ndofs[level][b]);
-       v[level].collect_sizes();
-      }
-  }
-}
-
-
 
 template <typename VECTOR>
 template <int dim, int spacedim>
index 7580a57845e48d10cdcffeed8a8ca9998305b5c5..16c7ff58a9ea66d0c5d3d6af9cdbf26962854a73 100644 (file)
@@ -17,6 +17,8 @@
 #include <deal.II/base/memory_consumption.h>
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 71ef757e4a8f4ffa775e65ae4b448b52e36976e4..1b24bd14230eac4097f61bc5909003d2da467d7b 100644 (file)
@@ -20,6 +20,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index fba82bb5132d1b0518514239f334862cf55d2c1d..0cfde2bd5ac5746a9d4e8c3d977ae1fd8e3edda1 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 47fff5aae6fe511a461ee8925b61fd81ce345e3c..a7445fb44d6cfd486748e9f098ecd397f8d461bb 100644 (file)
@@ -19,6 +19,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
index 032bb4ab7c7889c7ea23bcf9baeb5795110874aa..952424df26794803ff3959e90db195e6094cb0b5 100644 (file)
@@ -13,6 +13,8 @@
 
 #include <deal.II/lac/vector.h>
 #include <deal.II/lac/block_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <deal.II/lac/parallel_block_vector.h>
 #include <deal.II/lac/petsc_vector.h>
 #include <deal.II/lac/petsc_block_vector.h>
 #include <deal.II/lac/trilinos_vector.h>
diff --git a/tests/mpi/parallel_partitioner_01.cc b/tests/mpi/parallel_partitioner_01.cc
new file mode 100644 (file)
index 0000000..b93ee0f
--- /dev/null
@@ -0,0 +1,136 @@
+//---------------------  parallel_partitioner_01.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------  parallel_partitioner_01.cc  -----------------------
+
+// check n_ghost_indices() and is_ghost_entry(), similar to
+// parallel_vector_09.cc test case
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns some indices and all
+                                   // are ghosting elements owned by the first
+                                   // three processors; some entries lie right
+                                   // around the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+  Utilities::MPI::Partitioner v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // check the number of ghosts everywhere
+                               // (counted from the layout above)
+  if (myid == 0)
+    {
+      AssertDimension (v.n_ghost_indices(), 5);
+    }
+  else if (myid == 1)
+    {
+      AssertDimension (v.n_ghost_indices(), 8);
+    }
+  else if (myid == 2)
+    {
+      AssertDimension (v.n_ghost_indices(), 7);
+    }
+  else
+    {
+      AssertDimension (v.n_ghost_indices(), 10);
+    }
+
+                               // check that 13 is a ghost only on non-owning
+                               // processors
+  if (myid == 0)
+    {
+      Assert (v.is_ghost_entry (13) == false, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.is_ghost_entry (13) == true, ExcInternalError());
+    }
+
+                               // check that 27 is not a ghost anywhere
+  Assert (v.is_ghost_entry (27) == false, ExcInternalError());
+  if (myid == 0)
+    {
+      Assert (v.in_local_range (27) == true, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.in_local_range (27) == false, ExcInternalError());
+    }
+
+                               // the element with global index 'set' is a
+                               // ghost everywhere except on its owner
+  if (myid == 1)
+    {
+      Assert (v.is_ghost_entry (set) == false, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.is_ghost_entry (set) == true, ExcInternalError());
+    }
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_partitioner_01").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
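
For reference, the ghost counts asserted above follow directly from the ownership layout: rank 0 owns [0,200) and therefore only ghosts {200, 201, 400, 401, 403}, i.e. 5 entries; rank 1 owns [200,399) and ghosts {1, 2, 13, 198, 199, 400, 401, 403}, i.e. 8; rank 2 owns [399,597) and ghosts the remaining 7 of the ten requested indices; every higher rank owns none of them and hence ghosts all 10.
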
diff --git a/tests/mpi/parallel_partitioner_01/ncpu_10/cmp/generic b/tests/mpi/parallel_partitioner_01/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_partitioner_01/ncpu_4/cmp/generic b/tests/mpi/parallel_partitioner_01/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
diff --git a/tests/mpi/parallel_partitioner_02.cc b/tests/mpi/parallel_partitioner_02.cc
new file mode 100644 (file)
index 0000000..180a175
--- /dev/null
@@ -0,0 +1,109 @@
+//---------------------  parallel_partitioner_02.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------  parallel_partitioner_02.cc  -----------------------
+
+// check n_import_indices on test case from parallel_partitioner_01
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns some indices and all
+                                   // are ghosting elements owned by the first
+                                   // three processors; some entries lie right
+                                   // around the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  if (myid < 6)
+    {
+      unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                        2*set+1, 2*set+3};
+      local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+    }
+
+  Utilities::MPI::Partitioner v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // check the number of import indices
+                               // everywhere: the count from the layout above
+                               // times the number of processors that ghost
+                               // these entries
+  const unsigned int n_procs_with_ghosts = std::min (numproc-1, 5U);
+  if (myid == 0)
+    {
+      AssertDimension (v.n_import_indices(), 5*n_procs_with_ghosts);
+    }
+  else if (myid == 1)
+    {
+      AssertDimension (v.n_import_indices(), 2*n_procs_with_ghosts);
+    }
+  else if (myid == 2)
+    {
+      AssertDimension (v.n_import_indices(), 3*n_procs_with_ghosts);
+    }
+  else
+    {
+      AssertDimension (v.n_import_indices(), 0);
+    }
+
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_partitioner_02").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_partitioner_02/ncpu_10/cmp/generic b/tests/mpi/parallel_partitioner_02/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_partitioner_02/ncpu_4/cmp/generic b/tests/mpi/parallel_partitioner_02/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
diff --git a/tests/mpi/parallel_partitioner_03.cc b/tests/mpi/parallel_partitioner_03.cc
new file mode 100644 (file)
index 0000000..0ad9471
--- /dev/null
@@ -0,0 +1,115 @@
+//---------------------  parallel_partitioner_03.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------  parallel_partitioner_03.cc  -----------------------
+
+// let processors write ghost_targets, import_indices, import_targets to file
+
+#include "../tests.h"
+#include "coarse_grid_common.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns some indices and all
+                                   // are ghosting elements owned by the first
+                                   // three processors; some entries lie right
+                                   // around the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+  Utilities::MPI::Partitioner v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // write the info on ghost processors and import indices to file
+  {
+    std::ofstream file((std::string("parallel_partitioner_03/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(myid)).c_str());
+    file << "**** proc " << myid << std::endl;
+    file << "ghost targets: ";
+    for (unsigned int i=0; i<v.ghost_targets().size(); ++i)
+      file << "[" << v.ghost_targets()[i].first << "/"
+          << v.ghost_targets()[i].second << "] ";
+    file << std::endl;
+    file << "import targets: ";
+    for (unsigned int i=0; i<v.import_targets().size(); ++i)
+      file << "[" << v.import_targets()[i].first << "/"
+          << v.import_targets()[i].second << "] ";
+    file << std::endl;
+    file << "import indices:" << std::endl;
+    for (unsigned int i=0; i<v.import_indices().size(); ++i)
+      file << "[" << v.import_indices()[i].first << "/"
+          << v.import_indices()[i].second << ")" << std::endl;
+    file << "****" << std::endl;
+  }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  if (myid==0)
+    {
+      for (unsigned int i=0;i<numproc;++i)
+        {
+          cat_file((std::string("parallel_partitioner_03/ncpu_") + Utilities::int_to_string(Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD)) + "/dat." + Utilities::int_to_string(i)).c_str());
+        }
+
+    }
+
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_partitioner_03").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
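
In the expected output below, every "ghost targets" and "import targets" pair is printed as [rank/count]: on rank 0, "[1/2]" under ghost targets means rank 0 receives 2 ghost entries owned by rank 1, while "[1/5]" under import targets means rank 1 ghosts 5 of rank 0's locally owned entries; the "import indices" lines list the corresponding locally owned positions as half-open ranges [begin/end).
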
diff --git a/tests/mpi/parallel_partitioner_03/ncpu_10/cmp/generic b/tests/mpi/parallel_partitioner_03/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..9e82d3c
--- /dev/null
@@ -0,0 +1,116 @@
+
+DEAL:0::numproc=10
+**** proc 0
+ghost targets: [1/2] [2/3] 
+import targets: [1/5] [2/5] [3/5] [4/5] [5/5] [6/5] [7/5] [8/5] [9/5] 
+import indices:
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+****
+
+**** proc 1
+ghost targets: [0/5] [2/3] 
+import targets: [0/2] [2/2] [3/2] [4/2] [5/2] [6/2] [7/2] [8/2] [9/2] 
+import indices:
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+[0/2)
+****
+
+**** proc 2
+ghost targets: [0/5] [1/2] 
+import targets: [0/3] [1/3] [3/3] [4/3] [5/3] [6/3] [7/3] [8/3] [9/3] 
+import indices:
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+****
+
+**** proc 3
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 4
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 5
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 6
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 7
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 8
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
+**** proc 9
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
diff --git a/tests/mpi/parallel_partitioner_03/ncpu_4/cmp/generic b/tests/mpi/parallel_partitioner_03/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..0b1e5a8
--- /dev/null
@@ -0,0 +1,44 @@
+
+DEAL:0::numproc=4
+**** proc 0
+ghost targets: [1/2] [2/3] 
+import targets: [1/5] [2/5] [3/5] 
+import indices:
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+[1/3)
+[13/14)
+[198/200)
+****
+
+**** proc 1
+ghost targets: [0/5] [2/3] 
+import targets: [0/2] [2/2] [3/2] 
+import indices:
+[0/2)
+[0/2)
+[0/2)
+****
+
+**** proc 2
+ghost targets: [0/5] [1/2] 
+import targets: [0/3] [1/3] [3/3] 
+import indices:
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+[1/3)
+[4/5)
+****
+
+**** proc 3
+ghost targets: [0/5] [1/2] [2/3] 
+import targets: 
+import indices:
+****
+
diff --git a/tests/mpi/parallel_partitioner_04.cc b/tests/mpi/parallel_partitioner_04.cc
new file mode 100644 (file)
index 0000000..b4962d3
--- /dev/null
@@ -0,0 +1,106 @@
+//---------------------  parallel_partitioner_04.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------  parallel_partitioner_04.cc  -----------------------
+
+// check global_to_local and local_to_global on test case from
+// parallel_partitioner_01.cc
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns some indices and all
+                                   // are ghosting elements owned by the first
+                                   // three processors; some entries lie right
+                                   // around the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+  Utilities::MPI::Partitioner v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // check locally owned range
+  for (unsigned int i=my_start; i<my_start+local_size; ++i)
+    {
+      AssertDimension (v.global_to_local(i), i-my_start);
+      AssertDimension (v.local_to_global(i-my_start), i);
+    }
+
+                               // check ghost indices
+  for (unsigned int i=0, count=0; i<10; ++i)
+    if (ghost_indices[i] < my_start || ghost_indices[i] >= my_start+local_size)
+      {
+       AssertDimension (local_size+count, v.global_to_local(ghost_indices[i]));
+       AssertDimension (ghost_indices[i], v.local_to_global(local_size+count));
+       ++count;
+      }
+
+                               // check that loc->glob and glob->loc form an
+                               // identity operation
+  for (unsigned int i=0; i<local_size+v.n_ghost_indices(); ++i)
+    AssertDimension (i, v.global_to_local(v.local_to_global(i)));
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_partitioner_04").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_partitioner_04/ncpu_10/cmp/generic b/tests/mpi/parallel_partitioner_04/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_partitioner_04/ncpu_4/cmp/generic b/tests/mpi/parallel_partitioner_04/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_01.cc b/tests/mpi/parallel_vector_01.cc
new file mode 100644 (file)
index 0000000..e3ec18a
--- /dev/null
@@ -0,0 +1,87 @@
+//--------------------------  parallel_vector_01.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_01.cc  -----------------------
+
+// check correct initialization of parallel vector without any ghosts
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices; this test
+                                   // uses no ghost entries
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
+
+                                     // set local values
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v*=2.0;
+
+  if (myid == 0)
+    {
+      deallog << myid*2 << ":" << v(myid*2) << std::endl;
+      deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
+    }
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+                               // check l2 norm
+  const double l2_norm = v.l2_norm();
+  if (myid == 0)
+    deallog << "L2 norm: " << l2_norm << std::endl;
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_01").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_01/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_01/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..bcf8320
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=10
+DEAL:0::0:0
+DEAL:0::1:2.000
+DEAL:0::L2 norm: 99.40
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_01/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_01/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..819dab6
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=4
+DEAL:0::0:0
+DEAL:0::1:2.000
+DEAL:0::L2 norm: 23.66
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_02.cc b/tests/mpi/parallel_vector_02.cc
new file mode 100644 (file)
index 0000000..1d77b3b
--- /dev/null
@@ -0,0 +1,98 @@
+//--------------------------  parallel_vector_02.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_02.cc  -----------------------
+
+// check addition into ghosts for parallel vector
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices and all
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                                     // set local values and check them
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v*=2.0;
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+                               // set ghost dof, compress
+  v(1) = 7;
+  v.compress();
+
+  if (myid == 0)
+    {
+      deallog << myid*2 << ":" << v(myid*2) << std::endl;
+      deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
+    }
+
+                               // import ghosts onto all procs
+  v.update_ghost_values();
+  Assert (v(1) == 7. * numproc, ExcInternalError());
+
+                               // check l2 norm
+  const double l2_norm = v.l2_norm();
+  if (myid == 0)
+    deallog << "L2 norm: " << l2_norm << std::endl;
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_02").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
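
The pattern exercised above is the basic write-compress-read cycle for the new vector class: each processor writes into the ghosted entry 1, the (additive) compress() accumulates all contributions on the owning processor -- hence the expected value 7 * numproc -- and update_ghost_values() then makes the summed value visible on every process. A minimal sketch of that cycle, with locally_owned, locally_relevant, ghost_index and local_contribution as placeholder names:

    // sketch: accumulate contributions into a ghosted entry, then read the result back
    parallel::distributed::Vector<double> u (locally_owned, locally_relevant,
                                             MPI_COMM_WORLD);
    u(ghost_index) = local_contribution;   // every process writes, owner or not
    u.compress();                          // default: add all contributions on the owner
    u.update_ghost_values();               // send the summed value back to the ghosts
    const double result = u(ghost_index);  // now consistent on every process
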
diff --git a/tests/mpi/parallel_vector_02/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_02/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..ba9e80a
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=10
+DEAL:0::0:0
+DEAL:0::1:70.00
+DEAL:0::L2 norm: 121.6
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_02/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_02/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..9b1b542
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=4
+DEAL:0::0:0
+DEAL:0::1:28.00
+DEAL:0::L2 norm: 36.61
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_03.cc b/tests/mpi/parallel_vector_03.cc
new file mode 100644 (file)
index 0000000..21e1a2b
--- /dev/null
@@ -0,0 +1,100 @@
+//--------------------------  parallel_vector_03.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_03.cc  -----------------------
+
+// check set of ghosts for parallel vector
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices and all
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                                     // set local values and check them
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v*=2.0;
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+                               // set ghost dof on remote processors,
+                               // compress (no addition)
+  if (myid > 0)
+    v(1) = 7;
+  v.compress(/* add_ghost_data = */ false);
+
+  if (myid == 0)
+    {
+      deallog << myid*2 << ":" << v(myid*2) << std::endl;
+      deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
+    }
+
+                               // import ghosts onto all procs
+  v.update_ghost_values();
+  Assert (v(1) == 7.0, ExcInternalError());
+
+                               // check l2 norm
+  const double l2_norm = v.l2_norm();
+  if (myid == 0)
+    deallog << "L2 norm: " << l2_norm << std::endl;
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_03").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
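
Compared with the previous test, the compress(false) call above uses insert rather than add semantics: the value 7 written into the ghost copies on the remote processors replaces the owner's entry instead of being accumulated onto it, so v(1) ends up as exactly 7 on every process, as the assertion documents.
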
diff --git a/tests/mpi/parallel_vector_03/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_03/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..35d7099
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=10
+DEAL:0::0:0
+DEAL:0::1:7.000
+DEAL:0::L2 norm: 99.62
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_03/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_03/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f4e907
--- /dev/null
@@ -0,0 +1,6 @@
+
+DEAL:0::numproc=4
+DEAL:0::0:0
+DEAL:0::1:7.000
+DEAL:0::L2 norm: 24.60
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_04.cc b/tests/mpi/parallel_vector_04.cc
new file mode 100644 (file)
index 0000000..deac2f2
--- /dev/null
@@ -0,0 +1,121 @@
+//--------------------------  parallel_vector_04.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_04.cc  -----------------------
+
+// check that operator= resets ghosts, both if they have been set and if they
+// have not been set
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices and all
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                                     // set local values and check them
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v*=2.0;
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+                               // set ghost dof on remote processors, no
+                               // compress called
+  if (myid > 0)
+    v(1) = 7;
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+  if (myid > 0)
+    Assert (v(1) == 7.0, ExcInternalError());
+
+                               // reset to zero
+  v = 0;
+
+  Assert(v(myid*2) == 0., ExcInternalError());
+  Assert(v(myid*2+1) == 0., ExcInternalError());
+
+                               // check that everything remains zero also
+                               // after compress
+  v.compress();
+
+  Assert(v(myid*2) == 0., ExcInternalError());
+  Assert(v(myid*2+1) == 0., ExcInternalError());
+
+                               // set element 1 on owning process to
+                               // something nonzero
+  if (myid == 0)
+    v(1) = 2.;
+  if (myid > 0)
+    Assert (v(1) == 0., ExcInternalError());
+
+                               // check that all processors get the correct
+                               // value again, and that it is erased by
+                               // operator=
+  v.update_ghost_values();
+
+  Assert (v(1) == 2.0, ExcInternalError());
+
+  v = 0;
+  Assert (v(1) == 0.0, ExcInternalError());
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_04").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_04/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_04/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_04/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_04/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_05.cc b/tests/mpi/parallel_vector_05.cc
new file mode 100644 (file)
index 0000000..2946e2c
--- /dev/null
@@ -0,0 +1,90 @@
+//--------------------------  parallel_vector_05.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_05.cc  -----------------------
+
+// check that compress(add) with zero add does not change the vector entry
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices and all
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                                     // set local values and check them
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v*=2.0;
+
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+                               // set ghost dof on remote processors,
+                               // compress
+  if (myid > 0)
+    v(1) = 0;
+
+  v.compress();
+
+                               // check that nothing has changed
+  Assert(v(myid*2) == myid*4.0, ExcInternalError());
+  Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_05").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_05/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_05/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_05/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_05/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_06.cc b/tests/mpi/parallel_vector_06.cc
new file mode 100644 (file)
index 0000000..0c32721
--- /dev/null
@@ -0,0 +1,239 @@
+//--------------------------  parallel_vector_06.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_06.cc  -----------------------
+
+// check global reduction operation (norms, operator==, operator!=) on
+// parallel vector
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // the first eight processors (ranks 0-7)
+                                   // own 2 indices each (the other processors
+                                   // do not own any dof), and all processors
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(std::min(16U,numproc*2));
+  if (myid < 8)
+    local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_owned, MPI_COMM_WORLD);
+
+                                     // set local values
+  if (myid < 8)
+    {
+      v(myid*2)=myid*2.0;
+      v(myid*2+1)=myid*2.0+1.0;
+    }
+  v.compress();
+  v*=2.0;
+  if (myid < 8)
+    {
+      Assert(v(myid*2) == myid*4.0, ExcInternalError());
+      Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
+    }
+
+                               // check l2 norm
+  {
+    const double l2_norm = v.l2_norm();
+    if (myid == 0)
+      deallog << "l2 norm: " << l2_norm << std::endl;
+  }
+
+                               // check l1 norm
+  {
+    const double l1_norm = v.l1_norm();
+    if (myid == 0)
+      deallog << "l1 norm: " << l1_norm << std::endl;
+  }
+
+                               // check linfty norm
+  {
+    const double linfty_norm = v.linfty_norm();
+    if (myid == 0)
+      deallog << "linfty norm: " << linfty_norm << std::endl;
+  }
+
+                               // check lp norm
+  {
+    const double lp_norm = v.lp_norm(2.2);
+    if (myid == 0)
+      deallog << "l2.2 norm: " << lp_norm << std::endl;
+
+    Assert (std::fabs (v.l2_norm() - v.lp_norm(2.0)) < 1e-14,
+           ExcInternalError());
+  }
+
+                               // check mean value (mean times vector size
+                               // should equal the l1 norm here since we have
+                               // no negative entries)
+  {
+    const double mean = v.mean_value();
+    if (myid == 0)
+      deallog << "Mean value: " << mean << std::endl;
+
+    Assert (std::fabs (mean * v.size() - v.l1_norm()) < 1e-15,
+           ExcInternalError());
+  }
+                               // check inner product
+  {
+    const double norm_sqr = v.norm_sqr();
+    Assert (std::fabs(v * v - norm_sqr) < 1e-15,
+           ExcInternalError());
+    parallel::distributed::Vector<double> v2;
+    v2 = v;
+    Assert (std::fabs(v2 * v - norm_sqr) < 1e-15,
+           ExcInternalError());
+
+    if(myid<8)
+      v2.local_element(0) = -1;
+    const double inner_prod = v * v2;
+    if (myid == 0)
+      deallog << "Inner product: " << inner_prod << std::endl;
+  }
+
+                               // check operator ==
+  {
+    parallel::distributed::Vector<double> v2 (v);
+    bool equal = (v2 == v);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+
+    bool not_equal = (v2 != v);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+                               // change v2 on one proc only
+    if (myid == 0)
+      v2.local_element(1) = 2.2212;
+
+    equal = (v2 == v);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (v2 != v);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+                               // reset
+    v2 = v;
+    equal = (v2 == v);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (v2 != v);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+
+                               // change some value on all procs
+    if (myid < 8)
+      v2.local_element(0) = -1;
+    equal = (v2 == v);
+    if (myid == 0)
+      deallog << " v==v2 ? " << equal << std::endl;
+    not_equal = (v2 != v);
+    if (myid == 0)
+      deallog << " v!=v2 ? " << not_equal << std::endl;
+  }
+
+                               // check all_zero
+  {
+    bool allzero = v.all_zero();
+    if (myid == 0)
+      deallog << " v==0 ? " << allzero << std::endl;
+    parallel::distributed::Vector<double> v2;
+    v2.reinit (v);
+    allzero = v2.all_zero();
+    if (myid == 0)
+      deallog << " v2==0 ? " << allzero << std::endl;
+
+                               // now change one element to nonzero
+    if (myid == 0)
+      v2.local_element(1) = 1;
+    allzero = v2.all_zero();
+    if (myid == 0)
+      deallog << " v2==0 ? " << allzero << std::endl;
+  }
+
+
+                               // check is_non_negative
+  {
+    bool allnonneg = v.is_non_negative();
+    if (myid == 0)
+      deallog << " v>=0 ? " << allnonneg << std::endl;
+    parallel::distributed::Vector<double> v2, v3;
+
+                               // vector where every owning processor puts a
+                               // negative entry
+    v2 = v;
+    if (myid < 8)
+      v2.local_element(0) = -1;
+    allnonneg = v2.is_non_negative();
+    if (myid == 0)
+      deallog << " v2>=0 ? " << allnonneg << std::endl;
+
+                               // zero vector
+    v3.reinit (v2);
+    allnonneg = v3.is_non_negative();
+    if (myid == 0)
+      deallog << " v3>=0 ? " << allnonneg << std::endl;
+
+                               // only one processor has a negative entry
+    v3 = v;
+    if (myid == 1)
+      v3.local_element(0) = -1;
+    allnonneg = v3.is_non_negative();
+    if (myid == 0)
+      deallog << " v3>=0 ? " << allnonneg << std::endl;
+  }
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_06").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_06/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_06/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..d147bb7
--- /dev/null
@@ -0,0 +1,24 @@
+
+DEAL:0::numproc=10
+DEAL:0::l2 norm: 70.43
+DEAL:0::l1 norm: 240.0
+DEAL:0::linfty norm: 30.00
+DEAL:0::l2.2 norm: 63.47
+DEAL:0::Mean value: 15.00
+DEAL:0::Inner product: 2608.
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0:: v>=0 ? 1
+DEAL:0:: v2>=0 ? 0
+DEAL:0:: v3>=0 ? 1
+DEAL:0:: v3>=0 ? 0
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_06/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_06/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..d168316
--- /dev/null
@@ -0,0 +1,24 @@
+
+DEAL:0::numproc=4
+DEAL:0::l2 norm: 23.66
+DEAL:0::l1 norm: 56.00
+DEAL:0::linfty norm: 14.00
+DEAL:0::l2.2 norm: 22.04
+DEAL:0::Mean value: 7.000
+DEAL:0::Inner product: 312.0
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==v2 ? 1
+DEAL:0:: v!=v2 ? 0
+DEAL:0:: v==v2 ? 0
+DEAL:0:: v!=v2 ? 1
+DEAL:0:: v==0 ? 0
+DEAL:0:: v2==0 ? 1
+DEAL:0:: v2==0 ? 0
+DEAL:0:: v>=0 ? 1
+DEAL:0:: v2>=0 ? 0
+DEAL:0:: v3>=0 ? 1
+DEAL:0:: v3>=0 ? 0
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_07.cc b/tests/mpi/parallel_vector_07.cc
new file mode 100644 (file)
index 0000000..03aa98a
--- /dev/null
@@ -0,0 +1,115 @@
+//--------------------------  parallel_vector_07.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_07.cc  -----------------------
+
+// check that access to elements and ghosts works correctly
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns some indices and all
+                                   // are ghosting elements owned by the first
+                                   // three processors; some entries lie right
+                                   // around the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // set a few of the local elements
+  for (unsigned i=0; i<local_size; ++i)
+    v.local_element(i) = 2.0 * (i + my_start);
+
+  v.compress();
+  v.update_ghost_values();
+
+                               // check local values for correctness
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == 2.0 * (i + my_start), ExcInternalError());
+
+                               // check local values with two different
+                               // access operators
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(local_owned.nth_index_in_set (i)), ExcInternalError());
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(i+my_start), ExcInternalError());
+  
+                               // check non-local entries on all processors
+  for (unsigned int i=0; i<10; ++i)
+    Assert (v(ghost_indices[i])== 2. * ghost_indices[i], ExcInternalError());
+
+                               // compare access through operator() with
+                               // local_element() for the ghost entries
+  for (unsigned int i=0; i<10; ++i)
+    if (ghost_indices[i] < my_start)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i), ExcInternalError());
+
+  if (myid == 0)
+    for (unsigned int i=5; i<10; ++i)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i-5), ExcInternalError());
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_07").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_07/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_07/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_07/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_07/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
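
The test above exercises the basic write/compress/update_ghost_values cycle of the new distributed vector. The following is a minimal sketch of that cycle using only the calls that appear in the test; the helper name ghost_exchange_sketch and the simplified two-entries-per-process layout are chosen for illustration and are not taken from the commit.

#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

double ghost_exchange_sketch ()
{
  const unsigned int myid    = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);

                                   // every process owns two consecutive
                                   // entries; index 1 is ghosted everywhere
  IndexSet local_owned (numproc*2);
  local_owned.add_range (myid*2, myid*2+2);
  IndexSet local_relevant (numproc*2);
  local_relevant = local_owned;
  local_relevant.add_index (1);

  parallel::distributed::Vector<double> v (local_owned, local_relevant,
                                           MPI_COMM_WORLD);

                                   // write into the locally owned range
  for (unsigned int i=0; i<v.local_size(); ++i)
    v.local_element(i) = myid*2 + i;

                                   // make the vector globally consistent and
                                   // fetch the ghost values from their owners
  v.compress();
  v.update_ghost_values();

                                   // the ghosted entry is now readable on
                                   // every process
  return v(1);
}
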
diff --git a/tests/mpi/parallel_vector_08.cc b/tests/mpi/parallel_vector_08.cc
new file mode 100644 (file)
index 0000000..6facccc
--- /dev/null
@@ -0,0 +1,162 @@
+//--------------------------  parallel_vector_08.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_08.cc  -----------------------
+
+// check parallel_vector::copy_from to update ghost values. Same vector layout
+// as in parallel_vector_07.cc
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns a contiguous range
+                                   // of indices, and all processors ghost
+                                   // elements owned by the first three
+                                   // processors; some entries lie right at
+                                   // the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+                               // v has ghosts, w has none. set some entries
+                               // on w, copy into v and check if they are
+                               // there
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+  parallel::distributed::Vector<double> w(local_owned, local_owned, MPI_COMM_WORLD);
+
+                               // set a few of the local elements
+  for (unsigned int i=0; i<local_size; ++i)
+    w.local_element(i) = 2.0 * (i + my_start);
+
+  v.compress();
+  v.copy_from(w);
+  v.update_ghost_values();
+
+                               // check local values for correctness
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == 2.0 * (i + my_start), ExcInternalError());
+
+                               // check local values with two different
+                               // access operators
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(local_owned.nth_index_in_set (i)), ExcInternalError());
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(i+my_start), ExcInternalError());
+  
+                               // check non-local entries on all processors
+  for (unsigned int i=0; i<10; ++i)
+    Assert (v(ghost_indices[i])== 2. * ghost_indices[i], ExcInternalError());
+
+                               // compare direct access via local_element()
+                               // with access via operator()
+  for (unsigned int i=0; i<10; ++i)
+    if (ghost_indices[i] < my_start)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i), ExcInternalError());
+
+  if (myid == 0)
+    for (unsigned int i=5; i<10; ++i)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i-5), ExcInternalError());
+
+
+                               // now the same again, but import ghosts
+                               // through the call to copy_from
+  v.reinit (local_owned, local_relevant, MPI_COMM_WORLD);
+  v.copy_from(w, true);
+
+                               // check local values for correctness
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == 2.0 * (i + my_start), ExcInternalError());
+
+                               // check local values with two different
+                               // access operators
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(local_owned.nth_index_in_set (i)), ExcInternalError());
+  for (unsigned int i=0; i<local_size; ++i)
+    Assert (v.local_element(i) == v(i+my_start), ExcInternalError());
+  
+                               // check non-local entries on all processors
+  for (unsigned int i=0; i<10; ++i)
+    Assert (v(ghost_indices[i])== 2. * ghost_indices[i], ExcInternalError());
+
+                               // compare direct access via local_element()
+                               // with access via operator()
+  for (unsigned int i=0; i<10; ++i)
+    if (ghost_indices[i] < my_start)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i), ExcInternalError());
+
+  if (myid == 0)
+    for (unsigned int i=5; i<10; ++i)
+      Assert (v(ghost_indices[i])==v.local_element(local_size+i-5), ExcInternalError());
+
+
+                               // now do not call import_ghosts and check
+                               // whether ghosts really are zero
+  v.reinit (local_owned, local_relevant, MPI_COMM_WORLD);
+  v.copy_from(w, false);
+
+                               // check non-local entries on all processors
+  for (unsigned int i=0; i<10; ++i)
+    if (local_owned.is_element (ghost_indices[i]) == false)
+      Assert (v(ghost_indices[i]) == 0., ExcInternalError());
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_08").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_08/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_08/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_08/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_08/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
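
A condensed sketch of the copy_from semantics that parallel_vector_08 checks above: copying from a non-ghosted vector into a ghosted one, once followed by an explicit ghost exchange and once letting copy_from import the ghost values via its second argument. The helper name copy_from_sketch and the simplified two-entries-per-process layout are illustrative only; all calls are the ones exercised by the test.

#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

void copy_from_sketch ()
{
  const unsigned int myid    = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);

                                   // simplified layout: two owned entries per
                                   // process, index 1 ghosted everywhere
  IndexSet local_owned (numproc*2);
  local_owned.add_range (myid*2, myid*2+2);
  IndexSet local_relevant (numproc*2);
  local_relevant = local_owned;
  local_relevant.add_index (1);

  parallel::distributed::Vector<double> w (local_owned, local_owned,    MPI_COMM_WORLD);
  parallel::distributed::Vector<double> v (local_owned, local_relevant, MPI_COMM_WORLD);

                                   // fill the non-ghosted vector
  for (unsigned int i=0; i<w.local_size(); ++i)
    w.local_element(i) = myid*2 + i;

                                   // copy the owned range only, then exchange
                                   // the ghost values explicitly
  v.copy_from (w);
  v.update_ghost_values();

                                   // or let copy_from import the ghost values
                                   // right away ...
  v.reinit (local_owned, local_relevant, MPI_COMM_WORLD);
  v.copy_from (w, true);

                                   // ... or skip the import, in which case the
                                   // ghost entries remain zero
  v.reinit (local_owned, local_relevant, MPI_COMM_WORLD);
  v.copy_from (w, false);
}
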
diff --git a/tests/mpi/parallel_vector_09.cc b/tests/mpi/parallel_vector_09.cc
new file mode 100644 (file)
index 0000000..644597e
--- /dev/null
@@ -0,0 +1,135 @@
+//--------------------------  parallel_vector_09.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_09.cc  -----------------------
+
+// check n_ghost_entries() and is_ghost_entry()
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+  const unsigned int set = 200;
+  AssertIndexRange (numproc, set-2);
+  const unsigned int local_size = set - myid;
+  unsigned int global_size = 0;
+  unsigned int my_start = 0;
+  for (unsigned int i=0; i<numproc; ++i)
+    {
+      global_size += set - i;
+      if (i<myid)
+       my_start += set - i;
+    }
+                                  // each processor owns a contiguous range
+                                   // of indices, and all processors ghost
+                                   // elements owned by the first three
+                                   // processors; some entries lie right at
+                                   // the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  unsigned int ghost_indices [10] = {1, 2, 13, set-2, set-1, set, set+1, 2*set,
+                                    2*set+1, 2*set+3};
+  local_relevant.add_indices (&ghost_indices[0], &ghost_indices[0]+10);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+
+                               // check the number of ghost entries on each
+                               // processor (counted from the list above)
+  if (myid == 0)
+    {
+      AssertDimension (v.n_ghost_entries(), 5);
+    }
+  else if (myid == 1)
+    {
+      AssertDimension (v.n_ghost_entries(), 8);
+    }
+  else if (myid == 2)
+    {
+      AssertDimension (v.n_ghost_entries(), 7);
+    }
+  else
+    {
+      AssertDimension (v.n_ghost_entries(), 10);
+    }
+
+                               // check that index 13 is a ghost only on the
+                               // processors that do not own it
+  if (myid == 0)
+    {
+      Assert (v.is_ghost_entry (13) == false, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.is_ghost_entry (13) == true, ExcInternalError());
+    }
+
+                               // check that index 27 is a ghost nowhere
+                               // (it is locally owned on processor 0)
+  Assert (v.is_ghost_entry (27) == false, ExcInternalError());
+  if (myid == 0)
+    {
+      Assert (v.in_local_range (27) == true, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.in_local_range (27) == false, ExcInternalError());
+    }
+
+                               // the element with index 'set' is owned by
+                               // processor 1 and is a ghost everywhere else
+  if (myid == 1)
+    {
+      Assert (v.is_ghost_entry (set) == false, ExcInternalError());
+    }
+  else
+    {
+      Assert (v.is_ghost_entry (set) == true, ExcInternalError());
+    }
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_09").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_09/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_09/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_09/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_09/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
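
For reference, the ghost counts asserted above follow directly from the layout: with set = 200, process 0 owns [0,200), process 1 owns [200,399) and process 2 owns [399,597), so of the ten ghost indices {1, 2, 13, 198, 199, 200, 201, 400, 401, 403} process 0 ghosts the 5 it does not own, process 1 ghosts 8, process 2 ghosts 7, and every later process ghosts all 10. Below is a minimal sketch of the query interface itself; the helper name ghost_query_sketch and the simplified two-entries-per-process layout are illustrative only.

#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

void ghost_query_sketch ()
{
  const unsigned int myid    = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);

  IndexSet local_owned (numproc*2);
  local_owned.add_range (myid*2, myid*2+2);
  IndexSet local_relevant (numproc*2);
  local_relevant = local_owned;
  local_relevant.add_index (1);

  parallel::distributed::Vector<double> v (local_owned, local_relevant,
                                           MPI_COMM_WORLD);

                                   // number of ghost entries stored locally:
                                   // zero on the owner of index 1 (process 0),
                                   // one everywhere else
  const unsigned int n_ghosts = v.n_ghost_entries();

                                   // a global index is a ghost only where it
                                   // is not locally owned; owned indices are
                                   // in the local range instead
  const bool ghost_here = v.is_ghost_entry (1);
  const bool owned_here = v.in_local_range (myid*2);

  (void)n_ghosts; (void)ghost_here; (void)owned_here;
}
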
diff --git a/tests/mpi/parallel_vector_10.cc b/tests/mpi/parallel_vector_10.cc
new file mode 100644 (file)
index 0000000..e4f3fe4
--- /dev/null
@@ -0,0 +1,89 @@
+//--------------------------  parallel_vector_10.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_10.cc  -----------------------
+
+// check that operator= copies values correctly when we also do some
+// operations involving ghost values
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+
+                                  // each processor owns 2 indices and all
+                                   // are ghosting element 1 (the second)
+  IndexSet local_owned(numproc*2);
+  local_owned.add_range(myid*2,myid*2+2);
+  IndexSet local_relevant(numproc*2);
+  local_relevant = local_owned;
+  local_relevant.add_range(1,2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant, MPI_COMM_WORLD);
+  parallel::distributed::Vector<double> w(v);
+
+                                     // set local values and check them
+  v(myid*2)=myid*2.0;
+  v(myid*2+1)=myid*2.0+1.0;
+
+  v.compress();
+  v.update_ghost_values();
+
+                               // check that the value of the ghost is 1.0
+  Assert (v(1) == 1., ExcInternalError());
+
+                               // copy vector
+  w  = v;
+  v *= 2.0;
+
+  v.update_ghost_values();
+  w.update_ghost_values();
+  Assert (v(1) == 2., ExcInternalError());
+  Assert (w(1) == 1., ExcInternalError());
+
+  if (myid == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_10").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_10/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_10/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..999baf1
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=10
+DEAL:0::OK
diff --git a/tests/mpi/parallel_vector_10/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_10/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..6f5d277
--- /dev/null
@@ -0,0 +1,3 @@
+
+DEAL:0::numproc=4
+DEAL:0::OK
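
A compact sketch of the assignment semantics that parallel_vector_10 checks above: operator= makes a copy of the values, so later changes to one vector and its ghost values do not affect the other. The helper name assignment_sketch is illustrative; the layout and calls mirror the test.

#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

void assignment_sketch ()
{
  const unsigned int myid    = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
  const unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);

  IndexSet local_owned (numproc*2);
  local_owned.add_range (myid*2, myid*2+2);
  IndexSet local_relevant (numproc*2);
  local_relevant = local_owned;
  local_relevant.add_range (1, 2);

  parallel::distributed::Vector<double> v (local_owned, local_relevant,
                                           MPI_COMM_WORLD);
  parallel::distributed::Vector<double> w (v);        // copies the layout

  v(myid*2)   = myid*2.0;
  v(myid*2+1) = myid*2.0+1.0;
  v.compress();
  v.update_ghost_values();

  w  = v;                          // copy of the values
  v *= 2.0;                        // does not touch w

  v.update_ghost_values();         // the ghost of index 1 is now 2.0 in v ...
  w.update_ghost_values();         // ... and still 1.0 in w
}
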
diff --git a/tests/mpi/parallel_vector_11.cc b/tests/mpi/parallel_vector_11.cc
new file mode 100644 (file)
index 0000000..c3446e1
--- /dev/null
@@ -0,0 +1,183 @@
+//--------------------------  parallel_vector_11.cc  -----------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2012 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//--------------------------  parallel_vector_11.cc  -----------------------
+
+// check that add, sadd, equ, scale work correctly on a vector where some
+// processors do not own any degrees of freedom
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0) deallog << "numproc=" << numproc << std::endl;
+
+                               // global size 20; local size 3 per processor
+                               // as long as that still fits into 20 entries,
+                               // so processors beyond that own nothing
+  const unsigned int local_size = 3;
+  const unsigned int global_size = std::min(20U, local_size * numproc);
+  const int my_start = std::min (local_size * myid, global_size);
+  const int my_end   = std::min (local_size * (myid+1), global_size);
+  const int actual_local_size = my_end-my_start;
+
+  IndexSet local_owned (global_size);
+  if (my_end > my_start)
+    local_owned.add_range(static_cast<unsigned int>(my_start),
+                         static_cast<unsigned int>(my_end));
+  IndexSet local_relevant(global_size);
+  local_relevant = local_owned;
+  local_relevant.add_index (2);
+
+  parallel::distributed::Vector<double> v(local_owned, local_relevant,
+                                         MPI_COMM_WORLD);
+  AssertDimension (actual_local_size, v.local_size());
+  parallel::distributed::Vector<double> w (v), x(v), y(v);
+
+                               // set local elements
+  for (int i=0; i<actual_local_size; ++i)
+    {
+      v.local_element(i) = i + my_start;
+      w.local_element(i) = 1000 + 2 * (my_start + i);
+      x.local_element(i) = 10000;
+    }
+
+  y = v;
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == i+my_start, ExcInternalError());
+
+  if (myid==0) deallog << "Check add (scalar): ";
+  y.add (42);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == i+my_start+42, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check add (vector): ";
+  y.add (w);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 3*(i+my_start)+1042, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check add (factor, vector): ";
+  y.add (-1., w);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == i+my_start+42, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check add (factor, vector, factor, vector): ";
+  y.add (2., w, -0.5, x);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 5*(i+my_start)+2042-5000,ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check sadd (factor, factor, vector): ";
+  y = v;
+  y.sadd (-3.,2.,v);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i)==(-i-my_start), ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check sadd (factor, factor, vector, factor, vector): ";
+  y.sadd (2.,3.,v, 2., w);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 5*(i+my_start)+2000, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check sadd (factor, factor, vector, factor, vector, factor, vector): ";
+  y.sadd (-1.,1.,v, 2., w, 2., x);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 20000, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check add (factor, vector_1, factor, vector_1): ";
+  y = 0;
+  y.add (1.,v, 3., v);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 4*(i+my_start), ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check operator * (scalar): ";
+  x *= 2.;
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (x.local_element(i) == 20000., ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check operator / (scalar): ";
+  x /= 2.;
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (x.local_element(i) == 10000., ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check scale (vector): ";
+  y.scale (x);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 40000.*(i+my_start), ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check equ (factor, vector): ";
+  y.equ (10., x);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 100000., ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check equ (factor, vector, factor, vector): ";
+  y.equ (10., v, -2., w);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 6.*(i+my_start)-2000, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check equ (factor, vector, factor, vector, factor, vector): ";
+  y.equ (10., v, -2., w, 3., x);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == 6.*(i+my_start)+28000, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+
+  if (myid==0) deallog << "Check equ<float> (factor, vector): ";
+  parallel::distributed::Vector<float> z;
+  z = v;
+  y.equ (1., z);
+  for (int i=0; i<actual_local_size; ++i)
+    Assert (y.local_element(i) == i+my_start, ExcInternalError());
+  if (myid==0) deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::System::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("parallel_vector_11").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/parallel_vector_11/ncpu_10/cmp/generic b/tests/mpi/parallel_vector_11/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..7c92ec6
--- /dev/null
@@ -0,0 +1,17 @@
+
+DEAL:0::numproc=10
+DEAL:0::Check add (scalar): OK
+DEAL:0::Check add (vector): OK
+DEAL:0::Check add (factor, vector): OK
+DEAL:0::Check add (factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector, factor, vector): OK
+DEAL:0::Check add (factor, vector_1, factor, vector_1): OK
+DEAL:0::Check operator * (scalar): OK
+DEAL:0::Check operator / (scalar): OK
+DEAL:0::Check scale (vector): OK
+DEAL:0::Check equ (factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector, factor, vector): OK
+DEAL:0::Check equ<float> (factor, vector): OK
diff --git a/tests/mpi/parallel_vector_11/ncpu_4/cmp/generic b/tests/mpi/parallel_vector_11/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..d6fa84e
--- /dev/null
@@ -0,0 +1,17 @@
+
+DEAL:0::numproc=4
+DEAL:0::Check add (scalar): OK
+DEAL:0::Check add (vector): OK
+DEAL:0::Check add (factor, vector): OK
+DEAL:0::Check add (factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector): OK
+DEAL:0::Check sadd (factor, factor, vector, factor, vector, factor, vector): OK
+DEAL:0::Check add (factor, vector_1, factor, vector_1): OK
+DEAL:0::Check operator * (scalar): OK
+DEAL:0::Check operator / (scalar): OK
+DEAL:0::Check scale (vector): OK
+DEAL:0::Check equ (factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector): OK
+DEAL:0::Check equ (factor, vector, factor, vector, factor, vector): OK
+DEAL:0::Check equ<float> (factor, vector): OK
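
A condensed sketch of the update operations exercised by parallel_vector_11, applicable to any identically partitioned distributed vectors; the point of the test is that they also work on processes whose local range is empty. The helper name arithmetic_sketch and the use of caller-supplied vectors are illustrative only; every call is one that appears in the test.

#include <deal.II/base/index_set.h>
#include <deal.II/lac/parallel_vector.h>

void arithmetic_sketch (parallel::distributed::Vector<double> &v,
                        parallel::distributed::Vector<double> &w,
                        parallel::distributed::Vector<double> &x)
{
  parallel::distributed::Vector<double> y (v);

  y = v;                            // plain copy
  y.add (42.);                      // y_i += 42
  y.add (w);                        // y_i += w_i
  y.add (-1., w);                   // y_i += -1 * w_i
  y.add (2., w, -0.5, x);           // y_i += 2*w_i - 0.5*x_i

  y.sadd (-3., 2., v);              // y_i = -3*y_i + 2*v_i
  y.sadd (2., 3., v, 2., w);        // y_i = 2*y_i + 3*v_i + 2*w_i
  y.sadd (-1., 1., v, 2., w, 2., x);

  y *= 2.;                          // scale all entries
  y /= 2.;
  y.scale (x);                      // componentwise y_i *= x_i
  y.equ (10., x);                   // y_i = 10*x_i
  y.equ (10., v, -2., w);           // y_i = 10*v_i - 2*w_i
  y.equ (10., v, -2., w, 3., x);
}
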
index cbc2f52508c4aba3c323e320e769e3de4e6236fc..6458d55a9f4a5758e1b9abd297145f1ecb59e88c 100644 (file)
@@ -1,6 +1,6 @@
 //----------------------------  trilinos_vector_equality_4.cc  ---------------------------
 //    $Id$
-//    Version: $Name$ 
+//    Version: $Name$
 //
 //    Copyright (C) 2004, 2005, 2008, 2010 by the deal.II authors
 //
 // check correct behaviour of Trilinos ghosted vectors
 // create distributed and copy into ghosted...
 
-#include "../tests.h" 
+#include "../tests.h"
 #include <deal.II/base/utilities.h>
 #include <deal.II/base/index_set.h>
-#include <deal.II/lac/trilinos_vector.h>    
+#include <deal.II/lac/trilinos_vector.h>
 #include <fstream>
 #include <iostream>
 #include <vector>
@@ -42,7 +42,6 @@ void test ()
 
   TrilinosWrappers::MPI::Vector v(local_active, MPI_COMM_WORLD);
   TrilinosWrappers::MPI::Vector v_tmp(local_relevant, MPI_COMM_WORLD);
-  
 
                                      // set local values
   v(myid*2)=myid*2.0;
@@ -56,17 +55,17 @@ void test ()
       deallog << myid*2 << ":" << v(myid*2) << std::endl;
       deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
     }
-  
+
   Assert(v(myid*2) == myid*4.0, ExcInternalError());
   Assert(v(myid*2+1) == myid*4.0+2.0, ExcInternalError());
 
   v_tmp.reinit(v,false,true);
-  
+
                                   // check ghost values
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     deallog << "ghost: " << v_tmp(1) << std::endl;
   Assert(v_tmp(1) == 2.0, ExcInternalError());
-  
+
   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
     deallog << "OK" << std::endl;
 }
diff --git a/tests/mpi/trilinos_matvec_01.cc b/tests/mpi/trilinos_matvec_01.cc
new file mode 100644 (file)
index 0000000..2cf80eb
--- /dev/null
@@ -0,0 +1,140 @@
+//----------------------  trilinos_matvec_01.cc  ---------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2004, 2005, 2008, 2010, 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//----------------------  trilinos_matvec_01.cc  ---------------------------
+
+
+// Test whether TrilinosWrappers::SparseMatrix::vmult gives same result with
+// Trilinos vector and parallel distributed vector
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  const unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  const unsigned int n_rows = 3;
+  const unsigned int n_cols = 4;
+
+  IndexSet row_partitioning (n_rows);
+  IndexSet col_partitioning (n_cols);
+
+  if (n_procs == 1)
+    {
+      row_partitioning.add_range(0, n_rows);
+      col_partitioning.add_range(0, n_cols);
+    }
+  else if (n_procs == 2)
+    {
+                                      // row_partitioning should be { [0, 2), [2, n_rows) }
+                                      // col_partitioning should be { [0, 2), [2, n_cols) }
+                                      // col_relevant_set should be { [0, 3), [1, n_cols) }
+      if (my_id == 0)
+       {
+         row_partitioning.add_range(0, 2);
+         col_partitioning.add_range(0, 2);
+       }
+      else if (my_id == 1)
+       {
+         row_partitioning.add_range(2, n_rows);
+         col_partitioning.add_range(2, n_cols);
+       }
+    }
+  else
+    Assert (false, ExcNotImplemented());
+
+  TrilinosWrappers::SparsityPattern sp (row_partitioning,
+                                       col_partitioning, MPI_COMM_WORLD);
+  if (my_id == 0)
+    {
+      sp.add (0, 0);
+      sp.add (0, 2);
+    }
+  if ((n_procs == 1) || (my_id == 1))
+    sp.add(2,3);
+  sp.compress();
+
+  TrilinosWrappers::SparseMatrix A;
+  A.clear ();
+  A.reinit (sp);
+  if (my_id == 0)
+    {
+      A.add (0, 0, 1);
+      A.add (0, 2, 1);
+    }
+  if ((n_procs == 1) || (my_id == 1))
+    A.add(2,3, 2.0);
+  A.compress();
+
+  TrilinosWrappers::MPI::Vector x, y;
+  x.reinit (col_partitioning, MPI_COMM_WORLD);
+  y.reinit (row_partitioning, MPI_COMM_WORLD);
+
+  parallel::distributed::Vector<double>
+    dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
+    dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
+
+  for (unsigned int i=0; i<col_partitioning.n_elements(); ++i)
+    {
+      const unsigned int global_index = col_partitioning.nth_index_in_set(i);
+      dx(global_index) = (double)rand()/RAND_MAX;
+      x(global_index)  = dx(global_index);
+    }
+  dy = 1.;
+
+  A.vmult (y, x);
+  A.vmult (dy, dx);
+
+                               // compare whether we got the same result
+                               // (should be no roundoff difference)
+  for (unsigned int i=0; i<row_partitioning.n_elements(); ++i)
+    {
+      const unsigned int global_index = row_partitioning.nth_index_in_set(i);
+      Assert (dy(global_index) == y(global_index), ExcInternalError());
+    }
+  if (my_id == 0) deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("trilinos_matvec_01").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/trilinos_matvec_01/ncpu_1/cmp/generic b/tests/mpi/trilinos_matvec_01/ncpu_1/cmp/generic
new file mode 100644 (file)
index 0000000..be8d055
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
diff --git a/tests/mpi/trilinos_matvec_01/ncpu_2/cmp/generic b/tests/mpi/trilinos_matvec_01/ncpu_2/cmp/generic
new file mode 100644 (file)
index 0000000..be8d055
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
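
The pattern the two matvec tests check, in condensed form: a TrilinosWrappers::SparseMatrix assembled on a row/column partition can be applied directly to parallel::distributed::Vector arguments, and the result matches what TrilinosWrappers::MPI::Vector gives (Tvmult, tested next, mirrors this with the roles of the row and column vectors swapped). Below is a minimal single-process sketch using only calls from the test; the helper name matvec_sketch and the single-process partitioning are illustrative only.

#include <deal.II/base/utilities.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/parallel_vector.h>

void matvec_sketch ()
{
  const unsigned int n_rows = 3, n_cols = 4;
  IndexSet row_partitioning (n_rows), col_partitioning (n_cols);
  row_partitioning.add_range (0, n_rows);      // single-process layout
  col_partitioning.add_range (0, n_cols);

  TrilinosWrappers::SparsityPattern sp (row_partitioning, col_partitioning,
                                        MPI_COMM_WORLD);
  sp.add (0, 0);
  sp.add (0, 2);
  sp.add (2, 3);
  sp.compress();

  TrilinosWrappers::SparseMatrix A;
  A.reinit (sp);
  A.add (0, 0, 1.);
  A.add (0, 2, 1.);
  A.add (2, 3, 2.);
  A.compress();

                                   // source and destination as distributed
                                   // vectors, partitioned like the columns
                                   // and rows of A, respectively
  parallel::distributed::Vector<double>
    dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
    dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
  dx = 1.;

  A.vmult (dy, dx);                // dy = A * dx
}
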
diff --git a/tests/mpi/trilinos_matvec_02.cc b/tests/mpi/trilinos_matvec_02.cc
new file mode 100644 (file)
index 0000000..4209136
--- /dev/null
@@ -0,0 +1,140 @@
+//----------------------  trilinos_matvec_02.cc  ---------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2004, 2005, 2008, 2010, 2011 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//----------------------  trilinos_matvec_02.cc  ---------------------------
+
+
+// Test whether TrilinosWrappers::SparseMatrix::Tvmult gives same result with
+// Trilinos vector and parallel distributed vector
+
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/parallel_vector.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  const unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  const unsigned int n_rows = 3;
+  const unsigned int n_cols = 4;
+
+  IndexSet row_partitioning (n_rows);
+  IndexSet col_partitioning (n_cols);
+
+  if (n_procs == 1)
+    {
+      row_partitioning.add_range(0, n_rows);
+      col_partitioning.add_range(0, n_cols);
+    }
+  else if (n_procs == 2)
+    {
+                                      // row_partitioning should be { [0, 2), [2, n_rows) }
+                                      // col_partitioning should be { [0, 2), [2, n_cols) }
+                                      // col_relevant_set should be { [0, 3), [1, n_cols) }
+      if (my_id == 0)
+       {
+         row_partitioning.add_range(0, 2);
+         col_partitioning.add_range(0, 2);
+       }
+      else if (my_id == 1)
+       {
+         row_partitioning.add_range(2, n_rows);
+         col_partitioning.add_range(2, n_cols);
+       }
+    }
+  else
+    Assert (false, ExcNotImplemented());
+
+  TrilinosWrappers::SparsityPattern sp (row_partitioning,
+                                       col_partitioning, MPI_COMM_WORLD);
+  if (my_id == 0)
+    {
+      sp.add (0, 0);
+      sp.add (0, 2);
+    }
+  if ((n_procs == 1) || (my_id == 1))
+    sp.add(2,3);
+  sp.compress();
+
+  TrilinosWrappers::SparseMatrix A;
+  A.clear ();
+  A.reinit (sp);
+  if (my_id == 0)
+    {
+      A.add (0, 0, 1);
+      A.add (0, 2, 1);
+    }
+  if ((n_procs == 1) || (my_id == 1))
+    A.add(2,3, 2.0);
+  A.compress();
+
+  TrilinosWrappers::MPI::Vector x, y;
+  x.reinit (col_partitioning, MPI_COMM_WORLD);
+  y.reinit (row_partitioning, MPI_COMM_WORLD);
+
+  parallel::distributed::Vector<double>
+    dx (col_partitioning, col_partitioning, MPI_COMM_WORLD),
+    dy (row_partitioning, row_partitioning, MPI_COMM_WORLD);
+
+  for (unsigned int i=0; i<row_partitioning.n_elements(); ++i)
+    {
+      const unsigned int global_index = row_partitioning.nth_index_in_set(i);
+      dy(global_index) = (double)rand()/RAND_MAX;
+      y(global_index)  = dy(global_index);
+    }
+  dx = 1.;
+
+  A.Tvmult (x, y);
+  A.Tvmult (dx, dy);
+
+                               // compare whether we got the same result
+                               // (should be no roundoff difference)
+  for (unsigned int i=0; i<col_partitioning.n_elements(); ++i)
+    {
+      const unsigned int global_index = col_partitioning.nth_index_in_set(i);
+      Assert (dx(global_index) == x(global_index), ExcInternalError());
+    }
+  if (my_id == 0) deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
+
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("trilinos_matvec_02").c_str());
+      deallog.attach(logfile);
+      deallog << std::setprecision(4);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/trilinos_matvec_02/ncpu_1/cmp/generic b/tests/mpi/trilinos_matvec_02/ncpu_1/cmp/generic
new file mode 100644 (file)
index 0000000..be8d055
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
diff --git a/tests/mpi/trilinos_matvec_02/ncpu_2/cmp/generic b/tests/mpi/trilinos_matvec_02/ncpu_2/cmp/generic
new file mode 100644 (file)
index 0000000..be8d055
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
