https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
New tests.
author    bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
Sat, 25 May 2013 12:46:37 +0000 (12:46 +0000)
committer bangerth <bangerth@0785d39b-7218-0410-832d-ea1e28bc413d>
Sat, 25 May 2013 12:46:37 +0000 (12:46 +0000)
git-svn-id: https://svn.dealii.org/trunk@29594 0785d39b-7218-0410-832d-ea1e28bc413d

tests/mpi/petsc_distribute_01_block.cc [new file with mode: 0644]
tests/mpi/petsc_distribute_01_block/ncpu_1/cmp/generic [new file with mode: 0644]
tests/mpi/petsc_distribute_01_block/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/petsc_distribute_01_block/ncpu_2/cmp/generic [new file with mode: 0644]
tests/mpi/petsc_distribute_01_block/ncpu_4/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_distribute_01_block.cc [new file with mode: 0644]
tests/mpi/trilinos_distribute_01_block/ncpu_1/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_distribute_01_block/ncpu_10/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_distribute_01_block/ncpu_2/cmp/generic [new file with mode: 0644]
tests/mpi/trilinos_distribute_01_block/ncpu_4/cmp/generic [new file with mode: 0644]

diff --git a/tests/mpi/petsc_distribute_01_block.cc b/tests/mpi/petsc_distribute_01_block.cc
new file mode 100644 (file)
index 0000000..996d6ae
--- /dev/null
@@ -0,0 +1,199 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a petsc vector
+//
+// like _01, but for a block vector. this has the additional complication that
+// (at a global level) the set of indices owned by this processor is not
+// contiguous
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/petsc_parallel_block_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+  const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  // create a vector that consists of elements indexed from 0 to n
+  PETScWrappers::MPI::BlockVector vec(2, MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.block(0).reinit(MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.block(1).reinit(MPI_COMM_WORLD, 100 * n_processes, 100);
+  vec.collect_sizes();
+  Assert (vec.block(0).local_size() == 100, ExcInternalError());
+  Assert (vec.block(0).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(0).local_range().second == 100*myid+100, ExcInternalError());
+  Assert (vec.block(1).local_size() == 100, ExcInternalError());
+  Assert (vec.block(1).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(1).local_range().second == 100*myid+100, ExcInternalError());
+
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    vec.block(0)(i) = i;
+  for (unsigned int i=vec.block(1).local_range().first; i<vec.block(1).local_range().second; ++i)
+    vec.block(1)(i) = i;
+  vec.compress();
+
+  // verify correctness so far
+  {
+    double exact_l1 = 0;
+    for (unsigned int i=0; i<vec.block(0).size(); ++i)
+      exact_l1 += 2*i;
+    Assert (vec.l1_norm() == exact_l1, ExcInternalError());
+  }
+
+
+  // create a ConstraintMatrix with a range that exceeds the locally
+  // owned range by 50 on each side
+  IndexSet locally_relevant_range (vec.size());
+  locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+                                   std::min (100*myid+150, vec.block(0).size()));
+  locally_relevant_range.add_range (vec.block(0).size()+std::max<int> (100*myid-50, 0),
+                                   vec.block(0).size()+std::min (100*myid+150, vec.block(0).size()));
+  ConstraintMatrix cm (locally_relevant_range);
+
+  // add constraints that constrain an element in the middle of the
+  // local range of each processor against an element outside, both in
+  // the ghost range before and after
+  //
+  // note that we tell each processor about all constraints, but most
+  // of them will throw away this information since it is not for a
+  // DoF inside the locally relevant range
+  for (unsigned int p=0; p<n_processes; ++p)
+    {
+      if ((p != 0) && locally_relevant_range.is_element (p*100+10))
+       {
+         cm.add_line (p*100+10);
+         cm.add_entry (p*100+10,
+                       p*100-25,
+                       1);
+         cm.add_line (vec.block(0).size()+p*100+10);
+         cm.add_entry (vec.block(0).size()+p*100+10,
+                       vec.block(0).size()+p*100-25,
+                       1);
+       }
+
+      if ((p != n_processes-1) && locally_relevant_range.is_element (p*100+90))
+       {
+         cm.add_line (p*100+90);
+         cm.add_entry (p*100+90,
+                       p*100+105,
+                       1);
+         cm.add_line (vec.block(0).size()+p*100+90);
+         cm.add_entry (vec.block(0).size()+p*100+90,
+                       vec.block(0).size()+p*100+105,
+                       1);
+       }
+    }
+  cm.close ();
+
+  // now distribute these constraints
+  cm.distribute (vec);
+
+  // verify correctness
+  vec.compress ();
+
+  if (myid != 0)
+    Assert (vec(vec.block(0).local_range().first+10) == vec.block(0).local_range().first-25,
+           ExcInternalError());
+
+  if (myid != n_processes-1)
+    Assert (vec(vec.block(0).local_range().first+90) == vec.block(0).local_range().first+105,
+           ExcInternalError());
+
+  if (myid != 0)
+    Assert (vec(vec.block(0).size()+vec.block(1).local_range().first+10) == vec.block(1).local_range().first-25,
+           ExcInternalError());
+
+  if (myid != n_processes-1)
+    Assert (vec(vec.block(0).size()+vec.block(1).local_range().first+90) == vec.block(1).local_range().first+105,
+           ExcInternalError());
+
+  
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    {
+      if ((i != vec.block(0).local_range().first+10)
+         &&
+         (i != vec.block(0).local_range().first+90))
+       {
+         double val = vec.block(0)(i);
+         Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+       }
+    }
+  for (unsigned int i=vec.block(1).local_range().first; i<vec.block(1).local_range().second; ++i)
+    {
+      if ((i != vec.block(1).local_range().first+10)
+         &&
+         (i != vec.block(1).local_range().first+90))
+       {
+         double val = vec.block(1)(i);
+         Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+       }
+    }
+
+  {
+    double exact_l1 = 0;
+
+    // add up original values of vector entries
+    for (unsigned int i=0; i<vec.block(0).size(); ++i)
+      exact_l1 += i;
+
+    // but then correct for the constrained values
+    for (unsigned int p=0; p<n_processes; ++p)
+      {
+       if (p != 0)
+         exact_l1 = exact_l1 - (p*100+10) + (p*100-25);
+       if (p != n_processes-1)
+         exact_l1 = exact_l1 - (p*100+90) + (p*100+105);
+      }
+
+    const double l1_norm = vec.l1_norm();
+    Assert (l1_norm == 2*exact_l1, ExcInternalError());
+
+    // generate output. write the norm divided by two so that it matches the
+    // results of the _01 test
+    if (myid == 0)
+      deallog << "Norm = " << l1_norm/2 << std::endl;
+  }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("petsc_distribute_01_block").c_str());
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
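
A minimal standalone sketch (not part of the commit) of the index layout that the opening comment of the test above refers to: each of the two blocks has size 100*n_processes and every processor owns a contiguous range of 100 entries per block, but in the concatenated global numbering of the block vector the two ranges owned by one processor are separated by all other processors' block-0 entries, so the globally owned set is not contiguous. The values of myid and n_processes below are example inputs only.

#include <algorithm>
#include <iostream>

int main()
{
  // example values; the reference outputs are generated for 1, 2, 4 and 10 processes
  const int n_processes = 4, myid = 1;
  const int N = 100 * n_processes;   // size of one block

  // locally owned ranges in the global (concatenated) numbering:
  // [100*myid, 100*myid+100) in block 0, and the same range shifted by N in block 1
  std::cout << "owned:    [" << 100 * myid << ',' << 100 * myid + 100
            << ")  and  [" << N + 100 * myid << ',' << N + 100 * myid + 100 << ")\n";

  // locally relevant ranges: the owned range extended by 50 on each side and
  // clipped to [0, N), mirroring the IndexSet built in the test above
  const int lo = std::max(100 * myid - 50, 0);
  const int hi = std::min(100 * myid + 150, N);
  std::cout << "relevant: [" << lo << ',' << hi
            << ")  and  [" << N + lo << ',' << N + hi << ")\n";
}
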
diff --git a/tests/mpi/petsc_distribute_01_block/ncpu_1/cmp/generic b/tests/mpi/petsc_distribute_01_block/ncpu_1/cmp/generic
new file mode 100644 (file)
index 0000000..2547450
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 4950.00
diff --git a/tests/mpi/petsc_distribute_01_block/ncpu_10/cmp/generic b/tests/mpi/petsc_distribute_01_block/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..464154b
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 499320.
diff --git a/tests/mpi/petsc_distribute_01_block/ncpu_2/cmp/generic b/tests/mpi/petsc_distribute_01_block/ncpu_2/cmp/generic
new file mode 100644 (file)
index 0000000..a5749c4
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 19880.0
diff --git a/tests/mpi/petsc_distribute_01_block/ncpu_4/cmp/generic b/tests/mpi/petsc_distribute_01_block/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..848c4e7
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 79740.0
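
The reference norms above (and the identical values in the trilinos_distribute_01_block directories below) can be reproduced with the same correction arithmetic the test applies in its final l1-norm check. A small standalone cross-check, using only numbers that already appear in the test; deallog formats the printed values slightly differently (4950.00, 19880.0, ...):

#include <initializer_list>
#include <iostream>

int main()
{
  for (unsigned int n_processes : {1u, 2u, 4u, 10u})
    {
      // sum of the unconstrained entries 0..100*n_processes-1 of one block
      double exact_l1 = 0;
      for (unsigned int i = 0; i < 100 * n_processes; ++i)
        exact_l1 += i;

      // correct for the two constrained entries at each inter-processor boundary:
      // entry p*100+10 becomes p*100-25, entry p*100+90 becomes p*100+105
      for (unsigned int p = 0; p < n_processes; ++p)
        {
          if (p != 0)
            exact_l1 = exact_l1 - (p * 100 + 10) + (p * 100 - 25);
          if (p != n_processes - 1)
            exact_l1 = exact_l1 - (p * 100 + 90) + (p * 100 + 105);
        }

      // the test prints l1_norm/2, and l1_norm == 2*exact_l1 since both blocks are equal
      std::cout << "ncpu_" << n_processes << ": Norm = " << exact_l1 << '\n';
    }
}
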
diff --git a/tests/mpi/trilinos_distribute_01_block.cc b/tests/mpi/trilinos_distribute_01_block.cc
new file mode 100644 (file)
index 0000000..f2e19ae
--- /dev/null
@@ -0,0 +1,203 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be  distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the  text  and
+//    further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check ConstraintMatrix.distribute() for a trilinos vector
+//
+// like _01, but for a block vector. this has the additional complication that
+// (at a global level) the set of indices owned by this processor is not
+// contiguous
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/trilinos_parallel_block_vector.h>
+#include <deal.II/lac/constraint_matrix.h>
+
+#include <fstream>
+#include <sstream>
+
+
+
+void test()
+{
+  const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  // create a vector that consists of elements indexed from 0 to n
+  TrilinosWrappers::MPI::BlockVector vec(2);
+  {
+    IndexSet is (100*n_processes);
+    is.add_range (100*myid, 100*myid+100);
+    vec.block(0).reinit (is, MPI_COMM_WORLD);
+    vec.block(1).reinit (is, MPI_COMM_WORLD);
+  }
+  vec.collect_sizes();
+  Assert (vec.block(0).local_size() == 100, ExcInternalError());
+  Assert (vec.block(0).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(0).local_range().second == 100*myid+100, ExcInternalError());
+  Assert (vec.block(1).local_size() == 100, ExcInternalError());
+  Assert (vec.block(1).local_range().first == 100*myid, ExcInternalError());
+  Assert (vec.block(1).local_range().second == 100*myid+100, ExcInternalError());
+
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    vec.block(0)(i) = i;
+  for (unsigned int i=vec.block(1).local_range().first; i<vec.block(1).local_range().second; ++i)
+    vec.block(1)(i) = i;
+  vec.compress();
+
+  // verify correctness so far
+  {
+    double exact_l1 = 0;
+    for (unsigned int i=0; i<vec.block(0).size(); ++i)
+      exact_l1 += 2*i;
+    Assert (vec.l1_norm() == exact_l1, ExcInternalError());
+  }
+
+
+  // create a ConstraintMatrix with a range that exceeds the locally
+  // owned range by 50 on each side
+  IndexSet locally_relevant_range (vec.size());
+  locally_relevant_range.add_range (std::max<int> (100*myid-50, 0),
+                                   std::min (100*myid+150, vec.block(0).size()));
+  locally_relevant_range.add_range (vec.block(0).size()+std::max<int> (100*myid-50, 0),
+                                   vec.block(0).size()+std::min (100*myid+150, vec.block(0).size()));
+  ConstraintMatrix cm (locally_relevant_range);
+
+  // add constraints that constrain an element in the middle of the
+  // local range of each processor against an element outside, both in
+  // the ghost range before and after
+  //
+  // note that we tell each processor about all constraints, but most
+  // of them will throw away this information since it is not for a
+  // DoF inside the locally relevant range
+  for (unsigned int p=0; p<n_processes; ++p)
+    {
+      if ((p != 0) && locally_relevant_range.is_element (p*100+10))
+       {
+         cm.add_line (p*100+10);
+         cm.add_entry (p*100+10,
+                       p*100-25,
+                       1);
+         cm.add_line (vec.block(0).size()+p*100+10);
+         cm.add_entry (vec.block(0).size()+p*100+10,
+                       vec.block(0).size()+p*100-25,
+                       1);
+       }
+
+      if ((p != n_processes-1) && locally_relevant_range.is_element (p*100+90))
+       {
+         cm.add_line (p*100+90);
+         cm.add_entry (p*100+90,
+                       p*100+105,
+                       1);
+         cm.add_line (vec.block(0).size()+p*100+90);
+         cm.add_entry (vec.block(0).size()+p*100+90,
+                       vec.block(0).size()+p*100+105,
+                       1);
+       }
+    }
+  cm.close ();
+
+  // now distribute these constraints
+  cm.distribute (vec);
+
+  // verify correctness
+  vec.compress ();
+
+  if (myid != 0)
+    Assert (vec(vec.block(0).local_range().first+10) == vec.block(0).local_range().first-25,
+           ExcInternalError());
+
+  if (myid != n_processes-1)
+    Assert (vec(vec.block(0).local_range().first+90) == vec.block(0).local_range().first+105,
+           ExcInternalError());
+
+  if (myid != 0)
+    Assert (vec(vec.block(0).size()+vec.block(1).local_range().first+10) == vec.block(1).local_range().first-25,
+           ExcInternalError());
+
+  if (myid != n_processes-1)
+    Assert (vec(vec.block(0).size()+vec.block(1).local_range().first+90) == vec.block(1).local_range().first+105,
+           ExcInternalError());
+
+  
+  for (unsigned int i=vec.block(0).local_range().first; i<vec.block(0).local_range().second; ++i)
+    {
+      if ((i != vec.block(0).local_range().first+10)
+         &&
+         (i != vec.block(0).local_range().first+90))
+       {
+         double val = vec.block(0)(i);
+         Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+       }
+    }
+  for (unsigned int i=vec.block(1).local_range().first; i<vec.block(1).local_range().second; ++i)
+    {
+      if ((i != vec.block(1).local_range().first+10)
+         &&
+         (i != vec.block(1).local_range().first+90))
+       {
+         double val = vec.block(1)(i);
+         Assert (std::fabs(val - i) <= 1e-6, ExcInternalError());
+       }
+    }
+
+  {
+    double exact_l1 = 0;
+
+    // add up original values of vector entries
+    for (unsigned int i=0; i<vec.block(0).size(); ++i)
+      exact_l1 += i;
+
+    // but then correct for the constrained values
+    for (unsigned int p=0; p<n_processes; ++p)
+      {
+       if (p != 0)
+         exact_l1 = exact_l1 - (p*100+10) + (p*100-25);
+       if (p != n_processes-1)
+         exact_l1 = exact_l1 - (p*100+90) + (p*100+105);
+      }
+
+    const double l1_norm = vec.l1_norm();
+    Assert (l1_norm == 2*exact_l1, ExcInternalError());
+
+    // generate output. write the norm divided by two so that it matches the
+    // results of the _01 test
+    if (myid == 0)
+      deallog << "Norm = " << l1_norm/2 << std::endl;
+  }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      std::ofstream logfile(output_file_for_mpi("trilinos_distribute_01_block").c_str());
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
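
The Trilinos test above is otherwise a line-for-line copy of the PETSc test; the one substantive difference is how each processor's share of the two blocks is declared. A condensed sketch of the two setup idioms, copied from the tests above (illustrative only, assuming the deal.II headers the tests themselves include):

#include <deal.II/base/index_set.h>
#include <deal.II/lac/petsc_parallel_block_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>

using namespace dealii;

void setup_examples (const unsigned int myid, const unsigned int n_processes)
{
  // PETSc: global and local sizes are passed explicitly for each block
  {
    PETScWrappers::MPI::BlockVector vec (2, MPI_COMM_WORLD, 100*n_processes, 100);
    vec.block(0).reinit (MPI_COMM_WORLD, 100*n_processes, 100);
    vec.block(1).reinit (MPI_COMM_WORLD, 100*n_processes, 100);
    vec.collect_sizes ();
  }

  // Trilinos: each block's locally owned range is described by an IndexSet
  {
    TrilinosWrappers::MPI::BlockVector vec (2);
    IndexSet is (100*n_processes);
    is.add_range (100*myid, 100*myid+100);
    vec.block(0).reinit (is, MPI_COMM_WORLD);
    vec.block(1).reinit (is, MPI_COMM_WORLD);
    vec.collect_sizes ();
  }
}
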
diff --git a/tests/mpi/trilinos_distribute_01_block/ncpu_1/cmp/generic b/tests/mpi/trilinos_distribute_01_block/ncpu_1/cmp/generic
new file mode 100644 (file)
index 0000000..2547450
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 4950.00
diff --git a/tests/mpi/trilinos_distribute_01_block/ncpu_10/cmp/generic b/tests/mpi/trilinos_distribute_01_block/ncpu_10/cmp/generic
new file mode 100644 (file)
index 0000000..464154b
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 499320.
diff --git a/tests/mpi/trilinos_distribute_01_block/ncpu_2/cmp/generic b/tests/mpi/trilinos_distribute_01_block/ncpu_2/cmp/generic
new file mode 100644 (file)
index 0000000..a5749c4
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 19880.0
diff --git a/tests/mpi/trilinos_distribute_01_block/ncpu_4/cmp/generic b/tests/mpi/trilinos_distribute_01_block/ncpu_4/cmp/generic
new file mode 100644 (file)
index 0000000..848c4e7
--- /dev/null
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm = 79740.0
