https://gitweb.dealii.org/ - dealii.git/commitdiff
Add collective MPI sums to tensorial objects 864/head
author Ian Rose <ian.r.rose@gmail.com>
Wed, 22 Apr 2015 23:20:57 +0000 (16:20 -0700)
committer Ian Rose <ian.r.rose@gmail.com>
Fri, 24 Apr 2015 19:45:10 +0000 (12:45 -0700)
Fix indentation

indentation

Move functions to mpi.h, address other comments

Try to avoid variable length array declaration

Add entry to changelog

Fix slash

doc/news/changes.h
include/deal.II/base/mpi.h
tests/mpi/collective_tensor.cc [new file with mode: 0644]
tests/mpi/collective_tensor.mpirun=1.output [new file with mode: 0644]
tests/mpi/collective_tensor.mpirun=10.output [new file with mode: 0644]
tests/mpi/collective_tensor.mpirun=4.output [new file with mode: 0644]

index 7c10dafcb54a46d950011358ab00350c0e16c386..7d33088137c8660f3e819ef261e9cb7da9b9e9bf 100644 (file)
@@ -468,13 +468,19 @@ inconvenience this causes.
 <h3>Specific improvements</h3>
 
 <ol>
+  <li> New: There are now MPI sum functions for Tensors and SymmetricTensors
+  in the Utilities::MPI namespace.
+  <br>
+  (Ian Rose, 2015/04/24)
+  </li>
+
   <li> Fixed: project_boundary_values_curl_conforming_l2() produced incorrect
   results for non-uniform grids in 2D. Adjustment to the way 2D tangents to edges are
   computed fixes this.
   <br>
   (Ross Kynch, 2015/04/23)
   </li>
-  
+
   <li> Fixed: The TimerOutput class reported abnormally large cpu time when run
   with more than one process with MPI. Now this is fixed.
   <br>
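
The changelog entry above advertises the new overloads. For orientation, a minimal usage sketch (hypothetical helper and variable names; assumes a deal.II program with MPI already initialized):

  #include <deal.II/base/mpi.h>
  #include <deal.II/base/tensor.h>

  using namespace dealii;

  // Hypothetical helper: sum per-rank force contributions.
  // Each rank passes its local tensor; every rank receives the
  // element-wise sum over the communicator.
  Tensor<1,3> accumulate_force (const Tensor<1,3> &local_force)
  {
    return Utilities::MPI::sum (local_force, MPI_COMM_WORLD);
  }
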
index 34bbc4d462f31c8c24f7486a9157b64d6924b3ed..09550080bd68ecb68094c64e741e0e24cb21c394 100644 (file)
@@ -50,6 +50,10 @@ namespace MPI
 DEAL_II_NAMESPACE_OPEN
 
 
+//Forward type declarations to allow MPI sums over tensorial types
+template <int rank, int dim, typename Number> class Tensor;
+template <int rank, int dim, typename Number> class SymmetricTensor;
+
 namespace Utilities
 {
   /**
@@ -160,6 +164,28 @@ namespace Utilities
               const MPI_Comm &mpi_communicator,
               std::vector<T> &sums);
 
+    /**
+     * Perform an MPI sum of the entries of a symmetric tensor.
+     *
+     * @relates SymmetricTensor
+     */
+    template <int rank, int dim, typename Number>
+    inline
+    SymmetricTensor<rank,dim,Number>
+    sum (const SymmetricTensor<rank,dim,Number> &local,
+         const MPI_Comm &mpi_communicator);
+
+    /**
+     * Perform an MPI sum of the entries of a tensor.
+     *
+     * @relates Tensor
+     */
+    template <int rank, int dim, typename Number>
+    inline
+    Tensor<rank,dim,Number>
+    sum (const Tensor<rank,dim,Number> &local,
+         const MPI_Comm &mpi_communicator);
+
     /**
      * Return the maximum over all processors of the value @p t. This function
      * is collective over all processors given in the communicator. If deal.II
@@ -468,6 +494,49 @@ namespace Utilities
         }
     }
 
+    template <int rank, int dim, typename Number>
+    inline
+    Tensor<rank,dim,Number>
+    sum (const Tensor<rank,dim,Number> &local,
+         const MPI_Comm &mpi_communicator)
+    {
+      const unsigned int n_entries = Tensor<rank,dim,Number>::n_independent_components;
+      Number entries[ Tensor<rank,dim,Number>::n_independent_components ];
+
+      for (unsigned int i=0; i< n_entries; ++i)
+        entries[i] = local[ local.unrolled_to_component_indices(i) ];
+
+      Number global_entries[ Tensor<rank,dim,Number>::n_independent_components ];
+      dealii::Utilities::MPI::sum( entries, mpi_communicator, global_entries );
+
+      Tensor<rank,dim,Number> global;
+      for (unsigned int i=0; i< n_entries; ++i)
+        global[ global.unrolled_to_component_indices(i) ] = global_entries[i];
+
+      return global;
+    }
+
+    template <int rank, int dim, typename Number>
+    inline
+    SymmetricTensor<rank,dim,Number>
+    sum (const SymmetricTensor<rank,dim,Number> &local,
+         const MPI_Comm &mpi_communicator)
+    {
+      const unsigned int n_entries = SymmetricTensor<rank,dim,Number>::n_independent_components;
+      Number entries[ SymmetricTensor<rank,dim,Number>::n_independent_components ];
+
+      for (unsigned int i=0; i< n_entries; ++i)
+        entries[i] = local[ local.unrolled_to_component_indices(i) ];
+
+      Number global_entries[ SymmetricTensor<rank,dim,Number>::n_independent_components ];
+      dealii::Utilities::MPI::sum( entries, mpi_communicator, global_entries );
+
+      SymmetricTensor<rank,dim,Number> global;
+      for (unsigned int i=0; i< n_entries; ++i)
+        global[ global.unrolled_to_component_indices(i) ] = global_entries[i];
+
+      return global;
+    }
 
     template <typename T>
     inline
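
Implementation note: both overloads follow the same pattern: unroll the tensor's independent components into a fixed-size C array via unrolled_to_component_indices(), reduce that array with the existing array-based Utilities::MPI::sum(), and copy the summed entries back into a tensor. For a SymmetricTensor only the independent components are communicated (e.g. 3 rather than 4 entries for rank 2 in 2D), and the mirrored off-diagonal entries stay consistent by construction. At the MPI level, the array reduction amounts to an element-wise all-reduce; a sketch of the equivalent raw call, assuming Number is double and an MPI-3 const-correct API (deal.II's own implementation may differ in detail):

  #include <mpi.h>

  // Element-wise sum of N doubles across all ranks; every rank
  // receives the reduced result (an all-reduce, not a root-only sum).
  template <unsigned int N>
  void sum_entries (const double (&local)[N],
                    double (&global)[N],
                    MPI_Comm comm)
  {
    MPI_Allreduce (local, global, N, MPI_DOUBLE, MPI_SUM, comm);
  }
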
diff --git a/tests/mpi/collective_tensor.cc b/tests/mpi/collective_tensor.cc
new file mode 100644 (file)
index 0000000..53f1746
--- /dev/null
@@ -0,0 +1,120 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check Utilities::MPI::sum() for tensor objects
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/symmetric_tensor.h>
+#include <fstream>
+
+void test()
+{
+  Assert( Utilities::MPI::job_supports_mpi(), ExcInternalError());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+  if (myid==0)
+    deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
+
+  Tensor<1,1> tensor_1;
+
+  tensor_1[0] = 1.0;
+
+  Tensor<1,1> result_1 = Utilities::MPI::sum( tensor_1, MPI_COMM_WORLD );
+
+  if (myid==0)
+    deallog << "1D tensor: " << result_1 << std::endl;
+
+  Tensor<1,2> tensor_2;
+
+  tensor_2[0] = 1.0;
+  tensor_2[1] = 2.0;
+
+  Tensor<1,2> result_2 = Utilities::MPI::sum( tensor_2, MPI_COMM_WORLD );
+
+  if (myid==0)
+    deallog << "2D tensor: " << result_2 << std::endl;
+
+  Tensor<1,3> tensor_3;
+
+  tensor_3[0] = 1.0;
+  tensor_3[1] = 2.0;
+  tensor_3[2] = 3.0;
+
+  Tensor<1,3> result_3 = Utilities::MPI::sum( tensor_3, MPI_COMM_WORLD );
+
+  if (myid==0)
+    deallog << "3D tensor: " << result_3 << std::endl;
+
+  Tensor<2,3> rank_2_tensor;
+
+  rank_2_tensor[0][0] = 1.0;
+  rank_2_tensor[0][1] = 2.0;
+  rank_2_tensor[0][2] = 3.0;
+  rank_2_tensor[1][0] = 4.0;
+  rank_2_tensor[1][1] = 5.0;
+  rank_2_tensor[1][2] = 6.0;
+  rank_2_tensor[2][0] = 7.0;
+  rank_2_tensor[2][1] = 8.0;
+  rank_2_tensor[2][2] = 9.0;
+
+  Tensor<2,3> result_rank_2 = Utilities::MPI::sum( rank_2_tensor, MPI_COMM_WORLD );
+
+  if (myid==0)
+    deallog << "Rank 2 tensor: " << result_rank_2 << std::endl;
+
+  SymmetricTensor<2,2> symmetric_tensor;
+
+  symmetric_tensor[0][0] = 1.0;
+  symmetric_tensor[0][1] = 2.0;
+  symmetric_tensor[1][1] = 3.0;
+
+  SymmetricTensor<2,2> result_symmetric = Utilities::MPI::sum( symmetric_tensor, MPI_COMM_WORLD );
+
+  if (myid==0)
+    deallog << "Symmetric tensor: " << result_symmetric << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_WITH_MPI
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, numbers::invalid_unsigned_int);
+#else
+  (void)argc;
+  (void)argv;
+  compile_time_error;
+
+#endif
+
+  if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+    {
+      std::ofstream logfile("output");
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      deallog.push("mpi");
+      test();
+      deallog.pop();
+    }
+  else
+    test();
+}
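
The reference output files that follow encode the expected behavior: with N ranks, every component of the result is N times the locally assigned value, because each rank contributes identical entries. A hypothetical in-code check to the same effect (not part of this commit; the actual test diffs the deallog output against the .output files below):

  // Exact comparison is safe here: the summands are small integers,
  // so the double-precision sums are exact.
  AssertThrow (result_1[0] == double(numprocs) * tensor_1[0],
               ExcInternalError());
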
diff --git a/tests/mpi/collective_tensor.mpirun=1.output b/tests/mpi/collective_tensor.mpirun=1.output
new file mode 100644 (file)
index 0000000..6fd92b2
--- /dev/null
@@ -0,0 +1,7 @@
+
+DEAL:mpi::Running on 1 CPU(s).
+DEAL:mpi::1D tensor: 1.00000
+DEAL:mpi::2D tensor: 1.00000 2.00000
+DEAL:mpi::3D tensor: 1.00000 2.00000 3.00000
+DEAL:mpi::Rank 2 tensor: 1.00000 2.00000 3.00000 4.00000 5.00000 6.00000 7.00000 8.00000 9.00000
+DEAL:mpi::Symmetric tensor: 1.00000 2.00000 2.00000 3.00000
diff --git a/tests/mpi/collective_tensor.mpirun=10.output b/tests/mpi/collective_tensor.mpirun=10.output
new file mode 100644 (file)
index 0000000..236824d
--- /dev/null
@@ -0,0 +1,7 @@
+
+DEAL:mpi::Running on 10 CPU(s).
+DEAL:mpi::1D tensor: 10.0000
+DEAL:mpi::2D tensor: 10.0000 20.0000
+DEAL:mpi::3D tensor: 10.0000 20.0000 30.0000
+DEAL:mpi::Rank 2 tensor: 10.0000 20.0000 30.0000 40.0000 50.0000 60.0000 70.0000 80.0000 90.0000
+DEAL:mpi::Symmetric tensor: 10.0000 20.0000 20.0000 30.0000
diff --git a/tests/mpi/collective_tensor.mpirun=4.output b/tests/mpi/collective_tensor.mpirun=4.output
new file mode 100644 (file)
index 0000000..3e5d55c
--- /dev/null
@@ -0,0 +1,7 @@
+
+DEAL:mpi::Running on 4 CPU(s).
+DEAL:mpi::1D tensor: 4.00000
+DEAL:mpi::2D tensor: 4.00000 8.00000
+DEAL:mpi::3D tensor: 4.00000 8.00000 12.0000
+DEAL:mpi::Rank 2 tensor: 4.00000 8.00000 12.0000 16.0000 20.0000 24.0000 28.0000 32.0000 36.0000
+DEAL:mpi::Symmetric tensor: 4.00000 8.00000 8.00000 12.0000
