<h3>Specific improvements</h3>
<ol>
+ <li> New: The Utilities::MPI namespace now provides sum() functions for
+ Tensor and SymmetricTensor objects.
+ <br>
+ (Ian Rose, 2015/04/24)
+ </li>
+
<li> Fixed: project_boundary_values_curl_conforming_l2() produced incorrect
results for non-uniform grids in 2D. This has been fixed by adjusting the way
tangents to edges are computed in 2D.
<br>
(Ross Kynch, 2015/04/23)
</li>
-
+
<li> Fixed: The TimerOutput class reported abnormally large CPU times when
run with more than one MPI process.
<br>
DEAL_II_NAMESPACE_OPEN
+// Forward declarations to allow MPI sums over tensorial types
+template <int rank, int dim, typename Number> class Tensor;
+template <int rank, int dim, typename Number> class SymmetricTensor;
+
namespace Utilities
{
/**
const MPI_Comm &mpi_communicator,
std::vector<T> &sums);
+ /**
+ * Perform an MPI sum of the entries of a symmetric tensor.
+ *
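+ * A minimal usage sketch; the variable names and the <tt>dim</tt>
+ * template parameter stand in for whatever the surrounding code
+ * provides:
+ * @code
+ *   SymmetricTensor<2,dim> local_stress;
+ *   // ...each process fills in its local contribution...
+ *   const SymmetricTensor<2,dim> global_stress
+ *     = Utilities::MPI::sum (local_stress, MPI_COMM_WORLD);
+ * @endcode
+ *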
+ * @relates SymmetricTensor
+ */
+ template <int rank, int dim, typename Number>
+ inline
+ SymmetricTensor<rank,dim,Number>
+ sum (const SymmetricTensor<rank,dim,Number> &local,
+ const MPI_Comm &mpi_communicator);
+
+ /**
+ * Perform an MPI sum of the entries of a tensor.
+ *
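+ * The function works for tensors of arbitrary rank. The @p Number type
+ * must be one for which the array-based sum() overload above is
+ * available (for example, float or double).
+ *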
+ * @relates Tensor
+ */
+ template <int rank, int dim, typename Number>
+ inline
+ Tensor<rank,dim,Number>
+ sum (const Tensor<rank,dim,Number> &local,
+ const MPI_Comm &mpi_communicator);
+
/**
* Return the maximum over all processors of the value @p t. This function
* is collective over all processors given in the communicator. If deal.II
}
}
+ template <int rank, int dim, typename Number>
+ inline
+ Tensor<rank,dim,Number>
+ sum (const Tensor<rank,dim,Number> &local,
+ const MPI_Comm &mpi_communicator)
+ {
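+ // Copy the tensor entries into a flat array, sum that array across
+ // all processes using the existing fixed-size-array overload, and
+ // repack the result into a tensor.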
+ const unsigned int n_entries = Tensor<rank,dim,Number>::n_independent_components;
+ Number entries[ Tensor<rank,dim,Number>::n_independent_components ];
+
+ for (unsigned int i=0; i<n_entries; ++i)
+ entries[i] = local[ local.unrolled_to_component_indices(i) ];
+
+ Number global_entries[ Tensor<rank,dim,Number>::n_independent_components ];
+ dealii::Utilities::MPI::sum( entries, mpi_communicator, global_entries );
+
+ Tensor<rank,dim,Number> global;
+ for (unsigned int i=0; i<n_entries; ++i)
+ global[ global.unrolled_to_component_indices(i) ] = global_entries[i];
+
+ return global;
+ }
+
+ template <int rank, int dim, typename Number>
+ inline
+ SymmetricTensor<rank,dim,Number>
+ sum (const SymmetricTensor<rank,dim,Number> &local,
+ const MPI_Comm &mpi_communicator)
+ {
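+ // Same unroll/reduce/repack strategy as for Tensor above; only the
+ // n_independent_components entries of the symmetric storage are
+ // communicated.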
+ const unsigned int n_entries = SymmetricTensor<rank,dim,Number>::n_independent_components;
+ Number entries[ SymmetricTensor<rank,dim,Number>::n_independent_components ];
+
+ for (unsigned int i=0; i<n_entries; ++i)
+ entries[i] = local[ local.unrolled_to_component_indices(i) ];
+
+ Number global_entries[ SymmetricTensor<rank,dim,Number>::n_independent_components ];
+ dealii::Utilities::MPI::sum( entries, mpi_communicator, global_entries );
+
+ SymmetricTensor<rank,dim,Number> global;
+ for (unsigned int i=0; i<n_entries; ++i)
+ global[ global.unrolled_to_component_indices(i) ] = global_entries[i];
+
+ return global;
+ }
template <typename T>
inline
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check Utilities::MPI::sum() for tensor objects
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/symmetric_tensor.h>
+#include <fstream>
+
+void test()
+{
+ Assert( Utilities::MPI::job_supports_mpi(), ExcInternalError());
+
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ if (myid==0)
+ deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
+
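+ // rank-1 tensors in 1, 2, and 3 space dimensions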
+ Tensor<1,1> tensor_1;
+
+ tensor_1[0] = 1.0;
+
+ Tensor<1,1> result_1 = Utilities::MPI::sum( tensor_1, MPI_COMM_WORLD );
+
+ if (myid==0)
+ deallog << "1D tensor: " << result_1 << std::endl;
+
+ Tensor<1,2> tensor_2;
+
+ tensor_2[0] = 1.0;
+ tensor_2[1] = 2.0;
+
+ Tensor<1,2> result_2 = Utilities::MPI::sum( tensor_2, MPI_COMM_WORLD );
+
+ if (myid==0)
+ deallog << "2D tensor: " << result_2 << std::endl;
+
+ Tensor<1,3> tensor_3;
+
+ tensor_3[0] = 1.0;
+ tensor_3[1] = 2.0;
+ tensor_3[2] = 3.0;
+
+ Tensor<1,3> result_3 = Utilities::MPI::sum( tensor_3, MPI_COMM_WORLD );
+
+ if (myid==0)
+ deallog << "3D tensor: " << result_3 << std::endl;
+
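+ // a full rank-2 tensor exercises the unrolling over multiple indices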
+ Tensor<2,3> rank_2_tensor;
+
+ rank_2_tensor[0][0] = 1.0;
+ rank_2_tensor[0][1] = 2.0;
+ rank_2_tensor[0][2] = 3.0;
+ rank_2_tensor[1][0] = 4.0;
+ rank_2_tensor[1][1] = 5.0;
+ rank_2_tensor[1][2] = 6.0;
+ rank_2_tensor[2][0] = 7.0;
+ rank_2_tensor[2][1] = 8.0;
+ rank_2_tensor[2][2] = 9.0;
+
+ Tensor<2,3> result_rank_2 = Utilities::MPI::sum( rank_2_tensor, MPI_COMM_WORLD );
+
+ if (myid==0)
+ deallog << "Rank 2 tensor: " << result_rank_2 << std::endl;
+
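+ // SymmetricTensor stores each off-diagonal pair only once; printing
+ // the summed result shows the full matrix, so the off-diagonal value
+ // appears twice in the expected output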
+ SymmetricTensor<2,2> symmetric_tensor;
+
+ symmetric_tensor[0][0] = 1.0;
+ symmetric_tensor[0][1] = 2.0;
+ symmetric_tensor[1][1] = 3.0;
+
+ SymmetricTensor<2,2> result_symmetric = Utilities::MPI::sum( symmetric_tensor, MPI_COMM_WORLD );
+
+ if (myid==0)
+ deallog << "Symmetric tensor: " << result_symmetric << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_WITH_MPI
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, numbers::invalid_unsigned_int);
+#else
+ (void)argc;
+ (void)argv;
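+ // this test makes no sense without MPI; fail the build deliberately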
+ compile_time_error;
+
+#endif
+
+ if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("mpi");
+ test();
+ deallog.pop();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:mpi::Running on 1 CPU(s).
+DEAL:mpi::1D tensor: 1.00000
+DEAL:mpi::2D tensor: 1.00000 2.00000
+DEAL:mpi::3D tensor: 1.00000 2.00000 3.00000
+DEAL:mpi::Rank 2 tensor: 1.00000 2.00000 3.00000 4.00000 5.00000 6.00000 7.00000 8.00000 9.00000
+DEAL:mpi::Symmetric tensor: 1.00000 2.00000 2.00000 3.00000
--- /dev/null
+
+DEAL:mpi::Running on 10 CPU(s).
+DEAL:mpi::1D tensor: 10.0000
+DEAL:mpi::2D tensor: 10.0000 20.0000
+DEAL:mpi::3D tensor: 10.0000 20.0000 30.0000
+DEAL:mpi::Rank 2 tensor: 10.0000 20.0000 30.0000 40.0000 50.0000 60.0000 70.0000 80.0000 90.0000
+DEAL:mpi::Symmetric tensor: 10.0000 20.0000 20.0000 30.0000
--- /dev/null
+
+DEAL:mpi::Running on 4 CPU(s).
+DEAL:mpi::1D tensor: 4.00000
+DEAL:mpi::2D tensor: 4.00000 8.00000
+DEAL:mpi::3D tensor: 4.00000 8.00000 12.0000
+DEAL:mpi::Rank 2 tensor: 4.00000 8.00000 12.0000 16.0000 20.0000 24.0000 28.0000 32.0000 36.0000
+DEAL:mpi::Symmetric tensor: 4.00000 8.00000 8.00000 12.0000