<h3>Specific improvements</h3>
<ol>
+<li> New: The function Utilities::System::calculate_collective_mpi_sum
+can be used to compute the sum of values over a number of
+MPI processes without having to deal with the underlying MPI functions.
+<br>
+(Wolfgang Bangerth, 2011/09/25)
+
<li> New: The CellAccessor::is_locally_owned function is a shortcut
for the <code>!cell-@>is_ghost() && !cell-@>is_artificial()</code> pattern
found in many places when dealing with distributed meshes.
#if defined(DEAL_II_COMPILER_SUPPORTS_MPI) || defined(DEAL_II_USE_PETSC)
#include <mpi.h>
#else
typedef int MPI_Comm;
#endif
#ifdef DEAL_II_USE_TRILINOS
*/
MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Return the sum over all processors of the value @p t. This function
+ * is collective over all processors given in the communicator. If
+ * deal.II is not configured for use of MPI, this function simply
+ * returns the value of @p t. This function corresponds to the
+ * <code>MPI_Allreduce</code> function, i.e. all processors receive
+ * the result of this operation.
+ *
+ * @note This function is only implemented for certain template
+ * arguments <code>T</code>, namely <code>float, double, int,
+ * unsigned int</code>.
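+ *
+ * A minimal usage sketch (the local value and the communicator are
+ * placeholders for whatever the calling code provides):
+ * @code
+ *   const double local_value = 1.0;   // locally computed contribution
+ *   const double global_sum
+ *     = Utilities::System::calculate_collective_mpi_sum (local_value,
+ *                                                        mpi_communicator);
+ *   // every process now holds the sum over all processes
+ * @endcode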
+ */
+ template <typename T>
+ T calculate_collective_mpi_sum (const T &t,
+ const MPI_Comm &mpi_communicator);
/**
* Data structure to store the result of
len = half;
}
}
+
+
+ namespace System
+ {
+ namespace internal
+ {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
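+ // The overloads below map the supported C++ types to their MPI datatype
+ // constants. The pointer argument serves only to select the overload; its
+ // value is never read.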
+ /**
+ * Return the corresponding MPI data type id for the argument given.
+ */
+ inline MPI_Datatype mpi_type_id (const int *)
+ {
+ return MPI_INT;
+ }
+
+
+ inline MPI_Datatype mpi_type_id (const unsigned int *)
+ {
+ return MPI_UNSIGNED;
+ }
+
+
+ inline MPI_Datatype mpi_type_id (const float *)
+ {
+ return MPI_FLOAT;
+ }
+
+
+ inline MPI_Datatype mpi_type_id (const double *)
+ {
+ return MPI_DOUBLE;
+ }
+#endif
+ }
+
+ template <typename T>
+ inline
+ T calculate_collective_mpi_sum (const T &t,
+ const MPI_Comm &mpi_communicator)
+ {
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ T sum;
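+ // MPI-1/2 declare the send buffer of MPI_Allreduce as non-const, hence
+ // the cast on the first argument.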
+ MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
+ &sum, 1, internal::mpi_type_id(&t), MPI_SUM,
+ mpi_communicator);
+ return sum;
+#else
+ (void)mpi_communicator;
+ return t;
+#endif
+ }
+ }
}
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id$
+// Version: $Name$
+//
+// Copyright (C) 2009, 2010, 2011 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// check Utilities::System::calculate_collective_mpi_sum()
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <fstream>
+
+void test()
+{
+ unsigned int myid = Utilities::System::get_this_mpi_process (MPI_COMM_WORLD);
+ const unsigned int numprocs = Utilities::System::get_n_mpi_processes (MPI_COMM_WORLD);
+
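+ // Sum up the process rank over all participating processes for each of
+ // the four supported data types; with N processes the result is N*(N-1)/2.
+ // All results are stored as int so that the log output below stays
+ // integer-valued for every type.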
+ int int_sum, uint_sum, double_sum, float_sum;
+ int_sum
+ =
+ Utilities::System::calculate_collective_mpi_sum<int>(myid,
+ MPI_COMM_WORLD);
+ uint_sum
+ =
+ Utilities::System::calculate_collective_mpi_sum<unsigned int>(myid,
+ MPI_COMM_WORLD);
+ float_sum
+ =
+ Utilities::System::calculate_collective_mpi_sum<float>(myid,
+ MPI_COMM_WORLD);
+ double_sum
+ =
+ Utilities::System::calculate_collective_mpi_sum<double>(myid,
+ MPI_COMM_WORLD);
+
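+ // only the first process reports the result so the output stays deterministic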
+ if (myid==0)
+ deallog << int_sum << ' ' << uint_sum << ' ' << double_sum << ' ' << float_sum << std::endl;
+}
+
+
+int main(int argc, char *argv[])
+{
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ Utilities::System::MPI_InitFinalize mpi (argc, argv);
+#else
+ (void)argc;
+ (void)argv;
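+ // this test requires MPI; fail at compile time if deal.II was built without it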
+ compile_time_error;
+
+#endif
+
+ if (Utilities::System::get_this_mpi_process (MPI_COMM_WORLD) == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("collective_02").c_str());
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ deallog.push("mpi");
+ test();
+ deallog.pop();
+ }
+ else
+ test();
+}
--- /dev/null
+
+DEAL:mpi::0 0 0 0
--- /dev/null
+
+DEAL:mpi::45 45 45 45
--- /dev/null
+
+DEAL:mpi::6 6 6 6