functions that were previously part of Utilities::System. Specifically, the following
functions were moved and in part renamed: Utilities::System::get_n_mpi_processes
is now Utilities::MPI::n_mpi_processes; Utilities::System::get_this_mpi_process
-is now Utilities::MPI::this_mpi_process; Utilities::System::compute_point_to_point_communication_pattern
+is now Utilities::MPI::this_mpi_process;
+Utilities::System::compute_point_to_point_communication_pattern
is now Utilities::MPI::compute_point_to_point_communication_pattern;
Utilities::System::duplicate_communicator
-is now Utilities::MPI::duplicate_communicator.
+is now Utilities::MPI::duplicate_communicator;
+Utilities::System::calculate_collective_mpi_min_max_avg
+is now Utilities::MPI::min_max_avg.
+In addition, some of the arguments of these functions have changed.
+<br>
+The previous functions should still be available, though their use
+is now deprecated.
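+<br>
+For example (a sketch; <code>mpi_communicator</code> denotes any valid
+MPI communicator), a call that used to read
+@code
+  const unsigned int n_procs
+    = Utilities::System::get_n_mpi_processes (mpi_communicator);
+@endcode
+is now written as
+@code
+  const unsigned int n_procs
+    = Utilities::MPI::n_mpi_processes (mpi_communicator);
+@endcode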
<br>
(Wolfgang Bangerth, 2011/09/26)
* @ingroup utilities
*/
namespace MPI
- {
- /**
- * Return the sum over all processors of the value @p t. This function
- * is collective over all processors given in the communicator. If
- * deal.II is not configured for use of MPI, this function simply
- * returns the value of @p t. This function corresponds to the
- * <code>MPI_Allreduce</code> function, i.e. all processors receive
- * the result of this operation.
- *
- * @note This function is only implemented for certain template
- * arguments <code>T</code>, namely <code>float, double, int,
- * unsigned int</code>.
- */
- template <typename T>
- T sum (const T &t,
- const MPI_Comm &mpi_communicator);
-
+ {
/**
* Return the number of MPI processes
 * that exist in the given communicator
* be destroyed using
* <code>MPI_Comm_free</code>.
*/
- MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+ MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);
+
+ /**
+ * Return the sum over all processors of the value @p t. This function
+ * is collective over all processors given in the communicator. If
+ * deal.II is not configured for use of MPI, this function simply
+ * returns the value of @p t. This function corresponds to the
+ * <code>MPI_Allreduce</code> function, i.e. all processors receive
+ * the result of this operation.
+ *
+ * @note This function is only implemented for certain template
+ * arguments <code>T</code>, namely <code>float, double, int,
+ * unsigned int</code>.
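+ *
+ * A minimal usage sketch (assuming deal.II was configured with MPI and
+ * @p mpi_communicator is a valid communicator):
+ * @code
+ *   // as an example, each processor contributes its own rank
+ *   const double local_value
+ *     = Utilities::MPI::this_mpi_process (mpi_communicator);
+ *   const double global_sum
+ *     = Utilities::MPI::sum (local_value, mpi_communicator);
+ * @endcode
+ * Every processor obtains the same value of <code>global_sum</code>.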
+ */
+ template <typename T>
+ T sum (const T &t,
+ const MPI_Comm &mpi_communicator);
+
+ /**
+ * A data structure to store the result of min_max_avg(). The
+ * @p min_index and @p max_index fields hold the rank of the
+ * processor on which the minimum and maximum, respectively,
+ * were attained.
+ */
+ struct MinMaxAvg
+ {
+ double sum;
+ double min;
+ double max;
+ unsigned int min_index;
+ unsigned int max_index;
+ double avg;
+ };
+
+ /**
+ * Returns the sum, average, minimum,
+ * and maximum of the given value, as
+ * well as the processor ids on which
+ * the minimum and maximum are attained,
+ * as a collective operation on the
+ * given MPI communicator
+ * @p mpi_communicator. Each processor's
+ * value is given in @p my_value. The
+ * result is returned by value and is
+ * available on all machines.
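+ *
+ * A minimal usage sketch (a hedged example; @p local_error stands
+ * for some hypothetical per-processor quantity of interest):
+ * @code
+ *   const Utilities::MPI::MinMaxAvg stats
+ *     = Utilities::MPI::min_max_avg (local_error, mpi_communicator);
+ *   const double       spread       = stats.max - stats.min;
+ *   const unsigned int owner_of_max = stats.max_index;
+ *   // stats.avg equals stats.sum divided by the number of processors
+ * @endcode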
+ */
+ MinMaxAvg
+ min_max_avg (const double my_value,
+ const MPI_Comm &mpi_communicator);
}
/**
* A namespace for utility functions that
bool job_supports_mpi ();
/**
- * This function is an alias for
+ * This function is an alias for
* Utilities::MPI::n_mpi_processes.
- *
+ *
* @deprecated
*/
unsigned int get_n_mpi_processes (const MPI_Comm &mpi_communicator);
/**
- * This function is an alias for
+ * This function is an alias for
* Utilities::MPI::this_mpi_process.
- *
+ *
* @deprecated
*/
unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator);
/**
- * This function is an alias for
+ * This function is an alias for
* Utilities::MPI::compute_point_to_point_communication_pattern.
- *
+ *
* @deprecated
*/
- using
+ using
Utilities::MPI::compute_point_to_point_communication_pattern;
/**
- * This function is an alias for
+ * This function is an alias for
* Utilities::MPI::duplicate_communicator.
- *
+ *
* @deprecated
*/
using Utilities::MPI::duplicate_communicator;
/**
- * Data structure to store the result of
- * calculate_collective_mpi_min_max_avg.
+ * An alias for Utilities::MPI::MinMaxAvg.
+ *
+ * @deprecated
*/
- struct MinMaxAvg
- {
- double sum;
- double min;
- double max;
- unsigned int min_index;
- unsigned int max_index;
- double avg;
- };
+ using Utilities::MPI::MinMaxAvg;
/**
- * Returns sum, average, minimum,
- * maximum, processor id of minimum and
- * maximum as a collective operation of
- * on the given MPI communicator @param
- * mpi_communicator . Each processor's
- * value is given in @param my_value and
- * the result will be returned in @param
- * result . The result is available on all
- * machines.
+ * An alias for Utilities::MPI::min_max_avg.
+ *
+ * @deprecated
*/
- void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
- const double my_value,
- MinMaxAvg & result);
+ void
+ calculate_collective_mpi_min_max_avg (const MPI_Comm &mpi_communicator,
+ const double my_value,
+ MinMaxAvg & result);
len = half;
}
}
-
-
+
+
namespace MPI
{
namespace internal
{
-#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
/**
* Return the corresponding MPI data type id for the argument given.
*/
return MPI_INT;
}
-
+
inline MPI_Datatype mpi_type_id (const unsigned int *)
{
return MPI_UNSIGNED;
}
#endif
}
-
+
template <typename T>
inline
T sum (const T &t,
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
T sum;
MPI_Allreduce (const_cast<void*>(static_cast<const void*>(&t)),
- &sum, 1, internal::mpi_type_id(&t), MPI_SUM,
+ &sum, 1, internal::mpi_type_id(&t), MPI_SUM,
mpi_communicator);
return sum;
#else
// behaviour in the call to
// GlobalAssemble().
double double_mode = mode;
- struct Utilities::System::MinMaxAvg result;
- Utilities::System::calculate_collective_mpi_min_max_avg(
- dynamic_cast<const Epetra_MpiComm*>(&vector_partitioner().Comm())->GetMpiComm(),
- double_mode,
- result);
+ Utilities::MPI::MinMaxAvg result
+ = Utilities::MPI::min_max_avg (double_mode,
+ dynamic_cast<const Epetra_MpiComm*>
+ (&vector_partitioner().Comm())->GetMpiComm());
Assert(result.max-result.min<1e-5,
ExcMessage ("Not all processors agree whether the last operation on "
"this vector was an addition or a set operation. This will "
#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
if (sync_wall_time)
{
- Utilities::System
- ::calculate_collective_mpi_min_max_avg(mpi_communicator, time,
- this->mpi_data);
+ this->mpi_data
+ = Utilities::MPI::min_max_avg (time, mpi_communicator);
cumulative_wall_time += this->mpi_data.max;
}
return origins;
}
+
+ namespace
+ {
+ // custom MPI_Op reduction for min_max_avg(): combines the
+ // partial MinMaxAvg results of two processes
+ void max_reduce ( const void * in_lhs_,
+ void * inout_rhs_,
+ int * len,
+ MPI_Datatype * )
+ {
+ const MinMaxAvg * in_lhs = static_cast<const MinMaxAvg*>(in_lhs_);
+ MinMaxAvg * inout_rhs = static_cast<MinMaxAvg*>(inout_rhs_);
+
+ Assert(*len==1, ExcInternalError());
+
+ inout_rhs->sum += in_lhs->sum;
+ if (inout_rhs->min>in_lhs->min)
+ {
+ inout_rhs->min = in_lhs->min;
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+ else if (inout_rhs->min == in_lhs->min)
+ { // choose the lower CPU index when tied to make the operator commutative
+ if (inout_rhs->min_index > in_lhs->min_index)
+ inout_rhs->min_index = in_lhs->min_index;
+ }
+
+ if (inout_rhs->max < in_lhs->max)
+ {
+ inout_rhs->max = in_lhs->max;
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+ else if (inout_rhs->max == in_lhs->max)
+ { // choose the lower CPU index when tied to make the operator commutative
+ if (inout_rhs->max_index > in_lhs->max_index)
+ inout_rhs->max_index = in_lhs->max_index;
+ }
+ }
+ }
+
+
+
+ MinMaxAvg
+ min_max_avg(const double my_value,
+ const MPI_Comm &mpi_communicator)
+ {
+ MinMaxAvg result;
+
+ const unsigned int my_id
+ = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
+ const unsigned int numproc
+ = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+ MPI_Op op;
+ int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ MinMaxAvg in;
+ in.sum = in.min = in.max = my_value;
+ in.min_index = in.max_index = my_id;
+
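+ // Build a derived MPI datatype that matches the layout of MinMaxAvg:
+ // three doubles (sum, min, max) followed by two unsigned ints
+ // (min_index, max_index). The trailing 'avg' member is not part of
+ // the reduction; it is computed from 'sum' afterwards.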
+ MPI_Datatype type;
+ int lengths[]={3,2};
+ MPI_Aint displacements[]={0,offsetof(MinMaxAvg, min_index)};
+ MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
+
+ ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_commit(&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+ ierr = MPI_Allreduce ( &in, &result, 1, type, op, mpi_communicator );
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Type_free (&type);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ ierr = MPI_Op_free(&op);
+ AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
+
+ result.avg = result.sum / numproc;
+
+ return result;
+ }
+
#else
unsigned int get_n_mpi_processes (const MPI_Comm &)
{
return mpi_communicator;
}
-#endif
+
+
+
+
+ MinMaxAvg
+ min_max_avg(const double my_value,
+ const MPI_Comm &)
+ {
+ MinMaxAvg result;
+
+ result.sum = my_value;
+ result.avg = my_value;
+ result.min = my_value;
+ result.max = my_value;
+ result.min_index = 0;
+ result.max_index = 0;
+
+ return result;
+ }
+
+#endif
}
}
- namespace
- {
- // custom MIP_Op for
- // calculate_collective_mpi_min_max_avg
- void max_reduce ( const void * in_lhs_,
- void * inout_rhs_,
- int * len,
- MPI_Datatype * )
- {
- const MinMaxAvg * in_lhs = static_cast<const MinMaxAvg*>(in_lhs_);
- MinMaxAvg * inout_rhs = static_cast<MinMaxAvg*>(inout_rhs_);
-
- Assert(*len==1, ExcInternalError());
-
- inout_rhs->sum += in_lhs->sum;
- if (inout_rhs->min>in_lhs->min)
- {
- inout_rhs->min = in_lhs->min;
- inout_rhs->min_index = in_lhs->min_index;
- }
- else if (inout_rhs->min == in_lhs->min)
- { // choose lower cpu index when tied to make operator cumutative
- if (inout_rhs->min_index > in_lhs->min_index)
- inout_rhs->min_index = in_lhs->min_index;
- }
-
- if (inout_rhs->max < in_lhs->max)
- {
- inout_rhs->max = in_lhs->max;
- inout_rhs->max_index = in_lhs->max_index;
- }
- else if (inout_rhs->max == in_lhs->max)
- { // choose lower cpu index when tied to make operator cumutative
- if (inout_rhs->max_index > in_lhs->max_index)
- inout_rhs->max_index = in_lhs->max_index;
- }
- }
- }
-
-
-
- void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
- double my_value,
- MinMaxAvg & result)
- {
- const unsigned int my_id
- = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
- const unsigned int numproc
- = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
-
- MPI_Op op;
- int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- MinMaxAvg in;
- in.sum = in.min = in.max = my_value;
- in.min_index = in.max_index = my_id;
-
- MPI_Datatype type;
- int lengths[]={3,2};
- MPI_Aint displacements[]={0,offsetof(MinMaxAvg, min_index)};
- MPI_Datatype types[]={MPI_DOUBLE, MPI_INT};
-
- ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Type_commit(&type);
- ierr = MPI_Allreduce ( &in, &result, 1, type, op, mpi_communicator );
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Type_free (&type);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- ierr = MPI_Op_free(&op);
- AssertThrow(ierr == MPI_SUCCESS, ExcInternalError());
-
- result.avg = result.sum / numproc;
- }
-
-
-
#else
bool job_supports_mpi ()
return false;
}
-
-
-
-
- void calculate_collective_mpi_min_max_avg(const MPI_Comm &,
- double my_value,
- MinMaxAvg & result)
- {
- result.sum = my_value;
- result.avg = my_value;
- result.min = my_value;
- result.max = my_value;
- result.min_index = 0;
- result.max_index = 0;
- }
-
-
#endif
#endif
}
-
+
unsigned int get_n_mpi_processes (const MPI_Comm &mpi_communicator)
{
return MPI::n_mpi_processes (mpi_communicator);
unsigned int get_this_mpi_process (const MPI_Comm &mpi_communicator)
{
return MPI::this_mpi_process (mpi_communicator);
- }
+ }
+
+
+
+ void calculate_collective_mpi_min_max_avg(const MPI_Comm &mpi_communicator,
+ double my_value,
+ MinMaxAvg & result)
+ {
+ result = Utilities::MPI::min_max_avg (my_value,
+ mpi_communicator);
+ }
+
+
}
// $Id$
// Version: $Name$
//
-// Copyright (C) 2009, 2010 by the deal.II authors
+// Copyright (C) 2009, 2010, 2011 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
//---------------------------------------------------------------------------
-// check Utilities::System::calculate_collective_mpi_min_max_avg()
+// check Utilities::MPI::min_max_avg()
#include "../tests.h"
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <fstream>
-void print_it(Utilities::System::MinMaxAvg & result)
+void print_it(Utilities::MPI::MinMaxAvg & result)
{
deallog << "sum: " << result.sum
<< " avg: " << result.avg
unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
const unsigned int numprocs = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
-
+
if (myid==0)
deallog << "Running on " << numprocs << " CPU(s)." << std::endl;
-  Utilities::System::MinMaxAvg result;
+  Utilities::MPI::MinMaxAvg result;
+
double value = 0.0;
- Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD,
- value,
- result);
+ result = Utilities::MPI::min_max_avg(value, MPI_COMM_WORLD);
if (myid==0)
- print_it(result);
+ print_it(result);
Assert(result.sum == 0.0, ExcInternalError());
Assert(result.min == 0.0, ExcInternalError());
Assert(result.max == 0.0, ExcInternalError());
Assert(result.avg == 0.0, ExcInternalError());
value = 1.0;
- Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD,
- value,
- result);
+ result = Utilities::MPI::min_max_avg(value, MPI_COMM_WORLD);
if (myid==0)
print_it(result);
Assert(result.sum == numprocs, ExcInternalError());
value = 0.0;
if (myid==0)
value = 1.0;
-
- Utilities::System::calculate_collective_mpi_min_max_avg(MPI_COMM_WORLD,
- value,
- result);
+
+ result = Utilities::MPI::min_max_avg(value, MPI_COMM_WORLD);
if (myid==0)
print_it(result);
Assert(result.sum == 1.0, ExcInternalError());