class MPI_InitFinalize
{
public:
+ /**
+ * An enumeration data type that is used in one
+ * of the constructors. It determines how many threads
+ * the current process should be using. The options are:
+ *
+ * - @p one_thread_per_process : Each MPI process will be
+ * run as a single-threaded process. In other words, deal.II
+ * will not support running multiple tasks in parallel. This
+ * is often a useful default if you start as many MPI processes
+ * per node as there are processor cores. In this case, running
+ * each process with multiple threads would oversubscribe the
+ * available resources.
+ * - @p one_thread_per_core : This is the default behavior of
+ * sequential programs, where you want to run as many threads
+ * in parallel as there are cores on your machine. It would
+ * also be the appropriate behavior if you started only one
+ * MPI process per node of your cluster, even if these nodes
+ * have multiple cores.
+ * - @p optimal_number_of_threads : This selects the number of
+ * threads to run on this MPI process in such a way that all of
+ * the cores in your node are spoken for. In other words, if you
+ * have started one MPI process per node, this option is equivalent
+ * to @p one_thread_per_core. If you have started as many MPI
+ * processes per node as there are cores on each node, then
+ * this is equivalent to @p one_thread_per_process. On the
+ * other hand, if, for example, you start 4 MPI processes
+ * on each 16-core node, then this option will start 4 worker
+ * threads for each process. If you start 3 processes on an 8-core
+ * node, then they will start 3, 3 and 2 threads, respectively.
+ */
+ enum ThreadsPerMPIProcess
+ {
+ one_thread_per_process,
+ one_thread_per_core,
+ optimal_number_of_threads
+ };
+
/**
* Constructor. Takes the arguments from the command line (in case of
* MPI, the number of processes is specified there), and sets up a
char ** &argv) /*DEAL_II_DEPRECATED*/;
/**
- * Initialize MPI (and optionally PETSc) and set the number of threads
- * used by deal.II (and TBB) to the given parameter. If set to
+ * Initialize MPI (and, if deal.II was configured to use it, PETSc)
+ * and set the number of threads used by deal.II (via the underlying
+ * Threading Building Blocks library) to the given parameter. If set to
* numbers::invalid_unsigned_int, the number of threads is determined by
* TBB. When in doubt, set this value to 1 since MPI jobs are typically
* run in a way where one has one MPI process per available processor
* core. This function calls MultithreadInfo::set_thread_limit(); that
* function in turn also evaluates the environment variable
* DEAL_II_NUM_THREADS, and the number
* of threads to be used will be the minimum of the argument passed here
* and the environment (if both are set).
+ *
+ * @param[in,out] argc A reference to the 'argc' argument passed to main. This
+ * argument is used to initialize MPI (and, possibly, PETSc) as they
+ * read arguments from the command line.
+ * @param[in,out] argv A reference to the 'argv' argument passed to main.
+ * @param[in] max_num_threads The maximal number of threads this MPI process
+ * should utilize.
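+ *
+ * A typical use of this constructor looks as follows (a sketch; it
+ * assumes, as elsewhere in deal.II, that this class lives in
+ * namespace Utilities::MPI):
+ * @code
+ *   int main (int argc, char **argv)
+ *   {
+ *     // limit each MPI process to a single thread:
+ *     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+ *     ...
+ *   }
+ * @endcode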
*/
MPI_InitFinalize (int &argc,
char ** &argv,
const unsigned int max_num_threads);
+
+ /**
+ * Initialize MPI (and, if deal.II was configured to use it, PETSc)
+ * and set the number of threads used by deal.II (via the underlying
+ * Threading Building Blocks library) according to the policy described by
+ * the last argument.
+ *
+ * This function calls MultithreadInfo::set_thread_limit()
+ * unconditionally, with the number of threads implied by
+ * @p threads_per_process . That function in turn also
+ * evaluates the environment variable DEAL_II_NUM_THREADS and the number
+ * of threads to be used will be the minimum of the number determined
+ * by the policy selected via the last argument and the environment
+ * (if both are set).
+ *
+ * @param[in,out] argc A reference to the 'argc' argument passed to main. This
+ * argument is used to initialize MPI (and, possibly, PETSc) as they
+ * read arguments from the command line.
+ * @param[in,out] argv A reference to the 'argv' argument passed to main.
+ * @param[in] threads_per_process A policy that describes how the number
+ * of threads this MPI process should use is determined. See the
+ * documentation of the ThreadsPerMPIProcess enum for a discussion of the
+ * possible options.
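+ *
+ * Usage mirrors the constructor above, except that one passes a
+ * policy rather than an explicit thread count (again a sketch,
+ * assuming the class lives in namespace Utilities::MPI):
+ * @code
+ *   int main (int argc, char **argv)
+ *   {
+ *     // let deal.II distribute the cores of this node among the
+ *     // MPI processes running on it:
+ *     Utilities::MPI::MPI_InitFinalize
+ *       mpi_initialization (argc, argv,
+ *                           Utilities::MPI::MPI_InitFinalize::optimal_number_of_threads);
+ *     ...
+ *   }
+ * @endcode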
+ */
+ MPI_InitFinalize (int &argc,
+ char ** &argv,
+ const ThreadsPerMPIProcess threads_per_process);
+
/**
* Destructor. Calls <tt>MPI_Finalize()</tt> in case this class owns the
* MPI process.
/**
- * Called by the constructors.
+ * A common function called by all of the constructors.
*/
void do_init(int &argc,
- char ** &argv,
- const unsigned int max_num_threads);
+ char ** &argv);
};
namespace internal
:
owns_mpi (true)
{
- do_init(argc, argv, max_num_threads);
+ do_init(argc, argv);
+
+ // set maximum number of threads (also respecting the environment
+ // variable that the called function evaluates)
+ multithread_info.set_thread_limit(max_num_threads);
+ }
+
+
+
+ MPI_InitFinalize::MPI_InitFinalize (int &argc,
+ char ** &argv,
+ const ThreadsPerMPIProcess threads_per_process)
+ :
+ owns_mpi (true)
+ {
+ do_init(argc, argv);
+
+ // set maximum number of threads (also respecting the environment
+ // variable that the called function evaluates)
+ switch (threads_per_process)
+ {
+ case one_thread_per_process:
+ {
+ multithread_info.set_thread_limit(1);
+ break;
+ }
+ case one_thread_per_core:
+ {
+ // choose the maximal number of threads possible
+ multithread_info.set_thread_limit(numbers::invalid_unsigned_int);
+ break;
+ }
+
+ case optimal_number_of_threads:
+ {
+ // we need to figure out how many MPI processes there
+ // are on the current node, as well as how many CPU cores
+ // we have. for the first task, check what get_hostname()
+ // returns and then do an allgather so each process
+ // gets the answer
+ //
+ // in calculating the length of the string, don't forget the
+ // terminating \0 on C-style strings
+ const std::string hostname = Utilities::System::get_hostname();
+ const unsigned int max_hostname_size = Utilities::MPI::max (hostname.size()+1,
+ MPI_COMM_WORLD);
+ std::vector<char> hostname_array (max_hostname_size);
+ std::copy (hostname.c_str(), hostname.c_str()+hostname.size()+1,
+ hostname_array.begin());
+
+ std::vector<char> all_hostnames(max_hostname_size *
+ MPI::n_mpi_processes(MPI_COMM_WORLD));
+ MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
+ &all_hostnames[0], max_hostname_size, MPI_CHAR,
+ MPI_COMM_WORLD);
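+
+ // all_hostnames now contains the null-terminated host name of
+ // every rank, stored in consecutive blocks of max_hostname_size
+ // characters and ordered by rank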
+
+ // count how often our own hostname appears, and which
+ // instance of it the current process represents
+ unsigned int n_local_processes=0;
+ unsigned int nth_process_on_host = 0;
+ for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
+ if (std::string (&all_hostnames[0] + i*max_hostname_size) == hostname)
+ {
+ ++n_local_processes;
+ if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
+ ++nth_process_on_host;
+ }
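+ // (nth_process_on_host is a one-based index: the current process
+ // counts itself in the loop above)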
+ Assert (nth_process_on_host > 0, ExcInternalError());
+
+
+ // compute how many cores each process gets. if the number does
+ // not divide evenly, then we get one more core if we are
+ // among the first few processes
+ //
+ // if the number would be zero, round up to one since every
+ // process needs to have at least one thread
+ const unsigned int n_threads
+ = std::max(multithread_info.n_cpus / n_local_processes
+ +
+ (nth_process_on_host <= multithread_info.n_cpus % n_local_processes
+ ?
+ 1
+ :
+ 0),
+ 1U);
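+
+ // as a concrete example of this formula (matching the documentation
+ // of the ThreadsPerMPIProcess enum): with 8 cores and 3 processes on
+ // this host, 8%3==2, so processes 1 and 2 get 8/3+1==3 threads and
+ // process 3 gets 8/3==2 threads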
+
+ // finally set this number of threads
+ multithread_info.set_thread_limit(n_threads);
+ break;
+ }
+
+ default:
+ Assert (false, ExcNotImplemented());
+ }
}
:
owns_mpi (true)
{
- do_init(argc, argv, 1);
+ do_init(argc, argv);
+
+ // set maximum number of threads (also respecting the environment
+ // variable that the called function evaluates)
+ multithread_info.set_thread_limit(1);
}
void
MPI_InitFinalize::do_init(int &argc,
- char ** &argv,
- const unsigned int max_num_threads)
+ char ** &argv)
{
static bool constructor_has_already_run = false;
Assert (constructor_has_already_run == false,
#endif
constructor_has_already_run = true;
-
- // set maximum number of threads (also respecting the environment
- // variable that the called function evaluates)
- multithread_info.set_thread_limit(max_num_threads);
}