namespace MPI
{
#ifdef DEAL_II_WITH_MPI
- // Unfortunately, we have to work
- // around an oddity in the way PETSc
- // and some gcc versions interact. If
- // we use PETSc's MPI dummy
- // implementation, it expands the
- // calls to the two MPI functions
- // basically as ``(n_jobs=1, 0)'',
- // i.e. it assigns the number one to
- // the variable holding the number of
- // jobs, and then uses the comma
- // operator to let the entire
- // expression have the value zero. The
- // latter is important, since
- // ``MPI_Comm_size'' returns an error
- // code that we may want to check (we
- // don't here, but one could in
- // principle), and the trick with the
- // comma operator makes sure that both
- // the number of jobs is correctly
- // assigned, and the return value is
- // zero. Unfortunately, if some recent
- // versions of gcc detect that the
- // comma expression just stands by
- // itself, i.e. the result is not
- // assigned to another variable, then
- // they warn ``right-hand operand of
- // comma has no effect''. This
- // unwanted side effect can be
- // suppressed by casting the result of
- // the entire expression to type
- // ``void'' -- not beautiful, but
- // helps calming down unwarranted
- // compiler warnings...
+ // Unfortunately, we have to work around an oddity in the way PETSc and
+ // some gcc versions interact. If we use PETSc's MPI dummy implementation,
+ // it expands the calls to the two MPI functions basically as ``(n_jobs=1,
+ // 0)'', i.e. it assigns the number one to the variable holding the number
+ // of jobs, and then uses the comma operator to let the entire expression
+ // have the value zero. The latter is important, since ``MPI_Comm_size''
+ // returns an error code that we may want to check (we don't here, but one
+ // could in principle), and the trick with the comma operator makes sure
+ // both that the number of jobs is correctly assigned and that the return
+ // value is zero. Unfortunately, if some recent versions of gcc detect
+ // that the comma expression just stands by itself, i.e. the result is not
+ // assigned to another variable, then they warn ``right-hand operand of
+ // comma has no effect''. This unwanted side effect can be suppressed by
+ // casting the result of the entire expression to type ``void'' -- not
+ // beautiful, but it helps calm down unwarranted compiler warnings...
unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
{
int n_jobs=1;
}
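// a sketch of the cast-to-void idiom described above: the cast discards the
// value of the comma expression that PETSc's dummy MPI produces and thereby
// silences gcc's warning (illustrative, slightly abbreviated):
//
//   int n_jobs = 1;
//   (void) MPI_Comm_size (mpi_communicator, &n_jobs);
//   return n_jobs;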
- // let all processors
- // communicate the maximal
- // number of destinations they
- // have
+ // let all processors communicate the maximal number of destinations
+ // they have
const unsigned int max_n_destinations
= Utilities::MPI::max (destinations.size(), mpi_comm);
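// in raw MPI terms, this reduction amounts to something like the following
// sketch (Utilities::MPI::max hides these details):
//
//   unsigned int my_n_destinations = destinations.size();
//   unsigned int max_n_destinations = 0;
//   MPI_Allreduce (&my_n_destinations, &max_n_destinations, 1,
//                  MPI_UNSIGNED, MPI_MAX, mpi_comm);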
// all processes have nothing to send/receive:
return std::vector<unsigned int>();
- // now that we know the number
- // of data packets every
- // processor wants to send, set
- // up a buffer with the maximal
- // size and copy our
- // destinations in there,
- // padded with -1's
+ // now that we know the number of data packets every processor wants to
+ // send, set up a buffer with the maximal size and copy our destinations
+ // in there, padded with -1's
std::vector<unsigned int> my_destinations(max_n_destinations,
numbers::invalid_unsigned_int);
std::copy (destinations.begin(), destinations.end(),
my_destinations.begin());
- // now exchange these (we could
- // communicate less data if we
- // used MPI_Allgatherv, but
- // we'd have to communicate
- // my_n_destinations to all
- // processors in this case,
- // which is more expensive than
- // the reduction operation
- // above in MPI_Allreduce)
+ // now exchange these (we could communicate less data if we used
+ // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
+ // processors in this case, which is more expensive than the reduction
+ // operation above in MPI_Allreduce)
std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
&all_destinations[0], max_n_destinations, MPI_UNSIGNED,
mpi_comm);
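// as an aside, the MPI_Allgatherv variant mentioned above would first have
// to make the individual counts known and compute offsets, roughly:
//
//   const int my_n_destinations = destinations.size();
//   std::vector<int> counts (n_procs), offsets (n_procs, 0);
//   MPI_Allgather (&my_n_destinations, 1, MPI_INT,
//                  &counts[0], 1, MPI_INT, mpi_comm);
//   for (unsigned int i=1; i<n_procs; ++i)
//     offsets[i] = offsets[i-1] + counts[i-1];
//
// followed by MPI_Allgatherv with these counts/offsets instead of the
// fixed-size MPI_Allgather used here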
- // now we know who is going to
- // communicate with
- // whom. collect who is going
- // to communicate with us!
+ // now we know who is going to communicate with whom. collect who is
+ // going to communicate with us!
std::vector<unsigned int> origins;
for (unsigned int i=0; i<n_procs; ++i)
for (unsigned int j=0; j<max_n_destinations; ++j)
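// what the loop body looks for, as a sketch (``myid'' standing for our own
// rank in mpi_comm):
//
//   if (all_destinations[i*max_n_destinations + j] == myid)
//     origins.push_back (i);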
AssertThrow (mpi_err == 0,
ExcMessage ("MPI could not be initialized."));
- // disable for now because at least some implementations always return MPI_THREAD_SINGLE.
+ // disable for now because at least some implementations always return
+ // MPI_THREAD_SINGLE.
//Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
// ExcMessage("MPI reports that we are not allowed to use multiple threads."));
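// the ``provided'' value referred to above is what the MPI library reports
// back at initialization, along the lines of the following sketch (the
// requested thread level is only illustrative):
//
//   int provided;
//   mpi_err = MPI_Init_thread (&argc, &argv, MPI_THREAD_SERIALIZED,
//                              &provided);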
#else
- // make sure the compiler doesn't warn
- // about these variables
+ // make sure the compiler doesn't warn about these variables
(void)argc;
(void)argv;
#endif
if (max_num_threads != numbers::invalid_unsigned_int)
{
// set maximum number of threads (also respecting the environment
- // variable that the called function evaluates) based on what
- // the user asked
+ // variable that the called function evaluates) based on what the
+ // user asked
MultithreadInfo::set_thread_limit(max_num_threads);
}
else
// user wants automatic choice
{
#ifdef DEAL_II_WITH_MPI
- // we need to figure out how many MPI processes there
- // are on the current node, as well as how many CPU cores
- // we have. for the first task, check what get_hostname()
- // returns and then to an allgather so each processor
- // gets the answer
+ // we need to figure out how many MPI processes there are on the
+ // current node, as well as how many CPU cores we have. for the
+ // first task, check what get_hostname() returns and then do an
+ // allgather so each processor gets the answer
//
// in calculating the length of the string, don't forget the
// terminating \0 on C-style strings
&all_hostnames[0], max_hostname_size, MPI_CHAR,
MPI_COMM_WORLD);
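// a sketch of how the send buffer for the allgather above might be set up
// (names follow the surrounding comments; details abbreviated):
//
//   const std::string hostname = Utilities::System::get_hostname();
//   const unsigned int max_hostname_size
//     = Utilities::MPI::max (hostname.size()+1, MPI_COMM_WORLD);
//   std::vector<char> hostname_array (hostname.begin(), hostname.end());
//   hostname_array.resize (max_hostname_size);   // pads with '\0'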
- // search how often our own hostname appears and the
- // how-manyth instance the current process represents
+ // search how often our own hostname appears and which instance of it
+ // the current process represents
unsigned int n_local_processes=0;
unsigned int nth_process_on_host = 0;
for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
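// a sketch of what the loop counts, with all_hostnames holding one chunk of
// max_hostname_size characters per process:
//
//   if (hostname == std::string (&all_hostnames[i*max_hostname_size]))
//     {
//       ++n_local_processes;
//       if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
//         ++nth_process_on_host;
//     }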
Assert (nth_process_on_host > 0, ExcInternalError());
- // compute how many cores each process gets. if the number does
- // not divide evenly, then we get one more core if we are
- // among the first few processes
+ // compute how many cores each process gets. if the number does not
+ // divide evenly, then we get one more core if we are among the
+ // first few processes
//
- // if the number would be zero, round up to one since every
- // process needs to have at least one thread
+ // if the number would be zero, round up to one since every process
+ // needs to have at least one thread
const unsigned int n_threads
= std::max(MultithreadInfo::n_cores() / n_local_processes
+
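// written out, the rule described above amounts to something like
//
//   n_cores / n_local_processes
//     + (nth_process_on_host <= n_cores % n_local_processes ? 1 : 0)
//
// where n_cores stands for MultithreadInfo::n_cores(), and the result is
// then bounded from below by 1 via the std::max above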
MPI_InitFinalize::~MPI_InitFinalize()
{
- // make memory pool release all PETSc/Trilinos/MPI-based vectors that are no
- // longer used at this point. this is relevant because the
- // static object destructors run for these vectors at the end of
- // the program would run after MPI_Finalize is called, leading
- // to errors
+ // make the memory pool release all PETSc/Trilinos/MPI-based vectors that
+ // are no longer used at this point. this is relevant because the static
+ // object destructors of these vectors would otherwise only run at the end
+ // of the program, i.e. after MPI_Finalize has been called, leading to
+ // errors
#ifdef DEAL_II_WITH_MPI
// Start with the deal.II MPI vectors (need to do this before finalizing
#endif
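// the releases referred to above and below follow this pattern (a sketch;
// the vector types named here are examples only):
//
//   GrowingVectorMemory<parallel::distributed::Vector<double> >
//     ::release_unused_memory ();
//   GrowingVectorMemory<PETScWrappers::MPI::Vector>::release_unused_memory ();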
- // Now deal with PETSc (with or without MPI). Only delete the vectors if finalize hasn't
- // been called yet, otherwise this will lead to errors.
+ // Now deal with PETSc (with or without MPI). Only delete the vectors if
+ // PetscFinalize hasn't been called yet, otherwise this will lead to errors.
#ifdef DEAL_II_WITH_PETSC
if ((PetscInitializeCalled == PETSC_TRUE)
&&
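// the second half of this condition checks, per the comment above, that
// PETSc's finalize has not run yet, e.g.
//
//   (PetscFinalizeCalled == PETSC_FALSE)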
// only MPI_Finalize if we are running with MPI. We also need to do this
- // when running PETSc, because we initialize MPI ourselves before calling
- // PetscInitialize
+ // when running PETSc, because we initialize MPI ourselves before
+ // calling PetscInitialize
#ifdef DEAL_II_WITH_MPI
if (job_supports_mpi() == true)
{
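// inside this block the actual shutdown happens, roughly:
//
//   const int mpi_err = MPI_Finalize();
//   AssertThrow (mpi_err == 0,
//                ExcMessage ("MPI_Finalize() returned an error code."));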