From ba004e657637a41c6074f7469e4b229d49652ef7 Mon Sep 17 00:00:00 2001
From: David Wells
Date: Sat, 28 May 2016 22:20:20 -0400
Subject: [PATCH] Re-wrap some comments.

---
 source/base/mpi.cc | 133 +++++++++++++++++----------------------------
 1 file changed, 50 insertions(+), 83 deletions(-)

diff --git a/source/base/mpi.cc b/source/base/mpi.cc
index a3d5262122..858a621f4f 100644
--- a/source/base/mpi.cc
+++ b/source/base/mpi.cc
@@ -58,39 +58,21 @@ namespace Utilities
   namespace MPI
   {
 #ifdef DEAL_II_WITH_MPI
-    // Unfortunately, we have to work
-    // around an oddity in the way PETSc
-    // and some gcc versions interact. If
-    // we use PETSc's MPI dummy
-    // implementation, it expands the
-    // calls to the two MPI functions
-    // basically as ``(n_jobs=1, 0)'',
-    // i.e. it assigns the number one to
-    // the variable holding the number of
-    // jobs, and then uses the comma
-    // operator to let the entire
-    // expression have the value zero. The
-    // latter is important, since
-    // ``MPI_Comm_size'' returns an error
-    // code that we may want to check (we
-    // don't here, but one could in
-    // principle), and the trick with the
-    // comma operator makes sure that both
-    // the number of jobs is correctly
-    // assigned, and the return value is
-    // zero. Unfortunately, if some recent
-    // versions of gcc detect that the
-    // comma expression just stands by
-    // itself, i.e. the result is not
-    // assigned to another variable, then
-    // they warn ``right-hand operand of
-    // comma has no effect''. This
-    // unwanted side effect can be
-    // suppressed by casting the result of
-    // the entire expression to type
-    // ``void'' -- not beautiful, but
-    // helps calming down unwarranted
-    // compiler warnings...
+    // Unfortunately, we have to work around an oddity in the way PETSc and
+    // some gcc versions interact. If we use PETSc's MPI dummy implementation,
+    // it expands the calls to the two MPI functions basically as ``(n_jobs=1,
+    // 0)'', i.e. it assigns the number one to the variable holding the number
+    // of jobs, and then uses the comma operator to let the entire expression
+    // have the value zero. The latter is important, since ``MPI_Comm_size''
+    // returns an error code that we may want to check (we don't here, but one
+    // could in principle), and the trick with the comma operator makes sure
+    // that both the number of jobs is correctly assigned, and the return
+    // value is zero. Unfortunately, if some recent versions of gcc detect
+    // that the comma expression just stands by itself, i.e. the result is not
+    // assigned to another variable, then they warn ``right-hand operand of
+    // comma has no effect''. This unwanted side effect can be suppressed by
+    // casting the result of the entire expression to type ``void'' -- not
+    // beautiful, but helps calming down unwarranted compiler warnings...
     unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
     {
       int n_jobs=1;
@@ -133,10 +115,8 @@ namespace Utilities
 
        }
 
-      // let all processors
-      // communicate the maximal
-      // number of destinations they
-      // have
+      // let all processors communicate the maximal number of destinations
+      // they have
       const unsigned int max_n_destinations =
         Utilities::MPI::max (destinations.size(), mpi_comm);
 
@@ -144,36 +124,25 @@ namespace Utilities
 
       // all processes have nothing to send/receive:
       return std::vector<unsigned int>();
 
-      // now that we know the number
-      // of data packets every
-      // processor wants to send, set
-      // up a buffer with the maximal
-      // size and copy our
-      // destinations in there,
-      // padded with -1's
+      // now that we know the number of data packets every processor wants to
+      // send, set up a buffer with the maximal size and copy our destinations
+      // in there, padded with -1's
       std::vector<unsigned int> my_destinations(max_n_destinations,
                                                 numbers::invalid_unsigned_int);
       std::copy (destinations.begin(), destinations.end(),
                  my_destinations.begin());
 
-      // now exchange these (we could
-      // communicate less data if we
-      // used MPI_Allgatherv, but
-      // we'd have to communicate
-      // my_n_destinations to all
-      // processors in this case,
-      // which is more expensive than
-      // the reduction operation
-      // above in MPI_Allreduce)
+      // now exchange these (we could communicate less data if we used
+      // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
+      // processors in this case, which is more expensive than the reduction
+      // operation above in MPI_Allreduce)
       std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
       MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
                      &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
                      mpi_comm);
 
-      // now we know who is going to
-      // communicate with
-      // whom. collect who is going
-      // to communicate with us!
+      // now we know who is going to communicate with whom. collect who is
+      // going to communicate with us!
       std::vector<unsigned int> origins;
       for (unsigned int i=0; i<n_procs; ++i)
...
-          // compute how many cores each process gets. if the number does
-          // not divide evenly, then we get one more core if we are
-          // among the first few processes
+          // compute how many cores each process gets. if the number does not
+          // divide evenly, then we get one more core if we are among the
+          // first few processes
           //
-          // if the number would be zero, round up to one since every
-          // process needs to have at least one thread
+          // if the number would be zero, round up to one since every process
+          // needs to have at least one thread
           const unsigned int n_threads
             = std::max(MultithreadInfo::n_cores() / n_local_processes +
@@ -462,11 +430,10 @@ namespace Utilities
 
     MPI_InitFinalize::~MPI_InitFinalize()
     {
-      // make memory pool release all PETSc/Trilinos/MPI-based vectors that are no
-      // longer used at this point. this is relevant because the
-      // static object destructors run for these vectors at the end of
-      // the program would run after MPI_Finalize is called, leading
-      // to errors
+      // make memory pool release all PETSc/Trilinos/MPI-based vectors that
+      // are no longer used at this point. this is relevant because the static
+      // object destructors run for these vectors at the end of the program
+      // would run after MPI_Finalize is called, leading to errors
 #ifdef DEAL_II_WITH_MPI
       // Start with the deal.II MPI vectors (need to do this before finalizing
       // PETSc because it finalizes MPI).  Delete vectors from the pools:
...
 #endif
 
-      // Now deal with PETSc (with or without MPI). Only delete the vectors if finalize hasn't
-      // been called yet, otherwise this will lead to errors.
+      // Now deal with PETSc (with or without MPI). Only delete the vectors if
+      // finalize hasn't been called yet, otherwise this will lead to errors.
 #ifdef DEAL_II_WITH_PETSC
       if ((PetscInitializeCalled == PETSC_TRUE)
           &&
@@ -518,8 +485,8 @@ namespace Utilities
 
 
       // only MPI_Finalize if we are running with MPI. We also need to do this
-      // when running PETSc, because we initialize MPI ourselves before calling
-      // PetscInitialize
+      // when running PETSc, because we initialize MPI ourselves before
+      // calling PetscInitialize
 #ifdef DEAL_II_WITH_MPI
       if (job_supports_mpi() == true)
         {
-- 
2.39.5
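
The first re-wrapped comment is about a compiler-warning workaround rather than about MPI itself. The situation is easy to reproduce outside of deal.II; the sketch below is illustrative only (the dummy macro stands in for PETSc's MPI stub and is not PETSc's actual definition), but it shows why casting the comma expression to ``void'' calms the warning.

// A serial "MPI" stub in the spirit of PETSc's dummy implementation: the
// call expands to a comma expression that assigns 1 to the job count and
// then evaluates to 0 (the error code). Both names below are made up for
// this example.
#define MPI_DUMMY_COMM 0
#define MPI_Comm_size(comm, size) ((*(size) = 1), 0)

unsigned int n_mpi_processes_demo ()
{
  int n_jobs = 1;

  // Without the cast, some gcc versions warn that the right-hand operand
  // of the comma has no effect, because the error code produced by the
  // expanded expression is silently discarded. Casting the whole thing to
  // void states that the discard is intentional.
  (void) MPI_Comm_size (MPI_DUMMY_COMM, &n_jobs);

  return static_cast<unsigned int> (n_jobs);
}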
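
The comments in the point-to-point hunks describe a common discovery pattern: every process knows whom it wants to send to and needs to find out who will send to it. The steps are exactly what the comments say: agree on the maximal number of destinations, pad the local list to that length with an invalid marker, exchange the fixed-size buffers with MPI_Allgather, and scan the gathered table for the local rank. The following is a condensed sketch of that scheme in plain MPI, not the deal.II implementation; the function name and the choice of invalid marker are this example's own.

#include <mpi.h>
#include <algorithm>
#include <limits>
#include <vector>

// Given the ranks this process wants to send to, return the ranks that will
// send to this process. This mirrors the scheme described in the re-wrapped
// comments above; it is a sketch, not deal.II's own function.
std::vector<unsigned int>
point_to_point_origins (const MPI_Comm                   comm,
                        const std::vector<unsigned int> &destinations)
{
  int n_procs = 0, my_rank = 0;
  MPI_Comm_size (comm, &n_procs);
  MPI_Comm_rank (comm, &my_rank);

  // 1. Agree on the maximal number of destinations any process has.
  unsigned int my_n = destinations.size (), max_n = 0;
  MPI_Allreduce (&my_n, &max_n, 1, MPI_UNSIGNED, MPI_MAX, comm);
  if (max_n == 0)
    return std::vector<unsigned int> ();

  // 2. Pad the local list to the common length with an "invalid" marker.
  const unsigned int invalid = std::numeric_limits<unsigned int>::max ();
  std::vector<unsigned int> mine (max_n, invalid);
  std::copy (destinations.begin (), destinations.end (), mine.begin ());

  // 3. Exchange the fixed-size buffers.
  std::vector<unsigned int> all (max_n * n_procs);
  MPI_Allgather (mine.data (), max_n, MPI_UNSIGNED,
                 all.data (),  max_n, MPI_UNSIGNED, comm);

  // 4. Whoever listed my rank as a destination will send to me.
  std::vector<unsigned int> origins;
  for (int i = 0; i < n_procs; ++i)
    for (unsigned int j = 0; j < max_n; ++j)
      if (all[i * max_n + j] == static_cast<unsigned int> (my_rank))
        origins.push_back (i);
      else if (all[i * max_n + j] == invalid)
        break;

  return origins;
}

As the comment points out, MPI_Allgatherv would shrink the messages, but it would first require every process to learn the individual counts, which costs another collective on top of the reduction already done in step 1.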
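
The "compute how many cores each process gets" comment reduces to a small piece of integer arithmetic: split the cores on a node evenly among the MPI processes on that node, hand the remainder to the first few of them, and never drop below one thread. A standalone sketch of that rule follows; the function name and parameter names are invented for the example.

#include <algorithm>

// Number of threads a process should use when n_local_processes MPI ranks
// share a node with n_cores cores; nth_on_host is this rank's 0-based
// position among the ranks on the node. A sketch of the rule described in
// the comment, not the deal.II code itself.
unsigned int threads_per_process (const unsigned int n_cores,
                                  const unsigned int n_local_processes,
                                  const unsigned int nth_on_host)
{
  // evenly divided share, plus one extra core for the first
  // (n_cores % n_local_processes) processes on the node ...
  const unsigned int share =
    n_cores / n_local_processes +
    (nth_on_host < n_cores % n_local_processes ? 1 : 0);

  // ... but never less than one thread per process
  return std::max (share, 1u);
}

For example, with 10 cores and 4 local processes the first two processes get 3 threads and the other two get 2; with 1 core and 4 processes every process still gets 1.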