From: Marc Fehling Date: Fri, 6 Dec 2019 15:06:30 +0000 (+0100) Subject: parallel::distributed::GridRefinement: Made internal functions accessible. X-Git-Tag: v9.2.0-rc1~773^2~2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ff071c1b32f9ee3f955b59200371fff141c12205;p=dealii.git parallel::distributed::GridRefinement: Made internal functions accessible. --- diff --git a/include/deal.II/distributed/grid_refinement.h b/include/deal.II/distributed/grid_refinement.h index b4d39e8696..2d6b6fd25d 100644 --- a/include/deal.II/distributed/grid_refinement.h +++ b/include/deal.II/distributed/grid_refinement.h @@ -28,6 +28,61 @@ DEAL_II_NAMESPACE_OPEN +namespace internal +{ + namespace parallel + { + namespace distributed + { + namespace GridRefinement + { + /** + * Compute the global max and min of the criteria vector. These are + * returned only on the processor with rank zero, all others get a pair + * of zeros. + */ + template + std::pair + compute_global_min_and_max_at_root( + const dealii::Vector &criteria, + MPI_Comm mpi_communicator); + + namespace RefineAndCoarsenFixedNumber + { + /** + * Compute a threshold value so that exactly n_target_cells have a + * value that is larger. + */ + template + number + compute_threshold(const dealii::Vector & criteria, + const std::pair &global_min_and_max, + const unsigned int n_target_cells, + MPI_Comm mpi_communicator); + } // namespace RefineAndCoarsenFixedNumber + + namespace RefineAndCoarsenFixedFraction + { + /** + * Compute a threshold value so that the error accumulated over all + * criteria[i] so that + * criteria[i] > threshold + * is larger than target_error. + */ + template + number + compute_threshold(const dealii::Vector & criteria, + const std::pair &global_min_and_max, + const double target_error, + MPI_Comm mpi_communicator); + } // namespace RefineAndCoarsenFixedFraction + } // namespace GridRefinement + } // namespace distributed + } // namespace parallel +} // namespace internal + + + namespace parallel { namespace distributed diff --git a/include/deal.II/multigrid/mg_transfer.h b/include/deal.II/multigrid/mg_transfer.h index d5537f61df..518d244145 100644 --- a/include/deal.II/multigrid/mg_transfer.h +++ b/include/deal.II/multigrid/mg_transfer.h @@ -20,6 +20,8 @@ #include +#include + #include #include @@ -82,12 +84,11 @@ namespace internal const SparsityPatternType &sp, DoFHandlerType & dh) { - const parallel::TriangulationBase - *dist_tria = dynamic_cast< - const parallel::TriangulationBase *>( - &(dh.get_triangulation())); + const dealii::parallel::TriangulationBase + *dist_tria = dynamic_cast *>(&(dh.get_triangulation())); MPI_Comm communicator = dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF; @@ -115,12 +116,11 @@ namespace internal const SparsityPatternType &sp, DoFHandlerType & dh) { - const parallel::TriangulationBase - *dist_tria = dynamic_cast< - const parallel::TriangulationBase *>( - &(dh.get_triangulation())); + const dealii::parallel::TriangulationBase + *dist_tria = dynamic_cast *>(&(dh.get_triangulation())); MPI_Comm communicator = dist_tria != nullptr ? 
dist_tria->get_communicator() : MPI_COMM_SELF; matrix.reinit(dh.locally_owned_mg_dofs(level + 1), @@ -149,12 +149,11 @@ namespace internal const SparsityPatternType &sp, DoFHandlerType & dh) { - const parallel::TriangulationBase - *dist_tria = dynamic_cast< - const parallel::TriangulationBase *>( - &(dh.get_triangulation())); + const dealii::parallel::TriangulationBase + *dist_tria = dynamic_cast *>(&(dh.get_triangulation())); MPI_Comm communicator = dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF; matrix.reinit(dh.locally_owned_mg_dofs(level + 1), @@ -182,12 +181,11 @@ namespace internal const SparsityPatternType &sp, DoFHandlerType & dh) { - const parallel::TriangulationBase - *dist_tria = dynamic_cast< - const parallel::TriangulationBase *>( - &(dh.get_triangulation())); + const dealii::parallel::TriangulationBase + *dist_tria = dynamic_cast *>(&(dh.get_triangulation())); MPI_Comm communicator = dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF; matrix.reinit(dh.locally_owned_mg_dofs(level + 1), @@ -244,12 +242,11 @@ namespace internal const SparsityPatternType &sp, const DoFHandlerType & dh) { - const parallel::TriangulationBase - *dist_tria = dynamic_cast< - const parallel::TriangulationBase *>( - &(dh.get_triangulation())); + const dealii::parallel::TriangulationBase + *dist_tria = dynamic_cast *>(&(dh.get_triangulation())); MPI_Comm communicator = dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF; // Reinit PETSc matrix diff --git a/source/distributed/grid_refinement.cc b/source/distributed/grid_refinement.cc index 47e9d439d2..661780ecaf 100644 --- a/source/distributed/grid_refinement.cc +++ b/source/distributed/grid_refinement.cc @@ -62,37 +62,6 @@ namespace } - /** - * Compute the global max and min of the criteria vector. These are returned - * only on the processor with rank zero, all others get a pair of zeros. - */ - template - std::pair - compute_global_min_and_max_at_root(const dealii::Vector &criteria, - MPI_Comm mpi_communicator) - { - // we'd like to compute the global max and min from the local ones in one - // MPI communication. we can do that by taking the elementwise minimum of - // the local min and the negative maximum over all processors - - const double local_min = min_element(criteria), - local_max = max_element(criteria); - double comp[2] = {local_min, -local_max}; - double result[2] = {0, 0}; - - // compute the minimum on processor zero - const int ierr = - MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator); - AssertThrowMPI(ierr); - - // make sure only processor zero got something - if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0) - Assert((result[0] == 0) && (result[1] == 0), ExcInternalError()); - - return std::make_pair(result[0], -result[1]); - } - - /** * Compute the global sum over the elements of the vectors passed to this @@ -211,210 +180,245 @@ namespace cell->clear_coarsen_flag(); } } +} // namespace - namespace RefineAndCoarsenFixedNumber +namespace internal +{ + namespace parallel { - /** - * Compute a threshold value so that exactly n_target_cells have a value - * that is larger. 
- */ - template - number - compute_threshold(const dealii::Vector & criteria, - const std::pair &global_min_and_max, - const unsigned int n_target_cells, - MPI_Comm mpi_communicator) + namespace distributed { - double interesting_range[2] = {global_min_and_max.first, - global_min_and_max.second}; - adjust_interesting_range(interesting_range); - - const unsigned int master_mpi_rank = 0; - unsigned int iteration = 0; - - do + namespace GridRefinement + { + template + std::pair + compute_global_min_and_max_at_root( + const dealii::Vector &criteria, + MPI_Comm mpi_communicator) { - int ierr = MPI_Bcast(interesting_range, - 2, - MPI_DOUBLE, - master_mpi_rank, - mpi_communicator); + // we'd like to compute the global max and min from the local ones in + // one MPI communication. we can do that by taking the elementwise + // minimum of the local min and the negative maximum over all + // processors + + const double local_min = min_element(criteria), + local_max = max_element(criteria); + double comp[2] = {local_min, -local_max}; + double result[2] = {0, 0}; + + // compute the minimum on processor zero + const int ierr = MPI_Reduce( + comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator); AssertThrowMPI(ierr); - if (interesting_range[0] == interesting_range[1]) - return interesting_range[0]; - - const double test_threshold = - (interesting_range[0] > 0 ? - std::sqrt(interesting_range[0] * interesting_range[1]) : - (interesting_range[0] + interesting_range[1]) / 2); - - // count how many of our own elements would be above this threshold - // and then add to it the number for all the others - unsigned int my_count = - std::count_if(criteria.begin(), - criteria.end(), - [test_threshold](const double c) { - return c > test_threshold; - }); - - unsigned int total_count = 0; - - ierr = MPI_Reduce(&my_count, - &total_count, - 1, - MPI_UNSIGNED, - MPI_SUM, - master_mpi_rank, - mpi_communicator); - AssertThrowMPI(ierr); + // make sure only processor zero got something + if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0) + Assert((result[0] == 0) && (result[1] == 0), ExcInternalError()); - // now adjust the range. if we have too many cells, we take the upper - // half of the previous range, otherwise the lower half. if we have - // hit the right number, then set the range to the exact value. - // slave nodes also update their own interesting_range, however their - // results are not significant since the values will be overwritten by - // MPI_Bcast from the master node in next loop. - if (total_count > n_target_cells) - interesting_range[0] = test_threshold; - else if (total_count < n_target_cells) - interesting_range[1] = test_threshold; - else - interesting_range[0] = interesting_range[1] = test_threshold; - - // terminate the iteration after 25 go-arounds. this is necessary - // because oftentimes error indicators on cells have exactly the - // same value, and so there may not be a particular value that cuts - // the indicators in such a way that we can achieve the desired - // number of cells. 
using a maximal number of iterations means that - // we terminate the iteration after a fixed number N of steps if the - // indicators were perfectly badly distributed, and we make at most - // a mistake of 1/2^N in the number of cells flagged if indicators - // are perfectly equidistributed - ++iteration; - if (iteration == 25) - interesting_range[0] = interesting_range[1] = test_threshold; + return std::make_pair(result[0], -result[1]); } - while (true); - - Assert(false, ExcInternalError()); - return -1; - } - } // namespace RefineAndCoarsenFixedNumber - - namespace RefineAndCoarsenFixedFraction - { - /** - * Compute a threshold value so that the error - * accumulated over all criteria[i] so that - * criteria[i] > threshold - * is larger than target_error. - */ - template - number - compute_threshold(const dealii::Vector & criteria, - const std::pair &global_min_and_max, - const double target_error, - MPI_Comm mpi_communicator) - { - double interesting_range[2] = {global_min_and_max.first, - global_min_and_max.second}; - adjust_interesting_range(interesting_range); - - const unsigned int master_mpi_rank = 0; - unsigned int iteration = 0; - do + namespace RefineAndCoarsenFixedNumber { - int ierr = MPI_Bcast(interesting_range, - 2, - MPI_DOUBLE, - master_mpi_rank, - mpi_communicator); - AssertThrowMPI(ierr); + template + number + compute_threshold(const dealii::Vector & criteria, + const std::pair &global_min_and_max, + const unsigned int n_target_cells, + MPI_Comm mpi_communicator) + { + double interesting_range[2] = {global_min_and_max.first, + global_min_and_max.second}; + adjust_interesting_range(interesting_range); + + const unsigned int master_mpi_rank = 0; + unsigned int iteration = 0; + + do + { + int ierr = MPI_Bcast(interesting_range, + 2, + MPI_DOUBLE, + master_mpi_rank, + mpi_communicator); + AssertThrowMPI(ierr); + + if (interesting_range[0] == interesting_range[1]) + return interesting_range[0]; + + const double test_threshold = + (interesting_range[0] > 0 ? + std::sqrt(interesting_range[0] * interesting_range[1]) : + (interesting_range[0] + interesting_range[1]) / 2); + + // count how many of our own elements would be above this + // threshold and then add to it the number for all the others + unsigned int my_count = + std::count_if(criteria.begin(), + criteria.end(), + [test_threshold](const double c) { + return c > test_threshold; + }); + + unsigned int total_count = 0; + + ierr = MPI_Reduce(&my_count, + &total_count, + 1, + MPI_UNSIGNED, + MPI_SUM, + master_mpi_rank, + mpi_communicator); + AssertThrowMPI(ierr); + + // now adjust the range. if we have too many cells, we take the + // upper half of the previous range, otherwise the lower half. + // if we have hit the right number, then set the range to the + // exact value. slave nodes also update their own + // interesting_range, however their results are not significant + // since the values will be overwritten by MPI_Bcast from the + // master node in next loop. + if (total_count > n_target_cells) + interesting_range[0] = test_threshold; + else if (total_count < n_target_cells) + interesting_range[1] = test_threshold; + else + interesting_range[0] = interesting_range[1] = test_threshold; + + // terminate the iteration after 25 go-arounds. this is + // necessary because oftentimes error indicators on cells have + // exactly the same value, and so there may not be a particular + // value that cuts the indicators in such a way that we can + // achieve the desired number of cells. 
using a maximal number + // of iterations means that we terminate the iteration after a + // fixed number N of steps if the indicators were perfectly + // badly distributed, and we make at most a mistake of 1/2^N in + // the number of cells flagged if indicators are perfectly + // equidistributed + ++iteration; + if (iteration == 25) + interesting_range[0] = interesting_range[1] = test_threshold; + } + while (true); + + Assert(false, ExcInternalError()); + return -1; + } + } // namespace RefineAndCoarsenFixedNumber - if (interesting_range[0] == interesting_range[1]) - { - // so we have found our threshold. since we adjust the range - // at the top of the function to be slightly larger than the - // actual extremes of the refinement criteria values, we can end - // up in a situation where the threshold is in fact larger than - // the maximal refinement indicator. in such cases, we get no - // refinement at all. thus, cap the threshold by the actual - // largest value - double final_threshold = - std::min(interesting_range[0], global_min_and_max.second); - ierr = MPI_Bcast(&final_threshold, - 1, - MPI_DOUBLE, - master_mpi_rank, - mpi_communicator); - AssertThrowMPI(ierr); - - return final_threshold; - } - - const double test_threshold = - (interesting_range[0] > 0 ? - std::sqrt(interesting_range[0] * interesting_range[1]) : - (interesting_range[0] + interesting_range[1]) / 2); - - // accumulate the error of those our own elements above this threshold - // and then add to it the number for all the others - double my_error = 0; - for (unsigned int i = 0; i < criteria.size(); ++i) - if (criteria(i) > test_threshold) - my_error += criteria(i); - - double total_error = 0.; - - ierr = MPI_Reduce(&my_error, - &total_error, - 1, - MPI_DOUBLE, - MPI_SUM, - master_mpi_rank, - mpi_communicator); - AssertThrowMPI(ierr); - // now adjust the range. if we have too many cells, we take the upper - // half of the previous range, otherwise the lower half. if we have - // hit the right number, then set the range to the exact value. - // slave nodes also update their own interesting_range, however their - // results are not significant since the values will be overwritten by - // MPI_Bcast from the master node in next loop. - if (total_error > target_error) - interesting_range[0] = test_threshold; - else if (total_error < target_error) - interesting_range[1] = test_threshold; - else - interesting_range[0] = interesting_range[1] = test_threshold; - - // terminate the iteration after 25 go-arounds. this is - // necessary because oftentimes error indicators on cells - // have exactly the same value, and so there may not be a - // particular value that cuts the indicators in such a way - // that we can achieve the desired number of cells. 
using a - // max of 25 iterations means that we terminate the - // iteration after 25 steps if the indicators were perfectly - // badly distributed, and we make at most a mistake of - // 1/2^25 in the number of cells flagged if indicators are - // perfectly equidistributed - ++iteration; - if (iteration == 25) - interesting_range[0] = interesting_range[1] = test_threshold; - } - while (true); - Assert(false, ExcInternalError()); - return -1; - } - } // namespace RefineAndCoarsenFixedFraction -} // namespace + namespace RefineAndCoarsenFixedFraction + { + template + number + compute_threshold(const dealii::Vector & criteria, + const std::pair &global_min_and_max, + const double target_error, + MPI_Comm mpi_communicator) + { + double interesting_range[2] = {global_min_and_max.first, + global_min_and_max.second}; + adjust_interesting_range(interesting_range); + + const unsigned int master_mpi_rank = 0; + unsigned int iteration = 0; + + do + { + int ierr = MPI_Bcast(interesting_range, + 2, + MPI_DOUBLE, + master_mpi_rank, + mpi_communicator); + AssertThrowMPI(ierr); + + if (interesting_range[0] == interesting_range[1]) + { + // so we have found our threshold. since we adjust the range + // at the top of the function to be slightly larger than the + // actual extremes of the refinement criteria values, we can + // end up in a situation where the threshold is in fact + // larger than the maximal refinement indicator. in such + // cases, we get no refinement at all. thus, cap the + // threshold by the actual largest value + double final_threshold = + std::min(interesting_range[0], global_min_and_max.second); + ierr = MPI_Bcast(&final_threshold, + 1, + MPI_DOUBLE, + master_mpi_rank, + mpi_communicator); + AssertThrowMPI(ierr); + + return final_threshold; + } + + const double test_threshold = + (interesting_range[0] > 0 ? + std::sqrt(interesting_range[0] * interesting_range[1]) : + (interesting_range[0] + interesting_range[1]) / 2); + + // accumulate the error of those our own elements above this + // threshold and then add to it the number for all the others + double my_error = 0; + for (unsigned int i = 0; i < criteria.size(); ++i) + if (criteria(i) > test_threshold) + my_error += criteria(i); + + double total_error = 0.; + + ierr = MPI_Reduce(&my_error, + &total_error, + 1, + MPI_DOUBLE, + MPI_SUM, + master_mpi_rank, + mpi_communicator); + AssertThrowMPI(ierr); + + // now adjust the range. if we have too many cells, we take the + // upper half of the previous range, otherwise the lower half. + // if we have hit the right number, then set the range to the + // exact value. slave nodes also update their own + // interesting_range, however their results are not significant + // since the values will be overwritten by MPI_Bcast from the + // master node in next loop. + if (total_error > target_error) + interesting_range[0] = test_threshold; + else if (total_error < target_error) + interesting_range[1] = test_threshold; + else + interesting_range[0] = interesting_range[1] = test_threshold; + + // terminate the iteration after 25 go-arounds. this is + // necessary because oftentimes error indicators on cells + // have exactly the same value, and so there may not be a + // particular value that cuts the indicators in such a way + // that we can achieve the desired number of cells. 
using a + // max of 25 iterations means that we terminate the + // iteration after 25 steps if the indicators were perfectly + // badly distributed, and we make at most a mistake of + // 1/2^25 in the number of cells flagged if indicators are + // perfectly equidistributed + ++iteration; + if (iteration == 25) + interesting_range[0] = interesting_range[1] = test_threshold; + } + while (true); + + Assert(false, ExcInternalError()); + return -1; + } + } // namespace RefineAndCoarsenFixedFraction + } // namespace GridRefinement + } // namespace distributed + } // namespace parallel +} // namespace internal @@ -463,27 +467,31 @@ namespace parallel // figure out the global max and min of the indicators. we don't need it // here, but it's a collective communication call const std::pair global_min_and_max = - compute_global_min_and_max_at_root(locally_owned_indicators, - mpi_communicator); + dealii::internal::parallel::distributed::GridRefinement:: + compute_global_min_and_max_at_root(locally_owned_indicators, + mpi_communicator); double top_threshold, bottom_threshold; - top_threshold = RefineAndCoarsenFixedNumber::compute_threshold( - locally_owned_indicators, - global_min_and_max, - static_cast(adjusted_fractions.first * - tria.n_global_active_cells()), - mpi_communicator); + top_threshold = dealii::internal::parallel::distributed:: + GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold( + locally_owned_indicators, + global_min_and_max, + static_cast(adjusted_fractions.first * + tria.n_global_active_cells()), + mpi_communicator); // compute bottom threshold only if necessary. otherwise use a threshold // lower than the smallest value we have locally if (adjusted_fractions.second > 0) - bottom_threshold = RefineAndCoarsenFixedNumber::compute_threshold( - locally_owned_indicators, - global_min_and_max, - static_cast(std::ceil( - (1 - adjusted_fractions.second) * tria.n_global_active_cells())), - mpi_communicator); + bottom_threshold = dealii::internal::parallel::distributed:: + GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold( + locally_owned_indicators, + global_min_and_max, + static_cast( + std::ceil((1 - adjusted_fractions.second) * + tria.n_global_active_cells())), + mpi_communicator); else { bottom_threshold = @@ -527,25 +535,28 @@ namespace parallel // figure out the global max and min of the indicators. we don't need it // here, but it's a collective communication call const std::pair global_min_and_max = - compute_global_min_and_max_at_root(locally_owned_indicators, - mpi_communicator); + dealii::internal::parallel::distributed::GridRefinement:: + compute_global_min_and_max_at_root(locally_owned_indicators, + mpi_communicator); const double total_error = compute_global_sum(locally_owned_indicators, mpi_communicator); double top_threshold, bottom_threshold; - top_threshold = RefineAndCoarsenFixedFraction::compute_threshold( - locally_owned_indicators, - global_min_and_max, - top_fraction_of_error * total_error, - mpi_communicator); - // compute bottom threshold only if necessary. 
otherwise use a threshold - // lower than the smallest value we have locally - if (bottom_fraction_of_error > 0) - bottom_threshold = RefineAndCoarsenFixedFraction::compute_threshold( + top_threshold = dealii::internal::parallel::distributed:: + GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold( locally_owned_indicators, global_min_and_max, - (1 - bottom_fraction_of_error) * total_error, + top_fraction_of_error * total_error, mpi_communicator); + // compute bottom threshold only if necessary. otherwise use a threshold + // lower than the smallest value we have locally + if (bottom_fraction_of_error > 0) + bottom_threshold = dealii::internal::parallel::distributed:: + GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold( + locally_owned_indicators, + global_min_and_max, + (1 - bottom_fraction_of_error) * total_error, + mpi_communicator); else { bottom_threshold = diff --git a/source/distributed/grid_refinement.inst.in b/source/distributed/grid_refinement.inst.in index acc289a0b9..3588a6f19f 100644 --- a/source/distributed/grid_refinement.inst.in +++ b/source/distributed/grid_refinement.inst.in @@ -15,6 +15,44 @@ +for (S : REAL_SCALARS) + { + namespace internal + \{ + namespace parallel + \{ + namespace distributed + \{ + namespace GridRefinement + \{ + template std::pair + compute_global_min_and_max_at_root(const dealii::Vector &, + MPI_Comm); + + namespace RefineAndCoarsenFixedNumber + \{ + template S + compute_threshold(const dealii::Vector &, + const std::pair &, + const unsigned int, + MPI_Comm); + \} + namespace RefineAndCoarsenFixedFraction + \{ + template S + compute_threshold(const dealii::Vector &, + const std::pair &, + const double, + MPI_Comm); + \} + \} + \} + \} + \} + } + + + for (S : REAL_SCALARS; deal_II_dimension : DIMENSIONS) { namespace parallel
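
The point of this patch is that the threshold helpers previously hidden in an anonymous namespace of grid_refinement.cc can now be reached through the declarations added to include/deal.II/distributed/grid_refinement.h. A minimal sketch of such a call, mirroring the call sites in refine_and_coarsen_fixed_number() above — the caller's function and variable names are placeholders, double is only one of the instantiated REAL_SCALARS, and the template arguments (which this plain-text rendering of the diff drops from the declarations) are assumed to be deducible from the dealii::Vector<double> argument:

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/lac/vector.h>

#include <mpi.h>

#include <utility>

// Hypothetical caller: compute the refinement threshold for a vector of
// locally owned error indicators, much as refine_and_coarsen_fixed_number()
// does internally after this patch. Both calls are collective.
double
example_threshold(const dealii::Vector<double> &locally_owned_indicators,
                  const unsigned int            n_target_cells,
                  MPI_Comm                      mpi_communicator)
{
  // Only rank 0 receives the true global min and max; all other ranks get a
  // pair of zeros, which is fine because compute_threshold() broadcasts the
  // search range from rank 0 anyway.
  const std::pair<double, double> global_min_and_max =
    dealii::internal::parallel::distributed::GridRefinement::
      compute_global_min_and_max_at_root(locally_owned_indicators,
                                         mpi_communicator);

  // Threshold above which (approximately) n_target_cells indicators lie.
  return dealii::internal::parallel::distributed::GridRefinement::
    RefineAndCoarsenFixedNumber::compute_threshold(locally_owned_indicators,
                                                   global_min_and_max,
                                                   n_target_cells,
                                                   mpi_communicator);
}

Note that these functions live in an internal namespace: they are exposed for reuse inside the library (and by callers who accept the usual caveats about internal interfaces), not promoted to stable public API.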
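
The comment in compute_global_min_and_max_at_root() relies on a standard trick: by reducing the pair {local_min, -local_max} with a single MPI_MIN, rank 0 obtains both the global minimum and, after negating the second entry, the global maximum in one communication step. A self-contained sketch of just that trick, independent of deal.II (the sample values are made up for illustration):

#include <mpi.h>

#include <algorithm>
#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Some per-process values standing in for the local refinement criteria.
  const double local_values[3] = {1.0 + rank, 0.5 * rank, 2.0};

  const double local_min = *std::min_element(local_values, local_values + 3);
  const double local_max = *std::max_element(local_values, local_values + 3);

  // Pack {min, -max} so that a single MPI_MIN reduction yields both the
  // global minimum and (after negation) the global maximum on rank 0.
  double comp[2]   = {local_min, -local_max};
  double result[2] = {0., 0.};
  MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);

  if (rank == 0)
    std::printf("global min = %g, global max = %g\n", result[0], -result[1]);

  MPI_Finalize();
  return 0;
}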
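
The long comments inside the two compute_threshold() variants describe a bisection on the threshold itself: propose the geometric mean of the current range when it is positive (the arithmetic mean otherwise), count how many indicators exceed it, shrink the range toward the half that still contains the target, and stop after 25 rounds because equal-valued indicators may make an exact cut impossible. A serial sketch of that loop, leaving out the MPI_Bcast/MPI_Reduce steps the patched code uses to keep all ranks consistent (function name and sample data are illustrative only):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Find a threshold so that approximately n_target values are larger than it.
double compute_threshold_serial(const std::vector<double> &criteria,
                                const unsigned int         n_target)
{
  double range[2] = {*std::min_element(criteria.begin(), criteria.end()),
                     *std::max_element(criteria.begin(), criteria.end())};

  for (unsigned int iteration = 0; iteration < 25; ++iteration)
    {
      if (range[0] == range[1])
        return range[0];

      // Geometric mean if the interval is entirely positive, else midpoint.
      const double test_threshold =
        (range[0] > 0 ? std::sqrt(range[0] * range[1]) :
                        (range[0] + range[1]) / 2);

      const unsigned int count =
        std::count_if(criteria.begin(), criteria.end(),
                      [test_threshold](const double c) {
                        return c > test_threshold;
                      });

      if (count > n_target)      // too many flagged: raise the lower end
        range[0] = test_threshold;
      else if (count < n_target) // too few flagged: lower the upper end
        range[1] = test_threshold;
      else                       // exact hit
        return test_threshold;
    }

  // Stop after a fixed number of rounds; as the original comment notes, for
  // perfectly equidistributed indicators this bounds the mistake in the
  // number of flagged cells by 1/2^25.
  return (range[0] + range[1]) / 2;
}

int main()
{
  const std::vector<double> criteria = {0.1, 0.4, 0.4, 0.9, 1.3, 2.7};
  std::printf("threshold = %g\n", compute_threshold_serial(criteria, 2));
  return 0;
}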