#include <deal.II/base/mg_level_object.h>
+#include <deal.II/distributed/tria_base.h>
+
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *dist_tria = dynamic_cast<
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &(dh.get_triangulation()));
+ const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
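// A reduced sketch, with a hypothetical helper name, of the pattern repeated
// in the reinit() overloads of this hunk: detect at run time whether the
// DoFHandler's triangulation is a parallel one and, if it is not, fall back
// to MPI_COMM_SELF so the same code path also serves a plain serial
// Triangulation.

#include <deal.II/distributed/tria_base.h>

template <typename DoFHandlerType>
MPI_Comm
extract_communicator(const DoFHandlerType &dh) // hypothetical helper
{
  const auto *dist_tria = dynamic_cast<
    const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
                                              DoFHandlerType::space_dimension>
      *>(&dh.get_triangulation());

  return dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
}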
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *dist_tria = dynamic_cast<
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &(dh.get_triangulation()));
+ const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *dist_tria = dynamic_cast<
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &(dh.get_triangulation()));
+ const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
const SparsityPatternType &sp,
DoFHandlerType & dh)
{
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *dist_tria = dynamic_cast<
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &(dh.get_triangulation()));
+ const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
const SparsityPatternType &sp,
const DoFHandlerType & dh)
{
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension>
- *dist_tria = dynamic_cast<
- const parallel::TriangulationBase<DoFHandlerType::dimension,
- DoFHandlerType::space_dimension> *>(
- &(dh.get_triangulation()));
+ const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension>
+ *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+ DoFHandlerType::dimension,
+ DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
MPI_Comm communicator =
dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
// Reinit PETSc matrix
}
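// A minimal stand-alone sketch (hypothetical namespace names, not deal.II
// code) of the kind of C++ name-lookup collision that the explicit dealii::
// qualifications added above guard against: once an enclosing namespace gains
// its own member called `parallel`, an unqualified parallel::TriangulationBase
// binds to that inner namespace and no longer reaches the outer one, so the
// fully qualified name has to be spelled out.

namespace dealii_like
{
  namespace parallel
  {
    struct TriangulationBase
    {};
  } // namespace parallel

  namespace internal
  {
    namespace parallel // a sibling with the same name shadows the outer one
    {
      struct GridRefinementHelpers
      {};
    } // namespace parallel

    // Inside dealii_like::internal, plain parallel:: now refers to
    // internal::parallel, so the outer class needs full qualification:
    using Tria = dealii_like::parallel::TriangulationBase; // OK
    // using Bad = parallel::TriangulationBase;  // error: no such member in
    //                                           // internal::parallel
  } // namespace internal
} // namespace dealii_like

int main()
{
  dealii_like::internal::Tria t;
  (void)t;
}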
- /**
- * Compute the global max and min of the criteria vector. These are returned
- * only on the processor with rank zero, all others get a pair of zeros.
- */
- template <typename number>
- std::pair<number, number>
- compute_global_min_and_max_at_root(const dealii::Vector<number> &criteria,
- MPI_Comm mpi_communicator)
- {
- // we'd like to compute the global max and min from the local ones in one
- // MPI communication. we can do that by taking the elementwise minimum of
- // the local min and the negative maximum over all processors
-
- const double local_min = min_element(criteria),
- local_max = max_element(criteria);
- double comp[2] = {local_min, -local_max};
- double result[2] = {0, 0};
-
- // compute the minimum on processor zero
- const int ierr =
- MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
- AssertThrowMPI(ierr);
-
- // make sure only processor zero got something
- if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
- Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
-
- return std::make_pair(result[0], -result[1]);
- }
-
-
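// A minimal stand-alone sketch (plain MPI, illustrative names) of the
// reduction trick used in compute_global_min_and_max_at_root above: packing
// {local_min, -local_max} and reducing with MPI_MIN delivers both the global
// minimum and the global maximum to rank 0 with a single MPI_Reduce instead
// of two separate reductions.

#include <mpi.h>

#include <algorithm>
#include <cstdio>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Some per-process data; the values differ between ranks.
  std::vector<double> criteria = {1.0 + rank, 4.0 - rank, 2.5};

  const double local_min = *std::min_element(criteria.begin(), criteria.end());
  const double local_max = *std::max_element(criteria.begin(), criteria.end());

  // The minimum over {-local_max} across ranks equals minus the maximum
  // over {local_max}, so one MPI_MIN reduction covers both extremes.
  const double comp[2]   = {local_min, -local_max};
  double       result[2] = {0., 0.};
  MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD);

  if (rank == 0)
    std::printf("global min = %g, global max = %g\n", result[0], -result[1]);

  MPI_Finalize();
  return 0;
}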
/**
* Compute the global sum over the elements of the vectors passed to this
cell->clear_coarsen_flag();
}
}
+} // namespace
- namespace RefineAndCoarsenFixedNumber
+namespace internal
+{
+ namespace parallel
{
- /**
- * Compute a threshold value so that exactly n_target_cells have a value
- * that is larger.
- */
- template <typename number>
- number
- compute_threshold(const dealii::Vector<number> & criteria,
- const std::pair<double, double> &global_min_and_max,
- const unsigned int n_target_cells,
- MPI_Comm mpi_communicator)
+ namespace distributed
{
- double interesting_range[2] = {global_min_and_max.first,
- global_min_and_max.second};
- adjust_interesting_range(interesting_range);
-
- const unsigned int master_mpi_rank = 0;
- unsigned int iteration = 0;
-
- do
+ namespace GridRefinement
+ {
+ template <typename number>
+ std::pair<number, number>
+ compute_global_min_and_max_at_root(
+ const dealii::Vector<number> &criteria,
+ MPI_Comm mpi_communicator)
{
- int ierr = MPI_Bcast(interesting_range,
- 2,
- MPI_DOUBLE,
- master_mpi_rank,
- mpi_communicator);
+ // we'd like to compute the global max and min from the local ones in
+ // one MPI communication. we can do that by taking the elementwise
+ // minimum of the local min and the negative maximum over all
+ // processors
+
+ const double local_min = min_element(criteria),
+ local_max = max_element(criteria);
+ double comp[2] = {local_min, -local_max};
+ double result[2] = {0, 0};
+
+ // compute the minimum on processor zero
+ const int ierr = MPI_Reduce(
+ comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
AssertThrowMPI(ierr);
- if (interesting_range[0] == interesting_range[1])
- return interesting_range[0];
-
- const double test_threshold =
- (interesting_range[0] > 0 ?
- std::sqrt(interesting_range[0] * interesting_range[1]) :
- (interesting_range[0] + interesting_range[1]) / 2);
-
- // count how many of our own elements would be above this threshold
- // and then add to it the number for all the others
- unsigned int my_count =
- std::count_if(criteria.begin(),
- criteria.end(),
- [test_threshold](const double c) {
- return c > test_threshold;
- });
-
- unsigned int total_count = 0;
-
- ierr = MPI_Reduce(&my_count,
- &total_count,
- 1,
- MPI_UNSIGNED,
- MPI_SUM,
- master_mpi_rank,
- mpi_communicator);
- AssertThrowMPI(ierr);
+ // make sure only processor zero got something
+ if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
+ Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
- // now adjust the range. if we have too many cells, we take the upper
- // half of the previous range, otherwise the lower half. if we have
- // hit the right number, then set the range to the exact value.
- // slave nodes also update their own interesting_range, however their
- // results are not significant since the values will be overwritten by
- // MPI_Bcast from the master node in next loop.
- if (total_count > n_target_cells)
- interesting_range[0] = test_threshold;
- else if (total_count < n_target_cells)
- interesting_range[1] = test_threshold;
- else
- interesting_range[0] = interesting_range[1] = test_threshold;
-
- // terminate the iteration after 25 go-arounds. this is necessary
- // because oftentimes error indicators on cells have exactly the
- // same value, and so there may not be a particular value that cuts
- // the indicators in such a way that we can achieve the desired
- // number of cells. using a maximal number of iterations means that
- // we terminate the iteration after a fixed number N of steps if the
- // indicators were perfectly badly distributed, and we make at most
- // a mistake of 1/2^N in the number of cells flagged if indicators
- // are perfectly equidistributed
- ++iteration;
- if (iteration == 25)
- interesting_range[0] = interesting_range[1] = test_threshold;
+ return std::make_pair(result[0], -result[1]);
}
- while (true);
-
- Assert(false, ExcInternalError());
- return -1;
- }
- } // namespace RefineAndCoarsenFixedNumber
-
- namespace RefineAndCoarsenFixedFraction
- {
- /**
- * Compute a threshold value so that the error
- * accumulated over all criteria[i] so that
- * criteria[i] > threshold
- * is larger than target_error.
- */
- template <typename number>
- number
- compute_threshold(const dealii::Vector<number> & criteria,
- const std::pair<double, double> &global_min_and_max,
- const double target_error,
- MPI_Comm mpi_communicator)
- {
- double interesting_range[2] = {global_min_and_max.first,
- global_min_and_max.second};
- adjust_interesting_range(interesting_range);
-
- const unsigned int master_mpi_rank = 0;
- unsigned int iteration = 0;
- do
+ namespace RefineAndCoarsenFixedNumber
{
- int ierr = MPI_Bcast(interesting_range,
- 2,
- MPI_DOUBLE,
- master_mpi_rank,
- mpi_communicator);
- AssertThrowMPI(ierr);
+ template <typename number>
+ number
+ compute_threshold(const dealii::Vector<number> & criteria,
+ const std::pair<double, double> &global_min_and_max,
+ const unsigned int n_target_cells,
+ MPI_Comm mpi_communicator)
+ {
+ double interesting_range[2] = {global_min_and_max.first,
+ global_min_and_max.second};
+ adjust_interesting_range(interesting_range);
+
+ const unsigned int master_mpi_rank = 0;
+ unsigned int iteration = 0;
+
+ do
+ {
+ int ierr = MPI_Bcast(interesting_range,
+ 2,
+ MPI_DOUBLE,
+ master_mpi_rank,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ if (interesting_range[0] == interesting_range[1])
+ return interesting_range[0];
+
+ const double test_threshold =
+ (interesting_range[0] > 0 ?
+ std::sqrt(interesting_range[0] * interesting_range[1]) :
+ (interesting_range[0] + interesting_range[1]) / 2);
+
+ // count how many of our own elements would be above this
+ // threshold and then add to it the number for all the others
+ unsigned int my_count =
+ std::count_if(criteria.begin(),
+ criteria.end(),
+ [test_threshold](const double c) {
+ return c > test_threshold;
+ });
+
+ unsigned int total_count = 0;
+
+ ierr = MPI_Reduce(&my_count,
+ &total_count,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ master_mpi_rank,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ // now adjust the range. if we have too many cells, we take the
+ // upper half of the previous range, otherwise the lower half.
+ // if we have hit the right number, then set the range to the
+ // exact value. slave nodes also update their own
+ // interesting_range, however their results are not significant
+ // since the values will be overwritten by MPI_Bcast from the
+              // master node in the next loop.
+ if (total_count > n_target_cells)
+ interesting_range[0] = test_threshold;
+ else if (total_count < n_target_cells)
+ interesting_range[1] = test_threshold;
+ else
+ interesting_range[0] = interesting_range[1] = test_threshold;
+
+ // terminate the iteration after 25 go-arounds. this is
+ // necessary because oftentimes error indicators on cells have
+ // exactly the same value, and so there may not be a particular
+ // value that cuts the indicators in such a way that we can
+ // achieve the desired number of cells. using a maximal number
+ // of iterations means that we terminate the iteration after a
+ // fixed number N of steps if the indicators were perfectly
+ // badly distributed, and we make at most a mistake of 1/2^N in
+ // the number of cells flagged if indicators are perfectly
+ // equidistributed
+ ++iteration;
+ if (iteration == 25)
+ interesting_range[0] = interesting_range[1] = test_threshold;
+ }
+ while (true);
+
+ Assert(false, ExcInternalError());
+ return -1;
+ }
+ } // namespace RefineAndCoarsenFixedNumber
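// A serial sketch (illustrative code, no MPI) of the bisection performed by
// RefineAndCoarsenFixedNumber::compute_threshold() above: shrink the interval
// [lower, upper] until roughly n_target values exceed the trial threshold,
// and give up after 25 iterations. In the parallel code rank 0 does this
// bookkeeping, MPI_Bcast distributes the current interval, and MPI_Reduce
// sums the per-process counts.

#include <algorithm>
#include <cmath>
#include <vector>

double bisect_threshold(const std::vector<double> &criteria,
                        double                     lower,
                        double                     upper,
                        const unsigned int         n_target)
{
  for (unsigned int iteration = 0; iteration < 25; ++iteration)
    {
      if (lower == upper)
        return lower;

      // Geometric mean for positive ranges (error indicators often span
      // many orders of magnitude), arithmetic mean otherwise.
      const double test_threshold =
        (lower > 0 ? std::sqrt(lower * upper) : (lower + upper) / 2);

      const unsigned int count =
        std::count_if(criteria.begin(), criteria.end(), [&](const double c) {
          return c > test_threshold;
        });

      if (count > n_target) // too many cells flagged: raise the threshold
        lower = test_threshold;
      else if (count < n_target) // too few: lower it
        upper = test_threshold;
      else
        return test_threshold;
    }

  // After 25 steps the relative miscount is at most about 1/2^25.
  return lower;
}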
- if (interesting_range[0] == interesting_range[1])
- {
- // so we have found our threshold. since we adjust the range
- // at the top of the function to be slightly larger than the
- // actual extremes of the refinement criteria values, we can end
- // up in a situation where the threshold is in fact larger than
- // the maximal refinement indicator. in such cases, we get no
- // refinement at all. thus, cap the threshold by the actual
- // largest value
- double final_threshold =
- std::min(interesting_range[0], global_min_and_max.second);
- ierr = MPI_Bcast(&final_threshold,
- 1,
- MPI_DOUBLE,
- master_mpi_rank,
- mpi_communicator);
- AssertThrowMPI(ierr);
-
- return final_threshold;
- }
-
- const double test_threshold =
- (interesting_range[0] > 0 ?
- std::sqrt(interesting_range[0] * interesting_range[1]) :
- (interesting_range[0] + interesting_range[1]) / 2);
-
- // accumulate the error of those our own elements above this threshold
- // and then add to it the number for all the others
- double my_error = 0;
- for (unsigned int i = 0; i < criteria.size(); ++i)
- if (criteria(i) > test_threshold)
- my_error += criteria(i);
-
- double total_error = 0.;
-
- ierr = MPI_Reduce(&my_error,
- &total_error,
- 1,
- MPI_DOUBLE,
- MPI_SUM,
- master_mpi_rank,
- mpi_communicator);
- AssertThrowMPI(ierr);
- // now adjust the range. if we have too many cells, we take the upper
- // half of the previous range, otherwise the lower half. if we have
- // hit the right number, then set the range to the exact value.
- // slave nodes also update their own interesting_range, however their
- // results are not significant since the values will be overwritten by
- // MPI_Bcast from the master node in next loop.
- if (total_error > target_error)
- interesting_range[0] = test_threshold;
- else if (total_error < target_error)
- interesting_range[1] = test_threshold;
- else
- interesting_range[0] = interesting_range[1] = test_threshold;
-
- // terminate the iteration after 25 go-arounds. this is
- // necessary because oftentimes error indicators on cells
- // have exactly the same value, and so there may not be a
- // particular value that cuts the indicators in such a way
- // that we can achieve the desired number of cells. using a
- // max of 25 iterations means that we terminate the
- // iteration after 25 steps if the indicators were perfectly
- // badly distributed, and we make at most a mistake of
- // 1/2^25 in the number of cells flagged if indicators are
- // perfectly equidistributed
- ++iteration;
- if (iteration == 25)
- interesting_range[0] = interesting_range[1] = test_threshold;
- }
- while (true);
- Assert(false, ExcInternalError());
- return -1;
- }
- } // namespace RefineAndCoarsenFixedFraction
-} // namespace
+ namespace RefineAndCoarsenFixedFraction
+ {
+ template <typename number>
+ number
+ compute_threshold(const dealii::Vector<number> & criteria,
+ const std::pair<double, double> &global_min_and_max,
+ const double target_error,
+ MPI_Comm mpi_communicator)
+ {
+ double interesting_range[2] = {global_min_and_max.first,
+ global_min_and_max.second};
+ adjust_interesting_range(interesting_range);
+
+ const unsigned int master_mpi_rank = 0;
+ unsigned int iteration = 0;
+
+ do
+ {
+ int ierr = MPI_Bcast(interesting_range,
+ 2,
+ MPI_DOUBLE,
+ master_mpi_rank,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ if (interesting_range[0] == interesting_range[1])
+ {
+ // so we have found our threshold. since we adjust the range
+ // at the top of the function to be slightly larger than the
+ // actual extremes of the refinement criteria values, we can
+ // end up in a situation where the threshold is in fact
+ // larger than the maximal refinement indicator. in such
+ // cases, we get no refinement at all. thus, cap the
+ // threshold by the actual largest value
+ double final_threshold =
+ std::min(interesting_range[0], global_min_and_max.second);
+ ierr = MPI_Bcast(&final_threshold,
+ 1,
+ MPI_DOUBLE,
+ master_mpi_rank,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ return final_threshold;
+ }
+
+ const double test_threshold =
+ (interesting_range[0] > 0 ?
+ std::sqrt(interesting_range[0] * interesting_range[1]) :
+ (interesting_range[0] + interesting_range[1]) / 2);
+
+ // accumulate the error of those our own elements above this
+ // threshold and then add to it the number for all the others
+ double my_error = 0;
+ for (unsigned int i = 0; i < criteria.size(); ++i)
+ if (criteria(i) > test_threshold)
+ my_error += criteria(i);
+
+ double total_error = 0.;
+
+ ierr = MPI_Reduce(&my_error,
+ &total_error,
+ 1,
+ MPI_DOUBLE,
+ MPI_SUM,
+ master_mpi_rank,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ // now adjust the range. if we have too many cells, we take the
+ // upper half of the previous range, otherwise the lower half.
+ // if we have hit the right number, then set the range to the
+ // exact value. slave nodes also update their own
+ // interesting_range, however their results are not significant
+ // since the values will be overwritten by MPI_Bcast from the
+              // master node in the next loop.
+ if (total_error > target_error)
+ interesting_range[0] = test_threshold;
+ else if (total_error < target_error)
+ interesting_range[1] = test_threshold;
+ else
+ interesting_range[0] = interesting_range[1] = test_threshold;
+
+ // terminate the iteration after 25 go-arounds. this is
+ // necessary because oftentimes error indicators on cells
+ // have exactly the same value, and so there may not be a
+ // particular value that cuts the indicators in such a way
+ // that we can achieve the desired number of cells. using a
+ // max of 25 iterations means that we terminate the
+ // iteration after 25 steps if the indicators were perfectly
+ // badly distributed, and we make at most a mistake of
+ // 1/2^25 in the number of cells flagged if indicators are
+ // perfectly equidistributed
+ ++iteration;
+ if (iteration == 25)
+ interesting_range[0] = interesting_range[1] = test_threshold;
+ }
+ while (true);
+
+ Assert(false, ExcInternalError());
+ return -1;
+ }
+ } // namespace RefineAndCoarsenFixedFraction
+ } // namespace GridRefinement
+ } // namespace distributed
+ } // namespace parallel
+} // namespace internal
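// RefineAndCoarsenFixedFraction::compute_threshold above bisects on a
// different quantity: instead of counting cells above the trial threshold, it
// sums their error indicators and compares that sum against target_error (a
// fraction of the total error). A serial sketch of that inner step, with
// illustrative names:

#include <vector>

double error_above_threshold(const std::vector<double> &criteria,
                             const double               test_threshold)
{
  double accumulated_error = 0.;
  for (const double c : criteria)
    if (c > test_threshold)
      accumulated_error += c;

  // In the parallel code this local sum is combined across processes with
  // MPI_Reduce(..., MPI_SUM, ...) before being compared to target_error.
  return accumulated_error;
}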
// figure out the global max and min of the indicators. we don't need it
// here, but it's a collective communication call
const std::pair<Number, Number> global_min_and_max =
- compute_global_min_and_max_at_root(locally_owned_indicators,
- mpi_communicator);
+ dealii::internal::parallel::distributed::GridRefinement::
+ compute_global_min_and_max_at_root(locally_owned_indicators,
+ mpi_communicator);
double top_threshold, bottom_threshold;
- top_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
- locally_owned_indicators,
- global_min_and_max,
- static_cast<unsigned int>(adjusted_fractions.first *
- tria.n_global_active_cells()),
- mpi_communicator);
+ top_threshold = dealii::internal::parallel::distributed::
+ GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
+ locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>(adjusted_fractions.first *
+ tria.n_global_active_cells()),
+ mpi_communicator);
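      // Worked example of the two target counts (illustrative numbers): with
      // 1000 globally active cells and adjusted_fractions = {0.3, 0.03}, the
      // call above asks for static_cast<unsigned int>(0.3 * 1000) = 300 cells
      // above the top threshold (to be refined), while the bottom-threshold
      // call below asks for ceil((1 - 0.03) * 1000) = 970 cells to lie above
      // the bottom threshold, so the roughly 30 cells with the smallest
      // indicators end up flagged for coarsening.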
// compute bottom threshold only if necessary. otherwise use a threshold
// lower than the smallest value we have locally
if (adjusted_fractions.second > 0)
- bottom_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
- locally_owned_indicators,
- global_min_and_max,
- static_cast<unsigned int>(std::ceil(
- (1 - adjusted_fractions.second) * tria.n_global_active_cells())),
- mpi_communicator);
+ bottom_threshold = dealii::internal::parallel::distributed::
+ GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
+ locally_owned_indicators,
+ global_min_and_max,
+ static_cast<unsigned int>(
+ std::ceil((1 - adjusted_fractions.second) *
+ tria.n_global_active_cells())),
+ mpi_communicator);
else
{
bottom_threshold =
// figure out the global max and min of the indicators. we don't need it
// here, but it's a collective communication call
const std::pair<double, double> global_min_and_max =
- compute_global_min_and_max_at_root(locally_owned_indicators,
- mpi_communicator);
+ dealii::internal::parallel::distributed::GridRefinement::
+ compute_global_min_and_max_at_root(locally_owned_indicators,
+ mpi_communicator);
const double total_error =
compute_global_sum(locally_owned_indicators, mpi_communicator);
double top_threshold, bottom_threshold;
- top_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
- locally_owned_indicators,
- global_min_and_max,
- top_fraction_of_error * total_error,
- mpi_communicator);
- // compute bottom threshold only if necessary. otherwise use a threshold
- // lower than the smallest value we have locally
- if (bottom_fraction_of_error > 0)
- bottom_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
+ top_threshold = dealii::internal::parallel::distributed::
+ GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
locally_owned_indicators,
global_min_and_max,
- (1 - bottom_fraction_of_error) * total_error,
+ top_fraction_of_error * total_error,
mpi_communicator);
+ // compute bottom threshold only if necessary. otherwise use a threshold
+ // lower than the smallest value we have locally
+ if (bottom_fraction_of_error > 0)
+ bottom_threshold = dealii::internal::parallel::distributed::
+ GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
+ locally_owned_indicators,
+ global_min_and_max,
+ (1 - bottom_fraction_of_error) * total_error,
+ mpi_communicator);
else
{
bottom_threshold =