https://gitweb.dealii.org/ - dealii.git/commitdiff
parallel::distributed::GridRefinement: Made internal functions accessible.
author     Marc Fehling <marc.fehling@gmx.net>
           Fri, 6 Dec 2019 15:06:30 +0000 (16:06 +0100)
committer  Marc Fehling <marc.fehling@gmx.net>
           Fri, 13 Dec 2019 10:18:17 +0000 (11:18 +0100)
include/deal.II/distributed/grid_refinement.h
include/deal.II/multigrid/mg_transfer.h
source/distributed/grid_refinement.cc
source/distributed/grid_refinement.inst.in

index b4d39e86965fb9ad6fb948fc874934b33d496755..2d6b6fd25da524c3f6bb4a773679bd11dd988ac3 100644 (file)
 
 DEAL_II_NAMESPACE_OPEN
 
+namespace internal
+{
+  namespace parallel
+  {
+    namespace distributed
+    {
+      namespace GridRefinement
+      {
+        /**
+         * Compute the global max and min of the criteria vector. These are
+         * returned only on the processor with rank zero, all others get a pair
+         * of zeros.
+         */
+        template <typename number>
+        std::pair<number, number>
+        compute_global_min_and_max_at_root(
+          const dealii::Vector<number> &criteria,
+          MPI_Comm                      mpi_communicator);
+
+        namespace RefineAndCoarsenFixedNumber
+        {
+          /**
+           * Compute a threshold value so that exactly n_target_cells have a
+           * value that is larger.
+           */
+          template <typename number>
+          number
+          compute_threshold(const dealii::Vector<number> &   criteria,
+                            const std::pair<double, double> &global_min_and_max,
+                            const unsigned int               n_target_cells,
+                            MPI_Comm                         mpi_communicator);
+        } // namespace RefineAndCoarsenFixedNumber
+
+        namespace RefineAndCoarsenFixedFraction
+        {
+          /**
+           * Compute a threshold value so that the error accumulated over all
+           * criteria[i] for which
+           *     criteria[i] > threshold
+           * is larger than target_error.
+           */
+          template <typename number>
+          number
+          compute_threshold(const dealii::Vector<number> &   criteria,
+                            const std::pair<double, double> &global_min_and_max,
+                            const double                     target_error,
+                            MPI_Comm                         mpi_communicator);
+        } // namespace RefineAndCoarsenFixedFraction
+      }   // namespace GridRefinement
+    }     // namespace distributed
+  }       // namespace parallel
+} // namespace internal
+
+
+
 namespace parallel
 {
   namespace distributed
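
With these declarations in place, other parts of the library (and tests) can reach the formerly file-local helpers through their new namespace. The following is a minimal usage sketch, not code from this commit: the function name flag_cells_sketch, the indicator vector, and the target count of 100 cells are placeholders, and an MPI-enabled build with float among the configured scalar types is assumed.

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/lac/vector.h>

#include <mpi.h>

#include <utility>

void flag_cells_sketch(const dealii::Vector<float> &local_indicators,
                       MPI_Comm                     mpi_communicator)
{
  namespace dgr = dealii::internal::parallel::distributed::GridRefinement;

  // Collective call: rank 0 receives the global min/max of the indicators,
  // all other ranks receive a pair of zeros.
  const std::pair<float, float> global_min_and_max =
    dgr::compute_global_min_and_max_at_root(local_indicators,
                                            mpi_communicator);

  // Threshold above which (approximately) 100 cells would be refined.
  const float top_threshold =
    dgr::RefineAndCoarsenFixedNumber::compute_threshold(local_indicators,
                                                        global_min_and_max,
                                                        100u,
                                                        mpi_communicator);
  (void)top_threshold;
}
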
index d5537f61df251c6b6f8b26a03c2b0f0a2aa129f1..518d2441459cab4eaba4dea86cdff783c7611dbc 100644 (file)
@@ -20,6 +20,8 @@
 
 #include <deal.II/base/mg_level_object.h>
 
+#include <deal.II/distributed/tria_base.h>
+
 #include <deal.II/dofs/dof_handler.h>
 
 #include <deal.II/lac/affine_constraints.h>
@@ -82,12 +84,11 @@ namespace internal
            const SparsityPatternType &sp,
            DoFHandlerType &           dh)
     {
-      const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                        DoFHandlerType::space_dimension>
-        *dist_tria = dynamic_cast<
-          const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                            DoFHandlerType::space_dimension> *>(
-          &(dh.get_triangulation()));
+      const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+                                                DoFHandlerType::space_dimension>
+        *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+          DoFHandlerType::dimension,
+          DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
       MPI_Comm communicator =
         dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
 
@@ -115,12 +116,11 @@ namespace internal
            const SparsityPatternType &sp,
            DoFHandlerType &           dh)
     {
-      const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                        DoFHandlerType::space_dimension>
-        *dist_tria = dynamic_cast<
-          const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                            DoFHandlerType::space_dimension> *>(
-          &(dh.get_triangulation()));
+      const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+                                                DoFHandlerType::space_dimension>
+        *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+          DoFHandlerType::dimension,
+          DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
       MPI_Comm communicator =
         dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
@@ -149,12 +149,11 @@ namespace internal
            const SparsityPatternType &sp,
            DoFHandlerType &           dh)
     {
-      const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                        DoFHandlerType::space_dimension>
-        *dist_tria = dynamic_cast<
-          const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                            DoFHandlerType::space_dimension> *>(
-          &(dh.get_triangulation()));
+      const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+                                                DoFHandlerType::space_dimension>
+        *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+          DoFHandlerType::dimension,
+          DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
       MPI_Comm communicator =
         dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
@@ -182,12 +181,11 @@ namespace internal
            const SparsityPatternType &sp,
            DoFHandlerType &           dh)
     {
-      const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                        DoFHandlerType::space_dimension>
-        *dist_tria = dynamic_cast<
-          const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                            DoFHandlerType::space_dimension> *>(
-          &(dh.get_triangulation()));
+      const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+                                                DoFHandlerType::space_dimension>
+        *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+          DoFHandlerType::dimension,
+          DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
       MPI_Comm communicator =
         dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
       matrix.reinit(dh.locally_owned_mg_dofs(level + 1),
@@ -244,12 +242,11 @@ namespace internal
            const SparsityPatternType &sp,
            const DoFHandlerType &     dh)
     {
-      const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                        DoFHandlerType::space_dimension>
-        *dist_tria = dynamic_cast<
-          const parallel::TriangulationBase<DoFHandlerType::dimension,
-                                            DoFHandlerType::space_dimension> *>(
-          &(dh.get_triangulation()));
+      const dealii::parallel::TriangulationBase<DoFHandlerType::dimension,
+                                                DoFHandlerType::space_dimension>
+        *dist_tria = dynamic_cast<const dealii::parallel::TriangulationBase<
+          DoFHandlerType::dimension,
+          DoFHandlerType::space_dimension> *>(&(dh.get_triangulation()));
       MPI_Comm communicator =
         dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
       // Reinit PETSc matrix
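
The dealii:: qualification added in the hunks above is needed because unqualified lookup of parallel::TriangulationBase from inside namespace internal would now find the newly introduced internal::parallel namespace first and stop there. The underlying pattern is unchanged; a minimal sketch of it is shown below, with a hypothetical helper name and a plain DoFHandler<dim> standing in for the templated DoFHandlerType.

#include <deal.II/distributed/tria_base.h>
#include <deal.II/dofs/dof_handler.h>

#include <mpi.h>

template <int dim>
MPI_Comm communicator_of(const dealii::DoFHandler<dim> &dh)
{
  // A parallel triangulation reports its communicator; a plain serial
  // Triangulation does not derive from TriangulationBase, so the cast
  // yields nullptr and we fall back to MPI_COMM_SELF.
  const auto *dist_tria =
    dynamic_cast<const dealii::parallel::TriangulationBase<dim> *>(
      &dh.get_triangulation());

  return dist_tria != nullptr ? dist_tria->get_communicator() : MPI_COMM_SELF;
}
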
index 47e9d439d2278dd6054a7198bec5190ce0536b87..661780ecafdfdc005de5c0287124de414f47b007 100644 (file)
@@ -62,37 +62,6 @@ namespace
   }
 
 
-  /**
-   * Compute the global max and min of the criteria vector. These are returned
-   * only on the processor with rank zero, all others get a pair of zeros.
-   */
-  template <typename number>
-  std::pair<number, number>
-  compute_global_min_and_max_at_root(const dealii::Vector<number> &criteria,
-                                     MPI_Comm mpi_communicator)
-  {
-    // we'd like to compute the global max and min from the local ones in one
-    // MPI communication. we can do that by taking the elementwise minimum of
-    // the local min and the negative maximum over all processors
-
-    const double local_min = min_element(criteria),
-                 local_max = max_element(criteria);
-    double comp[2]         = {local_min, -local_max};
-    double result[2]       = {0, 0};
-
-    // compute the minimum on processor zero
-    const int ierr =
-      MPI_Reduce(comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
-    AssertThrowMPI(ierr);
-
-    // make sure only processor zero got something
-    if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
-      Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
-
-    return std::make_pair(result[0], -result[1]);
-  }
-
-
 
   /**
    * Compute the global sum over the elements of the vectors passed to this
@@ -211,210 +180,245 @@ namespace
           cell->clear_coarsen_flag();
         }
   }
+} // namespace
 
 
 
-  namespace RefineAndCoarsenFixedNumber
+namespace internal
+{
+  namespace parallel
   {
-    /**
-     * Compute a threshold value so that exactly n_target_cells have a value
-     * that is larger.
-     */
-    template <typename number>
-    number
-    compute_threshold(const dealii::Vector<number> &   criteria,
-                      const std::pair<double, double> &global_min_and_max,
-                      const unsigned int               n_target_cells,
-                      MPI_Comm                         mpi_communicator)
+    namespace distributed
     {
-      double interesting_range[2] = {global_min_and_max.first,
-                                     global_min_and_max.second};
-      adjust_interesting_range(interesting_range);
-
-      const unsigned int master_mpi_rank = 0;
-      unsigned int       iteration       = 0;
-
-      do
+      namespace GridRefinement
+      {
+        template <typename number>
+        std::pair<number, number>
+        compute_global_min_and_max_at_root(
+          const dealii::Vector<number> &criteria,
+          MPI_Comm                      mpi_communicator)
         {
-          int ierr = MPI_Bcast(interesting_range,
-                               2,
-                               MPI_DOUBLE,
-                               master_mpi_rank,
-                               mpi_communicator);
+          // we'd like to compute the global max and min from the local ones in
+          // one MPI communication. we can do that by taking the elementwise
+          // minimum of the local min and the negative maximum over all
+          // processors
+
+          const double local_min = min_element(criteria),
+                       local_max = max_element(criteria);
+          double comp[2]         = {local_min, -local_max};
+          double result[2]       = {0, 0};
+
+          // compute the minimum on processor zero
+          const int ierr = MPI_Reduce(
+            comp, result, 2, MPI_DOUBLE, MPI_MIN, 0, mpi_communicator);
           AssertThrowMPI(ierr);
 
-          if (interesting_range[0] == interesting_range[1])
-            return interesting_range[0];
-
-          const double test_threshold =
-            (interesting_range[0] > 0 ?
-               std::sqrt(interesting_range[0] * interesting_range[1]) :
-               (interesting_range[0] + interesting_range[1]) / 2);
-
-          // count how many of our own elements would be above this threshold
-          // and then add to it the number for all the others
-          unsigned int my_count =
-            std::count_if(criteria.begin(),
-                          criteria.end(),
-                          [test_threshold](const double c) {
-                            return c > test_threshold;
-                          });
-
-          unsigned int total_count = 0;
-
-          ierr = MPI_Reduce(&my_count,
-                            &total_count,
-                            1,
-                            MPI_UNSIGNED,
-                            MPI_SUM,
-                            master_mpi_rank,
-                            mpi_communicator);
-          AssertThrowMPI(ierr);
+          // make sure only processor zero got something
+          if (Utilities::MPI::this_mpi_process(mpi_communicator) != 0)
+            Assert((result[0] == 0) && (result[1] == 0), ExcInternalError());
 
-          // now adjust the range. if we have too many cells, we take the upper
-          // half of the previous range, otherwise the lower half. if we have
-          // hit the right number, then set the range to the exact value.
-          // slave nodes also update their own interesting_range, however their
-          // results are not significant since the values will be overwritten by
-          // MPI_Bcast from the master node in next loop.
-          if (total_count > n_target_cells)
-            interesting_range[0] = test_threshold;
-          else if (total_count < n_target_cells)
-            interesting_range[1] = test_threshold;
-          else
-            interesting_range[0] = interesting_range[1] = test_threshold;
-
-          // terminate the iteration after 25 go-arounds. this is necessary
-          // because oftentimes error indicators on cells have exactly the
-          // same value, and so there may not be a particular value that cuts
-          // the indicators in such a way that we can achieve the desired
-          // number of cells. using a maximal number of iterations means that
-          // we terminate the iteration after a fixed number N of steps if the
-          // indicators were perfectly badly distributed, and we make at most
-          // a mistake of 1/2^N in the number of cells flagged if indicators
-          // are perfectly equidistributed
-          ++iteration;
-          if (iteration == 25)
-            interesting_range[0] = interesting_range[1] = test_threshold;
+          return std::make_pair(result[0], -result[1]);
         }
-      while (true);
-
-      Assert(false, ExcInternalError());
-      return -1;
-    }
-  } // namespace RefineAndCoarsenFixedNumber
-
 
 
-  namespace RefineAndCoarsenFixedFraction
-  {
-    /**
-     * Compute a threshold value so that the error
-     * accumulated over all criteria[i] so that
-     *     criteria[i] > threshold
-     * is larger than target_error.
-     */
-    template <typename number>
-    number
-    compute_threshold(const dealii::Vector<number> &   criteria,
-                      const std::pair<double, double> &global_min_and_max,
-                      const double                     target_error,
-                      MPI_Comm                         mpi_communicator)
-    {
-      double interesting_range[2] = {global_min_and_max.first,
-                                     global_min_and_max.second};
-      adjust_interesting_range(interesting_range);
-
-      const unsigned int master_mpi_rank = 0;
-      unsigned int       iteration       = 0;
 
-      do
+        namespace RefineAndCoarsenFixedNumber
         {
-          int ierr = MPI_Bcast(interesting_range,
-                               2,
-                               MPI_DOUBLE,
-                               master_mpi_rank,
-                               mpi_communicator);
-          AssertThrowMPI(ierr);
+          template <typename number>
+          number
+          compute_threshold(const dealii::Vector<number> &   criteria,
+                            const std::pair<double, double> &global_min_and_max,
+                            const unsigned int               n_target_cells,
+                            MPI_Comm                         mpi_communicator)
+          {
+            double interesting_range[2] = {global_min_and_max.first,
+                                           global_min_and_max.second};
+            adjust_interesting_range(interesting_range);
+
+            const unsigned int master_mpi_rank = 0;
+            unsigned int       iteration       = 0;
+
+            do
+              {
+                int ierr = MPI_Bcast(interesting_range,
+                                     2,
+                                     MPI_DOUBLE,
+                                     master_mpi_rank,
+                                     mpi_communicator);
+                AssertThrowMPI(ierr);
+
+                if (interesting_range[0] == interesting_range[1])
+                  return interesting_range[0];
+
+                const double test_threshold =
+                  (interesting_range[0] > 0 ?
+                     std::sqrt(interesting_range[0] * interesting_range[1]) :
+                     (interesting_range[0] + interesting_range[1]) / 2);
+
+                // count how many of our own elements would be above this
+                // threshold and then add to it the number for all the others
+                unsigned int my_count =
+                  std::count_if(criteria.begin(),
+                                criteria.end(),
+                                [test_threshold](const double c) {
+                                  return c > test_threshold;
+                                });
+
+                unsigned int total_count = 0;
+
+                ierr = MPI_Reduce(&my_count,
+                                  &total_count,
+                                  1,
+                                  MPI_UNSIGNED,
+                                  MPI_SUM,
+                                  master_mpi_rank,
+                                  mpi_communicator);
+                AssertThrowMPI(ierr);
+
+                // now adjust the range. if we have too many cells, we take the
+                // upper half of the previous range, otherwise the lower half.
+                // if we have hit the right number, then set the range to the
+                // exact value. slave nodes also update their own
+                // interesting_range, however their results are not significant
+                // since the values will be overwritten by MPI_Bcast from the
+                // master node in next loop.
+                if (total_count > n_target_cells)
+                  interesting_range[0] = test_threshold;
+                else if (total_count < n_target_cells)
+                  interesting_range[1] = test_threshold;
+                else
+                  interesting_range[0] = interesting_range[1] = test_threshold;
+
+                // terminate the iteration after 25 go-arounds. this is
+                // necessary because oftentimes error indicators on cells have
+                // exactly the same value, and so there may not be a particular
+                // value that cuts the indicators in such a way that we can
+                // achieve the desired number of cells. using a maximal number
+                // of iterations means that we terminate the iteration after a
+                // fixed number N of steps if the indicators were perfectly
+                // badly distributed, and we make at most a mistake of 1/2^N in
+                // the number of cells flagged if indicators are perfectly
+                // equidistributed
+                ++iteration;
+                if (iteration == 25)
+                  interesting_range[0] = interesting_range[1] = test_threshold;
+              }
+            while (true);
+
+            Assert(false, ExcInternalError());
+            return -1;
+          }
+        } // namespace RefineAndCoarsenFixedNumber
 
-          if (interesting_range[0] == interesting_range[1])
-            {
-              // so we have found our threshold. since we adjust the range
-              // at the top of the function to be slightly larger than the
-              // actual extremes of the refinement criteria values, we can end
-              // up in a situation where the threshold is in fact larger than
-              // the maximal refinement indicator. in such cases, we get no
-              // refinement at all. thus, cap the threshold by the actual
-              // largest value
-              double final_threshold =
-                std::min(interesting_range[0], global_min_and_max.second);
-              ierr = MPI_Bcast(&final_threshold,
-                               1,
-                               MPI_DOUBLE,
-                               master_mpi_rank,
-                               mpi_communicator);
-              AssertThrowMPI(ierr);
-
-              return final_threshold;
-            }
-
-          const double test_threshold =
-            (interesting_range[0] > 0 ?
-               std::sqrt(interesting_range[0] * interesting_range[1]) :
-               (interesting_range[0] + interesting_range[1]) / 2);
-
-          // accumulate the error of those our own elements above this threshold
-          // and then add to it the number for all the others
-          double my_error = 0;
-          for (unsigned int i = 0; i < criteria.size(); ++i)
-            if (criteria(i) > test_threshold)
-              my_error += criteria(i);
-
-          double total_error = 0.;
-
-          ierr = MPI_Reduce(&my_error,
-                            &total_error,
-                            1,
-                            MPI_DOUBLE,
-                            MPI_SUM,
-                            master_mpi_rank,
-                            mpi_communicator);
-          AssertThrowMPI(ierr);
 
-          // now adjust the range. if we have too many cells, we take the upper
-          // half of the previous range, otherwise the lower half. if we have
-          // hit the right number, then set the range to the exact value.
-          // slave nodes also update their own interesting_range, however their
-          // results are not significant since the values will be overwritten by
-          // MPI_Bcast from the master node in next loop.
-          if (total_error > target_error)
-            interesting_range[0] = test_threshold;
-          else if (total_error < target_error)
-            interesting_range[1] = test_threshold;
-          else
-            interesting_range[0] = interesting_range[1] = test_threshold;
-
-          // terminate the iteration after 25 go-arounds. this is
-          // necessary because oftentimes error indicators on cells
-          // have exactly the same value, and so there may not be a
-          // particular value that cuts the indicators in such a way
-          // that we can achieve the desired number of cells. using a
-          // max of 25 iterations means that we terminate the
-          // iteration after 25 steps if the indicators were perfectly
-          // badly distributed, and we make at most a mistake of
-          // 1/2^25 in the number of cells flagged if indicators are
-          // perfectly equidistributed
-          ++iteration;
-          if (iteration == 25)
-            interesting_range[0] = interesting_range[1] = test_threshold;
-        }
-      while (true);
 
-      Assert(false, ExcInternalError());
-      return -1;
-    }
-  } // namespace RefineAndCoarsenFixedFraction
-} // namespace
+        namespace RefineAndCoarsenFixedFraction
+        {
+          template <typename number>
+          number
+          compute_threshold(const dealii::Vector<number> &   criteria,
+                            const std::pair<double, double> &global_min_and_max,
+                            const double                     target_error,
+                            MPI_Comm                         mpi_communicator)
+          {
+            double interesting_range[2] = {global_min_and_max.first,
+                                           global_min_and_max.second};
+            adjust_interesting_range(interesting_range);
+
+            const unsigned int master_mpi_rank = 0;
+            unsigned int       iteration       = 0;
+
+            do
+              {
+                int ierr = MPI_Bcast(interesting_range,
+                                     2,
+                                     MPI_DOUBLE,
+                                     master_mpi_rank,
+                                     mpi_communicator);
+                AssertThrowMPI(ierr);
+
+                if (interesting_range[0] == interesting_range[1])
+                  {
+                    // so we have found our threshold. since we adjust the range
+                    // at the top of the function to be slightly larger than the
+                    // actual extremes of the refinement criteria values, we can
+                    // end up in a situation where the threshold is in fact
+                    // larger than the maximal refinement indicator. in such
+                    // cases, we get no refinement at all. thus, cap the
+                    // threshold by the actual largest value
+                    double final_threshold =
+                      std::min(interesting_range[0], global_min_and_max.second);
+                    ierr = MPI_Bcast(&final_threshold,
+                                     1,
+                                     MPI_DOUBLE,
+                                     master_mpi_rank,
+                                     mpi_communicator);
+                    AssertThrowMPI(ierr);
+
+                    return final_threshold;
+                  }
+
+                const double test_threshold =
+                  (interesting_range[0] > 0 ?
+                     std::sqrt(interesting_range[0] * interesting_range[1]) :
+                     (interesting_range[0] + interesting_range[1]) / 2);
+
+                // accumulate the error of those our own elements above this
+                // threshold and then add to it the number for all the others
+                double my_error = 0;
+                for (unsigned int i = 0; i < criteria.size(); ++i)
+                  if (criteria(i) > test_threshold)
+                    my_error += criteria(i);
+
+                double total_error = 0.;
+
+                ierr = MPI_Reduce(&my_error,
+                                  &total_error,
+                                  1,
+                                  MPI_DOUBLE,
+                                  MPI_SUM,
+                                  master_mpi_rank,
+                                  mpi_communicator);
+                AssertThrowMPI(ierr);
+
+                // now adjust the range. if we have too many cells, we take the
+                // upper half of the previous range, otherwise the lower half.
+                // if we have hit the right number, then set the range to the
+                // exact value. slave nodes also update their own
+                // interesting_range, however their results are not significant
+                // since the values will be overwritten by MPI_Bcast from the
+                // master node in next loop.
+                if (total_error > target_error)
+                  interesting_range[0] = test_threshold;
+                else if (total_error < target_error)
+                  interesting_range[1] = test_threshold;
+                else
+                  interesting_range[0] = interesting_range[1] = test_threshold;
+
+                // terminate the iteration after 25 go-arounds. this is
+                // necessary because oftentimes error indicators on cells
+                // have exactly the same value, and so there may not be a
+                // particular value that cuts the indicators in such a way
+                // that we can achieve the desired number of cells. using a
+                // max of 25 iterations means that we terminate the
+                // iteration after 25 steps if the indicators were perfectly
+                // badly distributed, and we make at most a mistake of
+                // 1/2^25 in the number of cells flagged if indicators are
+                // perfectly equidistributed
+                ++iteration;
+                if (iteration == 25)
+                  interesting_range[0] = interesting_range[1] = test_threshold;
+              }
+            while (true);
+
+            Assert(false, ExcInternalError());
+            return -1;
+          }
+        } // namespace RefineAndCoarsenFixedFraction
+      }   // namespace GridRefinement
+    }     // namespace distributed
+  }       // namespace parallel
+} // namespace internal
 
 
 
@@ -463,27 +467,31 @@ namespace parallel
         // figure out the global max and min of the indicators. we don't need it
         // here, but it's a collective communication call
         const std::pair<Number, Number> global_min_and_max =
-          compute_global_min_and_max_at_root(locally_owned_indicators,
-                                             mpi_communicator);
+          dealii::internal::parallel::distributed::GridRefinement::
+            compute_global_min_and_max_at_root(locally_owned_indicators,
+                                               mpi_communicator);
 
 
         double top_threshold, bottom_threshold;
-        top_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
-          locally_owned_indicators,
-          global_min_and_max,
-          static_cast<unsigned int>(adjusted_fractions.first *
-                                    tria.n_global_active_cells()),
-          mpi_communicator);
+        top_threshold = dealii::internal::parallel::distributed::
+          GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
+            locally_owned_indicators,
+            global_min_and_max,
+            static_cast<unsigned int>(adjusted_fractions.first *
+                                      tria.n_global_active_cells()),
+            mpi_communicator);
 
         // compute bottom threshold only if necessary. otherwise use a threshold
         // lower than the smallest value we have locally
         if (adjusted_fractions.second > 0)
-          bottom_threshold = RefineAndCoarsenFixedNumber::compute_threshold(
-            locally_owned_indicators,
-            global_min_and_max,
-            static_cast<unsigned int>(std::ceil(
-              (1 - adjusted_fractions.second) * tria.n_global_active_cells())),
-            mpi_communicator);
+          bottom_threshold = dealii::internal::parallel::distributed::
+            GridRefinement::RefineAndCoarsenFixedNumber::compute_threshold(
+              locally_owned_indicators,
+              global_min_and_max,
+              static_cast<unsigned int>(
+                std::ceil((1 - adjusted_fractions.second) *
+                          tria.n_global_active_cells())),
+              mpi_communicator);
         else
           {
             bottom_threshold =
@@ -527,25 +535,28 @@ namespace parallel
         // figure out the global max and min of the indicators. we don't need it
         // here, but it's a collective communication call
         const std::pair<double, double> global_min_and_max =
-          compute_global_min_and_max_at_root(locally_owned_indicators,
-                                             mpi_communicator);
+          dealii::internal::parallel::distributed::GridRefinement::
+            compute_global_min_and_max_at_root(locally_owned_indicators,
+                                               mpi_communicator);
 
         const double total_error =
           compute_global_sum(locally_owned_indicators, mpi_communicator);
         double top_threshold, bottom_threshold;
-        top_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
-          locally_owned_indicators,
-          global_min_and_max,
-          top_fraction_of_error * total_error,
-          mpi_communicator);
-        // compute bottom threshold only if necessary. otherwise use a threshold
-        // lower than the smallest value we have locally
-        if (bottom_fraction_of_error > 0)
-          bottom_threshold = RefineAndCoarsenFixedFraction::compute_threshold(
+        top_threshold = dealii::internal::parallel::distributed::
+          GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
             locally_owned_indicators,
             global_min_and_max,
-            (1 - bottom_fraction_of_error) * total_error,
+            top_fraction_of_error * total_error,
             mpi_communicator);
+        // compute bottom threshold only if necessary. otherwise use a threshold
+        // lower than the smallest value we have locally
+        if (bottom_fraction_of_error > 0)
+          bottom_threshold = dealii::internal::parallel::distributed::
+            GridRefinement::RefineAndCoarsenFixedFraction::compute_threshold(
+              locally_owned_indicators,
+              global_min_and_max,
+              (1 - bottom_fraction_of_error) * total_error,
+              mpi_communicator);
         else
           {
             bottom_threshold =
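
Both compute_threshold variants implement the bisection described in the comments above: rank 0 repeatedly proposes a test threshold, every rank counts (or sums) its local contribution above it, and the interval is narrowed until the target is met or 25 iterations have passed. Below is a serial sketch of that idea, purely for illustration; the function name and the std::vector interface are not deal.II code, and the real implementation interleaves MPI_Bcast and MPI_Reduce so that all ranks stay in lockstep with rank 0.

#include <algorithm>
#include <cmath>
#include <vector>

double threshold_for_count(const std::vector<double> &criteria,
                           double                     range_min,
                           double                     range_max,
                           const unsigned int         n_target_cells)
{
  for (unsigned int iteration = 0; iteration < 25; ++iteration)
    {
      if (range_min == range_max)
        return range_min;

      // Geometric mean on a positive range (indicators often span many
      // orders of magnitude), arithmetic mean otherwise.
      const double test_threshold =
        (range_min > 0 ? std::sqrt(range_min * range_max) :
                         (range_min + range_max) / 2);

      const unsigned int count = static_cast<unsigned int>(
        std::count_if(criteria.begin(), criteria.end(), [&](const double c) {
          return c > test_threshold;
        }));

      if (count > n_target_cells)
        range_min = test_threshold; // too many cells flagged: raise threshold
      else if (count < n_target_cells)
        range_max = test_threshold; // too few cells flagged: lower threshold
      else
        return test_threshold; // hit the target exactly
    }

  // Mirrors the 25-iteration cap in the real code: if the indicators are
  // perfectly equidistributed, the mistake in the number of flagged cells
  // is at most a fraction of 1/2^25.
  return (range_min + range_max) / 2;
}
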
index acc289a0b90e74bca9e164910231f35167a20fa0..3588a6f19f46db383ba5782753463354938a2f82 100644 (file)
 
 
 
+for (S : REAL_SCALARS)
+  {
+    namespace internal
+    \{
+      namespace parallel
+      \{
+        namespace distributed
+        \{
+          namespace GridRefinement
+          \{
+            template std::pair<S, S>
+            compute_global_min_and_max_at_root<S>(const dealii::Vector<S> &,
+                                                  MPI_Comm);
+
+            namespace RefineAndCoarsenFixedNumber
+            \{
+              template S
+              compute_threshold<S>(const dealii::Vector<S> &,
+                                   const std::pair<double, double> &,
+                                   const unsigned int,
+                                   MPI_Comm);
+            \}
+            namespace RefineAndCoarsenFixedFraction
+            \{
+              template S
+              compute_threshold<S>(const dealii::Vector<S> &,
+                                   const std::pair<double, double> &,
+                                   const double,
+                                   MPI_Comm);
+            \}
+          \}
+        \}
+      \}
+    \}
+  }
+
+
+
 for (S : REAL_SCALARS; deal_II_dimension : DIMENSIONS)
   {
     namespace parallel
