#include <numeric>
#include <algorithm>
#include <limits>
+#include <iomanip>
DEAL_II_NAMESPACE_OPEN
* others get a pair of zeros.
*/
template <typename number>
- std::pair<double,double>
+ std::pair<number,number>
compute_global_min_and_max_at_root (const Vector<number> &criteria,
MPI_Comm mpi_communicator)
{
void
get_locally_owned_indicators (const parallel::distributed::Triangulation<dim,spacedim> &tria,
const Vector &criteria,
- dealii::Vector<float> &locally_owned_indicators)
+ dealii::Vector<typename Vector::value_type> &locally_owned_indicators)
{
Assert (locally_owned_indicators.size() == tria.n_locally_owned_active_cells(),
ExcInternalError());
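The signature changes in the two hunks above have the same purpose: the
hard-coded double/float types are replaced by the value type of the
criteria vector itself, so double-precision criteria are no longer
silently narrowed to float on the way through. A minimal standalone
sketch of the idea, not deal.II code; the helper name min_and_max and
the requirement that the vector type exposes a value_type typedef are
assumptions for illustration:

    #include <algorithm>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // extract the element type from the vector type instead of fixing
    // it: a caller passing double-precision criteria keeps full precision
    template <typename VectorType>
    std::pair<typename VectorType::value_type,
              typename VectorType::value_type>
    min_and_max (const VectorType &v)
    {
      typedef typename VectorType::value_type value_type;
      value_type lo = v[0], hi = v[0];        // assumes v is non-empty
      for (std::size_t i = 1; i < v.size(); ++i)
        {
          lo = std::min (lo, v[i]);
          hi = std::max (hi, v[i]);
        }
      return std::make_pair (lo, hi);         // pair of the input's own type
    }

    int main ()
    {
      std::vector<double> criteria;
      criteria.push_back (1.48589e-08);
      criteria.push_back (3.14051);
      const std::pair<double, double> mm = min_and_max (criteria);
      return (mm.second > mm.first) ? 0 : 1;  // pair<double,double>, no narrowing
    }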
MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
0, mpi_communicator);
if (interesting_range[0] == interesting_range[1])
- return interesting_range[0];
+ {
+ // so we have found our threshold. since we adjust
+ // the range at the top of the function to be slightly
+ // larger than the actual extremes of the refinement
+ // criteria values, we can end up in a situation where
+ // the threshold is in fact larger than the maximal
+ // refinement indicator. in such cases, we get no
+ // refinement at all. thus, cap the threshold by the
+ // actual largest value
+ // force both arguments to double: global_min_and_max now carries
+ // the criteria's value type, which need not match interesting_range
+ double final_threshold = std::min<double> (interesting_range[0],
+ global_min_and_max.second);
+ MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ return final_threshold;
+ }
const double test_threshold
= (interesting_range[0] > 0
? std::sqrt (interesting_range[0] * interesting_range[1])
: (interesting_range[0] + interesting_range[1]) / 2);
MPI_Bcast (&interesting_range[0], 2, MPI_DOUBLE,
0, mpi_communicator);
if (interesting_range[0] == interesting_range[1])
- return interesting_range[0];
+ {
+ // we got the final value. the master adjusts the
+ // value one more time, so receive it from there
+ double final_threshold = -1;
+ MPI_Bcast (&final_threshold, 1, MPI_DOUBLE,
+ 0, mpi_communicator);
+
+ return final_threshold;
+ }
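The two hunks above are the heart of the fix, and they have to change in
lockstep: MPI_Bcast is collective, so the rank that computes and caps the
final threshold and every rank that merely receives it must all reach the
same call. A minimal sketch of that pattern in plain MPI, outside of
deal.II; the values are made up for illustration:

    #include <mpi.h>
    #include <algorithm>
    #include <cstdio>

    int main (int argc, char **argv)
    {
      MPI_Init (&argc, &argv);
      int rank;
      MPI_Comm_rank (MPI_COMM_WORLD, &rank);

      const double global_max = 3.14051; // stand-in for global_min_and_max.second
      double final_threshold  = -1;      // non-root ranks receive into this

      if (rank == 0)
        {
          // the bisection endpoint may lie in the artificially widened
          // part of the interval, above the true maximum: cap it
          const double converged_endpoint = 3.5;
          final_threshold = std::min (converged_endpoint, global_max);
        }

      // collective: the same call sends on rank 0 and receives on all
      // other ranks; a rank that skipped it would deadlock the program
      MPI_Bcast (&final_threshold, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

      std::printf ("rank %d: threshold = %g\n", rank, final_threshold);
      MPI_Finalize ();
      return 0;
    }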
// count how many elements
// there are that are bigger
// than the test threshold
// vector of indicators the
// ones that correspond to
// cells that we locally own
- dealii::Vector<float>
+ dealii::Vector<typename Vector::value_type>
locally_owned_indicators (tria.n_locally_owned_active_cells());
get_locally_owned_indicators (tria,
criteria,
locally_owned_indicators);
// need it here, but it's a
// collective communication
// call
- const std::pair<double,double> global_min_and_max
+ const std::pair<typename Vector::value_type,typename Vector::value_type> global_min_and_max
= compute_global_min_and_max_at_root (locally_owned_indicators,
mpi_communicator);
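For reference, here is a plausible sketch, in plain MPI, of what a helper
like compute_global_min_and_max_at_root does; the actual deal.II routine
may differ in detail. Every rank must call it, since MPI_Reduce is
collective, but only rank 0 receives the result; all other ranks return
the pair of zeros mentioned in the documentation fragment above:

    #include <mpi.h>
    #include <algorithm>
    #include <cstddef>
    #include <cstdio>
    #include <limits>
    #include <utility>
    #include <vector>

    std::pair<double, double>
    global_min_and_max_at_root (const std::vector<double> &local,
                                MPI_Comm                   comm)
    {
      double local_min = std::numeric_limits<double>::max();
      double local_max = -std::numeric_limits<double>::max();
      for (std::size_t i = 0; i < local.size(); ++i)
        {
          local_min = std::min (local_min, local[i]);
          local_max = std::max (local_max, local[i]);
        }

      // the reductions deliver the global extremes to rank 0 only; the
      // zeros below are what every other rank hands back
      double global_min = 0, global_max = 0;
      MPI_Reduce (&local_min, &global_min, 1, MPI_DOUBLE, MPI_MIN, 0, comm);
      MPI_Reduce (&local_max, &global_max, 1, MPI_DOUBLE, MPI_MAX, 0, comm);
      return std::make_pair (global_min, global_max);
    }

    int main (int argc, char **argv)
    {
      MPI_Init (&argc, &argv);
      std::vector<double> local;
      local.push_back (0.0025918);
      local.push_back (3.14051);
      const std::pair<double, double> mm =
        global_min_and_max_at_root (local, MPI_COMM_WORLD);
      int rank;
      MPI_Comm_rank (MPI_COMM_WORLD, &rank);
      if (rank == 0)
        std::printf ("min = %g, max = %g\n", mm.first, mm.second);
      MPI_Finalize ();
      return 0;
    }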
// vector of indicators the
// ones that correspond to
// cells that we locally own
- dealii::Vector<float>
+ dealii::Vector<typename Vector::value_type>
locally_owned_indicators (tria.n_locally_owned_active_cells());
get_locally_owned_indicators (tria,
criteria,
locally_owned_indicators);
top_fraction_of_error *
total_error,
mpi_communicator);
-
// compute bottom
// threshold only if
// necessary. otherwise
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// A testcase by Andrea Bonito (in modified form): Given a particular
+// set of refinement indicators, we get no refinement at all because
+// the threshold is computed to be *larger* than the largest
+// refinement indicator
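+//
+// The mechanism, in a toy example: if the largest indicator is 4.0 and
+// the bisection interval is widened slightly at the top (say to
+// [0, 4.04]), the bisection can converge to a threshold such as 4.02.
+// That value exceeds every indicator, so no cell gets flagged even
+// though a nonzero refinement fraction was requested; the fix caps the
+// final threshold at the true maximum.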
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/tensor.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/grid/grid_refinement.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_boundary_lib.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/base/utilities.h>
+
+
+#include <fstream>
+
+
+void test()
+{
+ const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ // create a mesh with 120 cells (because that's what Andrea's
+ // original testcase had)
+ parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
+ std::vector<unsigned int> subdivisions(2);
+ subdivisions[0] = 120;
+ subdivisions[1] = 1;
+ GridGenerator::subdivided_hyper_rectangle (triangulation,
+ subdivisions,
+ Point<2>(),
+ Point<2>(1,1));
+ // initialize the refinement indicators with a set of particular values from the original
+ // testcase
+ const double values[] =
+ {
+ 1.48589e-08,
+ 3.31859e-06,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 0.0025918,
+ 3.31859e-06,
+ 1.48589e-08,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 1.48589e-08,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 0.0025918,
+ 3.31859e-06,
+ 3.31859e-06,
+ 1.48589e-08,
+ 0.0025918,
+ 0.0025918,
+ 3.31859e-06,
+ 3.31859e-06,
+ 0.0025918,
+ 0.0025918,
+ 1.48589e-08,
+ 3.31859e-06,
+ 3.31859e-06,
+ 0.0025918,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 1.48589e-08,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 1.48589e-08,
+ 3.31859e-06,
+ 0.0025918,
+ 0.0025918,
+ 3.31859e-06,
+ 0.0025918,
+ 3.31859e-06,
+ 3.31859e-06,
+ 1.48589e-08,
+ 0.0120662,
+ 0.0446999,
+ 0.0446999,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 0.0644361,
+ 3.14051,
+ 0.0446999,
+ 0.0120662,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 0.0446999,
+ 3.14051,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 0.0120662,
+ 0.0446999,
+ 0.0644361,
+ 3.14051,
+ 0.0446999,
+ 0.0644361,
+ 0.0644361,
+ 0.0446999,
+ 0.0446999,
+ 0.0120662,
+ 3.14051,
+ 0.0644361,
+ 0.0644361,
+ 0.0446999,
+ 0.0446999,
+ 0.0644361,
+ 0.0644361,
+ 3.14051,
+ 0.0120662,
+ 0.0446999,
+ 0.0446999,
+ 0.0644361,
+ 0.0644361,
+ 0.0446999,
+ 3.14051,
+ 0.0644361,
+ 0.0446999,
+ 0.0120662,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 3.14051,
+ 0.0446999,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 0.0120662,
+ 0.0446999,
+ 3.14051,
+ 0.0644361,
+ 0.0644361,
+ 0.0446999,
+ 0.0644361,
+ 0.0446999,
+ 0.0446999,
+ 0.0120662
+ };
+
+ Vector<double> estimated_error_per_cell(&values[0],
+ &values[0] + 120);
+
+ deallog << "Number of cells before refinement: " << triangulation.n_active_cells()
+ << std::endl;
+
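+  // ask for refinement of the smallest set of cells whose combined
+  // error accounts for 30% of the total error, and coarsen nothing;
+  // with the bug, the computed threshold exceeds the largest indicator
+  // and no cell is flagged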
+ parallel::distributed::GridRefinement::
+ refine_and_coarsen_fixed_fraction (triangulation,
+ estimated_error_per_cell,
+ 0.3, 0.0);
+ triangulation.execute_coarsening_and_refinement ();
+
+ deallog << "Number of cells after refinement: " << triangulation.n_active_cells()
+ << std::endl;
+
+}
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile("output");
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ test();
+ }
+ else
+ test();
+
+}