https://gitweb.dealii.org/ - dealii.git/commitdiff
Minor cleanup of hp::DoFHandler. 7933/head
author Marc Fehling <marc.fehling@gmx.net>
Wed, 17 Apr 2019 20:16:37 +0000 (22:16 +0200)
committer Marc Fehling <marc.fehling@gmx.net>
Fri, 10 May 2019 14:51:11 +0000 (16:51 +0200)
include/deal.II/hp/dof_handler.h
source/dofs/dof_handler_policy.cc
source/hp/dof_handler.cc
tests/mpi/hp_cell_weights_04.cc [new file with mode: 0644]
tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output [new file with mode: 0644]
tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1 [new file with mode: 0644]
tests/mpi/hp_refinement_01.cc
tests/mpi/hp_refinement_02.cc

index 6a11be2f7abeb07bdffb7adab593bfaaaca97428..064503e3580891604a8075b9fa8b27353870e675 100644 (file)
@@ -1122,7 +1122,8 @@ namespace hp
 
     /**
      * A function that will be triggered through a triangulation
-     * signal just before the associated Triangulation is modified.
+     * signal just before the associated Triangulation or
+     * parallel::shared::Triangulation is modified.
      *
      * The function that stores the active_fe_indices of all cells that will
      * be refined or coarsened before the refinement happens, so that
@@ -1131,18 +1132,6 @@ namespace hp
     void
     pre_active_fe_index_transfer();
 
-    /**
-     * A function that will be triggered through a triangulation
-     * signal just before the associated parallel::shared::Triangulation is
-     * modified.
-     *
-     * The function that stores the active_fe_indices of all cells that will
-     * be refined or coarsened before the refinement happens, so that
-     * they can be set again after refinement.
-     */
-    void
-    pre_shared_active_fe_index_transfer();
-
     /**
      * A function that will be triggered through a triangulation
      * signal just before the associated parallel::distributed::Triangulation is
@@ -1156,7 +1145,8 @@ namespace hp
 
     /**
      * A function that will be triggered through a triangulation
-     * signal just after the associated Triangulation is modified.
+     * signal just after the associated Triangulation or
+     * parallel::shared::Triangulation is modified.
      *
      * The function that restores the active_fe_indices of all cells that
      * were refined or coarsened.
@@ -1164,17 +1154,6 @@ namespace hp
     void
     post_active_fe_index_transfer();
 
-    /**
-     * A function that will be triggered through a triangulation
-     * signal just after the associated parallel::shared::Triangulation is
-     * modified.
-     *
-     * The function that restores the active_fe_indices of all cells that
-     * were refined or coarsened.
-     */
-    void
-    post_shared_active_fe_index_transfer();
-
     /**
      * A function that will be triggered through a triangulation
      * signal just after the associated parallel::distributed::Triangulation is
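
The header changes above merge the former parallel::shared::Triangulation callbacks into the plain pre_/post_active_fe_index_transfer() pair. Below is a minimal usage sketch of the behavior these callbacks provide; it is not part of the commit, the helper name, grid size and element degrees are arbitrary choices for illustration, and it assumes the post-commit set_fe() described further down.

#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_collection.h>

using namespace dealii;

// Hypothetical helper: assign a higher-order element to one cell, refine
// it, and rely on the signal callbacks to carry the active_fe_index over
// to the children.
template <int dim>
void keep_indices_across_refinement()
{
  Triangulation<dim> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  hp::FECollection<dim> fes;
  fes.push_back(FE_Q<dim>(1));
  fes.push_back(FE_Q<dim>(2));

  hp::DoFHandler<dim> dof_handler(tria);
  dof_handler.set_fe(fes);

  // Use the higher-order element on the first cell and flag it for
  // refinement.
  auto first_cell = dof_handler.begin_active();
  first_cell->set_active_fe_index(1);
  first_cell->set_refine_flag();

  // pre_/post_active_fe_index_transfer() are triggered through the
  // pre_refinement/post_refinement signals; afterwards the children of
  // the refined cell carry active_fe_index 1 again.
  tria.execute_coarsening_and_refinement();

  dof_handler.distribute_dofs(fes);
}

This is essentially the pattern the hp_refinement tests at the bottom of this commit exercise, now set up via set_fe() instead of distribute_dofs().
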
index 671309ccf56f36b1832a67ed2f6729cd5165b88f..a2035437998458d4d23ce3d385e92960bbc08065 100644 (file)
@@ -3705,16 +3705,12 @@ namespace internal
           {
             saved_subdomain_ids.resize(tr->n_active_cells());
 
-            typename parallel::shared::Triangulation<dim, spacedim>::
-              active_cell_iterator
-                cell = this->dof_handler->get_triangulation().begin_active(),
-                endc = this->dof_handler->get_triangulation().end();
-
             const std::vector<types::subdomain_id> &true_subdomain_ids =
               tr->get_true_subdomain_ids_of_cells();
 
-            for (unsigned int index = 0; cell != endc; ++cell, ++index)
+            for (const auto &cell : tr->active_cell_iterators())
               {
+                const unsigned int index   = cell->active_cell_index();
                 saved_subdomain_ids[index] = cell->subdomain_id();
                 cell->set_subdomain_id(true_subdomain_ids[index]);
               }
@@ -3834,15 +3830,9 @@ namespace internal
 
         // finally, restore current subdomain ids
         if (tr->with_artificial_cells())
-          {
-            typename parallel::shared::Triangulation<dim, spacedim>::
-              active_cell_iterator
-                cell = this->dof_handler->get_triangulation().begin_active(),
-                endc = this->dof_handler->get_triangulation().end();
-
-            for (unsigned int index = 0; cell != endc; ++cell, ++index)
-              cell->set_subdomain_id(saved_subdomain_ids[index]);
-          }
+          for (const auto &cell : tr->active_cell_iterators())
+            cell->set_subdomain_id(
+              saved_subdomain_ids[cell->active_cell_index()]);
 
         // return a NumberCache object made up from the sets of locally
         // owned DoFs
index 78d5017b2238dabf81ca8c56caf8f1582feba58a..329a7c2ef0991f86a5614d21626e59a52b5f5518 100644 (file)
@@ -961,7 +961,13 @@ namespace internal
         /**
          * Given a hp::DoFHandler object, make sure that the active_fe_indices
          * that a user has set for locally owned cells are communicated to all
-         * ghost cells as well.
+         * other relevant cells as well.
+         *
+         * For parallel::shared::Triangulation objects,
+         * this information is distributed to both ghost and artificial cells.
+         *
+         * If a parallel::distributed::Triangulation is used,
+         * active_fe_indices are communicated only to ghost cells.
          */
         template <int dim, int spacedim>
         static void
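
A hedged sketch of the behavior described in the documentation above, not taken from the commit: the helper name and mesh setup are illustrative, and it assumes an MPI build with a partitioner for parallel::shared::Triangulation available and MPI already initialized. Each process sets active_fe_indices only on its locally owned cells; after distribute_dofs() the same indices are visible on ghost cells and, for a shared triangulation with artificial cells, on artificial cells as well.

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/hp/dof_handler.h>
#include <deal.II/hp/fe_collection.h>

using namespace dealii;

// Hypothetical helper: exercise the communication of active_fe_indices
// on a shared triangulation that allows artificial cells.
template <int dim>
void communicate_indices_sketch()
{
  parallel::shared::Triangulation<dim> tria(
    MPI_COMM_WORLD,
    Triangulation<dim>::none,
    /*allow_artificial_cells=*/true);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  hp::FECollection<dim> fes;
  fes.push_back(FE_Q<dim>(1));
  fes.push_back(FE_Q<dim>(2));

  hp::DoFHandler<dim> dof_handler(tria);
  dof_handler.set_fe(fes);

  // Each process picks an index only on its locally owned cells ...
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned() && cell->center()[0] < 0.5)
      cell->set_active_fe_index(1);

  // ... and distribute_dofs() communicates them to all other relevant
  // cells, as documented above.
  dof_handler.distribute_dofs(fes);

  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_ghost())
      {
        // The index chosen by the owning process is now visible here.
        const unsigned int owners_choice = cell->active_fe_index();
        (void)owners_choice;
      }
}

The new hp_cell_weights_04 test added below exercises the shared-triangulation-with-artificial-cells side of this.
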
@@ -994,14 +1000,14 @@ namespace internal
                                   tr->get_communicator(),
                                   active_fe_indices);
 
-              // now go back and fill the active_fe_index on ghost
+              // now go back and fill the active_fe_index on all other
               // cells. we would like to call cell->set_active_fe_index(),
               // but that function does not allow setting these indices on
               // non-locally_owned cells. so we have to work around the
               // issue a little bit by accessing the underlying data
               // structures directly
               for (const auto &cell : dof_handler.active_cell_iterators())
-                if (cell->is_ghost())
+                if (!cell->is_locally_owned())
                   dof_handler.levels[cell->level()]->set_active_fe_index(
                     cell->index(),
                     active_fe_indices[cell->active_cell_index()]);
@@ -1191,12 +1197,9 @@ namespace internal
                    ++child_index)
                 {
                   const auto &child = parent->child(child_index);
-
-                  if (child->is_locally_owned())
-                    {
-                      Assert(child->active(), ExcInternalError());
-                      child->set_active_fe_index(pair.second);
-                    }
+                  Assert(child->is_locally_owned() && child->active(),
+                         ExcInternalError());
+                  child->set_active_fe_index(pair.second);
                 }
             }
 
@@ -1205,12 +1208,9 @@ namespace internal
           for (const auto &pair : fe_transfer->coarsened_cells_fe_index)
             {
               const auto &cell = pair.first;
-
-              if (cell->is_locally_owned())
-                {
-                  Assert(cell->active(), ExcInternalError());
-                  cell->set_active_fe_index(pair.second);
-                }
+              Assert(cell->is_locally_owned() && cell->active(),
+                     ExcInternalError());
+              cell->set_active_fe_index(pair.second);
             }
         }
       };
@@ -1537,7 +1537,7 @@ namespace hp
     // stored as protected data of this object, but for simplicity we
     // use the cell-wise access.
     for (const auto &cell : active_cell_iterators())
-      if (cell->is_locally_owned())
+      if (!cell->is_artificial())
         active_fe_indices[cell->active_cell_index()] = cell->active_fe_index();
   }
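
The hunk above appears to sit in hp::DoFHandler::get_active_fe_indices(), which now fills its output vector for every non-artificial cell instead of only the locally owned ones; treat that attribution as an inference from the context lines. A small hedged usage sketch under that assumption follows (the helper and variable names are made up for illustration):

#include <deal.II/base/exceptions.h>
#include <deal.II/hp/dof_handler.h>

#include <vector>

using namespace dealii;

// Hedged sketch: collect the active_fe_index of every non-artificial
// active cell into a flat vector indexed by active_cell_index(), and
// check it against the per-cell values. Assumes the DoFHandler has
// already been given an hp::FECollection via set_fe() or
// distribute_dofs().
template <int dim>
void check_flat_fe_indices(const hp::DoFHandler<dim> &dof_handler)
{
  std::vector<unsigned int> flat_indices;
  dof_handler.get_active_fe_indices(flat_indices);

  for (const auto &cell : dof_handler.active_cell_iterators())
    if (!cell->is_artificial())
      Assert(flat_indices[cell->active_cell_index()] ==
               cell->active_fe_index(),
             ExcInternalError());
}
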
 
@@ -1571,6 +1571,15 @@ namespace hp
   void
   DoFHandler<dim, spacedim>::set_fe(const hp::FECollection<dim, spacedim> &ff)
   {
+    Assert(
+      tria != nullptr,
+      ExcMessage(
+        "You need to set the Triangulation in the DoFHandler using initialize() or "
+        "in the constructor before you can distribute DoFs."));
+    Assert(tria->n_levels() > 0,
+           ExcMessage("The Triangulation you are using is empty!"));
+    Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+
     // don't create a new object if the one we have is already appropriate
     if (fe_collection != ff)
       fe_collection = hp::FECollection<dim, spacedim>(ff);
@@ -1578,6 +1587,9 @@ namespace hp
     // ensure that the active_fe_indices vectors are initialized correctly
     create_active_fe_table();
 
+    // initialize all p-refinement and p-coarsening flags
+    create_p_adaptation_flags();
+
     // make sure every processor knows the active_fe_indices
     // on both its own cells and all ghost cells
     dealii::internal::hp::DoFHandlerImplementation::Implementation::
@@ -1586,13 +1598,10 @@ namespace hp
     // make sure that the fe collection is large enough to
     // cover all fe indices presently in use on the mesh
     for (const auto &cell : active_cell_iterators())
-      if (cell->is_locally_owned())
+      if (!cell->is_artificial())
         Assert(cell->active_fe_index() < fe_collection.size(),
                ExcInvalidFEIndex(cell->active_fe_index(),
                                  fe_collection.size()));
-
-    // initialize all p-refinement and p-coarsening flags
-    create_p_adaptation_flags();
   }
 
 
@@ -1602,53 +1611,40 @@ namespace hp
   DoFHandler<dim, spacedim>::distribute_dofs(
     const hp::FECollection<dim, spacedim> &ff)
   {
-    Assert(
-      tria != nullptr,
-      ExcMessage(
-        "You need to set the Triangulation in the DoFHandler using initialize() or "
-        "in the constructor before you can distribute DoFs."));
-    Assert(tria->n_levels() > 0,
-           ExcMessage("The Triangulation you are using is empty!"));
-    Assert(ff.size() > 0, ExcMessage("The hp::FECollection given is empty!"));
+    // assign the fe_collection and initialize all active_fe_indices
+    set_fe(ff);
 
     // If an underlying shared::Tria allows artificial cells,
     // then save the current set of subdomain ids, and set
     // subdomain ids to the "true" owner of each cell. we later
     // restore these flags
-    std::vector<types::subdomain_id> saved_subdomain_ids;
-    if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-          (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-            &get_triangulation())))
-      if (shared_tria->with_artificial_cells())
-        {
-          saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
-          const std::vector<types::subdomain_id> &true_subdomain_ids =
-            shared_tria->get_true_subdomain_ids_of_cells();
+    std::vector<types::subdomain_id>                      saved_subdomain_ids;
+    const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
+      (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
+        &get_triangulation()));
+    if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+      {
+        saved_subdomain_ids.resize(shared_tria->n_active_cells());
 
-          for (const auto &cell : shared_tria->active_cell_iterators())
-            {
-              const unsigned int index   = cell->active_cell_index();
-              saved_subdomain_ids[index] = cell->subdomain_id();
-              cell->set_subdomain_id(true_subdomain_ids[index]);
-            }
-        }
+        const std::vector<types::subdomain_id> &true_subdomain_ids =
+          shared_tria->get_true_subdomain_ids_of_cells();
 
-    // assign the fe_collection and initialize all active_fe_indices
-    set_fe(ff);
+        for (const auto &cell : shared_tria->active_cell_iterators())
+          {
+            const unsigned int index   = cell->active_cell_index();
+            saved_subdomain_ids[index] = cell->subdomain_id();
+            cell->set_subdomain_id(true_subdomain_ids[index]);
+          }
+      }
 
     // then allocate space for all the other tables
     dealii::internal::hp::DoFHandlerImplementation::Implementation::
       reserve_space(*this);
 
     // now undo the subdomain modification
-    if (const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-          (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-            &get_triangulation())))
-      if (shared_tria->with_artificial_cells())
-        for (const auto &cell : shared_tria->active_cell_iterators())
-          cell->set_subdomain_id(
-            saved_subdomain_ids[cell->active_cell_index()]);
+    if (shared_tria != nullptr && shared_tria->with_artificial_cells())
+      for (const auto &cell : shared_tria->active_cell_iterators())
+        cell->set_subdomain_id(saved_subdomain_ids[cell->active_cell_index()]);
 
 
     // Clear user flags because we will need them. But first we save
@@ -1709,6 +1705,7 @@ namespace hp
           internal::DoFHandlerImplementation::Policy::ParallelDistributed<
             DoFHandler<dim, spacedim>>>(*this);
 
+        // repartitioning signals
         tria_listeners.push_back(
           this->tria->signals.pre_distributed_repartition.connect(std::bind(
             &DoFHandler<dim,
@@ -1720,6 +1717,7 @@ namespace hp
                         spacedim>::post_distributed_active_fe_index_transfer,
             std::ref(*this))));
 
+        // refinement signals
         tria_listeners.push_back(
           this->tria->signals.pre_distributed_refinement.connect(std::bind(
             &DoFHandler<dim,
@@ -1731,36 +1729,28 @@ namespace hp
                         spacedim>::post_distributed_active_fe_index_transfer,
             std::ref(*this))));
 
+        // serialization signals
         tria_listeners.push_back(
           this->tria->signals.post_distributed_save.connect(
             std::bind(&DoFHandler<dim, spacedim>::
                         post_distributed_serialization_of_active_fe_indices,
                       std::ref(*this))));
       }
-    else if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
-                            *>(&this->get_triangulation()) != nullptr)
-      {
-        policy =
-          std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
-                                   ParallelShared<DoFHandler<dim, spacedim>>>(
-            *this);
-
-        tria_listeners.push_back(
-          this->tria->signals.pre_refinement.connect(std::bind(
-            &DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer,
-            std::ref(*this))));
-        tria_listeners.push_back(
-          this->tria->signals.post_refinement.connect(std::bind(
-            &DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer,
-            std::ref(*this))));
-      }
     else
       {
-        policy =
-          std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
-                                   Sequential<DoFHandler<dim, spacedim>>>(
-            *this);
+        if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
+                           *>(&this->get_triangulation()) != nullptr)
+          policy =
+            std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
+                                     ParallelShared<DoFHandler<dim, spacedim>>>(
+              *this);
+        else
+          policy =
+            std_cxx14::make_unique<internal::DoFHandlerImplementation::Policy::
+                                     Sequential<DoFHandler<dim, spacedim>>>(
+              *this);
 
+        // refinement signals
         tria_listeners.push_back(this->tria->signals.pre_refinement.connect(
           std::bind(&DoFHandler<dim, spacedim>::pre_active_fe_index_transfer,
                     std::ref(*this))));
@@ -2021,72 +2011,6 @@ namespace hp
 
 
 
-  template <int dim, int spacedim>
-  void
-  DoFHandler<dim, spacedim>::pre_shared_active_fe_index_transfer()
-  {
-#ifndef DEAL_II_WITH_MPI
-    Assert(false, ExcInternalError());
-#else
-    // Finite elements need to be assigned to each cell by calling
-    // distribute_dofs() first to make this functionality available.
-    if (fe_collection.size() > 0)
-      {
-        Assert(active_fe_index_transfer == nullptr, ExcInternalError());
-
-        active_fe_index_transfer =
-          std_cxx14::make_unique<ActiveFEIndexTransfer>();
-
-        // If the underlying shared::Tria allows artificial cells,
-        // then save the current set of subdomain ids, and set
-        // subdomain ids to the "true" owner of each cell. We later
-        // restore these flags.
-        const parallel::shared::Triangulation<dim, spacedim> *shared_tria =
-          (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
-            &(*tria)));
-        Assert(shared_tria != nullptr, ExcInternalError());
-
-        std::vector<types::subdomain_id> saved_subdomain_ids;
-        if (shared_tria->with_artificial_cells())
-          {
-            saved_subdomain_ids.resize(shared_tria->n_active_cells());
-
-            const std::vector<types::subdomain_id> &true_subdomain_ids =
-              shared_tria->get_true_subdomain_ids_of_cells();
-
-            for (const auto &cell : active_cell_iterators())
-              {
-                const unsigned int index   = cell->active_cell_index();
-                saved_subdomain_ids[index] = cell->subdomain_id();
-                cell->set_subdomain_id(true_subdomain_ids[index]);
-              }
-
-            // Make sure every processor knows the active_fe_indices
-            // on both its own cells and all ghost cells.
-            dealii::internal::hp::DoFHandlerImplementation::Implementation::
-              communicate_active_fe_indices(*this);
-          }
-
-        // Now do what we would do in the sequential case.
-        dealii::internal::hp::DoFHandlerImplementation::Implementation::
-          collect_fe_indices_on_cells_to_be_refined(*this);
-
-        // Finally, restore current subdomain_ids.
-        if (shared_tria->with_artificial_cells())
-          for (const auto &cell : active_cell_iterators())
-            {
-              if (cell->is_artificial())
-                cell->set_subdomain_id(numbers::invalid_subdomain_id);
-              else
-                cell->set_subdomain_id(
-                  saved_subdomain_ids[cell->active_cell_index()]);
-            }
-      }
-#endif
-  }
-
-
-
   template <int dim, int spacedim>
   void
   DoFHandler<dim, spacedim>::pre_distributed_active_fe_index_transfer()
@@ -2133,9 +2057,15 @@ namespace hp
           for (unsigned int child_index = 0;
                child_index < pair.first->n_children();
                ++child_index)
-            active_fe_index_transfer
-              ->active_fe_indices[pair.first->child(child_index)
-                                    ->active_cell_index()] = pair.second;
+            {
+              // Make sure that all children belong to the same subdomain.
+              Assert(pair.first->child(child_index)->is_locally_owned(),
+                     ExcInternalError());
+
+              active_fe_index_transfer
+                ->active_fe_indices[pair.first->child(child_index)
+                                      ->active_cell_index()] = pair.second;
+            }
 
         // Create transfer object and attach to it.
         const auto *distributed_tria = dynamic_cast<
@@ -2171,48 +2101,18 @@ namespace hp
       {
         Assert(active_fe_index_transfer != nullptr, ExcInternalError());
 
-        // For Triangulation and p::s::Triangulation, the old cell iterators
-        // are still valid. There is no need to transfer data in this case,
-        // and we can re-use our previously gathered information from the
-        // container.
-
-        dealii::internal::hp::DoFHandlerImplementation::Implementation::
-          distribute_fe_indices_on_refined_cells(*this);
-
-        // Free memory.
-        active_fe_index_transfer.reset();
-      }
-  }
-
-
-
-  template <int dim, int spacedim>
-  void
-  DoFHandler<dim, spacedim>::post_shared_active_fe_index_transfer()
-  {
-#ifndef DEAL_II_WITH_MPI
-    Assert(false, ExcInternalError());
-#else
-    // Finite elements need to be assigned to each cell by calling
-    // distribute_dofs() first to make this functionality available.
-    if (fe_collection.size() > 0)
-      {
-        Assert(active_fe_index_transfer != nullptr, ExcInternalError());
-
-        // Do what we normally do in the sequential case.
         dealii::internal::hp::DoFHandlerImplementation::Implementation::
           distribute_fe_indices_on_refined_cells(*this);
 
         // We have to distribute the information about active_fe_indices
-        // on all processors, if a parallel::shared::Triangulation
-        // has been used.
+        // of all cells (including the artificial ones) on all processors,
+        // if a parallel::shared::Triangulation has been used.
         dealii::internal::hp::DoFHandlerImplementation::Implementation::
           communicate_active_fe_indices(*this);
 
         // Free memory.
         active_fe_index_transfer.reset();
       }
-#endif
   }
 
 
diff --git a/tests/mpi/hp_cell_weights_04.cc b/tests/mpi/hp_cell_weights_04.cc
new file mode 100644 (file)
index 0000000..3b0461b
--- /dev/null
@@ -0,0 +1,122 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Cell Weights Test
+// -----------------
+// Create a 4x4(x4) grid, on which all cells are associated with a Q1
+// element except for the very first one, which is assigned a Q5 element.
+// We choose a cell weighting algorithm based on the number of degrees
+// of freedom, and check whether the load is balanced as expected after
+// repartitioning the triangulation. The expected accumulated weight on
+// each processor should correspond to the sum of all degrees of
+// freedom on all cells of the corresponding subdomain.
+// We employ a large proportionality factor in our weighting function
+// so that the standard weight of '1000' per cell becomes negligible.
+//
+// This test works on a parallel::shared::Triangulation with METIS
+// as a partitioner. Cell weighting with ZOLTAN was not available
+// at the time this test was written.
+//
+// We allow artificial cells in this case.
+
+
+#include <deal.II/distributed/cell_weights.h>
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/hp/dof_handler.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test()
+{
+  parallel::shared::Triangulation<dim> tria(
+    MPI_COMM_WORLD,
+    ::Triangulation<dim>::none,
+    true,
+    parallel::shared::Triangulation<dim>::Settings::partition_metis);
+
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+
+  // Apply ndof cell weights.
+  hp::FECollection<dim> fe_collection;
+  fe_collection.push_back(FE_Q<dim>(1));
+  fe_collection.push_back(FE_Q<dim>(5));
+
+  hp::DoFHandler<dim> dh(tria);
+  dh.set_fe(fe_collection);
+  // default: active_fe_index = 0
+  for (auto &cell : dh.active_cell_iterators())
+    if (cell->is_locally_owned())
+      if (cell->id().to_string() == "0_2:00")
+        cell->set_active_fe_index(1);
+
+  deallog << "Number of cells before repartitioning: "
+          << tria.n_locally_owned_active_cells() << std::endl;
+  {
+    unsigned int dof_counter = 0;
+    for (auto &cell : dh.active_cell_iterators())
+      if (cell->is_locally_owned())
+        dof_counter += cell->get_fe().dofs_per_cell;
+    deallog << "  Cumulative dofs per cell: " << dof_counter << std::endl;
+  }
+
+
+  parallel::CellWeights<dim> cell_weights(dh);
+  cell_weights.register_ndofs_weighting(100000);
+
+  // we didn't mark any cells, but we want to repartition our domain
+  tria.execute_coarsening_and_refinement();
+
+
+  deallog << "Number of cells after repartitioning: "
+          << tria.n_locally_owned_active_cells() << std::endl;
+  {
+    unsigned int dof_counter = 0;
+    for (auto &cell : dh.active_cell_iterators())
+      if (cell->is_locally_owned())
+        dof_counter += cell->get_fe().dofs_per_cell;
+    deallog << "  Cumulative dofs per cell: " << dof_counter << std::endl;
+  }
+
+  // make sure no processor is hanging
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..d1d118c
--- /dev/null
@@ -0,0 +1,23 @@
+
+DEAL:0:2d::Number of cells before repartitioning: 8
+DEAL:0:2d::  Cumulative dofs per cell: 32
+DEAL:0:2d::Number of cells after repartitioning: 13
+DEAL:0:2d::  Cumulative dofs per cell: 52
+DEAL:0:2d::OK
+DEAL:0:3d::Number of cells before repartitioning: 33
+DEAL:0:3d::  Cumulative dofs per cell: 264
+DEAL:0:3d::Number of cells after repartitioning: 47
+DEAL:0:3d::  Cumulative dofs per cell: 376
+DEAL:0:3d::OK
+
+DEAL:1:2d::Number of cells before repartitioning: 8
+DEAL:1:2d::  Cumulative dofs per cell: 64
+DEAL:1:2d::Number of cells after repartitioning: 3
+DEAL:1:2d::  Cumulative dofs per cell: 44
+DEAL:1:2d::OK
+DEAL:1:3d::Number of cells before repartitioning: 31
+DEAL:1:3d::  Cumulative dofs per cell: 456
+DEAL:1:3d::Number of cells after repartitioning: 17
+DEAL:1:3d::  Cumulative dofs per cell: 344
+DEAL:1:3d::OK
+
diff --git a/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1 b/tests/mpi/hp_cell_weights_04.with_mpi=true.with_metis=true.mpirun=2.output.1
new file mode 100644 (file)
index 0000000..f42d31e
--- /dev/null
@@ -0,0 +1,23 @@
+
+DEAL:0:2d::Number of cells before repartitioning: 8
+DEAL:0:2d::  Cumulative dofs per cell: 64
+DEAL:0:2d::Number of cells after repartitioning: 13
+DEAL:0:2d::  Cumulative dofs per cell: 52
+DEAL:0:2d::OK
+DEAL:0:3d::Number of cells before repartitioning: 32
+DEAL:0:3d::  Cumulative dofs per cell: 256
+DEAL:0:3d::Number of cells after repartitioning: 47
+DEAL:0:3d::  Cumulative dofs per cell: 376
+DEAL:0:3d::OK
+
+DEAL:1:2d::Number of cells before repartitioning: 8
+DEAL:1:2d::  Cumulative dofs per cell: 32
+DEAL:1:2d::Number of cells after repartitioning: 3
+DEAL:1:2d::  Cumulative dofs per cell: 44
+DEAL:1:2d::OK
+DEAL:1:3d::Number of cells before repartitioning: 32
+DEAL:1:3d::  Cumulative dofs per cell: 464
+DEAL:1:3d::Number of cells after repartitioning: 17
+DEAL:1:3d::  Cumulative dofs per cell: 344
+DEAL:1:3d::OK
+
index 610511c50ad9a542d196d7a410bd60ffdee6c23a..129ec7436cf94447aca9a8bcbcbb0e4674217184 100644 (file)
@@ -88,7 +88,7 @@ test()
         }
     }
 
-  dh.distribute_dofs(fe_collection);
+  dh.set_fe(fe_collection);
 
   // ----- refine -----
   tria.execute_coarsening_and_refinement();
index e00386c29db1a59b1c102314ee44d06a9249263c..06e51dc0474380ac4c7a7229566b5b2f9fd5060e 100644 (file)
@@ -88,7 +88,7 @@ test()
         }
     }
 
-  dh.distribute_dofs(fe_collection);
+  dh.set_fe(fe_collection);
 
   // ----- refine -----
   tria.execute_coarsening_and_refinement();
