From 975dbe9ca29a21864b9f61eddadc509c1e169783 Mon Sep 17 00:00:00 2001 From: tcclevenger Date: Tue, 19 Sep 2017 19:19:14 -0400 Subject: [PATCH] add distribute_mg_dofs for shared::Triangulation --- .../changes/major/20171010ConradClevenger | 3 + include/deal.II/distributed/shared_tria.h | 41 +- include/deal.II/distributed/tria.h | 1 - include/deal.II/distributed/tria_base.h | 5 + .../deal.II/multigrid/mg_transfer.templates.h | 10 +- source/distributed/shared_tria.cc | 39 +- source/distributed/tria.cc | 70 +-- source/distributed/tria_base.cc | 74 +++ source/dofs/dof_handler_policy.cc | 228 ++++++- source/multigrid/mg_transfer_internal.cc | 6 +- tests/multigrid/step-50_02.cc | 578 ++++++++++++++++++ ...pi=true.with_trilinos=true.mpirun=3.output | 13 + tests/sharedtria/tria_mg_dof_01.cc | 148 +++++ ..._mg_dof_01.mpirun=3.with_p4est=true.output | 8 + tests/sharedtria/tria_mg_dof_02.cc | 121 ++++ .../sharedtria/tria_mg_dof_02.mpirun=3.output | 155 +++++ 16 files changed, 1415 insertions(+), 85 deletions(-) create mode 100644 doc/news/changes/major/20171010ConradClevenger create mode 100644 tests/multigrid/step-50_02.cc create mode 100644 tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output create mode 100644 tests/sharedtria/tria_mg_dof_01.cc create mode 100644 tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output create mode 100644 tests/sharedtria/tria_mg_dof_02.cc create mode 100644 tests/sharedtria/tria_mg_dof_02.mpirun=3.output diff --git a/doc/news/changes/major/20171010ConradClevenger b/doc/news/changes/major/20171010ConradClevenger new file mode 100644 index 0000000000..0db855b1c6 --- /dev/null +++ b/doc/news/changes/major/20171010ConradClevenger @@ -0,0 +1,3 @@ +New: The function distribute_mg_dofs has been implemented for a parallel::shared::Triangulation. This allows geometric multigrid computations on an adaptively refined mesh using a shared triangulation, with the possibility of a user-defined partitioning of the active and level cells. +
+(Conrad Clevenger, 2017/10/10) diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h index 20d865ea79..930b5797a9 100644 --- a/include/deal.II/distributed/shared_tria.h +++ b/include/deal.II/distributed/shared_tria.h @@ -262,6 +262,16 @@ namespace parallel */ const std::vector<types::subdomain_id> &get_true_subdomain_ids_of_cells() const; + /** + * Return a vector of length Triangulation::n_cells(level) where each + * element stores the level subdomain id of the owner of this cell. The + * elements of the vector are the same as the level subdomain ids + * for locally owned and ghost cells, but are also correct for + * artificial cells that do not store who the owner of the cell is in + * their level_subdomain_id field. + */ + const std::vector<types::subdomain_id> &get_true_level_subdomain_ids_of_cells(const unsigned int level) const; + /** * Return allow_artificial_cells, namely true if artificial cells are * allowed. @@ -270,6 +280,12 @@ namespace parallel private: + /** + * Override the function to update the number cache so we can fill data + * like @p level_ghost_owners. + */ + virtual void update_number_cache (); + /** * Settings */ @@ -288,16 +304,27 @@ namespace parallel /** * A vector containing subdomain IDs of cells obtained by partitioning - * using METIS. In case allow_artificial_cells is false, this vector is + * using either zorder, METIS, or a user-defined partitioning scheme. + * In case allow_artificial_cells is false, this vector is * consistent with IDs stored in cell->subdomain_id() of the * triangulation class. When allow_artificial_cells is true, cells which * are artificial will have cell->subdomain_id() == numbers::artificial; * - * The original parition information is stored to allow using sequential + * The original partition information is stored to allow using sequential * DoF distribution and partitioning functions with semi-artificial * cells. */ std::vector<types::subdomain_id> true_subdomain_ids_of_cells; + + /** + * A vector containing level subdomain IDs of cells obtained by partitioning + * each level. + * + * The original partition information is stored to allow using sequential + * DoF distribution and partitioning functions with semi-artificial + * cells. + */ + std::vector<std::vector<types::subdomain_id>> true_level_subdomain_ids_of_cells; }; template <int spacedim> @@ -342,6 +369,11 @@ namespace parallel */ const std::vector<types::subdomain_id> &get_true_subdomain_ids_of_cells() const; + /** + * A dummy function that returns an empty vector. + */ + const std::vector<types::subdomain_id> &get_true_level_subdomain_ids_of_cells(const unsigned int level) const; + /** * A dummy function which always returns true. */ @@ -352,6 +384,11 @@ namespace parallel * A dummy vector. */ std::vector<types::subdomain_id> true_subdomain_ids_of_cells; + + /** + * A dummy vector. + */ + std::vector<types::subdomain_id> true_level_subdomain_ids_of_cells; }; } diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index ce465beeea..499bb5bbfc 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -712,7 +712,6 @@ namespace parallel /** * Override the function to update the number cache so we can fill data * like @p level_ghost_owners. - * */ virtual void update_number_cache (); diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h index 74ccda8714..c1781f529f 100644 --- a/include/deal.II/distributed/tria_base.h +++ b/include/deal.II/distributed/tria_base.h @@ -219,6 +219,11 @@ namespace parallel * Update the number_cache variable after mesh creation or refinement. 
*/ virtual void update_number_cache (); + + /** + * Store the MPI ranks of the level ghost owners of this processor on all levels. + */ + void fill_level_ghost_owners (); }; } // namespace parallel diff --git a/include/deal.II/multigrid/mg_transfer.templates.h b/include/deal.II/multigrid/mg_transfer.templates.h index d0858185ce..2346866b7b 100644 --- a/include/deal.II/multigrid/mg_transfer.templates.h +++ b/include/deal.II/multigrid/mg_transfer.templates.h @@ -140,20 +140,16 @@ namespace const std::vector<unsigned int> &, MGLevelObject<TrilinosWrappers::MPI::Vector> &v) { - const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria = - (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*> + const dealii::parallel::Triangulation<dim,spacedim> *tria = + (dynamic_cast<const parallel::Triangulation<dim,spacedim>*> (&mg_dof.get_triangulation())); - AssertThrow(tria!=nullptr, ExcMessage("multigrid with Trilinos vectors only works with distributed Triangulation!")); + AssertThrow(tria!=nullptr, ExcMessage("multigrid with Trilinos vectors only works with a parallel Triangulation!")); -#ifdef DEAL_II_WITH_P4EST for (unsigned int level=v.min_level(); level<=v.max_level(); ++level) { v[level].reinit(mg_dof.locally_owned_mg_dofs(level), tria->get_communicator()); } -#else - (void)v; -#endif } #endif } diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc index 91f77b48d4..681554f27d 100644 --- a/source/distributed/shared_tria.cc +++ b/source/distributed/shared_tria.cc @@ -111,10 +111,14 @@ namespace parallel // loop over all cells in multigrid hierarchy and mark artificial: if (settings & construct_multigrid_hierarchy) { + true_level_subdomain_ids_of_cells.resize(this->n_levels()); + std::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> predicate = IteratorFilters::LocallyOwnedLevelCell(); for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl) { + true_level_subdomain_ids_of_cells[lvl].resize(this->n_cells(lvl)); + const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator> level_halo_layer_vector = GridTools::compute_cell_halo_layer_on_level (*this, predicate, lvl); std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator> level_halo_layer (level_halo_layer_vector.begin(), level_halo_layer_vector.end()); typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator cell = this->begin(lvl), endc = this->end(lvl); - for (; cell != endc; cell++) + for (unsigned int index=0; cell != endc; cell++, index++) { + // Store the true level subdomain IDs before any are set to artificial + true_level_subdomain_ids_of_cells[lvl][index] = cell->level_subdomain_id(); + // for active cells, we must have knowledge of level subdomain ids of // all neighbors to our subdomain, not just neighbors on the same level. 
// if the cell's subdomain id was not set to artificial above, we will @@ -227,6 +234,15 @@ namespace parallel + template <int dim, int spacedim> + const std::vector<types::subdomain_id> & + Triangulation<dim,spacedim>::get_true_level_subdomain_ids_of_cells(const unsigned int level) const + { + return true_level_subdomain_ids_of_cells[level]; + } + + + template <int dim, int spacedim> void Triangulation<dim,spacedim>::execute_coarsening_and_refinement () @@ -274,6 +290,18 @@ namespace parallel this->update_number_cache (); } + + + template <int dim, int spacedim> + void + Triangulation<dim,spacedim>:: + update_number_cache () + { + parallel::Triangulation<dim,spacedim>::update_number_cache(); + + if (settings & construct_multigrid_hierarchy) + parallel::Triangulation<dim,spacedim>::fill_level_ghost_owners(); + } } } @@ -301,6 +329,15 @@ namespace parallel return true_subdomain_ids_of_cells; } + + + template <int spacedim> + const std::vector<types::subdomain_id> & + Triangulation<1,spacedim>::get_true_level_subdomain_ids_of_cells(const unsigned int) const + { + Assert (false, ExcNotImplemented()); + return true_level_subdomain_ids_of_cells; + } } } diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index 20bcca1b72..b8c7a375be 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1899,6 +1899,8 @@ namespace parallel return dealii::internal::p4est::functions<dim>::checksum (parallel_forest); } + + template <int dim, int spacedim> void Triangulation<dim,spacedim>:: @@ -1906,76 +1908,12 @@ update_number_cache () { parallel::Triangulation<dim,spacedim>::update_number_cache(); - if (this->n_levels() == 0) - return; - if (settings & construct_multigrid_hierarchy) - { - // find level ghost owners - for (typename Triangulation<dim,spacedim>::cell_iterator - cell = this->begin(); - cell != this->end(); - ++cell) - if (cell->level_subdomain_id() != numbers::artificial_subdomain_id - && cell->level_subdomain_id() != this->locally_owned_subdomain()) - this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id()); - -#ifdef DEBUG - // Check that level_ghost_owners is symmetric by sending a message - // to everyone - { - int ierr = MPI_Barrier(this->mpi_communicator); - AssertThrowMPI(ierr); - - // important: preallocate to avoid (re)allocation: - std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size()); - int dummy = 0; - unsigned int req_counter = 0; - - for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin(); - it != this->number_cache.level_ghost_owners.end(); - ++it, ++req_counter) - { - Assert (typeid(types::subdomain_id) - == typeid(unsigned int), - ExcNotImplemented()); - ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED, - *it, 9001, this->mpi_communicator, - &requests[req_counter]); - AssertThrowMPI(ierr); - } - - for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin(); - it != this->number_cache.level_ghost_owners.end(); - ++it) - { - Assert (typeid(types::subdomain_id) - == typeid(unsigned int), - ExcNotImplemented()); - unsigned int dummy; - ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED, - *it, 9001, this->mpi_communicator, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - } - - if (requests.size() > 0) - { - ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); - AssertThrowMPI(ierr); - } - - ierr = MPI_Barrier(this->mpi_communicator); - AssertThrowMPI(ierr); - } -#endif - - Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator), - ExcInternalError()); - } + parallel::Triangulation<dim,spacedim>::fill_level_ghost_owners(); } + template <int dim, int spacedim> typename dealii::internal::p4est::types<dim>::tree * Triangulation<dim,spacedim>:: diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc index 940ffe0706..222b32d898 100644 --- 
a/source/distributed/tria_base.cc +++ b/source/distributed/tria_base.cc @@ -215,6 +215,80 @@ namespace parallel static_cast<types::global_dof_index>(0)); number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator); } + + + + template <int dim, int spacedim> + void + Triangulation<dim,spacedim>::fill_level_ghost_owners () + { + number_cache.level_ghost_owners.clear (); + + if (this->n_levels() > 0) + { + // find level ghost owners + for (typename Triangulation<dim,spacedim>::cell_iterator + cell = this->begin(); + cell != this->end(); + ++cell) + if (cell->level_subdomain_id() != numbers::artificial_subdomain_id + && cell->level_subdomain_id() != this->locally_owned_subdomain()) + this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id()); + +#ifdef DEBUG + // Check that level_ghost_owners is symmetric by sending a message + // to everyone + { + int ierr = MPI_Barrier(this->mpi_communicator); + AssertThrowMPI(ierr); + + // important: preallocate to avoid (re)allocation: + std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size()); + int dummy = 0; + unsigned int req_counter = 0; + + for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin(); + it != this->number_cache.level_ghost_owners.end(); + ++it, ++req_counter) + { + Assert (typeid(types::subdomain_id) + == typeid(unsigned int), + ExcNotImplemented()); + ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED, + *it, 9001, this->mpi_communicator, + &requests[req_counter]); + AssertThrowMPI(ierr); + } + + for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin(); + it != this->number_cache.level_ghost_owners.end(); + ++it) + { + Assert (typeid(types::subdomain_id) + == typeid(unsigned int), + ExcNotImplemented()); + unsigned int dummy; + ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED, - *it, 9001, this->mpi_communicator, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + } + + if (requests.size() > 0) + { + ierr = MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE); + AssertThrowMPI(ierr); + } + + ierr = MPI_Barrier(this->mpi_communicator); + AssertThrowMPI(ierr); + } +#endif + + Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator), + ExcInternalError()); + } + } #else template <int dim, int spacedim> void diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index 3386327657..fd78e7cd92 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -2414,6 +2414,68 @@ namespace internal return subdomain_association; } + + + /** + * Level subdomain association. Similar to the above function, but + * for level meshes. This function assigns boundary dofs in + * the same way as parallel::distributed::Triangulation (the processor + * with the smallest index wins) instead of the coin-flip method above. + */ + template <class DoFHandlerType> + std::vector<types::subdomain_id> + get_dof_level_subdomain_association (const DoFHandlerType &dof_handler, + const types::global_dof_index n_dofs_on_level, + const unsigned int n_procs, + const unsigned int level) + { + (void)n_procs; + std::vector<types::subdomain_id> level_subdomain_association (n_dofs_on_level, + numbers::invalid_subdomain_id); + std::vector<types::global_dof_index> local_dof_indices; + local_dof_indices.reserve (DoFTools::max_dofs_per_cell(dof_handler)); + + // loop over all cells and record which subdomain a DoF belongs to. 
+ // dofs on an interface go to the processor with the smaller subdomain id + typename DoFHandlerType::cell_iterator + cell = dof_handler.begin(level), + endc = dof_handler.end(level); + for (; cell!=endc; ++cell) + { + // get the owner of the cell; note that we have made sure above that + // all cells are either locally owned or ghosts (not artificial), so + // this call will always yield the true owner + const types::subdomain_id level_subdomain_id = cell->level_subdomain_id(); + const unsigned int dofs_per_cell = cell->get_fe().dofs_per_cell; + local_dof_indices.resize (dofs_per_cell); + cell->get_mg_dof_indices (local_dof_indices); + + // set level subdomain ids. if dofs already have their values set then + // they must be on partition interfaces. in that case assign them to the + // processor with the smaller subdomain id. + for (unsigned int i=0; i<dofs_per_cell; ++i) + if (level_subdomain_association[local_dof_indices[i]] == + numbers::invalid_subdomain_id + || level_subdomain_association[local_dof_indices[i]] > level_subdomain_id) + { + level_subdomain_association[local_dof_indices[i]] = level_subdomain_id; + } + } + + Assert (std::find (level_subdomain_association.begin(), + level_subdomain_association.end(), + numbers::invalid_subdomain_id) + == level_subdomain_association.end(), + ExcInternalError()); + + Assert (*std::max_element (level_subdomain_association.begin(), + level_subdomain_association.end()) + < n_procs, + ExcInternalError()); + + return level_subdomain_association; + } } @@ -2581,12 +2643,168 @@ namespace internal ParallelShared:: distribute_mg_dofs () const { - // this is not currently implemented; the algorithm should work - // as above, though: first call the sequential numbering - // algorithm, then re-enumerate subdomain-wise - Assert(false, ExcNotImplemented()); + const unsigned int dim = DoFHandlerType::dimension; + const unsigned int spacedim = DoFHandlerType::space_dimension; - return std::vector<NumberCache>(); + const parallel::shared::Triangulation<dim, spacedim> *tr = + (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>*> (&this->dof_handler->get_triangulation())); + Assert(tr != nullptr, ExcInternalError()); + + const unsigned int n_procs = Utilities::MPI::n_mpi_processes(tr->get_communicator()); + const unsigned int n_levels = tr->n_global_levels(); + + std::vector<NumberCache> number_caches; + number_caches.reserve(n_levels); + + // We create an index set for each level + for (unsigned int lvl=0; lvl<n_levels; ++lvl) + { + std::vector<types::subdomain_id> saved_level_subdomain_ids; + saved_level_subdomain_ids.resize(tr->n_cells(lvl)); + { + typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator + cell = this->dof_handler->get_triangulation().begin(lvl), + endc = this->dof_handler->get_triangulation().end(lvl); + + const std::vector<types::subdomain_id> &true_level_subdomain_ids + = tr->get_true_level_subdomain_ids_of_cells(lvl); + + for (unsigned int index=0; cell != endc; ++cell, ++index) + { + saved_level_subdomain_ids[index] = cell->level_subdomain_id(); + cell->set_level_subdomain_id(true_level_subdomain_ids[index]); + } + } + + // Next, let the sequential algorithm do its magic. It is going to + // enumerate DoFs on all cells on the given level, regardless of owner + const types::global_dof_index n_dofs_on_level = + Implementation::distribute_dofs_on_level(numbers::invalid_subdomain_id, + *this->dof_handler, + lvl); + + // then re-enumerate them based on their level subdomain association. + // for this, we first have to identify for each current DoF + // index which subdomain they belong to. ideally, we would + // like to call DoFRenumbering::subdomain_wise(), but + // because the NumberCache of the current DoFHandler is not + // fully set up yet, we can't quite do that. 
also, that + function has to deal with other kinds of triangulations as + well, whereas we here know what kind of triangulation + we have and can simplify the code accordingly + std::vector<types::global_dof_index> new_dof_indices (n_dofs_on_level, + numbers::invalid_dof_index); + { + // first get the association of each dof with a subdomain and + // determine the total number of subdomain ids used + const std::vector<types::subdomain_id> level_subdomain_association + = get_dof_level_subdomain_association (*this->dof_handler, + n_dofs_on_level, n_procs, lvl); + + // then renumber the subdomains by first looking at those belonging + // to subdomain 0, then those of subdomain 1, etc. note that the + // algorithm is stable, i.e. if two dofs i,j have i<j and belong to the + // same subdomain, then they will be in this order also after reordering + types::global_dof_index next_free_index = 0; + for (types::subdomain_id level_subdomain = 0; level_subdomain<n_procs; ++level_subdomain) + for (types::global_dof_index i=0; i<n_dofs_on_level; ++i) + if (level_subdomain_association[i] == level_subdomain) + { + Assert (new_dof_indices[i] == numbers::invalid_dof_index, + ExcInternalError()); + new_dof_indices[i] = next_free_index; + ++next_free_index; + } + } + + // finally do the renumbering. we can use the sequential version of the + // function because we do things on all cells on this level + Implementation::renumber_mg_dofs (new_dof_indices, IndexSet(0), + *this->dof_handler, lvl, true); + + // update the number cache. for this, we first have to find the level subdomain + // association for each DoF again following renumbering, from which we + // can then compute the IndexSets of locally owned DoFs for all processors. + // all other fields then follow from this + // + // given the way we enumerate degrees of freedom, the locally owned + // ranges must all be contiguous and consecutive. this makes filling + // the IndexSets cheap. an assertion at the top verifies that this + // assumption is true + const std::vector<types::subdomain_id> level_subdomain_association + = get_dof_level_subdomain_association (*this->dof_handler, + n_dofs_on_level, n_procs, lvl); + + for (unsigned int i=1; i<n_dofs_on_level; ++i) + Assert (level_subdomain_association[i] >= level_subdomain_association[i-1], + ExcInternalError()); + + std::vector<IndexSet> locally_owned_dofs_per_processor (n_procs, + IndexSet(n_dofs_on_level)); + { + // we know that the set of subdomain indices is contiguous from + // the assertion above; find the start and end index for each + // processor, taking into account that sometimes a processor + // may not in fact have any DoFs at all. we do the latter + // by just identifying contiguous ranges of level_subdomain_ids + // and filling IndexSets for those subdomains; subdomains + // that don't appear will lead to IndexSets that are simply + // never touched and remain empty as initialized above. + unsigned int start_index = 0; + unsigned int end_index = 0; + while (start_index < n_dofs_on_level) + { + while ((end_index) < n_dofs_on_level && + (level_subdomain_association[end_index] == level_subdomain_association[start_index])) + ++end_index; + + // we've now identified a range of same indices. 
set that + range in the corresponding IndexSet + if (end_index > start_index) + { + const unsigned int level_subdomain_owner = level_subdomain_association[start_index]; + locally_owned_dofs_per_processor[level_subdomain_owner] + .add_range (start_index, end_index); + } + + // then move on to thinking about the next range + start_index = end_index; + } + } + + // finally, restore current level subdomain ids + { + typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator + cell = this->dof_handler->get_triangulation().begin(lvl), + endc = this->dof_handler->get_triangulation().end(lvl); + + for (unsigned int index=0; cell != endc; ++cell, ++index) + cell->set_level_subdomain_id(saved_level_subdomain_ids[index]); + + // add NumberCache for current level + number_caches.emplace_back (NumberCache(locally_owned_dofs_per_processor, + this->dof_handler->get_triangulation() + .locally_owned_subdomain ())); + } + } + + return number_caches; } diff --git a/source/multigrid/mg_transfer_internal.cc b/source/multigrid/mg_transfer_internal.cc index b9cb4fb7d1..69b6c14204 100644 --- a/source/multigrid/mg_transfer_internal.cc +++ b/source/multigrid/mg_transfer_internal.cc @@ -154,10 +154,10 @@ namespace internal } } - const dealii::parallel::distributed::Triangulation<dim,spacedim> *tria = - (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*> + const dealii::parallel::Triangulation<dim,spacedim> *tria = + (dynamic_cast<const parallel::Triangulation<dim,spacedim>*> (&mg_dof.get_triangulation())); - AssertThrow(send_data_temp.size()==0 || tria!=nullptr, ExcMessage("parallel Multigrid only works with a distributed Triangulation!")); + AssertThrow(send_data_temp.size()==0 || tria!=nullptr, ExcMessage("We should only be sending information with a parallel Triangulation!")); #ifdef DEAL_II_WITH_MPI if (tria) diff --git a/tests/multigrid/step-50_02.cc b/tests/multigrid/step-50_02.cc new file mode 100644 index 0000000000..f150a09caa --- /dev/null +++ b/tests/multigrid/step-50_02.cc @@ -0,0 +1,578 @@ +/* --------------------------------------------------------------------- + * + * Copyright (C) 2003 - 2017 by the deal.II authors + * + * This file is part of the deal.II library. + * + * The deal.II library is free software; you can use it, redistribute + * it, and/or modify it under the terms of the GNU Lesser General + * Public License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * The full text of the license can be found in the file LICENSE at + * the top level of the deal.II distribution. 
+ * + * --------------------------------------------------------------------- + + * + * + */ + +// Working version of step-50 with a parallel::shared::Triangulation + +#include "../tests.h" + + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + + +#include + + +namespace LA +{ + using namespace dealii::LinearAlgebraTrilinos; +} + +#include +#include +#include + +namespace Step50 +{ + using namespace dealii; + + + + template + class LaplaceProblem + { + public: + LaplaceProblem (const unsigned int deg); + void run (); + + private: + void setup_system (); + void assemble_system (); + void assemble_multigrid (); + void solve (); + void refine_grid (); + + parallel::shared::Triangulation triangulation; + FE_Q fe; + DoFHandler mg_dof_handler; + + typedef LA::MPI::SparseMatrix matrix_t; + typedef LA::MPI::Vector vector_t; + + matrix_t system_matrix; + + IndexSet locally_relevant_set; + + ConstraintMatrix constraints; + + vector_t solution; + vector_t system_rhs; + + const unsigned int degree; + + MGLevelObject mg_matrices; + MGLevelObject mg_interface_matrices; + MGConstrainedDoFs mg_constrained_dofs; + }; + + + + + + template + class Coefficient : public Function + { + public: + Coefficient () : Function() {} + + virtual double value (const Point &p, + const unsigned int component = 0) const; + + virtual void value_list (const std::vector > &points, + std::vector &values, + const unsigned int component = 0) const; + }; + + + + template + double Coefficient::value (const Point &p, + const unsigned int) const + { + if (p.square() < 0.5*0.5) + return 5; + else + return 1; + } + + + + template + void Coefficient::value_list (const std::vector > &points, + std::vector &values, + const unsigned int component) const + { + (void)component; + const unsigned int n_points = points.size(); + + Assert (values.size() == n_points, + ExcDimensionMismatch (values.size(), n_points)); + + Assert (component == 0, + ExcIndexRange (component, 0, 1)); + + for (unsigned int i=0; i::value (points[i]); + } + + + template + LaplaceProblem::LaplaceProblem (const unsigned int degree) + : + triangulation (MPI_COMM_WORLD, + typename Triangulation::MeshSmoothing + (Triangulation::limit_level_difference_at_vertices), true, + // parallel::shared::Triangulation::Settings::partition_custom_signal), + typename parallel::shared::Triangulation::Settings + (parallel::shared::Triangulation::partition_zorder | + parallel::shared::Triangulation::construct_multigrid_hierarchy)), + fe (degree), + mg_dof_handler (triangulation), + degree(degree) + {} + + + + template + void LaplaceProblem::setup_system () + { + mg_dof_handler.distribute_dofs (fe); + mg_dof_handler.distribute_mg_dofs (); + + DoFTools::extract_locally_relevant_dofs (mg_dof_handler, + locally_relevant_set); + + solution.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD); + system_rhs.reinit(mg_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD); + + constraints.reinit (locally_relevant_set); + DoFTools::make_hanging_node_constraints (mg_dof_handler, constraints); + + std::set dirichlet_boundary_ids; + typename FunctionMap::type dirichlet_boundary; + Functions::ConstantFunction homogeneous_dirichlet_bc (1.0); + 
dirichlet_boundary_ids.insert(0); + dirichlet_boundary[0] = &homogeneous_dirichlet_bc; + VectorTools::interpolate_boundary_values (mg_dof_handler, + dirichlet_boundary, + constraints); + constraints.close (); + + DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(), mg_dof_handler.n_dofs()); + DoFTools::make_sparsity_pattern (mg_dof_handler, dsp, constraints); + system_matrix.reinit (mg_dof_handler.locally_owned_dofs(), dsp, MPI_COMM_WORLD, true); + + + mg_constrained_dofs.clear(); + mg_constrained_dofs.initialize(mg_dof_handler); + mg_constrained_dofs.make_zero_boundary_constraints(mg_dof_handler, dirichlet_boundary_ids); + + + const unsigned int n_levels = triangulation.n_global_levels(); + + mg_interface_matrices.resize(0, n_levels-1); + mg_interface_matrices.clear_elements (); + mg_matrices.resize(0, n_levels-1); + mg_matrices.clear_elements (); + + for (unsigned int level=0; level + void LaplaceProblem::assemble_system () + { + const QGauss quadrature_formula(degree+1); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + Vector cell_rhs (dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + const Coefficient coefficient; + std::vector coefficient_values (n_q_points); + + typename DoFHandler::active_cell_iterator + cell = mg_dof_handler.begin_active(), + endc = mg_dof_handler.end(); + for (; cell!=endc; ++cell) + if (cell->is_locally_owned()) + { + cell_matrix = 0; + cell_rhs = 0; + + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_dof_indices (local_dof_indices); + constraints.distribute_local_to_global (cell_matrix, cell_rhs, + local_dof_indices, + system_matrix, system_rhs); + } + + system_matrix.compress(VectorOperation::add); + system_rhs.compress(VectorOperation::add); + } + + + + template + void LaplaceProblem::assemble_multigrid () + { + QGauss quadrature_formula(1+degree); + + FEValues fe_values (fe, quadrature_formula, + update_values | update_gradients | + update_quadrature_points | update_JxW_values); + + const unsigned int dofs_per_cell = fe.dofs_per_cell; + const unsigned int n_q_points = quadrature_formula.size(); + + FullMatrix cell_matrix (dofs_per_cell, dofs_per_cell); + + std::vector local_dof_indices (dofs_per_cell); + + const Coefficient coefficient; + std::vector coefficient_values (n_q_points); + + + + std::vector boundary_constraints (triangulation.n_global_levels()); + ConstraintMatrix empty_constraints; + for (unsigned int level=0; level::cell_iterator cell = mg_dof_handler.begin(), + endc = mg_dof_handler.end(); + + for (; cell!=endc; ++cell) + if (cell->level_subdomain_id()==triangulation.locally_owned_subdomain()) + { + cell_matrix = 0; + fe_values.reinit (cell); + + coefficient.value_list (fe_values.get_quadrature_points(), + coefficient_values); + + for (unsigned int q_point=0; q_pointget_mg_dof_indices (local_dof_indices); + + boundary_constraints[cell->level()] + .distribute_local_to_global (cell_matrix, + local_dof_indices, + mg_matrices[cell->level()]); + + + const IndexSet &interface_dofs_on_level + = mg_constrained_dofs.get_refinement_edge_indices(cell->level()); + const unsigned int lvl = cell->level(); + + for (unsigned int i=0; ilevel()]); + } + + for (unsigned int i=0; 
i + void LaplaceProblem::solve () + { + MGTransferPrebuilt mg_transfer(mg_constrained_dofs); + mg_transfer.build_matrices(mg_dof_handler); + + matrix_t &coarse_matrix = mg_matrices[0]; + + SolverControl coarse_solver_control (1000, 1e-10, false, false); + SolverCG coarse_solver(coarse_solver_control); + PreconditionIdentity id; + MGCoarseGridIterativeSolver, matrix_t, PreconditionIdentity> + coarse_grid_solver(coarse_solver, coarse_matrix, id); + + typedef LA::MPI::PreconditionJacobi Smoother; + MGSmootherPrecondition mg_smoother; + mg_smoother.initialize(mg_matrices, Smoother::AdditionalData(0.5)); + mg_smoother.set_steps(2); + + mg::Matrix mg_matrix(mg_matrices); + mg::Matrix mg_interface_up(mg_interface_matrices); + mg::Matrix mg_interface_down(mg_interface_matrices); + + Multigrid mg(mg_matrix, + coarse_grid_solver, + mg_transfer, + mg_smoother, + mg_smoother); + mg.set_edge_matrices(mg_interface_down, mg_interface_up); + + PreconditionMG > + preconditioner(mg_dof_handler, mg, mg_transfer); + + + SolverControl solver_control (500, 1e-8*system_rhs.l2_norm(), false); + SolverCG solver (solver_control); + + if (false) + { + /* + TrilinosWrappers::PreconditionAMG prec; + + TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data; + Amg_data.elliptic = true; + Amg_data.higher_order_elements = true; + Amg_data.smoother_sweeps = 2; + Amg_data.aggregation_threshold = 0.02; + + prec.initialize (system_matrix, + Amg_data); + solver.solve (system_matrix, solution, system_rhs, prec); + */ + } + else + { + solver.solve (system_matrix, solution, system_rhs, + preconditioner); + } + deallog << " CG converged in " << solver_control.last_step() << " iterations." << std::endl; + + constraints.distribute (solution); + } + + + + template + void LaplaceProblem::refine_grid () + { + for (typename Triangulation::active_cell_iterator cell=triangulation.begin_active(); cell != triangulation.end(); ++cell) + for (unsigned int v=0; v::vertices_per_cell; ++v) + if (cell->vertex(v)[0] <= 0.5 && cell->vertex(v)[1] <= 0.5) + cell->set_refine_flag(); + triangulation.execute_coarsening_and_refinement (); + } + + + + template + void LaplaceProblem::run () + { + for (unsigned int cycle=0; cycle<2; ++cycle) + { + deallog << "Cycle " << cycle << ':' << std::endl; + + if (cycle == 0) + { + GridGenerator::hyper_cube (triangulation); + triangulation.refine_global (5); + } + else + refine_grid (); + + deallog << " Number of active cells: " + << triangulation.n_global_active_cells() + << std::endl; + + setup_system (); + + deallog << " Number of degrees of freedom: " + << mg_dof_handler.n_dofs() + << " (by level: "; + for (unsigned int level=0; level laplace_problem(1/*degree*/); + laplace_problem.run (); + } + catch (std::exception &exc) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Exception on processing: " << std::endl + << exc.what() << std::endl + << "Aborting!" << std::endl + << "----------------------------------------------------" + << std::endl; + } + catch (...) + { + std::cerr << std::endl << std::endl + << "----------------------------------------------------" + << std::endl; + std::cerr << "Unknown exception!" << std::endl + << "Aborting!" 
<< std::endl + << "----------------------------------------------------" + << std::endl; + throw; + } + + return 0; +} diff --git a/tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output b/tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output new file mode 100644 index 0000000000..02d1ad5efe --- /dev/null +++ b/tests/multigrid/step-50_02.with_mpi=true.with_trilinos=true.mpirun=3.output @@ -0,0 +1,13 @@ + +DEAL::Cycle 0: +DEAL:: Number of active cells: 1024 +DEAL:: Number of degrees of freedom: 1089 (by level: 4, 9, 25, 81, 289, 1089) +DEAL:cg::Starting value 29.7769 +DEAL:cg::Convergence step 7 value 8.10686e-08 +DEAL:: CG converged in 7 iterations. +DEAL::Cycle 1: +DEAL:: Number of active cells: 1891 +DEAL:: Number of degrees of freedom: 1990 (by level: 4, 9, 25, 81, 289, 1089, 1225) +DEAL:cg::Starting value 41.0662 +DEAL:cg::Convergence step 7 value 2.21064e-07 +DEAL:: CG converged in 7 iterations. diff --git a/tests/sharedtria/tria_mg_dof_01.cc b/tests/sharedtria/tria_mg_dof_01.cc new file mode 100644 index 0000000000..948091bea3 --- /dev/null +++ b/tests/sharedtria/tria_mg_dof_01.cc @@ -0,0 +1,148 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2008 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +// distribute mg dofs on a shared and a distributed mesh and compare. +// Note: this doesn't pass for all meshes since, for some complicated refinement +// schemes, cells may be traversed in a different order in a distributed +// triangulation as opposed to a shared triangulation. 
+ #include "../tests.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +template <int dim> +void compare_meshes (DoFHandler<dim> &shared_dof_handler, DoFHandler<dim> &distributed_dof_handler) +{ + FE_Q<dim> fe (2); + + shared_dof_handler.distribute_dofs(fe); + shared_dof_handler.distribute_mg_dofs(); + + distributed_dof_handler.distribute_dofs(fe); + distributed_dof_handler.distribute_mg_dofs(); + + unsigned int n_levels = distributed_dof_handler.get_triangulation().n_global_levels(); + for (unsigned int lvl=0; lvl<n_levels; ++lvl) + { + typename DoFHandler<dim>::cell_iterator + cell = distributed_dof_handler.begin(lvl), + endc = distributed_dof_handler.end(lvl); + for (; cell!=endc; ++cell) + { + if (cell->level_subdomain_id()==numbers::artificial_subdomain_id) + continue; + + typename Triangulation<dim>::cell_iterator + tria_shared_cell = cell->id().to_cell(shared_dof_handler.get_triangulation()); + typename DoFHandler<dim>::cell_iterator dof_shared_cell(&shared_dof_handler.get_triangulation(), + tria_shared_cell->level(), + tria_shared_cell->index(), + &shared_dof_handler); + + std::vector<types::global_dof_index> distributed_cell_dofs(fe.dofs_per_cell); + std::vector<types::global_dof_index> shared_cell_dofs(fe.dofs_per_cell); + cell->get_mg_dof_indices(distributed_cell_dofs); + dof_shared_cell->get_mg_dof_indices(shared_cell_dofs); + for (unsigned int i=0; i<fe.dofs_per_cell; ++i) + AssertThrow(distributed_cell_dofs[i] == shared_cell_dofs[i], + ExcInternalError()); + } + } +} + +template <int dim> +void test() +{ + parallel::shared::Triangulation<dim> shared_tria(MPI_COMM_WORLD,typename Triangulation<dim>::MeshSmoothing + (Triangulation<dim>::limit_level_difference_at_vertices), true, + typename parallel::shared::Triangulation<dim>::Settings + (parallel::shared::Triangulation<dim>::partition_zorder | + parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy)); + DoFHandler<dim> shared_dof_handler(shared_tria); + + parallel::distributed::Triangulation<dim> distributed_tria(MPI_COMM_WORLD,Triangulation<dim>:: + limit_level_difference_at_vertices, + parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy); + DoFHandler<dim> distributed_dof_handler(distributed_tria); + + GridGenerator::hyper_cube(shared_tria); + shared_tria.refine_global(2); + for (typename Triangulation<dim>::active_cell_iterator cell=shared_tria.begin_active(); cell != shared_tria.end(); ++cell) + { + if (dim==1) + if (cell->center()[0] < 0.5) + cell->set_refine_flag(); + if (dim==2) + if (cell->center()[0] < 0.5 && cell->center()[1] < 0.5) + cell->set_refine_flag(); + if (dim==3) + if (cell->center()[0] < 0.5 && cell->center()[1] < 0.5 && cell->center()[2] < 0.5) + cell->set_refine_flag(); + } + shared_tria.execute_coarsening_and_refinement(); + + GridGenerator::hyper_cube(distributed_tria); + distributed_tria.refine_global(2); + for (typename Triangulation<dim>::active_cell_iterator cell=distributed_tria.begin_active(); cell != distributed_tria.end(); ++cell) + { + if (dim==1) + if (cell->is_locally_owned() && cell->center()[0] < 0.5) + cell->set_refine_flag(); + if (dim==2) + if (cell->is_locally_owned() && cell->center()[0] < 0.5 && cell->center()[1] < 0.5) + cell->set_refine_flag(); + if (dim==3) + if (cell->is_locally_owned() && cell->center()[0] < 0.5 && cell->center()[1] < 0.5 && cell->center()[2] < 0.5) + cell->set_refine_flag(); + } + distributed_tria.execute_coarsening_and_refinement(); + + compare_meshes(shared_dof_handler,distributed_dof_handler); +} + +int main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll all; + + deallog.push("2d"); + test<2>(); + deallog.pop(); + deallog.push("3d"); + test<3>(); + deallog.pop(); + + deallog << "OK" << std::endl; +} 
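[Editorial sketch, not part of the patch: the usage pattern that the two sharedtria tests above exercise reduces to the following minimal example. It assumes the API of this branch (no-argument distribute_mg_dofs, deal.II 9.x-era headers); the dimension, mesh, and polynomial degree are arbitrary placeholders.]

    // Minimal sketch: enumerate level DoFs on a shared triangulation that
    // carries a multigrid hierarchy. Assumes an MPI-initialized program,
    // e.g. via Utilities::MPI::MPI_InitFinalize in main().
    #include <deal.II/base/index_set.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>

    using namespace dealii;

    void setup_level_dofs ()
    {
      parallel::shared::Triangulation<2> tria
        (MPI_COMM_WORLD,
         Triangulation<2>::limit_level_difference_at_vertices,
         /*allow_artificial_cells=*/ true,
         parallel::shared::Triangulation<2>::Settings
         (parallel::shared::Triangulation<2>::partition_zorder |
          parallel::shared::Triangulation<2>::construct_multigrid_hierarchy));

      GridGenerator::hyper_cube (tria);
      tria.refine_global (3);

      FE_Q<2>       fe (1);
      DoFHandler<2> dof_handler (tria);
      dof_handler.distribute_dofs (fe);
      dof_handler.distribute_mg_dofs ();  // newly supported by this patch

      // every level now provides a locally owned index set, just as for a
      // parallel::distributed::Triangulation:
      for (unsigned int lvl = 0; lvl < tria.n_global_levels(); ++lvl)
        {
          const IndexSet owned = dof_handler.locally_owned_mg_dofs (lvl);
          (void)owned; // feed into level matrices / transfer operators
        }
    }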
diff --git a/tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output b/tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output new file mode 100644 index 0000000000..3342d14fa8 --- /dev/null +++ b/tests/sharedtria/tria_mg_dof_01.mpirun=3.with_p4est=true.output @@ -0,0 +1,8 @@ + +DEAL:0::OK + +DEAL:1::OK + + +DEAL:2::OK + diff --git a/tests/sharedtria/tria_mg_dof_02.cc b/tests/sharedtria/tria_mg_dof_02.cc new file mode 100644 index 0000000000..7978bd51f3 --- /dev/null +++ b/tests/sharedtria/tria_mg_dof_02.cc @@ -0,0 +1,121 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2008 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +// distribute mg dofs for a shared triangulation and +// check that level_ghost_owners is set properly + +#include "../tests.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +template <int dim> +void write_dof_data (DoFHandler<dim> &dof_handler) +{ + FE_Q<dim> fe (2); + + dof_handler.distribute_dofs(fe); + dof_handler.distribute_mg_dofs(); + + unsigned int n_levels = dof_handler.get_triangulation().n_global_levels(); + for (unsigned int lvl=0; lvl<n_levels; ++lvl) + { + std::vector<IndexSet> dof_index_per_proc = dof_handler.locally_owned_mg_dofs_per_processor(lvl); + for (unsigned int i=0; i<dof_index_per_proc.size(); ++i) + dof_index_per_proc[i].print(deallog); + + typename DoFHandler<dim>::cell_iterator + cell = dof_handler.begin(lvl), + endc = dof_handler.end(lvl); + for (; cell!=endc; ++cell) + { + if (cell->level_subdomain_id()==numbers::artificial_subdomain_id) + continue; + + std::vector<types::global_dof_index> local_mg_dof_indices(fe.dofs_per_cell); + cell->get_mg_dof_indices(local_mg_dof_indices); + deallog << "proc " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) << ", " + << "cell " << cell->id() << ", mg_dof_indices: "; + for (unsigned int i=0; i<fe.dofs_per_cell; ++i) + deallog << local_mg_dof_indices[i] << " "; + deallog << std::endl; + } + } +} + +template <int dim> +void test() +{ + parallel::shared::Triangulation<dim> tria(MPI_COMM_WORLD,typename Triangulation<dim>::MeshSmoothing + (Triangulation<dim>::limit_level_difference_at_vertices), true, + typename parallel::shared::Triangulation<dim>::Settings + (parallel::shared::Triangulation<dim>::partition_zorder | + parallel::shared::Triangulation<dim>::construct_multigrid_hierarchy)); + + DoFHandler<dim> dof_handler(tria); + + GridGenerator::hyper_cube(tria,-1,1); + + tria.refine_global(1); + + for (typename Triangulation<dim>::active_cell_iterator cell=tria.begin_active(); cell != tria.end(); ++cell) + { + if (dim==2) + if (cell->center()[0] < 0) + cell->set_refine_flag(); + if (dim==3) + if (cell->center()[0] < 0 && cell->center()[1] < 0) + cell->set_refine_flag(); + } + tria.execute_coarsening_and_refinement(); + + deallog << "proc " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) << ", level_ghost_owners: "; + for (std::set<types::subdomain_id>::iterator it=tria.level_ghost_owners().begin(); it!=tria.level_ghost_owners().end(); ++it) + deallog << *it << " "; + deallog << std::endl; + + write_dof_data(dof_handler); +} + +int main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll all; + + deallog.push("2d"); + test<2>(); 
deallog.pop(); + deallog.push("3d"); + test<3>(); + deallog.pop(); +} + diff --git a/tests/sharedtria/tria_mg_dof_02.mpirun=3.output b/tests/sharedtria/tria_mg_dof_02.mpirun=3.output new file mode 100644 index 0000000000..f6720e9f34 --- /dev/null +++ b/tests/sharedtria/tria_mg_dof_02.mpirun=3.output @@ -0,0 +1,155 @@ + +DEAL:0:2d::proc 0, level_ghost_owners: 1 2 +DEAL:0:2d::{[0,8]} +DEAL:0:2d::{} +DEAL:0:2d::{} +DEAL:0:2d::proc 0, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:0:2d::{[0,8]} +DEAL:0:2d::{[9,14]} +DEAL:0:2d::{[15,24]} +DEAL:0:2d::proc 0, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:0:2d::proc 0, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 +DEAL:0:2d::proc 0, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 +DEAL:0:2d::proc 0, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:0:2d::{[0,24]} +DEAL:0:2d::{} +DEAL:0:2d::{[25,44]} +DEAL:0:2d::proc 0, cell 0_2:00, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:0:2d::proc 0, cell 0_2:01, mg_dof_indices: 1 9 3 10 5 11 12 13 14 +DEAL:0:2d::proc 0, cell 0_2:02, mg_dof_indices: 2 3 15 16 17 18 7 19 20 +DEAL:0:2d::proc 0, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:0:2d::proc 0, cell 0_2:20, mg_dof_indices: 15 16 25 26 27 28 19 29 30 +DEAL:0:2d::proc 0, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 +DEAL:0:3d::proc 0, level_ghost_owners: 1 2 +DEAL:0:3d::{[0,26]} +DEAL:0:3d::{} +DEAL:0:3d::{} +DEAL:0:3d::proc 0, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:0:3d::{[0,26]} +DEAL:0:3d::{[27,74]} +DEAL:0:3d::{[75,124]} +DEAL:0:3d::proc 0, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:0:3d::proc 0, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 +DEAL:0:3d::proc 0, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 +DEAL:0:3d::proc 0, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 +DEAL:0:3d::proc 0, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 +DEAL:0:3d::proc 0, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:0:3d::proc 0, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 +DEAL:0:3d::proc 0, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 +DEAL:0:3d::{[0,124]} +DEAL:0:3d::{} +DEAL:0:3d::{[125,224]} +DEAL:0:3d::proc 0, cell 0_2:00, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:0:3d::proc 0, cell 0_2:01, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 +DEAL:0:3d::proc 0, cell 0_2:02, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 +DEAL:0:3d::proc 0, cell 0_2:03, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 +DEAL:0:3d::proc 0, cell 0_2:04, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 +DEAL:0:3d::proc 0, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:0:3d::proc 0, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 
85 86 110 111 112 113 90 114 61 115 116 +DEAL:0:3d::proc 0, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 +DEAL:0:3d::proc 0, cell 0_2:40, mg_dof_indices: 75 76 77 78 125 126 127 128 79 80 81 82 129 130 131 132 133 134 135 136 137 138 139 140 91 141 142 +DEAL:0:3d::proc 0, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 +DEAL:0:3d::proc 0, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 +DEAL:0:3d::proc 0, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 + +DEAL:1:2d::proc 1, level_ghost_owners: 0 2 +DEAL:1:2d::{[0,8]} +DEAL:1:2d::{} +DEAL:1:2d::{} +DEAL:1:2d::proc 1, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:1:2d::{[0,8]} +DEAL:1:2d::{[9,14]} +DEAL:1:2d::{[15,24]} +DEAL:1:2d::proc 1, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:1:2d::proc 1, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 +DEAL:1:2d::proc 1, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 +DEAL:1:2d::proc 1, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:1:2d::{[0,24]} +DEAL:1:2d::{} +DEAL:1:2d::{[25,44]} +DEAL:1:2d::proc 1, cell 0_2:01, mg_dof_indices: 1 9 3 10 5 11 12 13 14 +DEAL:1:2d::proc 1, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:1:2d::proc 1, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 +DEAL:1:3d::proc 1, level_ghost_owners: 0 2 +DEAL:1:3d::{[0,26]} +DEAL:1:3d::{} +DEAL:1:3d::{} +DEAL:1:3d::proc 1, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:1:3d::{[0,26]} +DEAL:1:3d::{[27,74]} +DEAL:1:3d::{[75,124]} +DEAL:1:3d::proc 1, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:1:3d::proc 1, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 +DEAL:1:3d::proc 1, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 +DEAL:1:3d::proc 1, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 +DEAL:1:3d::proc 1, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 +DEAL:1:3d::proc 1, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:1:3d::proc 1, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 +DEAL:1:3d::proc 1, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 +DEAL:1:3d::{[0,124]} +DEAL:1:3d::{} +DEAL:1:3d::{[125,224]} +DEAL:1:3d::proc 1, cell 0_2:01, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 +DEAL:1:3d::proc 1, cell 0_2:02, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 +DEAL:1:3d::proc 1, cell 0_2:03, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 +DEAL:1:3d::proc 1, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:1:3d::proc 1, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 
109 85 86 110 111 112 113 90 114 61 115 116 +DEAL:1:3d::proc 1, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 +DEAL:1:3d::proc 1, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 +DEAL:1:3d::proc 1, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 +DEAL:1:3d::proc 1, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 + + +DEAL:2:2d::proc 2, level_ghost_owners: 0 1 +DEAL:2:2d::{[0,8]} +DEAL:2:2d::{} +DEAL:2:2d::{} +DEAL:2:2d::proc 2, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:2:2d::{[0,8]} +DEAL:2:2d::{[9,14]} +DEAL:2:2d::{[15,24]} +DEAL:2:2d::proc 2, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 +DEAL:2:2d::proc 2, cell 0_1:1, mg_dof_indices: 1 9 3 10 5 11 12 13 14 +DEAL:2:2d::proc 2, cell 0_1:2, mg_dof_indices: 2 3 15 16 17 18 7 19 20 +DEAL:2:2d::proc 2, cell 0_1:3, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:2:2d::{[0,24]} +DEAL:2:2d::{} +DEAL:2:2d::{[25,44]} +DEAL:2:2d::proc 2, cell 0_2:02, mg_dof_indices: 2 3 15 16 17 18 7 19 20 +DEAL:2:2d::proc 2, cell 0_2:03, mg_dof_indices: 3 10 16 21 18 22 13 23 24 +DEAL:2:2d::proc 2, cell 0_2:20, mg_dof_indices: 15 16 25 26 27 28 19 29 30 +DEAL:2:2d::proc 2, cell 0_2:21, mg_dof_indices: 16 21 26 31 28 32 23 33 34 +DEAL:2:2d::proc 2, cell 0_2:22, mg_dof_indices: 25 26 35 36 37 38 29 39 40 +DEAL:2:2d::proc 2, cell 0_2:23, mg_dof_indices: 26 31 36 41 38 42 33 43 44 +DEAL:2:3d::proc 2, level_ghost_owners: 0 1 +DEAL:2:3d::{[0,26]} +DEAL:2:3d::{} +DEAL:2:3d::{} +DEAL:2:3d::proc 2, cell 0_0:, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:2:3d::{[0,26]} +DEAL:2:3d::{[27,74]} +DEAL:2:3d::{[75,124]} +DEAL:2:3d::proc 2, cell 0_1:0, mg_dof_indices: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 +DEAL:2:3d::proc 2, cell 0_1:1, mg_dof_indices: 1 27 3 28 5 29 7 30 9 31 32 33 13 34 35 36 17 37 19 38 21 39 40 41 42 43 44 +DEAL:2:3d::proc 2, cell 0_1:2, mg_dof_indices: 2 3 45 46 6 7 47 48 49 50 11 51 52 53 15 54 18 19 55 56 57 58 23 59 60 61 62 +DEAL:2:3d::proc 2, cell 0_1:3, mg_dof_indices: 3 28 46 63 7 30 48 64 50 65 33 66 53 67 36 68 19 38 56 69 58 70 41 71 72 73 74 +DEAL:2:3d::proc 2, cell 0_1:4, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 +DEAL:2:3d::proc 2, cell 0_1:5, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:2:3d::proc 2, cell 0_1:6, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 +DEAL:2:3d::proc 2, cell 0_1:7, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 121 102 122 73 123 124 +DEAL:2:3d::{[0,124]} +DEAL:2:3d::{} +DEAL:2:3d::{[125,224]} +DEAL:2:3d::proc 2, cell 0_2:04, mg_dof_indices: 4 5 6 7 75 76 77 78 12 13 14 15 79 80 81 82 83 84 85 86 87 88 89 90 25 91 92 +DEAL:2:3d::proc 2, cell 0_2:05, mg_dof_indices: 5 29 7 30 76 93 78 94 13 34 35 36 80 95 96 97 84 98 86 99 88 100 101 102 43 103 104 +DEAL:2:3d::proc 2, cell 0_2:06, mg_dof_indices: 6 7 47 48 77 78 105 106 52 53 15 54 107 108 82 109 85 86 110 111 112 113 90 114 61 115 116 +DEAL:2:3d::proc 2, cell 0_2:07, mg_dof_indices: 7 30 48 64 78 94 106 117 53 67 36 68 108 118 97 119 86 99 111 120 113 
121 102 122 73 123 124 +DEAL:2:3d::proc 2, cell 0_2:40, mg_dof_indices: 75 76 77 78 125 126 127 128 79 80 81 82 129 130 131 132 133 134 135 136 137 138 139 140 91 141 142 +DEAL:2:3d::proc 2, cell 0_2:41, mg_dof_indices: 76 93 78 94 126 143 128 144 80 95 96 97 130 145 146 147 134 148 136 149 138 150 151 152 103 153 154 +DEAL:2:3d::proc 2, cell 0_2:42, mg_dof_indices: 77 78 105 106 127 128 155 156 107 108 82 109 157 158 132 159 135 136 160 161 162 163 140 164 115 165 166 +DEAL:2:3d::proc 2, cell 0_2:43, mg_dof_indices: 78 94 106 117 128 144 156 167 108 118 97 119 158 168 147 169 136 149 161 170 163 171 152 172 123 173 174 +DEAL:2:3d::proc 2, cell 0_2:44, mg_dof_indices: 125 126 127 128 175 176 177 178 129 130 131 132 179 180 181 182 183 184 185 186 187 188 189 190 141 191 192 +DEAL:2:3d::proc 2, cell 0_2:45, mg_dof_indices: 126 143 128 144 176 193 178 194 130 145 146 147 180 195 196 197 184 198 186 199 188 200 201 202 153 203 204 +DEAL:2:3d::proc 2, cell 0_2:46, mg_dof_indices: 127 128 155 156 177 178 205 206 157 158 132 159 207 208 182 209 185 186 210 211 212 213 190 214 165 215 216 +DEAL:2:3d::proc 2, cell 0_2:47, mg_dof_indices: 128 144 156 167 178 194 206 217 158 168 147 169 208 218 197 219 186 199 211 220 213 221 202 222 173 223 224 + -- 2.39.5
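[Editorial sketch, not part of the patch: a note on the enumeration strategy in dof_handler_policy.cc above. After the per-level renumbering, the level subdomain association is non-decreasing, so each process owns exactly one contiguous half-open range of DoFs per level; that is what makes filling locally_owned_dofs_per_processor a single linear scan. The invariant can be seen in isolation in this standalone C++ program; the 'assoc' data is made up and stands in for level_subdomain_association.]

    // Standalone illustration of the contiguous-range scan used to build
    // locally_owned_dofs_per_processor in the patch.
    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <iostream>
    #include <map>
    #include <utility>
    #include <vector>

    int main()
    {
      const std::vector<unsigned int> assoc = {0, 0, 0, 1, 1, 2, 2, 2, 2};
      assert(std::is_sorted(assoc.begin(), assoc.end())); // the invariant

      // one pass over the DoFs; each maximal run of equal subdomain ids
      // becomes that process's ownership range [begin, end)
      std::map<unsigned int, std::pair<std::size_t, std::size_t>> range;
      std::size_t start = 0;
      while (start < assoc.size())
        {
          std::size_t end = start;
          while (end < assoc.size() && assoc[end] == assoc[start])
            ++end;
          range[assoc[start]] = std::make_pair(start, end);
          start = end;
        }

      for (const auto &r : range)
        std::cout << "proc " << r.first << " owns [" << r.second.first << ","
                  << r.second.second << ")\n";
      // prints: proc 0 owns [0,3)  proc 1 owns [3,5)  proc 2 owns [5,9)
    }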