From c68aad68182b73d8c7df950e25d9b98e82402c34 Mon Sep 17 00:00:00 2001 From: Denis Davydov Date: Wed, 12 Aug 2015 13:47:33 +0200 Subject: [PATCH] Introduce parallel::shared::Triangulation It encapsulates the distribution of cells among processors and in the majority of cases behaves like its distributed triangulation counterpart. --- doc/news/changes.h | 9 + include/deal.II/distributed/shared_tria.h | 150 +++++++++++ include/deal.II/distributed/tria.h | 122 +-------- include/deal.II/distributed/tria_base.h | 192 ++++++++++++++ include/deal.II/dofs/dof_handler_policy.h | 102 ++++++-- include/deal.II/dofs/dof_tools.h | 16 ++ .../deal.II/grid/tria_accessor.templates.h | 58 +++-- source/distributed/CMakeLists.txt | 4 + source/distributed/shared_tria.cc | 128 ++++++++++ source/distributed/shared_tria.inst.in | 36 +++ source/distributed/tria.cc | 236 +++--------------- source/distributed/tria_base.cc | 222 ++++++++++++++++ source/distributed/tria_base.inst.in | 33 +++ source/dofs/dof_handler.cc | 22 +- source/dofs/dof_handler_policy.cc | 176 ++++++++++++- source/dofs/dof_handler_policy.inst.in | 3 + source/dofs/dof_renumbering.cc | 12 +- source/dofs/dof_tools.cc | 50 ++++ source/dofs/dof_tools.inst.in | 28 ++- source/hp/dof_handler.cc | 29 ++- tests/CMakeLists.txt | 2 +- tests/sharedtria/CMakeLists.txt | 5 + tests/sharedtria/dof_01.cc | 148 +++++++++++ .../dof_01.with_metis=true.mpirun=3.output | 50 ++++ .../sharedtria/dof_01.with_metis=true.output | 16 ++ tests/sharedtria/dof_02.cc | 149 +++++++++++ .../dof_02.with_metis=true.mpirun=3.output | 50 ++++ .../sharedtria/dof_02.with_metis=true.output | 16 ++ tests/sharedtria/tria_01.cc | 107 ++++++++ .../tria_01.with_metis=true.mpirun=3.output | 41 +++ .../sharedtria/tria_01.with_metis=true.output | 13 + 31 files changed, 1840 insertions(+), 385 deletions(-) create mode 100644 include/deal.II/distributed/shared_tria.h create mode 100644 include/deal.II/distributed/tria_base.h create mode 100644 source/distributed/shared_tria.cc create mode 100644 source/distributed/shared_tria.inst.in create mode 100644 source/distributed/tria_base.cc create mode 100644 source/distributed/tria_base.inst.in create mode 100644 tests/sharedtria/CMakeLists.txt create mode 100644 tests/sharedtria/dof_01.cc create mode 100644 tests/sharedtria/dof_01.with_metis=true.mpirun=3.output create mode 100644 tests/sharedtria/dof_01.with_metis=true.output create mode 100644 tests/sharedtria/dof_02.cc create mode 100644 tests/sharedtria/dof_02.with_metis=true.mpirun=3.output create mode 100644 tests/sharedtria/dof_02.with_metis=true.output create mode 100644 tests/sharedtria/tria_01.cc create mode 100644 tests/sharedtria/tria_01.with_metis=true.mpirun=3.output create mode 100644 tests/sharedtria/tria_01.with_metis=true.output diff --git a/doc/news/changes.h b/doc/news/changes.h index bd6c4f94fc..ff44c44573 100644 --- a/doc/news/changes.h +++ b/doc/news/changes.h @@ -66,6 +66,15 @@ inconvenience this causes.
    +
  1. New: The parallel::shared::Triangulation class extends + the Triangulation class to automatically partition the triangulation when + run with MPI. Functionality common to parallel::shared::Triangulation and + parallel::distributed::Triangulation is grouped in the parent class + parallel::Triangulation. +
    + (Denis Davydov, 2015/08/14) +
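+ As a minimal illustrative sketch (not part of the patch itself; it assumes + deal.II was configured with MPI and METIS, and that the usual grid, finite + element and DoF headers are included): + @code + parallel::shared::Triangulation<2> triangulation (MPI_COMM_WORLD); + GridGenerator::hyper_cube (triangulation); + triangulation.refine_global (3); // every process stores the full mesh; + // cells are partitioned among processes + FE_Q<2> fe (1); + DoFHandler<2> dof_handler (triangulation); + dof_handler.distribute_dofs (fe); // DoFs are renumbered subdomain-wise + @endcode +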
  2. +
  3. New: The online documentation of all functions now includes links to the file and line where that function is implemented. Both are clickable to provide immediate access to the source code of a diff --git a/include/deal.II/distributed/shared_tria.h b/include/deal.II/distributed/shared_tria.h new file mode 100644 index 0000000000..9d8a676a59 --- /dev/null +++ b/include/deal.II/distributed/shared_tria.h @@ -0,0 +1,150 @@ +// --------------------------------------------------------------------- +// $Id: tria.h 32739 2014-04-08 16:39:47Z denis.davydov $ +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef __deal2__distributed__shared_tria_h +#define __deal2__distributed__shared_tria_h + + +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef DEAL_II_WITH_MPI +# include +#endif + + +DEAL_II_NAMESPACE_OPEN + +template class Triangulation; + + +namespace parallel +{ + +#ifdef DEAL_II_WITH_MPI + + + namespace shared + { + + /** + * This is an extension of dealii::Triangulation class to automatically + * partition triangulation when run with MPI. + * Different from the parallel::distributed::Triangulation, the entire mesh + * is stored on each processor. However, cells are labeled according to + * the id of the processor which "owns" them. The partitioning is done + * automatically inside the DoFHandler by calling Metis. + * This enables distributing DoFs among processors and therefore splitting + * matrices and vectors across processors. + * The usage of this class is demonstrated in Step-18. + * + * @author Denis Davydov, 2015 + * @ingroup distributed + * + */ + template + class Triangulation : public dealii::parallel::Triangulation + { + public: + typedef typename dealii::Triangulation::active_cell_iterator active_cell_iterator; + typedef typename dealii::Triangulation::cell_iterator cell_iterator; + + /** + * Constructor. + */ + Triangulation (MPI_Comm mpi_communicator, + const typename dealii::Triangulation::MeshSmoothing = + (dealii::Triangulation::none) ); + + /** + * Destructor. + */ + virtual ~Triangulation (); + + /** + * Coarsen and refine the mesh according to refinement and + * coarsening flags set. + * + * This step is equivalent to the dealii::Triangulation class + * with an addition of calling dealii::GridTools::partition_triangulation() at the end. + */ + virtual void execute_coarsening_and_refinement (); + + /** + * Create a triangulation. + * + * This function also partitions triangulation based on the + * MPI communicator provided to constructor. + */ + virtual void create_triangulation (const std::vector< Point< spacedim > > &vertices, + const std::vector< CellData< dim > > &cells, + const SubCellData &subcelldata); + + }; + } +#else + + namespace shared + { + + /** + * Dummy class the compiler chooses for parallel shared + * triangulations if we didn't actually configure deal.II with the + * MPI library. 
The existence of this class allows us to refer + * to parallel::shared::Triangulation objects throughout the + * library even if it is disabled. + * + * Since the constructor of this class is private, no such objects + * can actually be created if we don't have p4est available. + */ + template + class Triangulation : public dealii::parallel::Triangulation + { + private: + /** + * Constructor. + */ + Triangulation (); + public: + + /** + * Destructor. + */ + virtual ~Triangulation (); + + }; + } + + +#endif +} + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index 2edf49acff..7b24492e16 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -26,6 +26,8 @@ #include #include +#include + #include #include #include @@ -324,7 +326,7 @@ namespace parallel * @ingroup distributed */ template - class Triangulation : public dealii::Triangulation + class Triangulation : public dealii::parallel::Triangulation { public: /** @@ -618,49 +620,6 @@ namespace parallel void communicate_locally_moved_vertices (const std::vector &vertex_locally_moved); - /** - * Return the subdomain id of those cells that are owned by the current - * processor. All cells in the triangulation that do not have this - * subdomain id are either owned by another processor or have children - * that only exist on other processors. - */ - types::subdomain_id locally_owned_subdomain () const; - - /** - * Return the number of active cells in the triangulation that are - * locally owned, i.e. that have a subdomain_id equal to - * locally_owned_subdomain(). Note that there may be more active cells - * in the triangulation stored on the present processor, such as for - * example ghost cells, or cells further away from the locally owned - * block of cells but that are needed to ensure that the triangulation - * that stores this processor's set of active cells still remains - * balanced with respect to the 2:1 size ratio of adjacent cells. - * - * As a consequence of the remark above, the result of this function is - * always smaller or equal to the result of the function with the same - * name in the ::Triangulation base class, which includes the active - * ghost and artificial cells (see also - * @ref GlossArtificialCell - * and - * @ref GlossGhostCell). - */ - unsigned int n_locally_owned_active_cells () const; - - /** - * Return the sum over all processors of the number of active cells - * owned by each processor. This equals the overall number of active - * cells in the distributed triangulation. - */ - virtual types::global_dof_index n_global_active_cells () const; - - /** - * Returns the global maximum level. This may be bigger than the number - * dealii::Triangulation::n_levels() (a function in this class's base - * class) returns if the current processor only stores cells in parts of - * the domain that are not very refined, but if other processors store - * cells in more deeply refined parts of the domain. - */ - virtual unsigned int n_global_levels () const; /** * Returns true if the triangulation has hanging nodes. @@ -680,20 +639,6 @@ namespace parallel virtual bool has_hanging_nodes() const; - /** - * Return the number of active cells owned by each of the MPI processes - * that contribute to this triangulation. The element of this vector - * indexed by locally_owned_subdomain() equals the result of - * n_locally_owned_active_cells(). 
- */ - const std::vector & - n_locally_owned_active_cells_per_processor () const; - - /** - * Return the MPI communicator used by this triangulation. - */ - MPI_Comm get_communicator () const; - /** * Return the local memory consumption in bytes. */ @@ -876,43 +821,17 @@ namespace parallel private: - /** - * MPI communicator to be used for the triangulation. We create a unique - * communicator for this class, which is a duplicate of the one passed - * to the constructor. - */ - MPI_Comm mpi_communicator; /** * store the Settings. */ Settings settings; - /** - * The subdomain id to be used for the current processor. - */ - types::subdomain_id my_subdomain; - /** * A flag that indicates whether the triangulation has actual content. */ bool triangulation_has_content; - /** - * A structure that contains some numbers about the distributed - * triangulation. - */ - struct NumberCache - { - std::vector n_locally_owned_active_cells; - types::global_dof_index n_global_active_cells; - unsigned int n_global_levels; - - NumberCache(); - }; - - NumberCache number_cache; - /** * A data structure that holds the connectivity between trees. Since * each tree is rooted in a coarse grid cell, this data structure holds @@ -1019,12 +938,6 @@ namespace parallel */ void copy_local_forest_to_triangulation (); - - /** - * Update the number_cache variable after mesh creation or refinement. - */ - void update_number_cache (); - /** * Internal function notifying all registered classes to attach their * data before repartitioning occurs. Called from @@ -1052,7 +965,7 @@ namespace parallel * all this class does is throw an exception. */ template - class Triangulation<1,spacedim> : public dealii::Triangulation<1,spacedim> + class Triangulation<1,spacedim> : public dealii::parallel::Triangulation<1,spacedim> { public: /** @@ -1066,19 +979,6 @@ namespace parallel */ virtual ~Triangulation (); - /** - * Return the MPI communicator used by this triangulation. - */ - MPI_Comm get_communicator () const; - - /** - * Return the sum over all processors of the number of active cells - * owned by each processor. This equals the overall number of active - * cells in the distributed triangulation. - */ - types::global_dof_index n_global_active_cells () const; - virtual unsigned int n_global_levels () const; - /** * Returns a permutation vector for the order the coarse cells are * handed of to p4est. For example the first element i in this vector @@ -1124,14 +1024,6 @@ namespace parallel void communicate_locally_moved_vertices (const std::vector &vertex_locally_moved); - /** - * Return the subdomain id of those cells that are owned by the current - * processor. All cells in the triangulation that do not have this - * subdomain id are either owned by another processor or have children - * that only exist on other processors. - */ - types::subdomain_id locally_owned_subdomain () const; - /** * Dummy arrays. This class isn't usable but the compiler wants to see * these variables at a couple places anyway. @@ -1210,12 +1102,6 @@ namespace parallel */ types::subdomain_id locally_owned_subdomain () const; - /** - * Return the MPI communicator used by this triangulation. 
- */ -#ifdef DEAL_II_WITH_MPI - MPI_Comm get_communicator () const; -#endif }; } } diff --git a/include/deal.II/distributed/tria_base.h b/include/deal.II/distributed/tria_base.h new file mode 100644 index 0000000000..b5f1ade171 --- /dev/null +++ b/include/deal.II/distributed/tria_base.h @@ -0,0 +1,192 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef __deal2__distributed__tria_base_h +#define __deal2__distributed__tria_base_h + + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include + +#ifdef DEAL_II_WITH_MPI +# include +#endif + + +DEAL_II_NAMESPACE_OPEN + +template class Triangulation; + + +namespace parallel +{ + /** + * This class describes the interface for all triangulation classes that + * work in parallel, namely parallel::distributed::Triangulation + * and parallel::shared::Triangulation. + */ + template + class Triangulation : public dealii::Triangulation + { + public: + + /** + * Constructor. + */ +#ifdef DEAL_II_WITH_MPI + Triangulation (MPI_Comm mpi_communicator, + const typename dealii::Triangulation::MeshSmoothing smooth_grid = (dealii::Triangulation::none), + const bool check_for_distorted_cells = false); +#else + Triangulation (); +#endif + + /** + * Destructor. + */ + virtual ~Triangulation (); + +#ifdef DEAL_II_WITH_MPI + /** + * Return MPI communicator used by this triangulation. + */ + virtual MPI_Comm get_communicator () const; +#endif + + /** + * Implementation of the same function as in the base class. + */ + virtual void copy_triangulation (const dealii::Triangulation &old_tria); + + /** + * Return the number of active cells owned by each of the MPI processes + * that contribute to this triangulation. The element of this vector + * indexed by locally_owned_subdomain() equals the result of + * n_locally_owned_active_cells(). + */ + const std::vector & + n_locally_owned_active_cells_per_processor () const; + + + /** + * Return the number of active cells in the triangulation that are + * locally owned, i.e. that have a subdomain_id equal to + * locally_owned_subdomain(). Note that there may be more active cells + * in the triangulation stored on the present processor, such as for + * example ghost cells, or cells further away from the locally owned + * block of cells but that are needed to ensure that the triangulation + * that stores this processor's set of active cells still remains + * balanced with respect to the 2:1 size ratio of adjacent cells. + * + * As a consequence of the remark above, the result of this function is + * always smaller or equal to the result of the function with the same + * name in the ::Triangulation base class, which includes the active + * ghost and artificial cells (see also + * @ref GlossArtificialCell + * and + * @ref GlossGhostCell). 
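+ * + * As an illustrative consequence (writing @p tria for this triangulation), + * on every process + * @code + * tria.n_locally_owned_active_cells() <= tria.n_active_cells() + * @endcode + * holds, since the base class count also includes ghost and artificial + * cells. 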
+ */ + unsigned int n_locally_owned_active_cells () const; + + /** + * Return the sum over all processors of the number of active cells + * owned by each processor. This equals the overall number of active + * cells in the triangulation. + */ + virtual types::global_dof_index n_global_active_cells () const; + + /** + * Return the local memory consumption in bytes. + */ + virtual std::size_t memory_consumption () const; + + + /** + * Returns the global maximum level. This may be bigger than the number + * dealii::Triangulation::n_levels() (a function in this class's base + * class) returns if the current processor only stores cells in parts of + * the domain that are not very refined, but if other processors store + * cells in more deeply refined parts of the domain. + */ + virtual unsigned int n_global_levels () const; + + /** + * Return the subdomain id of those cells that are owned by the current + * processor. All cells in the triangulation that do not have this + * subdomain id are either owned by another processor or have children + * that only exist on other processors. + */ + types::subdomain_id locally_owned_subdomain () const; + + + protected: +#ifdef DEAL_II_WITH_MPI + /** + * MPI communicator to be used for the triangulation. We create a unique + * communicator for this class, which is a duplicate of the one passed + * to the constructor. + */ + MPI_Comm mpi_communicator; +#endif + + /** + * The subdomain id to be used for the current processor. + */ + types::subdomain_id my_subdomain; + + /** + * total number of subdomains. + */ + types::subdomain_id n_subdomains; + + /** + * A structure that contains some numbers about the distributed + * triangulation. + */ + struct NumberCache + { + std::vector n_locally_owned_active_cells; + types::global_dof_index n_global_active_cells; + unsigned int n_global_levels; + + NumberCache(); + }; + + NumberCache number_cache; + + /** + * Update the number_cache variable after mesh creation or refinement. + */ + void update_number_cache (); + + + }; + +} // namespace parallel + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/include/deal.II/dofs/dof_handler_policy.h b/include/deal.II/dofs/dof_handler_policy.h index e44663844b..51a96465e1 100644 --- a/include/deal.II/dofs/dof_handler_policy.h +++ b/include/deal.II/dofs/dof_handler_policy.h @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -61,11 +63,19 @@ namespace internal virtual ~PolicyBase (); /** - * Distribute degrees of freedom on the object given as last argument. + * Distribute degrees of freedom on + * the object given as first argument. + * The reference to the NumberCache of the + * DoFHandler object has to be passed in a + * second argument. It could then be modified to + * make DoFHandler related functions work properly + * when called within the policies classes. + * The updated NumberCache is written to that argument. */ virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const = 0; + void + distribute_dofs (dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const = 0; /** * Distribute the multigrid dofs on each level @@ -76,12 +86,20 @@ namespace internal std::vector &number_caches) const = 0; /** - * Renumber degrees of freedom as specified by the first argument. + * Renumber degrees of freedom as + * specified by the first argument. + * The reference to the NumberCache of the + * DoFHandler object has to be passed in a + * second argument. 
It could then be modified to + * make DoFHandler related functions work properly + * when called within the policies classes. + * The updated NumberCache is written to that argument. */ virtual - NumberCache + void renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const = 0; + dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const = 0; }; @@ -97,8 +115,9 @@ namespace internal * Distribute degrees of freedom on the object given as last argument. */ virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const; + void + distribute_dofs (dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; /** * Distribute multigrid DoFs. @@ -112,9 +131,62 @@ namespace internal * Renumber degrees of freedom as specified by the first argument. */ virtual - NumberCache + void renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const; + dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; + }; + + /** + * This class implements the + * policy for operations when + * we use a + * parallel::shared::Triangulation + * object. + */ + template + class ParallelShared : public Sequential + { + public: + + /** + * Distribute degrees of freedom on + * the object given as first argument. + * + * On distribution, DoFs are renumbered subdomain-wise and + * number_cache.n_locally_owned_dofs_per_processor[i] and + * number_cache.locally_owned_dofs are updated consistently. + */ + virtual + void + distribute_dofs (dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; + + /** + * This function is not yet implemented. + */ + virtual + void + distribute_mg_dofs (dealii::DoFHandler &dof_handler, + std::vector &number_caches) const; + + /** + * Renumber degrees of freedom as + * specified by the first argument. + * + * The input argument @p new_numbers may either have as many entries + * as there are global degrees of freedom (i.e. dof_handler.n_dofs() ) + * or dof_handler.locally_owned_dofs().n_elements(). + * Therefore it can be utilised with renumbering functions + * implemented for the parallel::distributed case. + */ + virtual + void + renumber_dofs (const std::vector &new_numbers, + dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; + private: + }; @@ -130,8 +202,9 @@ namespace internal * Distribute degrees of freedom on the object given as last argument. */ virtual - NumberCache - distribute_dofs (dealii::DoFHandler &dof_handler) const; + void + distribute_dofs (dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; /** * Distribute multigrid DoFs. @@ -145,9 +218,10 @@ namespace internal * Renumber degrees of freedom as specified by the first argument. */ virtual - NumberCache + void renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const; + dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const; }; } } diff --git a/include/deal.II/dofs/dof_tools.h b/include/deal.II/dofs/dof_tools.h index 38ca2fab94..7e6c33c19c 100644 --- a/include/deal.II/dofs/dof_tools.h +++ b/include/deal.II/dofs/dof_tools.h @@ -1437,6 +1437,22 @@ namespace DoFTools extract_locally_relevant_dofs (const DH &dof_handler, IndexSet &dof_set); + /** + * + * For each processor, determine the set of locally owned degrees of freedom as an IndexSet. + * This function then returns a vector of index sets, where the vector has size equal to the + * number of MPI processes that participate in the DoF handler object. 
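+ * + * An illustrative call (the variable names here are placeholders; @p + * dof_handler is assumed to be built on a parallel::shared::Triangulation + * named @p triangulation): + * @code + * const std::vector<IndexSet> owned_dofs = + * DoFTools::locally_owned_dofs_per_subdomain (dof_handler); + * const IndexSet &my_dofs = + * owned_dofs[triangulation.locally_owned_subdomain()]; + * @endcode 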
+ * + * The function can be used for objects of type dealii::Triangulation or parallel::shared::Triangulation. + * It will not work for objects of type parallel::distributed::Triangulation since for such triangulations + * we do not have information about all cells of the triangulation available locally, + * and consequently can not say anything definitive about the degrees of freedom active on other + * processors' locally owned cells. + */ + template + std::vector + locally_owned_dofs_per_subdomain (const DH &dof_handler); + /** * For each DoF, return in the output array to which subdomain (as given by * the cell->subdomain_id() function) it belongs. The output array diff --git a/include/deal.II/grid/tria_accessor.templates.h b/include/deal.II/grid/tria_accessor.templates.h index 0ebb75b840..41b226da8d 100644 --- a/include/deal.II/grid/tria_accessor.templates.h +++ b/include/deal.II/grid/tria_accessor.templates.h @@ -27,6 +27,7 @@ #include #include #include +#include #include @@ -38,6 +39,11 @@ namespace parallel { template class Triangulation; } + + namespace shared + { + template class Triangulation; + } } @@ -3030,19 +3036,20 @@ CellAccessor::is_locally_owned () const { Assert (this->active(), ExcMessage("is_locally_owned() can only be called on active cells!")); -#ifndef DEAL_II_WITH_P4EST +#ifndef DEAL_II_WITH_MPI return true; #else if (is_artificial()) return false; - const parallel::distributed::Triangulation *pdt - = dynamic_cast *>(this->tria); + const parallel::Triangulation *pt + = dynamic_cast *>(this->tria); - if (pdt == 0) + if (pt == 0) return true; else - return (this->subdomain_id() == pdt->locally_owned_subdomain()); + return (this->subdomain_id() == pt->locally_owned_subdomain()); + #endif } @@ -3052,16 +3059,19 @@ inline bool CellAccessor::is_locally_owned_on_level () const { -#ifndef DEAL_II_WITH_P4EST + +#ifndef DEAL_II_WITH_MPI return true; #else - const parallel::distributed::Triangulation *pdt - = dynamic_cast *>(this->tria); - if (pdt == 0) + const parallel::Triangulation *pt + = dynamic_cast *>(this->tria); + + if (pt == 0) return true; else - return (this->level_subdomain_id() == pdt->locally_owned_subdomain()); + return (this->level_subdomain_id() == pt->locally_owned_subdomain()); + #endif } @@ -3073,19 +3083,21 @@ CellAccessor::is_ghost () const { Assert (this->active(), ExcMessage("is_ghost() can only be called on active cells!")); -#ifndef DEAL_II_WITH_P4EST - return false; -#else if (is_artificial() || this->has_children()) return false; - const parallel::distributed::Triangulation *pdt - = dynamic_cast *>(this->tria); +#ifndef DEAL_II_WITH_MPI + return false; +#else + + const parallel::Triangulation *pt + = dynamic_cast *>(this->tria); - if (pdt == 0) + if (pt == 0) return false; else - return (this->subdomain_id() != pdt->locally_owned_subdomain()); + return (this->subdomain_id() != pt->locally_owned_subdomain()); + #endif } @@ -3098,10 +3110,18 @@ CellAccessor::is_artificial () const { Assert (this->active(), ExcMessage("is_artificial() can only be called on active cells!")); -#ifndef DEAL_II_WITH_P4EST +#ifndef DEAL_II_WITH_MPI return false; #else - return this->subdomain_id() == numbers::artificial_subdomain_id; + + const parallel::Triangulation *pt + = dynamic_cast *>(this->tria); + + if (pt == 0) + return false; + else + return this->subdomain_id() == numbers::artificial_subdomain_id; + #endif } diff --git a/source/distributed/CMakeLists.txt b/source/distributed/CMakeLists.txt index d7cb055825..f12666ca29 100644 --- 
a/source/distributed/CMakeLists.txt +++ b/source/distributed/CMakeLists.txt @@ -19,12 +19,16 @@ SET(_src grid_refinement.cc solution_transfer.cc tria.cc + tria_base.cc + shared_tria.cc ) SET(_inst grid_refinement.inst.in solution_transfer.inst.in tria.inst.in + shared_tria.inst.in + tria_base.inst.in ) FILE(GLOB _header diff --git a/source/distributed/shared_tria.cc b/source/distributed/shared_tria.cc new file mode 100644 index 0000000000..65035186c9 --- /dev/null +++ b/source/distributed/shared_tria.cc @@ -0,0 +1,128 @@ +// --------------------------------------------------------------------- +// $Id: tria.cc 32807 2014-04-22 15:01:57Z heister $ +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + +#ifdef DEAL_II_WITH_MPI +namespace parallel +{ + namespace shared + { + + template + Triangulation::Triangulation (MPI_Comm mpi_communicator, + const typename dealii::Triangulation::MeshSmoothing smooth_grid): + dealii::parallel::Triangulation(mpi_communicator,smooth_grid,false) + { + } + + + template + Triangulation::~Triangulation () + { + + } + + template + void + Triangulation::execute_coarsening_and_refinement () + { + dealii::Triangulation::execute_coarsening_and_refinement (); + dealii::GridTools::partition_triangulation (this->n_subdomains, *this); + this->update_number_cache (); + } + + template + void + Triangulation::create_triangulation (const std::vector< Point< spacedim > > &vertices, + const std::vector< CellData< dim > > &cells, + const SubCellData &subcelldata) + { + try + { + dealii::Triangulation:: + create_triangulation (vertices, cells, subcelldata); + } + catch (const typename dealii::Triangulation::DistortedCellList &) + { + // the underlying triangulation should not be checking for distorted + // cells + AssertThrow (false, ExcInternalError()); + } + dealii::GridTools::partition_triangulation (this->n_subdomains, *this); + this->update_number_cache (); + } + + } +} + +#else + +namespace parallel +{ + namespace shared + { + template + Triangulation::Triangulation () + { + Assert (false, ExcNotImplemented()); + } + + + template + Triangulation::~Triangulation () + { + Assert (false, ExcNotImplemented()); + } + + template + types::subdomain_id + Triangulation::locally_owned_subdomain () const + { + Assert (false, ExcNotImplemented()); + return 0; + } + + } +} + + +#endif + + +/*-------------- Explicit Instantiations -------------------------------*/ +#include "shared_tria.inst" + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/distributed/shared_tria.inst.in b/source/distributed/shared_tria.inst.in new file mode 100644 index 0000000000..f6a863a443 --- /dev/null +++ b/source/distributed/shared_tria.inst.in @@ -0,0 +1,36 @@ +// --------------------------------------------------------------------- +// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $ +// +// Copyright (C) 
2010 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + + + +for (deal_II_dimension : DIMENSIONS) + { + namespace parallel + \{ + namespace shared + \{ + template class Triangulation; +# if deal_II_dimension < 3 + template class Triangulation; +# endif +# if deal_II_dimension < 2 + template class Triangulation; +# endif + \} + \} + + } + diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index 327926340d..c259943410 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -2162,16 +2162,6 @@ namespace parallel /* ---------------------- class Triangulation ------------------------------ */ - - template - Triangulation::NumberCache::NumberCache() - : - n_global_active_cells(0), - n_global_levels(0) - {} - - - template Triangulation:: Triangulation (MPI_Comm mpi_communicator, @@ -2179,13 +2169,11 @@ namespace parallel const Settings settings_) : // do not check for distorted cells - dealii::Triangulation - (smooth_grid, + dealii::parallel::Triangulation + (mpi_communicator, + smooth_grid, false), - mpi_communicator (Utilities::MPI:: - duplicate_communicator(mpi_communicator)), settings(settings_), - my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)), triangulation_has_content (false), connectivity (0), parallel_forest (0), @@ -2200,9 +2188,6 @@ namespace parallel dealii::internal::p4est::InitFinalize::do_initialize (); parallel_ghost = 0; - - number_cache.n_locally_owned_active_cells - .resize (Utilities::MPI::n_mpi_processes (mpi_communicator)); } @@ -2217,9 +2202,6 @@ namespace parallel Assert (connectivity == 0, ExcInternalError()); Assert (parallel_forest == 0, ExcInternalError()); Assert (refinement_in_progress == false, ExcInternalError()); - - // get rid of the unique communicator used here again - MPI_Comm_free (&mpi_communicator); } @@ -2264,7 +2246,7 @@ namespace parallel AssertThrow (false, ExcInternalError()); } - update_number_cache (); + this->update_number_cache (); } @@ -2605,14 +2587,14 @@ namespace parallel dealii::Triangulation::clear (); - update_number_cache (); + this->update_number_cache (); } template bool Triangulation::has_hanging_nodes () const { - if (n_global_levels()<=1) + if (this->n_global_levels()<=1) return false; // can not have hanging nodes without refined cells // if there are any active cells with level less than n_global_levels()-1, then @@ -2622,8 +2604,8 @@ namespace parallel // The problem is that we cannot just ask for the first active cell, but // instead need to filter over locally owned cells. 
bool have_coarser_cell = false; - for (typename Triangulation::active_cell_iterator cell = this->begin_active(n_global_levels()-2); - cell != this->end(n_global_levels()-2); + for (typename Triangulation::active_cell_iterator cell = this->begin_active(this->n_global_levels()-2); + cell != this->end(this->n_global_levels()-2); ++cell) if (cell->is_locally_owned()) { @@ -2632,7 +2614,7 @@ namespace parallel } // return true if at least one process has a coarser cell - return 0mpi_communicator); } @@ -2678,13 +2660,13 @@ namespace parallel Assert(this->n_cells()>0, ExcMessage("Can not save() an empty Triangulation.")); - if (my_subdomain==0) + if (this->my_subdomain==0) { std::string fname=std::string(filename)+".info"; std::ofstream f(fname.c_str()); f << "version nproc attached_bytes n_attached_objs n_coarse_cells" << std::endl << 2 << " " - << Utilities::MPI::n_mpi_processes (mpi_communicator) << " " + << Utilities::MPI::n_mpi_processes (this->mpi_communicator) << " " << real_data_size << " " << attached_data_pack_callbacks.size() << " " << this->n_cells(0) @@ -2746,7 +2728,7 @@ namespace parallel Assert(this->n_cells(0) == n_coarse_cells, ExcMessage("Number of coarse cells differ!")); #if DEAL_II_P4EST_VERSION_GTE(0,3,4,3) #else - AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (mpi_communicator), + AssertThrow(numcpus <= Utilities::MPI::n_mpi_processes (this->mpi_communicator), ExcMessage("parallel::distributed::Triangulation::load() only supports loading " "saved data with a greater or equal number of processes than were used to " "save() when using p4est 0.3.4.2.")); @@ -2758,7 +2740,7 @@ namespace parallel #if DEAL_II_P4EST_VERSION_GTE(0,3,4,3) parallel_forest = dealii::internal::p4est::functions::load_ext ( - filename, mpi_communicator, + filename, this->mpi_communicator, attached_size, attached_size>0, autopartition, 0, this, @@ -2766,12 +2748,12 @@ namespace parallel #else (void)autopartition; parallel_forest = dealii::internal::p4est::functions::load ( - filename, mpi_communicator, + filename, this->mpi_communicator, attached_size, attached_size>0, this, &connectivity); #endif - if (numcpus != Utilities::MPI::n_mpi_processes (mpi_communicator)) + if (numcpus != Utilities::MPI::n_mpi_processes (this->mpi_communicator)) // We are changing the number of CPUs so we need to repartition. 
// Note that p4est actually distributes the cells between the changed // number of CPUs and so everything works without this call, but @@ -2795,7 +2777,7 @@ namespace parallel AssertThrow (false, ExcInternalError()); } - update_number_cache (); + this->update_number_cache (); } @@ -2884,7 +2866,7 @@ namespace parallel // now create a forest out of the connectivity data structure parallel_forest = dealii::internal::p4est::functions<2>:: - new_forest (mpi_communicator, + new_forest (this->mpi_communicator, connectivity, /* minimum initial number of quadrants per tree */ 0, /* minimum level of upfront refinement */ 0, @@ -2955,7 +2937,7 @@ namespace parallel // now create a forest out of the connectivity data structure parallel_forest = dealii::internal::p4est::functions<2>:: - new_forest (mpi_communicator, + new_forest (this->mpi_communicator, connectivity, /* minimum initial number of quadrants per tree */ 0, /* minimum level of upfront refinement */ 0, @@ -3107,7 +3089,7 @@ namespace parallel // now create a forest out of the connectivity data structure parallel_forest = dealii::internal::p4est::functions<3>:: - new_forest (mpi_communicator, + new_forest (this->mpi_communicator, connectivity, /* minimum initial number of quadrants per tree */ 0, /* minimum level of upfront refinement */ 0, @@ -3239,7 +3221,7 @@ namespace parallel match_tree_recursively (*tree, cell, p4est_coarse_cell, *parallel_forest, - my_subdomain); + this->my_subdomain); } } @@ -3310,7 +3292,7 @@ namespace parallel cell != this->end(); ++cell) { - if (cell->subdomain_id() != my_subdomain + if (cell->subdomain_id() != this->my_subdomain && cell->subdomain_id() != numbers::artificial_subdomain_id) ++num_ghosts; @@ -3377,7 +3359,7 @@ namespace parallel determine_level_subdomain_id_recursively (*tree, tree_index, cell, p4est_coarse_cell, *parallel_forest, - my_subdomain, + this->my_subdomain, marked_vertices); } @@ -3446,7 +3428,7 @@ namespace parallel const unsigned int total_local_cells = this->n_active_cells(); (void)total_local_cells; - if (Utilities::MPI::n_mpi_processes (mpi_communicator) == 1) + if (Utilities::MPI::n_mpi_processes (this->mpi_communicator) == 1) Assert (static_cast(parallel_forest->local_num_quadrants) == total_local_cells, ExcInternalError()) @@ -3461,7 +3443,7 @@ namespace parallel cell = this->begin_active(); cell != this->end(); ++cell) { - if (cell->subdomain_id() == my_subdomain) + if (cell->subdomain_id() == this->my_subdomain) ++n_owned; } @@ -3531,7 +3513,7 @@ namespace parallel RefineAndCoarsenList refine_and_coarsen_list (*this, p4est_tree_to_coarse_cell_permutation, - my_subdomain); + this->my_subdomain); // copy refine and coarsen flags into p4est and execute the refinement // and coarsening. 
this uses the refine_and_coarsen_list just built, @@ -3616,7 +3598,7 @@ namespace parallel refinement_in_progress = false; - update_number_cache (); + this->update_number_cache (); } template @@ -3665,7 +3647,7 @@ namespace parallel PartitionWeights partition_weights (*this, cell_weights, p4est_tree_to_coarse_cell_permutation, - my_subdomain); + this->my_subdomain); parallel_forest->user_pointer = &partition_weights; dealii::internal::p4est::functions:: @@ -3691,65 +3673,11 @@ namespace parallel refinement_in_progress = false; // update how many cells, edges, etc, we store locally - update_number_cache (); - } - - - template - void - Triangulation::update_number_cache () - { - Assert (number_cache.n_locally_owned_active_cells.size() - == - Utilities::MPI::n_mpi_processes (mpi_communicator), - ExcInternalError()); - - std::fill (number_cache.n_locally_owned_active_cells.begin(), - number_cache.n_locally_owned_active_cells.end(), - 0); - - if (this->n_levels() == 0) - { - // Skip communication done below if we do not have any cells - // (meaning the Triangulation is empty on all processors). This will - // happen when called from the destructor of Triangulation, which - // can get called during exception handling causing a hang in this - // function. - number_cache.n_global_active_cells = 0; - number_cache.n_global_levels = 0; - return; - } - - if (this->n_levels() > 0) - for (typename Triangulation::active_cell_iterator - cell = this->begin_active(); - cell != this->end(); ++cell) - if (cell->subdomain_id() == my_subdomain) - ++number_cache.n_locally_owned_active_cells[my_subdomain]; - - unsigned int send_value - = number_cache.n_locally_owned_active_cells[my_subdomain]; - MPI_Allgather (&send_value, - 1, - MPI_UNSIGNED, - &number_cache.n_locally_owned_active_cells[0], - 1, - MPI_UNSIGNED, - mpi_communicator); - - number_cache.n_global_active_cells - = std::accumulate (number_cache.n_locally_owned_active_cells.begin(), - number_cache.n_locally_owned_active_cells.end(), - /* ensure sum is computed with correct data type:*/ - static_cast(0)); - number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), mpi_communicator); + this->update_number_cache (); } - - - template void Triangulation:: @@ -3900,54 +3828,6 @@ namespace parallel ExcInternalError()); } - - - template - types::subdomain_id - Triangulation::locally_owned_subdomain () const - { - Assert (dim > 1, ExcNotImplemented()); - return my_subdomain; - } - - - - template - unsigned int - Triangulation::n_locally_owned_active_cells () const - { - return number_cache.n_locally_owned_active_cells[my_subdomain]; - } - - - - template - types::global_dof_index - Triangulation::n_global_active_cells () const - { - return number_cache.n_global_active_cells; - } - - - - template - unsigned int - Triangulation::n_global_levels () const - { - return number_cache.n_global_levels; - } - - - - template - const std::vector & - Triangulation::n_locally_owned_active_cells_per_processor () const - { - return number_cache.n_locally_owned_active_cells; - } - - - template unsigned int Triangulation:: @@ -4335,14 +4215,6 @@ namespace parallel } - template - MPI_Comm - Triangulation::get_communicator () const - { - return mpi_communicator; - } - - template void Triangulation::add_periodicity @@ -4459,7 +4331,7 @@ namespace parallel dealii::internal::p4est::functions::destroy (parallel_forest); parallel_forest = dealii::internal::p4est::functions:: - new_forest (mpi_communicator, + new_forest (this->mpi_communicator, connectivity, /* minimum initial 
number of quadrants per tree */ 0, /* minimum level of upfront refinement */ 0, @@ -4492,13 +4364,8 @@ namespace parallel Triangulation::memory_consumption () const { std::size_t mem= - this->dealii::Triangulation::memory_consumption() - + MemoryConsumption::memory_consumption(mpi_communicator) - + MemoryConsumption::memory_consumption(my_subdomain) + this->dealii::parallel::Triangulation::memory_consumption() + MemoryConsumption::memory_consumption(triangulation_has_content) - + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells) - + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells) - + MemoryConsumption::memory_consumption(number_cache.n_global_levels) + MemoryConsumption::memory_consumption(connectivity) + MemoryConsumption::memory_consumption(parallel_forest) + MemoryConsumption::memory_consumption(refinement_in_progress) @@ -4557,7 +4424,8 @@ namespace parallel ExcMessage ("Parallel distributed triangulations can only " "be copied, if no refinement is in progress!")); - mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ()); + // duplicate MPI communicator, stored in the base class + dealii::parallel::Triangulation::copy_triangulation (old_tria); coarse_cell_to_p4est_tree_permutation = old_tria_x->coarse_cell_to_p4est_tree_permutation; p4est_tree_to_coarse_cell_permutation = old_tria_x->p4est_tree_to_coarse_cell_permutation; @@ -4584,7 +4452,7 @@ namespace parallel AssertThrow (false, ExcInternalError()); } - update_number_cache (); + this->update_number_cache (); } @@ -4667,6 +4535,10 @@ namespace parallel template Triangulation<1,spacedim>::Triangulation (MPI_Comm) + : + dealii::parallel::Triangulation<1,spacedim>(MPI_COMM_WORLD, + typename dealii::Triangulation<1,spacedim>::MeshSmoothing(), + false) { Assert (false, ExcNotImplemented()); } @@ -4689,40 +4561,6 @@ namespace parallel } - template - types::subdomain_id - Triangulation<1,spacedim>::locally_owned_subdomain () const - { - Assert (false, ExcNotImplemented()); - return 0; - } - - - template - types::global_dof_index - Triangulation<1,spacedim>::n_global_active_cells () const - { - Assert (false, ExcNotImplemented()); - return 0; - } - - - template - unsigned int - Triangulation<1,spacedim>::n_global_levels () const - { - Assert (false, ExcNotImplemented()); - return 0; - } - - - template - MPI_Comm - Triangulation<1,spacedim>::get_communicator () const - { - return MPI_COMM_WORLD; - } - template const std::vector & Triangulation<1,spacedim>::get_p4est_tree_to_coarse_cell_permutation() const diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc new file mode 100644 index 0000000000..39fd50d54c --- /dev/null +++ b/source/distributed/tria_base.cc @@ -0,0 +1,222 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include + + +DEAL_II_NAMESPACE_OPEN + +namespace parallel +{ + +#ifdef DEAL_II_WITH_MPI + template + Triangulation::Triangulation (MPI_Comm mpi_communicator, + const typename dealii::Triangulation::MeshSmoothing smooth_grid, + const bool check_for_distorted_cells) + : + dealii::Triangulation(smooth_grid,check_for_distorted_cells), + mpi_communicator (Utilities::MPI:: + duplicate_communicator(mpi_communicator)), + my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)), + n_subdomains(Utilities::MPI::n_mpi_processes(mpi_communicator)) + { + number_cache.n_locally_owned_active_cells.resize (n_subdomains); + }; + + template + void + Triangulation::copy_triangulation (const dealii::Triangulation &old_tria) + { + if (const dealii::parallel::Triangulation * + old_tria_x = dynamic_cast *>(&old_tria)) + { + mpi_communicator = Utilities::MPI::duplicate_communicator (old_tria_x->get_communicator ()); + } + } + +#else + template + Triangulation::Triangulation() + { + Assert (false, ExcNotImplemented()); + } + + template + void + Triangulation::copy_triangulation (const dealii::Triangulation &old_tria) + { + Assert (false, ExcNotImplemented()); + } + + +#endif + + template + std::size_t + Triangulation::memory_consumption() const + { + std::size_t mem= + this->dealii::Triangulation::memory_consumption() + + MemoryConsumption::memory_consumption(mpi_communicator) + + MemoryConsumption::memory_consumption(my_subdomain) + + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells) + + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells) + + MemoryConsumption::memory_consumption(number_cache.n_global_levels); + return mem; + + } + + template + Triangulation::~Triangulation () + { + // get rid of the unique communicator used here again + MPI_Comm_free (&this->mpi_communicator); + }; + + template + Triangulation::NumberCache::NumberCache() + : + n_global_active_cells(0), + n_global_levels(0) + {} + + template + unsigned int + Triangulation::n_locally_owned_active_cells () const + { + return number_cache.n_locally_owned_active_cells[my_subdomain]; + } + + template + unsigned int + Triangulation::n_global_levels () const + { + return number_cache.n_global_levels; + } + + template + types::global_dof_index + Triangulation::n_global_active_cells () const + { + return number_cache.n_global_active_cells; + } + + template + const std::vector & + Triangulation::n_locally_owned_active_cells_per_processor () const + { + return number_cache.n_locally_owned_active_cells; + } + +#ifdef DEAL_II_WITH_MPI + template + MPI_Comm + Triangulation::get_communicator () const + { + return mpi_communicator; + } + + template + void + Triangulation::update_number_cache () + { + Assert (number_cache.n_locally_owned_active_cells.size() + == + Utilities::MPI::n_mpi_processes (this->mpi_communicator), + ExcInternalError()); + + std::fill (number_cache.n_locally_owned_active_cells.begin(), + number_cache.n_locally_owned_active_cells.end(), + 0); + + if (this->n_levels() == 0) + { + // Skip communication done below if we do not have any cells + // (meaning the Triangulation is empty on all processors). This will + // happen when called from the destructor of Triangulation, which + // can get called during exception handling causing a hang in this + // function. 
+ number_cache.n_global_active_cells = 0; + number_cache.n_global_levels = 0; + return; + } + + if (this->n_levels() > 0) + for (typename Triangulation::active_cell_iterator + cell = this->begin_active(); + cell != this->end(); ++cell) + if (cell->subdomain_id() == my_subdomain) + ++number_cache.n_locally_owned_active_cells[my_subdomain]; + + unsigned int send_value + = number_cache.n_locally_owned_active_cells[my_subdomain]; + MPI_Allgather (&send_value, + 1, + MPI_UNSIGNED, + &number_cache.n_locally_owned_active_cells[0], + 1, + MPI_UNSIGNED, + this->mpi_communicator); + + number_cache.n_global_active_cells + = std::accumulate (number_cache.n_locally_owned_active_cells.begin(), + number_cache.n_locally_owned_active_cells.end(), + /* ensure sum is computed with correct data type:*/ + static_cast(0)); + number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator); + } +#else + template + void + Triangulation::update_number_cache () + { + Assert (false, ExcNotImplemented()); + } + +#endif + + template + types::subdomain_id + Triangulation::locally_owned_subdomain () const + { + Assert (dim > 1, ExcNotImplemented()); + return my_subdomain; + } + + +} + + +/*-------------- Explicit Instantiations -------------------------------*/ +#include "tria_base.inst" + +DEAL_II_NAMESPACE_CLOSE diff --git a/source/distributed/tria_base.inst.in b/source/distributed/tria_base.inst.in new file mode 100644 index 0000000000..93ac41e8cb --- /dev/null +++ b/source/distributed/tria_base.inst.in @@ -0,0 +1,33 @@ +// --------------------------------------------------------------------- +// $Id: tria.inst.in 32674 2014-03-20 16:57:24Z denis.davydov $ +// +// Copyright (C) 2010 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+// +// --------------------------------------------------------------------- + + + +for (deal_II_dimension : DIMENSIONS) + { + namespace parallel + \{ + template class Triangulation; +# if deal_II_dimension < 3 + template class Triangulation; +# endif +# if deal_II_dimension < 2 + template class Triangulation; +# endif + \} + + } + diff --git a/source/dofs/dof_handler.cc b/source/dofs/dof_handler.cc index 26059240e7..342b7b524b 100644 --- a/source/dofs/dof_handler.cc +++ b/source/dofs/dof_handler.cc @@ -72,6 +72,8 @@ namespace internal policy_name = "Policy::Sequential<"; else if (dynamic_cast*>(&policy)) policy_name = "Policy::ParallelDistributed<"; + else if (dynamic_cast*>(&policy)) + policy_name = "Policy::ParallelShared<"; else AssertThrow(false, ExcNotImplemented()); policy_name += Utilities::int_to_string(dim)+ @@ -762,9 +764,13 @@ DoFHandler::DoFHandler (const Triangulation &tria) // decide whether we need a // sequential or a parallel // distributed policy - if (dynamic_cast*> + if (dynamic_cast*> (&tria) - == 0) + != 0) + policy.reset (new internal::DoFHandler::Policy::ParallelShared()); + else if (dynamic_cast*> + (&tria) + == 0) policy.reset (new internal::DoFHandler::Policy::Sequential()); else policy.reset (new internal::DoFHandler::Policy::ParallelDistributed()); @@ -802,9 +808,13 @@ DoFHandler::initialize( // decide whether we need a // sequential or a parallel // distributed policy - if (dynamic_cast*> + if (dynamic_cast*> (&t) - == 0) + != 0) + policy.reset (new internal::DoFHandler::Policy::ParallelShared()); + else if (dynamic_cast*> + (&t) + == 0) policy.reset (new internal::DoFHandler::Policy::Sequential()); else policy.reset (new internal::DoFHandler::Policy::ParallelDistributed()); @@ -1225,7 +1235,7 @@ void DoFHandler::distribute_dofs (const FiniteElementdistribute_dofs (*this); + policy->distribute_dofs (*this,number_cache); // initialize the block info object // only if this is a sequential @@ -1344,7 +1354,7 @@ DoFHandler::renumber_dofs (const std::vectorrenumber_dofs (new_numbers, *this); + policy->renumber_dofs (new_numbers, *this,number_cache); } diff --git a/source/dofs/dof_handler_policy.cc b/source/dofs/dof_handler_policy.cc index 1ec1ebb668..d573f83d47 100644 --- a/source/dofs/dof_handler_policy.cc +++ b/source/dofs/dof_handler_policy.cc @@ -869,9 +869,10 @@ namespace internal template - NumberCache + void Sequential:: - distribute_dofs (DoFHandler &dof_handler) const + distribute_dofs (DoFHandler &dof_handler, + NumberCache &number_cache_current ) const { const types::global_dof_index n_dofs = Implementation::distribute_dofs (0, @@ -897,7 +898,7 @@ namespace internal number_cache.locally_owned_dofs_per_processor = std::vector (1, number_cache.locally_owned_dofs); - return number_cache; + number_cache_current = number_cache; } @@ -928,10 +929,11 @@ namespace internal } template - NumberCache + void Sequential:: renumber_dofs (const std::vector &new_numbers, - dealii::DoFHandler &dof_handler) const + dealii::DoFHandler &dof_handler, + NumberCache &number_cache_current) const { Implementation::renumber_dofs (new_numbers, IndexSet(0), dof_handler, true); @@ -959,10 +961,158 @@ namespace internal number_cache.locally_owned_dofs_per_processor = std::vector (1, number_cache.locally_owned_dofs); - return number_cache; + number_cache_current = number_cache; + } + + /* --------------------- class ParallelShared ---------------- */ + + template + void + ParallelShared:: + distribute_dofs (DoFHandler &dof_handler, + NumberCache &number_cache) const + { + 
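// Strategy: a shared triangulation stores the entire mesh on each + // process, so we can let the sequential policy enumerate all DoFs first, + // then renumber them subdomain-wise and rebuild the cached locally owned + // index sets and counts from the resulting subdomain association: + 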
Sequential::distribute_dofs (dof_handler,number_cache); + DoFRenumbering::subdomain_wise (dof_handler); + number_cache.locally_owned_dofs_per_processor = DoFTools::locally_owned_dofs_per_subdomain (dof_handler); + number_cache.locally_owned_dofs = number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()]; + number_cache.n_locally_owned_dofs_per_processor.resize (number_cache.locally_owned_dofs_per_processor.size()); + for (unsigned int i = 0; i < number_cache.n_locally_owned_dofs_per_processor.size(); i++) + number_cache.n_locally_owned_dofs_per_processor[i] = number_cache.locally_owned_dofs_per_processor[i].n_elements(); + number_cache.n_locally_owned_dofs = number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria().locally_owned_subdomain()]; } + template + void + ParallelShared:: + distribute_mg_dofs (DoFHandler &dof_handler, + std::vector &number_caches) const + { + // first, call the sequential function to distribute dofs + Sequential:: distribute_mg_dofs (dof_handler, number_caches); + // now we need to update the number cache. + // This part is not yet implemented. + AssertThrow(false,ExcNotImplemented()); + } + + template + void + ParallelShared:: + renumber_dofs (const std::vector &new_numbers, + dealii::DoFHandler &dof_handler, + NumberCache &number_cache) const + { + +#ifndef DEAL_II_WITH_MPI + (void)dof_handler; + Assert (false, ExcNotImplemented()); + +#else + std::vector global_gathered_numbers (dof_handler.n_dofs (), 0); + // as we call DoFRenumbering::subdomain_wise (dof_handler) from distribute_dofs(), + // we need to support sequential-like input. + // Distributed-like input from, for example, component_wise renumbering is also supported. + if (new_numbers.size () == dof_handler.n_dofs ()) + { + global_gathered_numbers = new_numbers; + } + else + { + Assert(new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(), + ExcInternalError()); + const parallel::shared::Triangulation *tr = + (dynamic_cast*> (&dof_handler.get_tria ())); + Assert(tr != 0, ExcInternalError()); + const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ()); + const unsigned int this_process = + Utilities::MPI::this_mpi_process (tr->get_communicator ()); + std::vector gathered_new_numbers (dof_handler.n_dofs (), 0); + Assert(this_process == dof_handler.get_tria ().locally_owned_subdomain (), + ExcInternalError()) + + //gather new numbers among processors into one vector + { + std::vector new_numbers_copy (new_numbers); + // displs: + // Entry i specifies the displacement (relative to recvbuf ) + // at which to place the incoming data from process i + // rcounts: + // containing the number of elements that are to be received from each process + std::vector displs(n_cpu), + rcounts(n_cpu); + types::global_dof_index shift = 0; + //set rcounts based on new_numbers: + int cur_count = new_numbers_copy.size (); + MPI_Allgather (&cur_count, 1, MPI_INT, + &rcounts[0], 1, MPI_INT, + tr->get_communicator ()); + + for (unsigned int i = 0; i < n_cpu; i++) + { + displs[i] = shift; + shift += rcounts[i]; + } + Assert(((int)new_numbers_copy.size()) == rcounts[this_process], + ExcInternalError()); + MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (), + DEAL_II_DOF_INDEX_MPI_TYPE, + &gathered_new_numbers[0], &rcounts[0], + &displs[0], + DEAL_II_DOF_INDEX_MPI_TYPE, + tr->get_communicator ()); + } + // put new numbers according to the current locally_owned_dofs_per_processor IndexSets + types::global_dof_index shift = 0; + // 
+      template <int dim, int spacedim>
+      void
+      ParallelShared<dim,spacedim>::
+      renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
+                     dealii::DoFHandler<dim,spacedim> &dof_handler,
+                     NumberCache &number_cache) const
+      {
+
+#ifndef DEAL_II_WITH_MPI
+        (void)dof_handler;
+        Assert (false, ExcNotImplemented());
+
+#else
+        std::vector<types::global_dof_index> global_gathered_numbers (dof_handler.n_dofs (), 0);
+        // Since distribute_dofs() calls DoFRenumbering::subdomain_wise(dof_handler),
+        // we have to support sequential-like input: one new number for every
+        // dof in the triangulation. Distributed-like input, as produced for
+        // example by component_wise renumbering, is supported as well.
+        if (new_numbers.size () == dof_handler.n_dofs ())
+          {
+            global_gathered_numbers = new_numbers;
+          }
+        else
+          {
+            Assert(new_numbers.size() == dof_handler.locally_owned_dofs().n_elements(),
+                   ExcInternalError());
+            const parallel::shared::Triangulation<dim,spacedim> *tr =
+              (dynamic_cast<const parallel::shared::Triangulation<dim,spacedim>*> (&dof_handler.get_tria ()));
+            Assert(tr != 0, ExcInternalError());
+            const unsigned int n_cpu = Utilities::MPI::n_mpi_processes (tr->get_communicator ());
+            const unsigned int this_process =
+              Utilities::MPI::this_mpi_process (tr->get_communicator ());
+            std::vector<types::global_dof_index> gathered_new_numbers (dof_handler.n_dofs (), 0);
+            Assert(this_process == dof_handler.get_tria ().locally_owned_subdomain (),
+                   ExcInternalError());
+
+            // gather the new numbers from all processors into one vector
+            {
+              std::vector<types::global_dof_index> new_numbers_copy (new_numbers);
+              // displs:
+              //   entry i specifies the displacement (relative to recvbuf)
+              //   at which to place the incoming data from process i
+              // rcounts:
+              //   entry i specifies the number of elements to be received
+              //   from process i
+              std::vector<int> displs(n_cpu),
+                  rcounts(n_cpu);
+              types::global_dof_index shift = 0;
+              // set rcounts based on new_numbers:
+              int cur_count = new_numbers_copy.size ();
+              MPI_Allgather (&cur_count, 1, MPI_INT,
+                             &rcounts[0], 1, MPI_INT,
+                             tr->get_communicator ());
+
+              for (unsigned int i = 0; i < n_cpu; i++)
+                {
+                  displs[i] = shift;
+                  shift += rcounts[i];
+                }
+              Assert(((int)new_numbers_copy.size()) == rcounts[this_process],
+                     ExcInternalError());
+              MPI_Allgatherv (&new_numbers_copy[0], new_numbers_copy.size (),
+                              DEAL_II_DOF_INDEX_MPI_TYPE,
+                              &gathered_new_numbers[0], &rcounts[0],
+                              &displs[0],
+                              DEAL_II_DOF_INDEX_MPI_TYPE,
+                              tr->get_communicator ());
+            }
+
+            // put the new numbers into global_gathered_numbers according to
+            // the current locally_owned_dofs_per_processor IndexSets.
+            types::global_dof_index shift = 0;
+            // flag_1 and flag_2 are used to check that there is a
+            // one-to-one relation between old and new DoFs.
+            std::vector<unsigned int> flag_1 (dof_handler.n_dofs (), 0),
+                flag_2 (dof_handler.n_dofs (), 0);
+            for (unsigned int i = 0; i < n_cpu; i++)
+              {
+                const IndexSet &iset =
+                  number_cache.locally_owned_dofs_per_processor[i];
+                for (types::global_dof_index ind = 0;
+                     ind < iset.n_elements (); ind++)
+                  {
+                    const types::global_dof_index target = iset.nth_index_in_set (ind);
+                    const types::global_dof_index value  = gathered_new_numbers[shift + ind];
+                    Assert(target < dof_handler.n_dofs(), ExcInternalError());
+                    Assert(value  < dof_handler.n_dofs(), ExcInternalError());
+                    global_gathered_numbers[target] = value;
+                    flag_1[target]++;
+                    flag_2[value]++;
+                  }
+                shift += iset.n_elements ();
+              }
+
+            Assert(*std::max_element(flag_1.begin(), flag_1.end()) == 1,
+                   ExcInternalError());
+            Assert(*std::min_element(flag_1.begin(), flag_1.end()) == 1,
+                   ExcInternalError());
+            Assert((*std::max_element(flag_2.begin(), flag_2.end())) == 1,
+                   ExcInternalError());
+            Assert((*std::min_element(flag_2.begin(), flag_2.end())) == 1,
+                   ExcInternalError());
+          }
+        Sequential<dim,spacedim>::renumber_dofs (global_gathered_numbers, dof_handler, number_cache);
+        // correct number_cache:
+        number_cache.locally_owned_dofs_per_processor =
+          DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+        number_cache.locally_owned_dofs =
+          number_cache.locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+        // the sequential renumbering leaves a vector of size 1 here;
+        // correct this:
+        number_cache.n_locally_owned_dofs_per_processor.resize(number_cache.locally_owned_dofs_per_processor.size());
+        for (unsigned int i = 0;
+             i < number_cache.n_locally_owned_dofs_per_processor.size (); i++)
+          number_cache.n_locally_owned_dofs_per_processor[i] =
+            number_cache.locally_owned_dofs_per_processor[i].n_elements ();
+
+        number_cache.n_locally_owned_dofs =
+          number_cache.n_locally_owned_dofs_per_processor[dof_handler.get_tria ().locally_owned_subdomain ()];
+#endif
+      }


       /* --------------------- class ParallelDistributed ---------------- */

@@ -1929,9 +2079,10 @@ namespace internal


       template <int dim, int spacedim>
-      NumberCache
+      void
       ParallelDistributed<dim, spacedim>::
-      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler) const
+      distribute_dofs (DoFHandler<dim,spacedim> &dof_handler,
+                       NumberCache &number_cache_current) const
       {
         NumberCache number_cache;

@@ -2145,7 +2296,7 @@
 #endif // DEBUG
 #endif // DEAL_II_WITH_P4EST

-        return number_cache;
+        number_cache_current = number_cache;
       }


@@ -2396,10 +2547,11 @@

       template <int dim, int spacedim>
-      NumberCache
+      void
       ParallelDistributed<dim, spacedim>::
       renumber_dofs (const std::vector<types::global_dof_index> &new_numbers,
-                     dealii::DoFHandler<dim,spacedim> &dof_handler) const
+                     dealii::DoFHandler<dim,spacedim> &dof_handler,
+                     NumberCache &number_cache_current) const
       {
         (void)new_numbers;
         (void)dof_handler;
@@ -2628,7 +2780,7 @@ namespace internal
       }
 #endif

-      return number_cache;
+      number_cache_current = number_cache;
     }
   }
 }
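The flag_1/flag_2 asserts above verify that the gathered numbers form a permutation of 0..n_dofs-1: every old index is assigned exactly once and every new index is used exactly once. A small stand-alone sketch of that check (illustrative only, not deal.II API):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // valid iff new_numbers is a permutation of 0..n-1
    bool is_valid_renumbering(const std::vector<std::size_t> &new_numbers)
    {
      const std::size_t n = new_numbers.size();
      if (n == 0)
        return true;
      std::vector<unsigned int> used(n, 0);   // plays the role of flag_2
      for (std::size_t old_index = 0; old_index < n; ++old_index)
        {
          if (new_numbers[old_index] >= n)    // target out of range
            return false;
          ++used[new_numbers[old_index]];
        }
      // every new index hit exactly once <=> min == max == 1
      return *std::min_element(used.begin(), used.end()) == 1 &&
             *std::max_element(used.begin(), used.end()) == 1;
    }

    int main()
    {
      std::vector<std::size_t> ok;            // {2, 0, 1}
      ok.push_back(2); ok.push_back(0); ok.push_back(1);
      assert(is_valid_renumbering(ok));

      std::vector<std::size_t> bad;           // {2, 2, 1} -- 0 never used
      bad.push_back(2); bad.push_back(2); bad.push_back(1);
      assert(!is_valid_renumbering(bad));
      return 0;
    }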
diff --git a/source/dofs/dof_handler_policy.inst.in b/source/dofs/dof_handler_policy.inst.in
index 09f4d7f91d..68db7aaa35 100644
--- a/source/dofs/dof_handler_policy.inst.in
+++ b/source/dofs/dof_handler_policy.inst.in
@@ -24,17 +24,20 @@ namespace internal
     \{
       template class PolicyBase<deal_II_dimension,deal_II_dimension>;
       template class Sequential<deal_II_dimension,deal_II_dimension>;
+      template class ParallelShared<deal_II_dimension,deal_II_dimension>;
       template class ParallelDistributed<deal_II_dimension,deal_II_dimension>;

 #if deal_II_dimension==1 || deal_II_dimension==2
       template class PolicyBase<deal_II_dimension,deal_II_dimension+1>;
       template class Sequential<deal_II_dimension,deal_II_dimension+1>;
+      template class ParallelShared<deal_II_dimension,deal_II_dimension+1>;
       template class ParallelDistributed<deal_II_dimension,deal_II_dimension+1>;
 #endif

 #if deal_II_dimension==3
       template class PolicyBase<1,3>;
       template class Sequential<1,3>;
+      template class ParallelShared<1,3>;
       template class ParallelDistributed<1,3>;
 #endif
     \}
diff --git a/source/dofs/dof_renumbering.cc b/source/dofs/dof_renumbering.cc
index 3e81ab2e65..cdd4d2d4d2 100644
--- a/source/dofs/dof_renumbering.cc
+++ b/source/dofs/dof_renumbering.cc
@@ -747,11 +747,11 @@ namespace DoFRenumbering
       const unsigned int n_buckets = fe_collection.n_components();
       std::vector<types::global_dof_index> shifts(n_buckets);

-      if (const parallel::distributed::Triangulation<dim,spacedim> *tria
-          = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+      if (const parallel::Triangulation<dim,spacedim> *tria
+          = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
             (&start->get_dof_handler().get_tria())))
         {
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
           std::vector<types::global_dof_index> local_dof_count(n_buckets);
           for (unsigned int c=0; c<n_buckets; ++c)
@@ -806,11 +806,11 @@ namespace DoFRenumbering
       const unsigned int n_buckets = fe_collection.n_blocks();
       std::vector<types::global_dof_index> shifts(n_buckets);

-      if (const parallel::distributed::Triangulation<dim,spacedim> *tria
-          = (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
+      if (const parallel::Triangulation<dim,spacedim> *tria
+          = (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
             (&start->get_dof_handler().get_tria())))
         {
-#ifdef DEAL_II_WITH_P4EST
+#ifdef DEAL_II_WITH_MPI
           std::vector<types::global_dof_index> local_dof_count(n_buckets);
           for (unsigned int c=0; c<n_buckets; ++c)
diff --git a/source/dofs/dof_tools.cc b/source/dofs/dof_tools.cc
--- a/source/dofs/dof_tools.cc
+++ b/source/dofs/dof_tools.cc
@@ -1040,6 +1040,53 @@ namespace DoFTools
         active_fe_indices[cell->active_cell_index()] = cell->active_fe_index();
   }

+  template <class DH>
+  std::vector<IndexSet>
+  locally_owned_dofs_per_subdomain (const DH &dof_handler)
+  {
+    // the following involves a tie-break that is effectively a flip of a
+    // coin, so it should be called only once
+    std::vector< dealii::types::subdomain_id > subdomain_association (dof_handler.n_dofs ());
+    dealii::DoFTools::get_subdomain_association (dof_handler, subdomain_association);
+
+    const unsigned int n_subdomains = 1 + (*std::max_element (subdomain_association.begin (),
+                                                              subdomain_association.end () ));
+
+    std::vector<dealii::IndexSet> index_sets (n_subdomains, dealii::IndexSet(dof_handler.n_dofs()));
+
+    // loop over subdomain_association and populate the IndexSets whenever
+    // a change in subdomain ID is found
+    dealii::types::global_dof_index i_min          = 0;
+    dealii::types::global_dof_index this_subdomain = subdomain_association[0];
+
+    for (dealii::types::global_dof_index index = 1;
+         index < subdomain_association.size (); ++index)
+      {
+        // found an index whose subdomain differs from the current one
+        if (subdomain_association[index] != this_subdomain)
+          {
+            index_sets[this_subdomain].add_range (i_min, index);
+            i_min = index;
+            this_subdomain = subdomain_association[index];
+          }
+      }
+
+    // the range [i_min, end) is still open; if it contains only the very
+    // last element, add that single index
+    if (i_min == subdomain_association.size () - 1)
+      {
+        index_sets[this_subdomain].add_index (i_min);
+      }
+    // otherwise close the final range of at least two elements
+    else
+      {
+        index_sets[this_subdomain].add_range (
+          i_min, subdomain_association.size ());
+      }
+
+    for (unsigned int i = 0; i < n_subdomains; i++)
+      index_sets[i].compress ();
+
+    return index_sets;
+  }

 template <class DH>
 void
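locally_owned_dofs_per_subdomain() is essentially a run-length encoding of the dof-to-subdomain map: each maximal run of equal subdomain ids becomes one contiguous IndexSet range. A compact sketch of the same idea with plain vectors in place of IndexSet (illustrative only; a sentinel at i == n folds the trailing-run special case above into the loop):

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <utility>
    #include <vector>

    int main()
    {
      // subdomain id of each dof, as get_subdomain_association() would fill it
      const std::vector<unsigned int> subdomain_association = {0, 0, 1, 1, 1, 0, 2};

      const unsigned int n_subdomains =
        1 + *std::max_element(subdomain_association.begin(),
                              subdomain_association.end());

      // ranges[s] collects the half-open [begin,end) ranges owned by subdomain s
      std::vector<std::vector<std::pair<std::size_t, std::size_t> > > ranges(n_subdomains);

      std::size_t run_begin = 0;
      for (std::size_t i = 1; i <= subdomain_association.size(); ++i)
        if (i == subdomain_association.size() ||
            subdomain_association[i] != subdomain_association[run_begin])
          {
            // close the run [run_begin, i) and start a new one
            ranges[subdomain_association[run_begin]].push_back(
              std::make_pair(run_begin, i));
            run_begin = i;
          }

      for (unsigned int s = 0; s < n_subdomains; ++s)
        for (std::size_t r = 0; r < ranges[s].size(); ++r)
          std::cout << "subdomain " << s << ": ["
                    << ranges[s][r].first << ',' << ranges[s][r].second << ")\n";
      return 0;
    }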
" + "This could happen when the function is called before NumberCache is written.")); // preset all values by an invalid value std::fill_n (subdomain_association.begin(), dof_handler.n_dofs(), numbers::invalid_subdomain_id); diff --git a/source/dofs/dof_tools.inst.in b/source/dofs/dof_tools.inst.in index 53824eba45..0ac9e83efb 100644 --- a/source/dofs/dof_tools.inst.in +++ b/source/dofs/dof_tools.inst.in @@ -269,7 +269,15 @@ void DoFTools::get_subdomain_association > (const hp::DoFHandler &dof_handler, std::vector &subdomain_association); - + +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const DoFHandler &dof_handler); +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const hp::DoFHandler &dof_handler); template unsigned int @@ -362,6 +370,15 @@ void DoFTools::get_subdomain_association > (const hp::DoFHandler &dof_handler, std::vector &subdomain_association); + +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const DoFHandler &dof_handler); +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const hp::DoFHandler &dof_handler); template void @@ -398,6 +415,15 @@ void DoFTools::get_subdomain_association > (const hp::DoFHandler<1,3> &dof_handler, std::vector &subdomain_association); + +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const DoFHandler<1,3> &dof_handler); +template +std::vector +DoFTools::locally_owned_dofs_per_subdomain > +(const hp::DoFHandler<1,3> &dof_handler); template unsigned int diff --git a/source/hp/dof_handler.cc b/source/hp/dof_handler.cc index fde77d8414..62c53cb706 100644 --- a/source/hp/dof_handler.cc +++ b/source/hp/dof_handler.cc @@ -2743,15 +2743,26 @@ namespace hp number_cache.n_global_dofs = next_free_dof; number_cache.n_locally_owned_dofs = number_cache.n_global_dofs; - number_cache.locally_owned_dofs - = IndexSet (number_cache.n_global_dofs); - number_cache.locally_owned_dofs.add_range (0, - number_cache.n_global_dofs); - Assert (number_cache.n_global_dofs < std::numeric_limits::max (), - ExcMessage ("Global number of degrees of freedom is too large.")); - number_cache.n_locally_owned_dofs_per_processor - = std::vector (1, - (types::global_dof_index) number_cache.n_global_dofs); + if (dynamic_cast*> + (&this->get_tria()) + == 0) + { + number_cache.locally_owned_dofs + = IndexSet (number_cache.n_global_dofs); + number_cache.locally_owned_dofs.add_range (0, + number_cache.n_global_dofs); + Assert (number_cache.n_global_dofs < std::numeric_limits::max (), + ExcMessage ("Global number of degrees of freedom is too large.")); + number_cache.n_locally_owned_dofs_per_processor + = std::vector (1, + (types::global_dof_index) number_cache.n_global_dofs); + } + else + { + AssertThrow(false, ExcNotImplemented() ); + //number_cache.locally_owned_dofs = dealii::DoFTools::locally_owned_dofs_with_subdomain(this,tria->locally_owned_subdomain() ); + //TODO: update n_locally_owned_dofs_per_processor as well + } number_cache.locally_owned_dofs_per_processor = std::vector (1, diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index 1d3804e6c7..d9459fdc2a 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -86,7 +86,7 @@ SET(_categories a-framework algorithms all-headers aniso arpack base bits build_tests codim_one deal.II distributed_grids fe gla grid hp integrators lac lapack manifold matrix_free metis mpi multigrid opencascade petsc serialization - slepc trilinos umfpack + slepc trilinos umfpack sharedtria ) IF(DEFINED DEAL_II_HAVE_TESTS_DIRECTORY) # 
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 1d3804e6c7..d9459fdc2a 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -86,7 +86,7 @@ SET(_categories
   a-framework algorithms all-headers aniso arpack base bits build_tests
   codim_one deal.II distributed_grids fe gla grid hp integrators lac lapack
   manifold matrix_free metis mpi multigrid opencascade petsc serialization
-  slepc trilinos umfpack
+  slepc trilinos umfpack sharedtria
   )

 IF(DEFINED DEAL_II_HAVE_TESTS_DIRECTORY)
   # Only set up mesh_converter tests if the testsuite is set up as a
diff --git a/tests/sharedtria/CMakeLists.txt b/tests/sharedtria/CMakeLists.txt
new file mode 100644
index 0000000000..0b20ef4e0b
--- /dev/null
+++ b/tests/sharedtria/CMakeLists.txt
@@ -0,0 +1,5 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 2.8.9)
+INCLUDE(${DEAL_II_SOURCE_DIR}/tests/setup_testsubproject.cmake)
+PROJECT(testsuite CXX)
+INCLUDE(${DEAL_II_TARGET_CONFIG})
+DEAL_II_PICKUP_TESTS()
diff --git a/tests/sharedtria/dof_01.cc b/tests/sharedtria/dof_01.cc
new file mode 100644
index 0000000000..4fa58f201b
--- /dev/null
+++ b/tests/sharedtria/dof_01.cc
@@ -0,0 +1,148 @@
+// ---------------------------------------------------------------------
+// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $
+//
+// Copyright (C) 2008 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  FESystem<dim> fe (FE_Q<dim>(3),2,
+                    FE_DGQ<dim>(1),1);
+
+  DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // randomly flag a fifth of all cells for refinement
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5+1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl
+          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+      deallog << "n_locally_owned_dofs_per_processor: ";
+      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+      unsigned int sum = 0;
+      for (unsigned int i=0; i<v.size(); ++i)
+        {
+          deallog << v[i] << " ";
+          sum += v[i];
+        }
+      deallog << "sum: " << sum << std::endl;
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/dof_01.with_metis=true.mpirun=3.output b/tests/sharedtria/dof_01.with_metis=true.mpirun=3.output
new file mode 100644
index 0000000000..40e7378f11
--- /dev/null
+++ b/tests/sharedtria/dof_01.with_metis=true.mpirun=3.output
@@ -0,0 +1,50 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754 +DEAL:0:2d::n_dofs: 3056 +DEAL:0:2d::n_locally_owned_dofs: 1023 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056 +DEAL:0:3d::n_dofs: 13282 +DEAL:0:3d::n_locally_owned_dofs: 4446 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282 +DEAL:0:3d::n_dofs: 41826 +DEAL:0:3d::n_locally_owned_dofs: 13862 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826 + +DEAL:1:2d::n_dofs: 818 +DEAL:1:2d::n_locally_owned_dofs: 297 +DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818 +DEAL:1:2d::n_dofs: 1754 +DEAL:1:2d::n_locally_owned_dofs: 588 +DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754 +DEAL:1:2d::n_dofs: 3056 +DEAL:1:2d::n_locally_owned_dofs: 1013 +DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056 +DEAL:1:3d::n_dofs: 13282 +DEAL:1:3d::n_locally_owned_dofs: 4386 +DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282 +DEAL:1:3d::n_dofs: 41826 +DEAL:1:3d::n_locally_owned_dofs: 14131 +DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826 + + +DEAL:2:2d::n_dofs: 818 +DEAL:2:2d::n_locally_owned_dofs: 232 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818 +DEAL:2:2d::n_dofs: 1754 +DEAL:2:2d::n_locally_owned_dofs: 588 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754 +DEAL:2:2d::n_dofs: 3056 +DEAL:2:2d::n_locally_owned_dofs: 1020 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056 +DEAL:2:3d::n_dofs: 13282 +DEAL:2:3d::n_locally_owned_dofs: 4450 +DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282 +DEAL:2:3d::n_dofs: 41826 +DEAL:2:3d::n_locally_owned_dofs: 13833 +DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826 + diff --git a/tests/sharedtria/dof_01.with_metis=true.output b/tests/sharedtria/dof_01.with_metis=true.output new file mode 100644 index 0000000000..d0742843fd --- /dev/null +++ b/tests/sharedtria/dof_01.with_metis=true.output @@ -0,0 +1,16 @@ + +DEAL:0:2d::n_dofs: 818 +DEAL:0:2d::n_locally_owned_dofs: 818 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 818 sum: 818 +DEAL:0:2d::n_dofs: 1754 +DEAL:0:2d::n_locally_owned_dofs: 1754 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754 sum: 1754 +DEAL:0:2d::n_dofs: 3056 +DEAL:0:2d::n_locally_owned_dofs: 3056 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056 sum: 3056 +DEAL:0:3d::n_dofs: 13282 +DEAL:0:3d::n_locally_owned_dofs: 13282 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282 sum: 13282 +DEAL:0:3d::n_dofs: 41826 +DEAL:0:3d::n_locally_owned_dofs: 41826 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826 sum: 41826 diff --git a/tests/sharedtria/dof_02.cc b/tests/sharedtria/dof_02.cc new file mode 100644 index 0000000000..7b2eb7e7a7 --- /dev/null +++ b/tests/sharedtria/dof_02.cc @@ -0,0 +1,149 @@ +// --------------------------------------------------------------------- +// $Id: dof_handler_number_cache.cc 31761 2013-11-22 14:42:37Z heister $ +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. 
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check number cache for shared_tria with renumbering
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_renumbering.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_dgq.h>
+
+#include <fstream>
+#include <cstdlib>
+#include <numeric>
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim>
+  triangulation (MPI_COMM_WORLD);
+
+  FESystem<dim> fe (FE_Q<dim>(3),2,
+                    FE_DGQ<dim>(1),1);
+
+  DoFHandler<dim> dof_handler (triangulation);
+
+  GridGenerator::hyper_cube(triangulation);
+  triangulation.refine_global (2);
+
+  const unsigned int n_refinements[] = { 0, 4, 3, 2 };
+  for (unsigned int i=0; i<n_refinements[dim]; ++i)
+    {
+      // randomly flag a fifth of all cells for refinement
+      std::vector<bool> flags (triangulation.n_active_cells(), false);
+      for (unsigned int k=0; k<flags.size()/5+1; ++k)
+        flags[Testing::rand() % flags.size()] = true;
+
+      unsigned int index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (flags[index])
+            cell->set_refine_flag();
+          ++index;
+        }
+
+      Assert (index <= triangulation.n_active_cells(), ExcInternalError());
+
+      // flag all other cells for coarsening
+      // (this should ensure that at least
+      // some of them will actually be
+      // coarsened)
+      index=0;
+      for (typename Triangulation<dim>::active_cell_iterator
+           cell = triangulation.begin_active();
+           cell != triangulation.end(); ++cell)
+        {
+          if (!flags[index])
+            cell->set_coarsen_flag();
+          ++index;
+        }
+
+      triangulation.execute_coarsening_and_refinement ();
+      dof_handler.distribute_dofs (fe);
+      DoFRenumbering::component_wise(dof_handler);
+
+      deallog
+          << "n_dofs: " << dof_handler.n_dofs() << std::endl
+          << "n_locally_owned_dofs: " << dof_handler.n_locally_owned_dofs() << std::endl;
+
+      deallog << "n_locally_owned_dofs_per_processor: ";
+      std::vector<types::global_dof_index> v = dof_handler.n_locally_owned_dofs_per_processor();
+      unsigned int sum = 0;
+      for (unsigned int i=0; i<v.size(); ++i)
+        {
+          deallog << v[i] << " ";
+          sum += v[i];
+        }
+      deallog << "sum: " << sum << std::endl;
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/dof_02.with_metis=true.mpirun=3.output b/tests/sharedtria/dof_02.with_metis=true.mpirun=3.output
new file mode 100644
index 0000000000..40e7378f11
--- /dev/null
+++ b/tests/sharedtria/dof_02.with_metis=true.mpirun=3.output
@@ -0,0 +1,50 @@
+
+DEAL:0:2d::n_dofs: 818
+DEAL:0:2d::n_locally_owned_dofs: 289
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:0:2d::n_dofs: 1754
+DEAL:0:2d::n_locally_owned_dofs: 578
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:0:2d::n_dofs: 3056
+DEAL:0:2d::n_locally_owned_dofs: 1023
+DEAL:0:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:0:3d::n_dofs: 13282
+DEAL:0:3d::n_locally_owned_dofs: 4446
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:0:3d::n_dofs: 41826
+DEAL:0:3d::n_locally_owned_dofs: 13862
+DEAL:0:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826
+
+DEAL:1:2d::n_dofs: 818
+DEAL:1:2d::n_locally_owned_dofs: 297
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818
+DEAL:1:2d::n_dofs: 1754
+DEAL:1:2d::n_locally_owned_dofs: 588
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754
+DEAL:1:2d::n_dofs: 3056
+DEAL:1:2d::n_locally_owned_dofs: 1013
+DEAL:1:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056
+DEAL:1:3d::n_dofs: 13282
+DEAL:1:3d::n_locally_owned_dofs: 4386
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282
+DEAL:1:3d::n_dofs: 41826
+DEAL:1:3d::n_locally_owned_dofs: 14131
+DEAL:1:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826 + + +DEAL:2:2d::n_dofs: 818 +DEAL:2:2d::n_locally_owned_dofs: 232 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 289 297 232 sum: 818 +DEAL:2:2d::n_dofs: 1754 +DEAL:2:2d::n_locally_owned_dofs: 588 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 578 588 588 sum: 1754 +DEAL:2:2d::n_dofs: 3056 +DEAL:2:2d::n_locally_owned_dofs: 1020 +DEAL:2:2d::n_locally_owned_dofs_per_processor: 1023 1013 1020 sum: 3056 +DEAL:2:3d::n_dofs: 13282 +DEAL:2:3d::n_locally_owned_dofs: 4450 +DEAL:2:3d::n_locally_owned_dofs_per_processor: 4446 4386 4450 sum: 13282 +DEAL:2:3d::n_dofs: 41826 +DEAL:2:3d::n_locally_owned_dofs: 13833 +DEAL:2:3d::n_locally_owned_dofs_per_processor: 13862 14131 13833 sum: 41826 + diff --git a/tests/sharedtria/dof_02.with_metis=true.output b/tests/sharedtria/dof_02.with_metis=true.output new file mode 100644 index 0000000000..d0742843fd --- /dev/null +++ b/tests/sharedtria/dof_02.with_metis=true.output @@ -0,0 +1,16 @@ + +DEAL:0:2d::n_dofs: 818 +DEAL:0:2d::n_locally_owned_dofs: 818 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 818 sum: 818 +DEAL:0:2d::n_dofs: 1754 +DEAL:0:2d::n_locally_owned_dofs: 1754 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 1754 sum: 1754 +DEAL:0:2d::n_dofs: 3056 +DEAL:0:2d::n_locally_owned_dofs: 3056 +DEAL:0:2d::n_locally_owned_dofs_per_processor: 3056 sum: 3056 +DEAL:0:3d::n_dofs: 13282 +DEAL:0:3d::n_locally_owned_dofs: 13282 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 13282 sum: 13282 +DEAL:0:3d::n_dofs: 41826 +DEAL:0:3d::n_locally_owned_dofs: 41826 +DEAL:0:3d::n_locally_owned_dofs_per_processor: 41826 sum: 41826 diff --git a/tests/sharedtria/tria_01.cc b/tests/sharedtria/tria_01.cc new file mode 100644 index 0000000000..b85a32da73 --- /dev/null +++ b/tests/sharedtria/tria_01.cc @@ -0,0 +1,107 @@ +// --------------------------------------------------------------------- +// $Id: 3d_refinement_01.cc 31349 2013-10-20 19:07:06Z maier $ +// +// Copyright (C) 2008 - 2013 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+//
+// ---------------------------------------------------------------------
+
+
+// create a shared tria mesh and refine it
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/numerics/data_out.h>
+
+#include <fstream>
+
+template <int dim>
+void write_mesh (const parallel::shared::Triangulation<dim> &tria,
+                 const char *filename_)
+{
+  DataOut<dim> data_out;
+  data_out.attach_triangulation (tria);
+  Vector<float> subdomain (tria.n_active_cells());
+  for (unsigned int i=0; i<subdomain.size(); ++i)
+    subdomain(i) = tria.locally_owned_subdomain();
+  data_out.add_data_vector (subdomain, "subdomain");
+
+  data_out.build_patches ();
+  const std::string filename = (filename_ +
+                                Utilities::int_to_string
+                                (tria.locally_owned_subdomain(), 4));
+  {
+    std::ofstream output ((filename + ".vtu").c_str());
+    data_out.write_vtu (output);
+  }
+}
+
+
+template <int dim>
+void test()
+{
+  parallel::shared::Triangulation<dim> tr(MPI_COMM_WORLD);
+
+
+  GridGenerator::hyper_cube(tr);
+  tr.begin_active()->set_refine_flag();
+  tr.execute_coarsening_and_refinement ();
+  tr.begin_active()->set_refine_flag();
+  tr.execute_coarsening_and_refinement ();
+
+  deallog
+      << " locally_owned_subdomain(): " << tr.locally_owned_subdomain() << "\n"
+      << " n_active_cells: " << tr.n_active_cells() << "\n"
+      << " n_levels: " << tr.n_levels() << "\n"
+      << " n_global_levels: " << tr.n_global_levels() << "\n"
+      //<< " n_locally_owned_active_cells: " << tr.n_locally_owned_active_cells() << "\n"
+      //<< " n_global_active_cells: " << tr.n_global_active_cells() << "\n"
+      << std::endl;
+
+  /*deallog << "n_locally_owned_active_cells_per_processor: ";
+  std::vector<unsigned int> v = tr.n_locally_owned_active_cells_per_processor();
+  for (unsigned int i=0; i<v.size(); ++i)
+    deallog << v[i] << " ";
+  deallog << std::endl;*/
+
+  deallog << "subdomains: ";
+  typename parallel::shared::Triangulation<dim>::active_cell_iterator it=tr.begin_active();
+  for (; it!=tr.end(); ++it)
+    {
+      deallog << it->subdomain_id() << " ";
+    }
+  deallog << std::endl;
+
+  //write_mesh(tr, "mesh");
+}
+
+
+int main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  deallog.push("2d");
+  test<2>();
+  deallog.pop();
+  deallog.push("3d");
+  test<3>();
+  deallog.pop();
+}
diff --git a/tests/sharedtria/tria_01.with_metis=true.mpirun=3.output b/tests/sharedtria/tria_01.with_metis=true.mpirun=3.output
new file mode 100644
index 0000000000..1f3007679d
--- /dev/null
+++ b/tests/sharedtria/tria_01.with_metis=true.mpirun=3.output
@@ -0,0 +1,41 @@
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
+DEAL:1:2d:: locally_owned_subdomain(): 1
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:1:3d:: locally_owned_subdomain(): 1
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:1:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
+
+DEAL:2:2d:: locally_owned_subdomain(): 2
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:2d::subdomains: 0 1 0 1 2 1 2
+DEAL:2:3d:: locally_owned_subdomain(): 2
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:2:3d::subdomains: 0 2 0 1 0 0 0 2 2 2 2 1 1 1 1
+
diff --git a/tests/sharedtria/tria_01.with_metis=true.output b/tests/sharedtria/tria_01.with_metis=true.output
new file mode 100644
index 0000000000..d1b965d9b2
--- /dev/null
+++ b/tests/sharedtria/tria_01.with_metis=true.output
@@ -0,0 +1,13 @@
+
+DEAL:0:2d:: locally_owned_subdomain(): 0
+ n_active_cells: 7
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:2d::subdomains: 0 0 0 0 0 0 0
+DEAL:0:3d:: locally_owned_subdomain(): 0
+ n_active_cells: 15
+ n_levels: 3
+ n_global_levels: 3
+
+DEAL:0:3d::subdomains: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-- 
2.39.5
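For reference, a minimal end-to-end use of the new class, condensed from tests/sharedtria/dof_01.cc above. This is a sketch assuming a deal.II build configured with MPI and METIS; it only uses calls that appear in this patch and its tests:

    #include <deal.II/base/mpi.h>
    #include <deal.II/distributed/shared_tria.h>
    #include <deal.II/dofs/dof_handler.h>
    #include <deal.II/fe/fe_q.h>
    #include <deal.II/grid/grid_generator.h>

    #include <iostream>

    int main(int argc, char *argv[])
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      // every rank stores the whole mesh; ownership is partitioned via METIS
      dealii::parallel::shared::Triangulation<2> triangulation(MPI_COMM_WORLD);
      dealii::GridGenerator::hyper_cube(triangulation);
      triangulation.refine_global(3);

      dealii::FE_Q<2>       fe(1);
      dealii::DoFHandler<2> dof_handler(triangulation);
      dof_handler.distribute_dofs(fe);   // dofs are renumbered subdomain-wise

      std::cout << "rank " << triangulation.locally_owned_subdomain()
                << " owns " << dof_handler.n_locally_owned_dofs()
                << " of "   << dof_handler.n_dofs() << " dofs" << std::endl;
      return 0;
    }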