From 3e4c89222c45394077effac1227c0b8fbb6497d1 Mon Sep 17 00:00:00 2001
From: Martin Kronbichler
Date: Mon, 3 Jun 2019 11:44:42 +0200
Subject: [PATCH] Backwards compatibility for locally_owned_dofs_per_processor

---
 include/deal.II/dofs/dof_handler.h | 153 ++++++++++++++++++++++---
 include/deal.II/hp/dof_handler.h   | 176 +++++++++++++++++++++++++---
 2 files changed, 290 insertions(+), 39 deletions(-)

diff --git a/include/deal.II/dofs/dof_handler.h b/include/deal.II/dofs/dof_handler.h
index 980cf95f3a..3b2502ef59 100644
--- a/include/deal.II/dofs/dof_handler.h
+++ b/include/deal.II/dofs/dof_handler.h
@@ -1035,9 +1035,14 @@ public:
   locally_owned_mg_dofs(const unsigned int level) const;
 
   /**
-   * Return a vector that stores the locally owned DoFs of each processor. If
-   * you are only interested in the number of elements each processor owns
-   * then n_locally_owned_dofs_per_processor() is a better choice.
+   * Compute a vector with the locally owned DoFs of each processor.
+   *
+   * This function involves global communication via the @p MPI_Allgather
+   * function, so it must be called on all processors participating in the MPI
+   * communicator underlying the triangulation.
+   *
+   * If you are only interested in the number of elements each processor owns
+   * then compute_n_locally_owned_dofs_per_processor() is a better choice.
    *
    * If this is a sequential DoFHandler, then the vector has a single element
    * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
@@ -1047,17 +1052,21 @@ public:
    * only on one MPI process.)
    */
   std::vector<IndexSet>
-  locally_owned_dofs_per_processor() const;
+  compute_locally_owned_dofs_per_processor() const;
 
   /**
-   * Return a vector that stores the number of degrees of freedom each
+   * Compute a vector with the number of degrees of freedom each
    * processor that participates in this triangulation owns locally. The sum
    * of all these numbers equals the number of degrees of freedom that exist
    * globally, i.e. what n_dofs() returns.
    *
+   * This function involves global communication via the @p MPI_Allgather
+   * function, so it must be called on all processors participating in the MPI
+   * communicator underlying the triangulation.
+   *
    * Each element of the vector returned by this function equals the number of
    * elements of the corresponding sets returned by
-   * locally_owned_dofs_per_processor().
+   * compute_locally_owned_dofs_per_processor().
    *
    * If this is a sequential DoFHandler, then the vector has a single element
    * equal to n_dofs(). (Here, "sequential" means that either the whole program
@@ -1066,11 +1075,15 @@ public:
    * this DoFHandler builds works only on one MPI process.)
    */
   std::vector<types::global_dof_index>
-  n_locally_owned_dofs_per_processor() const;
+  compute_n_locally_owned_dofs_per_processor() const;
 
   /**
-   * Return a vector that stores the locally owned DoFs of each processor on
-   * the given level @p level.
+   * Compute a vector with the locally owned DoFs of each processor on
+   * the given level @p level for geometric multigrid.
+   *
+   * This function involves global communication via the @p MPI_Allgather
+   * function, so it must be called on all processors participating in the MPI
+   * communicator underlying the triangulation.
    *
    * If this is a sequential DoFHandler, then the vector has a single element
    * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
@@ -1080,7 +1093,56 @@ public:
    * only on one MPI process.)
    */
   std::vector<IndexSet>
-  locally_owned_mg_dofs_per_processor(const unsigned int level) const;
+  compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
+
+  /**
+   * Return a vector that stores the locally owned DoFs of each processor.
+   *
+   * @deprecated As of deal.II version 9.2, we do not populate a vector with
+   * the index sets of all processors by default any more due to a possibly
+   * large memory footprint on many processors. As a consequence, this
+   * function needs to call compute_locally_owned_dofs_per_processor() upon
+   * the first invocation, including global communication. Use
+   * compute_locally_owned_dofs_per_processor() instead if using up to a few
+   * thousand MPI ranks or some variant involving local communication with
+   * more processors.
+   */
+  DEAL_II_DEPRECATED const std::vector<IndexSet> &
+  locally_owned_dofs_per_processor() const;
+
+  /**
+   * Return a vector that stores the number of degrees of freedom each
+   * processor that participates in this triangulation owns locally. The sum
+   * of all these numbers equals the number of degrees of freedom that exist
+   * globally, i.e. what n_dofs() returns.
+   *
+   * @deprecated As of deal.II version 9.2, we do not populate a vector with
+   * the numbers of dofs of all processors by default any more due to a
+   * possibly large memory footprint on many processors. As a consequence,
+   * this function needs to call compute_n_locally_owned_dofs_per_processor()
+   * upon the first invocation, including global communication. Use
+   * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
+   * thousand MPI ranks or some variant involving local communication with
+   * more processors.
+   */
+  DEAL_II_DEPRECATED const std::vector<types::global_dof_index> &
+  n_locally_owned_dofs_per_processor() const;
+
+  /**
+   * Return a vector that stores the locally owned DoFs of each processor on
+   * the given level @p level.
+   *
+   * @deprecated As of deal.II version 9.2, we do not populate a vector with
+   * the index sets of all processors by default any more due to a possibly
+   * large memory footprint on many processors. As a consequence, this
+   * function needs to call compute_locally_owned_mg_dofs_per_processor() upon
+   * the first invocation, including global communication. Use
+   * compute_locally_owned_mg_dofs_per_processor() instead if using up to a few
+   * thousand MPI ranks or some variant involving local communication with
+   * more processors.
+   */
+  DEAL_II_DEPRECATED const std::vector<IndexSet> &
+  locally_owned_mg_dofs_per_processor(const unsigned int level) const;
 
   /**
    * Return a constant reference to the selected finite element object.
@@ -1450,7 +1512,8 @@ const IndexSet &
 DoFHandler<dim, spacedim>::locally_owned_mg_dofs(const unsigned int level) const
 {
   Assert(level < this->get_triangulation().n_global_levels(),
-         ExcMessage("invalid level in locally_owned_mg_dofs"));
+         ExcMessage("The given level index exceeds the number of levels "
+                    "present in the triangulation"));
   Assert(
     mg_number_cache.size() == this->get_triangulation().n_global_levels(),
     ExcMessage(
@@ -1461,8 +1524,67 @@ DoFHandler<dim, spacedim>::locally_owned_mg_dofs(const unsigned int level) const
 
 
 template <int dim, int spacedim>
-std::vector<types::global_dof_index>
+const std::vector<types::global_dof_index> &
 DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
+{
+  if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
+      number_cache.n_global_dofs > 0)
+    {
+      const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
+        number_cache)
+        .n_locally_owned_dofs_per_processor =
+        compute_n_locally_owned_dofs_per_processor();
+    }
+  return number_cache.n_locally_owned_dofs_per_processor;
+}
+
+
+
+template <int dim, int spacedim>
+const std::vector<IndexSet> &
+DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
+{
+  if (number_cache.locally_owned_dofs_per_processor.empty() &&
+      number_cache.n_global_dofs > 0)
+    {
+      const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
+        number_cache)
+        .locally_owned_dofs_per_processor =
+        compute_locally_owned_dofs_per_processor();
+    }
+  return number_cache.locally_owned_dofs_per_processor;
+}
+
+
+
+template <int dim, int spacedim>
+const std::vector<IndexSet> &
+DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
+  const unsigned int level) const
+{
+  Assert(level < this->get_triangulation().n_global_levels(),
+         ExcMessage("The given level index exceeds the number of levels "
+                    "present in the triangulation"));
+  Assert(
+    mg_number_cache.size() == this->get_triangulation().n_global_levels(),
+    ExcMessage(
+      "The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
+  if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
+      mg_number_cache[level].n_global_dofs > 0)
+    {
+      const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
+        mg_number_cache[level])
+        .locally_owned_dofs_per_processor =
+        compute_locally_owned_mg_dofs_per_processor(level);
+    }
+  return mg_number_cache[level].locally_owned_dofs_per_processor;
+}
+
+
+
+template <int dim, int spacedim>
+std::vector<types::global_dof_index>
+DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
 {
   const parallel::Triangulation<dim, spacedim> *tr =
     (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
@@ -1478,7 +1600,7 @@ DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
 
 template <int dim, int spacedim>
 std::vector<IndexSet>
-DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
+DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
 {
   const parallel::Triangulation<dim, spacedim> *tr =
     (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
@@ -1494,11 +1616,12 @@ DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
 
 template <int dim, int spacedim>
 std::vector<IndexSet>
-DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
+DoFHandler<dim, spacedim>::compute_locally_owned_mg_dofs_per_processor(
   const unsigned int level) const
 {
   Assert(level < this->get_triangulation().n_global_levels(),
-         ExcMessage("invalid level in locally_owned_mg_dofs_per_processor"));
+         ExcMessage("The given level index exceeds the number of levels "
+                    "present in the triangulation"));
   Assert(
     mg_number_cache.size() == this->get_triangulation().n_global_levels(),
     ExcMessage(
diff --git a/include/deal.II/hp/dof_handler.h b/include/deal.II/hp/dof_handler.h
index 622cacb9e9..8e95299211 100644
--- a/include/deal.II/hp/dof_handler.h
+++ b/include/deal.II/hp/dof_handler.h
@@ -826,30 +826,38 @@ namespace hp
     locally_owned_dofs() const;
 
     /**
-     * Return a vector that stores the locally owned DoFs of each processor.
-     * If you are only interested in the number of elements each processor
-     * owns then n_dofs_per_processor() is a better choice.
+     * Compute a vector with the locally owned DoFs of each processor.
+     *
+     * This function involves global communication via the @p MPI_Allgather
+     * function, so it must be called on all processors participating in the MPI
+     * communicator underlying the triangulation.
+     *
+     * If you are only interested in the number of elements each processor owns
+     * then compute_n_locally_owned_dofs_per_processor() is a better choice.
      *
      * If this is a sequential DoFHandler, then the vector has a single element
      * that equals the IndexSet representing the entire range [0,n_dofs()].
-     * (Here, "sequential" means that either
-     * the whole program does not use MPI, or that it uses MPI
-     * but only uses a single MPI process, or that there are multiple MPI
-     * processes but the Triangulation on which this DoFHandler builds
-     * works only on one MPI process.)
+     * (Here, "sequential" means that either the whole program does not use MPI,
+     * or that it uses MPI but only uses a single MPI process, or that there are
+     * multiple MPI processes but the Triangulation on which this DoFHandler
+     * builds works only on one MPI process.)
      */
     std::vector<IndexSet>
-    locally_owned_dofs_per_processor() const;
+    compute_locally_owned_dofs_per_processor() const;
 
     /**
-     * Return a vector that stores the number of degrees of freedom each
+     * Compute a vector with the number of degrees of freedom each
      * processor that participates in this triangulation owns locally. The sum
      * of all these numbers equals the number of degrees of freedom that exist
      * globally, i.e. what n_dofs() returns.
      *
-     * Each element of the vector returned by this function equals the number
-     * of elements of the corresponding sets returned by
-     * locally_owned_dofs_per_processor().
+     * This function involves global communication via the @p MPI_Allgather
+     * function, so it must be called on all processors participating in the MPI
+     * communicator underlying the triangulation.
+     *
+     * Each element of the vector returned by this function equals the number of
+     * elements of the corresponding sets returned by
+     * compute_locally_owned_dofs_per_processor().
      *
      * If this is a sequential DoFHandler, then the vector has a single element
      * equal to n_dofs(). (Here, "sequential" means that either the whole
@@ -858,7 +866,40 @@ namespace hp
      * on which this DoFHandler builds works only on one MPI process.)
      */
     std::vector<types::global_dof_index>
-    n_locally_owned_dofs_per_processor() const;
+    compute_n_locally_owned_dofs_per_processor() const;
+
+    /**
+     * Return a vector that stores the locally owned DoFs of each processor.
+     *
+     * @deprecated As of deal.II version 9.2, we do not populate a vector with
+     * the index sets of all processors by default any more due to a possibly
+     * large memory footprint on many processors. As a consequence, this
+     * function needs to call compute_locally_owned_dofs_per_processor() upon
+     * the first invocation, including global communication. Use
+     * compute_locally_owned_dofs_per_processor() instead if using up to a few
+     * thousand MPI ranks or some variant involving local communication with
+     * more processors.
+     */
+    DEAL_II_DEPRECATED const std::vector<IndexSet> &
+    locally_owned_dofs_per_processor() const;
+
+    /**
+     * Return a vector that stores the number of degrees of freedom each
+     * processor that participates in this triangulation owns locally. The sum
+     * of all these numbers equals the number of degrees of freedom that exist
+     * globally, i.e. what n_dofs() returns.
+     *
+     * @deprecated As of deal.II version 9.2, we do not populate a vector with
+     * the numbers of dofs of all processors by default any more due to a
+     * possibly large memory footprint on many processors. As a consequence,
+     * this function needs to call compute_n_locally_owned_dofs_per_processor()
+     * upon the first invocation, including global communication. Use
+     * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
+     * thousand MPI ranks or some variant involving local communication with
+     * more processors.
+     */
+    DEAL_II_DEPRECATED const std::vector<types::global_dof_index> &
+    n_locally_owned_dofs_per_processor() const;
 
     /**
      * Return an IndexSet describing the set of locally owned DoFs used for
@@ -870,13 +911,38 @@ namespace hp
     locally_owned_mg_dofs(const unsigned int level) const;
 
     /**
-     * Return a vector that stores the locally owned level DoFs of each
-     * processor on the given level @p level. Since hp::DoFHandler does not
-     * support multilevel methods yet, this function throws an exception
-     * ExcNotImplemented() independent of its argument.
+     * Compute a vector with the locally owned DoFs of each processor on
+     * the given level @p level for geometric multigrid.
+     *
+     * This function involves global communication via the @p MPI_Allgather
+     * function, so it must be called on all processors participating in the MPI
+     * communicator underlying the triangulation.
+     *
+     * If this is a sequential DoFHandler, then the vector has a single element
+     * that equals the IndexSet representing the entire range [0,n_dofs()].
+     * (Here, "sequential" means that either the whole program does not use MPI,
+     * or that it uses MPI but only uses a single MPI process, or that there are
+     * multiple MPI processes but the Triangulation on which this DoFHandler
+     * builds works only on one MPI process.)
      */
     std::vector<IndexSet>
-    locally_owned_mg_dofs_per_processor(const unsigned int level) const;
+    compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
+
+    /**
+     * Return a vector that stores the locally owned DoFs of each processor on
+     * the given level @p level.
+     *
+     * @deprecated As of deal.II version 9.2, we do not populate a vector with
+     * the index sets of all processors by default any more due to a possibly
+     * large memory footprint on many processors. As a consequence, this
+     * function needs to call compute_locally_owned_mg_dofs_per_processor() upon
+     * the first invocation, including global communication. Use
+     * compute_locally_owned_mg_dofs_per_processor() instead if using up to a
+     * few thousand MPI ranks or some variant involving local communication
+     * with more processors.
+ */ + DEAL_II_DEPRECATED const std::vector & + locally_owned_mg_dofs_per_processor(const unsigned int level) const; /** * Return a constant reference to the set of finite element objects that @@ -1487,8 +1553,42 @@ namespace hp template - std::vector + const std::vector & DoFHandler::n_locally_owned_dofs_per_processor() const + { + if (number_cache.n_locally_owned_dofs_per_processor.empty() && + number_cache.n_global_dofs > 0) + { + const_cast( + number_cache) + .n_locally_owned_dofs_per_processor = + compute_n_locally_owned_dofs_per_processor(); + } + return number_cache.n_locally_owned_dofs_per_processor; + } + + + + template + const std::vector & + DoFHandler::locally_owned_dofs_per_processor() const + { + if (number_cache.locally_owned_dofs_per_processor.empty() && + number_cache.n_global_dofs > 0) + { + const_cast( + number_cache) + .locally_owned_dofs_per_processor = + compute_locally_owned_dofs_per_processor(); + } + return number_cache.locally_owned_dofs_per_processor; + } + + + + template + std::vector + DoFHandler::compute_n_locally_owned_dofs_per_processor() const { const parallel::Triangulation *tr = (dynamic_cast *>( @@ -1504,7 +1604,7 @@ namespace hp template std::vector - DoFHandler::locally_owned_dofs_per_processor() const + DoFHandler::compute_locally_owned_dofs_per_processor() const { const parallel::Triangulation *tr = (dynamic_cast *>( @@ -1526,20 +1626,48 @@ namespace hp Assert(false, ExcNotImplemented()); (void)level; Assert(level < this->get_triangulation().n_global_levels(), - ExcMessage("invalid level in locally_owned_mg_dofs")); + ExcMessage("The given level index exceeds the number of levels " + "present in the triangulation")); return mg_number_cache[0].locally_owned_dofs; } + template - std::vector + const std::vector & DoFHandler::locally_owned_mg_dofs_per_processor( const unsigned int level) const + { + Assert(level < this->get_triangulation().n_global_levels(), + ExcMessage("The given level index exceeds the number of levels " + "present in the triangulation")); + Assert( + mg_number_cache.size() == this->get_triangulation().n_global_levels(), + ExcMessage( + "The level dofs are not set up properly! Did you call distribute_mg_dofs()?")); + if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() && + mg_number_cache[level].n_global_dofs > 0) + { + const_cast( + mg_number_cache[level]) + .locally_owned_dofs_per_processor = + compute_locally_owned_mg_dofs_per_processor(level); + } + return mg_number_cache[level].locally_owned_dofs_per_processor; + } + + + + template + std::vector + DoFHandler::compute_locally_owned_mg_dofs_per_processor( + const unsigned int level) const { Assert(false, ExcNotImplemented()); (void)level; Assert(level < this->get_triangulation().n_global_levels(), - ExcMessage("invalid level in locally_owned_mg_dofs_per_processor")); + ExcMessage("The given level index exceeds the number of levels " + "present in the triangulation")); const parallel::Triangulation *tr = (dynamic_cast *>( &this->get_triangulation())); -- 2.39.5