From: denis.davydov
Date: Fri, 2 May 2014 08:33:59 +0000 (+0000)
Subject: added MeshSmoothing to constructor; Verbatim copy of NumberCache related functions...
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=0e0f5e82a57348cd8c3468b51d3c4c7cd9fb0b97;p=dealii-svn.git

added MeshSmoothing to constructor; Verbatim copy of NumberCache related functions
from distributed triangulation

git-svn-id: https://svn.dealii.org/branches/branch_sharedtria@32871 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/include/deal.II/distributed/shared_tria.h b/deal.II/include/deal.II/distributed/shared_tria.h
index 2ae1949971..f19b8e71c6 100644
--- a/deal.II/include/deal.II/distributed/shared_tria.h
+++ b/deal.II/include/deal.II/distributed/shared_tria.h
@@ -41,6 +41,7 @@ DEAL_II_NAMESPACE_OPEN
 
 template <int dim, int spacedim> class Triangulation;
 
+
 namespace parallel
 {
   namespace shared
@@ -60,7 +61,9 @@ namespace parallel
       /**
        * Constructor.
        */
-      Triangulation (MPI_Comm mpi_communicator);
+      Triangulation (MPI_Comm mpi_communicator,
+                     const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing =
+                     typename Triangulation<dim,spacedim>::MeshSmoothing(Triangulation<dim,spacedim>::none));
 
       /**
        * Destructor.
@@ -98,11 +101,59 @@ namespace parallel
                                  const std::vector< CellData< dim > > &cells,
                                  const SubCellData &subcelldata);
 
+      /**
+       * Return the number of active cells owned by each of the MPI
+       * processes that contribute to this triangulation. The element
+       * of this vector indexed by locally_owned_subdomain() equals
+       * the result of n_locally_owned_active_cells().
+       */
+      const std::vector<unsigned int> &
+      n_locally_owned_active_cells_per_processor () const;
+
+      /**
+       * Return the number of active cells in the triangulation that
+       * are locally owned, i.e. that have a subdomain_id equal to
+       * locally_owned_subdomain().
+       */
+      unsigned int n_locally_owned_active_cells () const;
+
+      /**
+       * Return the sum over all processors of the number of active
+       * cells owned by each processor. This equals the overall number
+       * of active cells in the shared triangulation.
+       */
+      types::global_dof_index n_global_active_cells () const;
+
+      /**
+       * Return the global maximum level, i.e. the maximum of n_levels()
+       * over all participating processors. This may be bigger than the
+       * n_levels() stored locally.
+       */
+      virtual unsigned int n_global_levels () const;
+
     private:
       MPI_Comm mpi_communicator;
       types::subdomain_id my_subdomain;
       types::subdomain_id num_subdomains;
+
+      struct NumberCache
+      {
+        std::vector<unsigned int> n_locally_owned_active_cells;
+        types::global_dof_index   n_global_active_cells;
+        unsigned int              n_global_levels;
+
+        NumberCache();
+      };
+
+      NumberCache number_cache;
+
+      /**
+       * Update the number_cache variable after mesh creation or
+       * refinement.
+       */
+      void update_number_cache ();
+
     };
   }
 }
diff --git a/deal.II/source/distributed/shared_tria.cc b/deal.II/source/distributed/shared_tria.cc
index e01636453e..f28b69291a 100644
--- a/deal.II/source/distributed/shared_tria.cc
+++ b/deal.II/source/distributed/shared_tria.cc
@@ -40,15 +40,24 @@ namespace parallel
 {
   namespace shared
   {
+
+    template <int dim, int spacedim>
+    Triangulation<dim,spacedim>::NumberCache::NumberCache()
+      :
+      n_global_active_cells(0),
+      n_global_levels(0)
+    {}
+
     template <int dim, int spacedim>
-    Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator):
+    Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
+                                                const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing):
      dealii::Triangulation<dim,spacedim>(),
      mpi_communicator (Utilities::MPI::duplicate_communicator(mpi_communicator)),
      my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
      num_subdomains(Utilities::MPI::n_mpi_processes(mpi_communicator))
     {
-
+      number_cache.n_locally_owned_active_cells.resize (num_subdomains);
     }
 
 
@@ -58,6 +67,72 @@ namespace parallel
     }
 
+    template <int dim, int spacedim>
+    unsigned int
+    Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
+    {
+      return number_cache.n_locally_owned_active_cells[my_subdomain];
+    }
+
+    template <int dim, int spacedim>
+    unsigned int
+    Triangulation<dim,spacedim>::n_global_levels () const
+    {
+      return number_cache.n_global_levels;
+    }
+
+    template <int dim, int spacedim>
+    types::global_dof_index
+    Triangulation<dim,spacedim>::n_global_active_cells () const
+    {
+      return number_cache.n_global_active_cells;
+    }
+
+    template <int dim, int spacedim>
+    const std::vector<unsigned int> &
+    Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
+    {
+      return number_cache.n_locally_owned_active_cells;
+    }
+
+    template <int dim, int spacedim>
+    void
+    Triangulation<dim,spacedim>::update_number_cache ()
+    {
+      Assert (number_cache.n_locally_owned_active_cells.size()
+              ==
+              Utilities::MPI::n_mpi_processes (mpi_communicator),
+              ExcInternalError());
+
+      std::fill (number_cache.n_locally_owned_active_cells.begin(),
+                 number_cache.n_locally_owned_active_cells.end(),
+                 0);
+
+      if (this->n_levels() > 0)
+        for (typename Triangulation<dim,spacedim>::active_cell_iterator
+             cell = this->begin_active();
+             cell != this->end(); ++cell)
+          if (cell->subdomain_id() == my_subdomain)
+            ++number_cache.n_locally_owned_active_cells[my_subdomain];
+
+      unsigned int send_value
+        = number_cache.n_locally_owned_active_cells[my_subdomain];
+      MPI_Allgather (&send_value,
+                     1,
+                     MPI_UNSIGNED,
+                     &number_cache.n_locally_owned_active_cells[0],
+                     1,
+                     MPI_UNSIGNED,
+                     mpi_communicator);
+
+      number_cache.n_global_active_cells
+        = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
+                           number_cache.n_locally_owned_active_cells.end(),
+                           /* ensure sum is computed with correct data type: */
+                           static_cast<types::global_dof_index>(0));
+      number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), mpi_communicator);
+    }
+
     template <int dim, int spacedim>
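
For reference, the interface added by this patch could be exercised roughly as in the
minimal sketch below. This is not part of the commit: it assumes a deal.II build with
MPI enabled, the shared_tria.h header from this branch, and that mesh creation and
refinement call update_number_cache() so the cached counts are filled in. Note also
that, since the smoothing flag is not yet forwarded to the base-class constructor
here, the MeshSmoothing argument is accepted but may not take effect.

// Usage sketch (not part of this commit). Assumes deal.II with MPI and the
// parallel::shared::Triangulation from this branch.
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main (int argc, char *argv[])
{
  // Initialize MPI; it is finalized automatically when this object goes out of scope.
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv);

  // New constructor argument: a MeshSmoothing flag next to the communicator.
  // Whether it is honored depends on it being passed on to the base class.
  dealii::parallel::shared::Triangulation<2> triangulation
    (MPI_COMM_WORLD,
     dealii::Triangulation<2>::maximum_smoothing);

  dealii::GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);

  // The NumberCache queries copied verbatim from the distributed triangulation.
  std::cout << "process "
            << dealii::Utilities::MPI::this_mpi_process (MPI_COMM_WORLD)
            << " owns " << triangulation.n_locally_owned_active_cells ()
            << " of "   << triangulation.n_global_active_cells ()
            << " active cells; global levels: "
            << triangulation.n_global_levels () << std::endl;
}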