+Changed: The parallel::TriangulationBase class no longer stores the number of
+locally owned active cells of all MPI processes. Instead, this information is
+computed on demand by calling the new function
+parallel::TriangulationBase::compute_n_locally_owned_active_cells_per_processor().
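+As a sketch (with @p tria denoting such a triangulation), the replacement is
+@code
+  const std::vector<unsigned int> n_cells_per_rank =
+    tria.compute_n_locally_owned_active_cells_per_processor();
+@endcode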
+<br>
+(Peter Munch, 2019/09/20)
// ---------------------------------------------------------------------
//
-// Copyright (C) 2008 - 2018 by the deal.II authors
+// Copyright (C) 2008 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
* that contribute to this triangulation. The element of this vector
* indexed by locally_owned_subdomain() equals the result of
* n_locally_owned_active_cells().
+ *
+ * @note This function involves global communication!
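+   *
+   * A minimal usage sketch (here, @p tria stands for any triangulation
+   * derived from this class):
+   * @code
+   *   const std::vector<unsigned int> n_cells_per_rank =
+   *     tria.compute_n_locally_owned_active_cells_per_processor();
+   *   // n_cells_per_rank[tria.locally_owned_subdomain()] equals
+   *   // tria.n_locally_owned_active_cells().
+   * @endcode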
*/
-    const std::vector<unsigned int> &
-    n_locally_owned_active_cells_per_processor() const;
+    std::vector<unsigned int>
+    compute_n_locally_owned_active_cells_per_processor() const;
 
/**
* Return the number of active cells in the triangulation that are locally
struct NumberCache
{
/**
- * This vector stores the number of locally owned active cells per MPI
- * rank.
+ * Number of locally owned active cells of this MPI rank.
*/
- std::vector<unsigned int> n_locally_owned_active_cells;
+ unsigned int n_locally_owned_active_cells;
/**
* The total number of active cells (sum of @p
* n_locally_owned_active_cells).
ExcMessage("You compiled deal.II without MPI support, for "
"which parallel::TriangulationBase is not available."));
#endif
- number_cache.n_locally_owned_active_cells.resize(n_subdomains);
}
this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
MemoryConsumption::memory_consumption(mpi_communicator) +
MemoryConsumption::memory_consumption(my_subdomain) +
- MemoryConsumption::memory_consumption(
- number_cache.n_locally_owned_active_cells) +
MemoryConsumption::memory_consumption(
number_cache.n_global_active_cells) +
MemoryConsumption::memory_consumption(number_cache.n_global_levels);
template <int dim, int spacedim>
TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
-    : n_global_active_cells(0)
+    : n_locally_owned_active_cells(0)
+    , n_global_active_cells(0)
, n_global_levels(0)
{}
unsigned int
TriangulationBase<dim, spacedim>::n_locally_owned_active_cells() const
{
- return number_cache.n_locally_owned_active_cells[my_subdomain];
+ return number_cache.n_locally_owned_active_cells;
}
template <int dim, int spacedim>
}
template <int dim, int spacedim>
- const std::vector<unsigned int> &
- TriangulationBase<dim, spacedim>::n_locally_owned_active_cells_per_processor()
- const
+ std::vector<unsigned int>
+ TriangulationBase<dim, spacedim>::
+ compute_n_locally_owned_active_cells_per_processor() const
{
- return number_cache.n_locally_owned_active_cells;
+#ifdef DEAL_II_WITH_MPI
+ std::vector<unsigned int> n_locally_owned_active_cells_per_processor(
+ Utilities::MPI::n_mpi_processes(this->mpi_communicator), 0);
+
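+      // All-gather the rank-local count: every rank contributes one
+      // unsigned int and receives the counts of all ranks in rank order.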
+ if (this->n_levels() > 0)
+ {
+ const int ierr =
+ MPI_Allgather(&number_cache.n_locally_owned_active_cells,
+ 1,
+ MPI_UNSIGNED,
+ n_locally_owned_active_cells_per_processor.data(),
+ 1,
+ MPI_UNSIGNED,
+ this->mpi_communicator);
+ AssertThrowMPI(ierr);
+ }
+
+ return n_locally_owned_active_cells_per_processor;
+#else
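+  // Without MPI there is only one process, so the "per processor" vector
+  // consists of the single locally owned count.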
+ return {number_cache.n_locally_owned_active_cells};
+#endif
}
template <int dim, int spacedim>
void
TriangulationBase<dim, spacedim>::update_number_cache()
{
- Assert(number_cache.n_locally_owned_active_cells.size() ==
- Utilities::MPI::n_mpi_processes(this->mpi_communicator),
- ExcInternalError());
-
- std::fill(number_cache.n_locally_owned_active_cells.begin(),
- number_cache.n_locally_owned_active_cells.end(),
- 0);
-
number_cache.ghost_owners.clear();
number_cache.level_ghost_owners.clear();
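+    // The number cache no longer stores a per-processor vector; only the
+    // number of cells owned by the present rank is kept.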
+ number_cache.n_locally_owned_active_cells = 0;
if (this->n_levels() == 0)
{
cell != this->end();
++cell)
if (cell->subdomain_id() == my_subdomain)
- ++number_cache.n_locally_owned_active_cells[my_subdomain];
-
- unsigned int send_value =
- number_cache.n_locally_owned_active_cells[my_subdomain];
- const int ierr =
- MPI_Allgather(&send_value,
- 1,
- MPI_UNSIGNED,
- number_cache.n_locally_owned_active_cells.data(),
- 1,
- MPI_UNSIGNED,
- this->mpi_communicator);
- AssertThrowMPI(ierr);
+ ++number_cache.n_locally_owned_active_cells;
number_cache.n_global_active_cells =
- std::accumulate(number_cache.n_locally_owned_active_cells.begin(),
- number_cache.n_locally_owned_active_cells.end(),
- /* ensure sum is computed with correct data type:*/
- static_cast<types::global_dof_index>(0));
+      Utilities::MPI::sum(
+        // ensure the sum is computed with a large enough data type:
+        static_cast<types::global_dof_index>(
+          number_cache.n_locally_owned_active_cells),
+        this->mpi_communicator);
number_cache.n_global_levels =
Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
}
// (roughly) equal between all processors
tr.repartition();
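+  // compute_n_locally_owned_active_cells_per_processor() is a collective
+  // operation, so call it on all ranks, not only on rank 0 below.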
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
}
std::placeholders::_2));
tr.repartition();
-
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
}
tr.signals.cell_weight.disconnect_all_slots();
tr.repartition();
-
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
}
std::bind(&cell_weight<dim>, std::placeholders::_1, std::placeholders::_2));
tr.refine_global(1);
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
}
tr.repartition();
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
// let each processor sum up its weights
tr.refine_global(1);
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
// let each processor sum up its weights
tr.repartition();
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
// let each processor sum up its weights
std::bind(&cell_weight<dim>, std::placeholders::_1, std::placeholders::_2));
tr.repartition();
+ const auto n_locally_owned_active_cells_per_processor =
+ tr.compute_n_locally_owned_active_cells_per_processor();
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
- << tr.n_locally_owned_active_cells_per_processor()[p]
+ << n_locally_owned_active_cells_per_processor[p]
<< " locally owned active cells" << std::endl;
// let each processor sum up its weights
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
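+    // Collective call: all ranks must participate, even though only
+    // rank 0 actually prints through pcout.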
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()
pcout << " Number of active cells: "
<< triangulation.n_global_active_cells() << std::endl
<< " ";
+ const auto n_locally_owned_active_cells_per_processor =
+ triangulation.compute_n_locally_owned_active_cells_per_processor();
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << triangulation.n_locally_owned_active_cells_per_processor()[i]
- << '+';
+ pcout << n_locally_owned_active_cells_per_processor[i] << '+';
pcout << std::endl;
pcout << " Number of degrees of freedom: " << dof_handler.n_dofs()