dof_handler, coupling, dsp, constraints, false);
SparsityTools::distribute_sparsity_pattern(
dsp,
- dof_handler.compute_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.locally_owned_dofs()),
mpi_communicator,
locally_relevant_dofs);
preconditioner_matrix.reinit(owned_partitioning,
std::vector<T>
all_gather(const MPI_Comm &comm, const T &object)
{
+ if (job_supports_mpi() == false)
+ return {object};
+
# ifndef DEAL_II_WITH_MPI
(void)comm;
std::vector<T> v(1, object);
copy_triangulation(
const dealii::Triangulation<dim, spacedim> &old_tria) override;
- /**
- * Return the number of active cells owned by each of the MPI processes
- * that contribute to this triangulation. The element of this vector
- * indexed by locally_owned_subdomain() equals the result of
- * n_locally_owned_active_cells().
- *
- * @note This function involves global communication!
- */
- std::vector<unsigned int>
- compute_n_locally_owned_active_cells_per_processor() const;
-
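// Migration sketch (illustration only, not part of this changeset): with the
// per-processor getter gone, callers can assemble the same vector themselves
// via Utilities::MPI::all_gather() from <deal.II/base/mpi.h>; `tria` stands
// for any parallel::TriangulationBase-derived triangulation.
const std::vector<unsigned int> n_owned_active_cells_per_proc =
  Utilities::MPI::all_gather(tria.get_communicator(),
                             tria.n_locally_owned_active_cells());
// The entry indexed by tria.locally_owned_subdomain() equals
// tria.n_locally_owned_active_cells(), as the removed documentation stated.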
/**
* Return the number of active cells in the triangulation that are locally
* owned, i.e. that have a subdomain_id equal to
const IndexSet &
locally_owned_mg_dofs(const unsigned int level) const;
- /**
- * Compute a vector with the locally owned DoFs of each processor.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * If you are only interested in the number of elements each processor owns
- * then compute_n_locally_owned_dofs_per_processor() is a better choice.
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
- * "sequential" means that either the whole program does not use MPI, or that
- * it uses MPI but only uses a single MPI process, or that there are multiple
- * MPI processes but the Triangulation on which this DoFHandler builds works
- * only on one MPI process.)
- */
- std::vector<IndexSet>
- compute_locally_owned_dofs_per_processor() const;
-
- /**
- * Compute a vector with the number of degrees of freedom each
- * processor that participates in this triangulation owns locally. The sum
- * of all these numbers equals the number of degrees of freedom that exist
- * globally, i.e. what n_dofs() returns.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * Each element of the vector returned by this function equals the number of
- * elements of the corresponding sets returned by
- * compute_locally_owned_dofs_per_processor().
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * equal to n_dofs(). (Here, "sequential" means that either the whole program
- * does not use MPI, or that it uses MPI but only uses a single MPI process,
- * or that there are multiple MPI processes but the Triangulation on which
- * this DoFHandler builds works only on one MPI process.)
- */
- std::vector<types::global_dof_index>
- compute_n_locally_owned_dofs_per_processor() const;
-
- /**
- * Compute a vector with the locally owned DoFs of each processor on
- * the given level @p level for geometric multigrid.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * that equals the IndexSet representing the entire range [0,n_dofs()]. (Here,
- * "sequential" means that either the whole program does not use MPI, or that
- * it uses MPI but only uses a single MPI process, or that there are multiple
- * MPI processes but the Triangulation on which this DoFHandler builds works
- * only on one MPI process.)
- */
- std::vector<IndexSet>
- compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
-
/**
* Return a vector that stores the locally owned DoFs of each processor.
*
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the index sets of all processors by default any more due to a possibly
* large memory footprint on many processors. As a consequence, this
- * function needs to call compute_locally_owned_dofs_per_processor() upon
- * the first invocation, including global communication. Use
- * compute_locally_owned_dofs_per_processor() instead if using up to a few
- * thousands of MPI ranks or some variant involving local communication with
- * more processors.
+ * function needs to call `Utilities::MPI::all_gather(comm,
+ * locally_owned_dofs())` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.locally_owned_dofs())` instead if using up to a few thousands
+ * of MPI ranks or some variant involving local communication with more
+ * processors.
*/
DEAL_II_DEPRECATED const std::vector<IndexSet> &
locally_owned_dofs_per_processor() const;
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the numbers of dofs of all processors by default any more due to a
* possibly large memory footprint on many processors. As a consequence,
- * this function needs to call compute_n_locally_owned_dofs_per_processor()
- * upon the first invocation, including global communication. Use
- * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
- * thousands of MPI ranks or some variant involving local communication with
- * more processors.
+ * this function needs to call `Utilities::MPI::all_gather(comm,
+ * n_locally_owned_dofs())` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.n_locally_owned_dofs())` instead if using up to a few thousands
+ * of MPI ranks or some variant involving local communication with more
+ * processors.
*/
DEAL_II_DEPRECATED const std::vector<types::global_dof_index> &
n_locally_owned_dofs_per_processor() const;
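// Migration sketch (illustration only): `dof_handler` and `comm` stand for a
// DoFHandler and the MPI communicator of its triangulation. The deprecated
// per-processor getters above reduce to one collective all_gather() each:
const std::vector<IndexSet> owned_dofs_per_proc =
  Utilities::MPI::all_gather(comm, dof_handler.locally_owned_dofs());
const std::vector<types::global_dof_index> n_owned_dofs_per_proc =
  Utilities::MPI::all_gather(comm, dof_handler.n_locally_owned_dofs());
// Like the functions they replace, these calls are collective and must be
// executed on every rank of `comm`.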
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the index sets of all processors by default any more due to a possibly
* large memory footprint on many processors. As a consequence, this
- * function needs to call compute_locally_owned_dofs_mg_per_processor() upon
- * the first invocation, including global communication. Use
- * compute_locally_owned_mg_dofs_per_processor() instead if using up to a few
+ * function needs to call `Utilities::MPI::all_gather(comm,
+ * locally_owned_mg_dofs(level))` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.locally_owned_mg_dofs(level))` instead if using up to a few
* thousands of MPI ranks or some variant involving local communication with
* more processors.
*/
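// Multigrid migration sketch (illustration only, `comm` as above): the level
// index sets are gathered level by level after distribute_mg_dofs():
std::vector<std::vector<IndexSet>> owned_mg_dofs_per_proc(
  dof_handler.get_triangulation().n_global_levels());
for (unsigned int level = 0; level < owned_mg_dofs_per_proc.size(); ++level)
  owned_mg_dofs_per_proc[level] =
    Utilities::MPI::all_gather(comm, dof_handler.locally_owned_mg_dofs(level));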
if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.n_locally_owned_dofs_per_processor =
- compute_n_locally_owned_dofs_per_processor();
+ number_cache.get_n_locally_owned_dofs_per_processor(comm);
}
return number_cache.n_locally_owned_dofs_per_processor;
}
if (number_cache.locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.locally_owned_dofs_per_processor =
- compute_locally_owned_dofs_per_processor();
+ number_cache.get_locally_owned_dofs_per_processor(comm);
}
return number_cache.locally_owned_dofs_per_processor;
}
if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
mg_number_cache[level].n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
mg_number_cache[level])
.locally_owned_dofs_per_processor =
- compute_locally_owned_mg_dofs_per_processor(level);
+ mg_number_cache[level].get_locally_owned_dofs_per_processor(comm);
}
return mg_number_cache[level].locally_owned_dofs_per_processor;
}
-template <int dim, int spacedim>
-std::vector<types::global_dof_index>
-DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
-{
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return number_cache.get_n_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-}
-
-
-
-template <int dim, int spacedim>
-std::vector<IndexSet>
-DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
-{
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return number_cache.get_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
-}
-
-
-
-template <int dim, int spacedim>
-std::vector<IndexSet>
-DoFHandler<dim, spacedim>::compute_locally_owned_mg_dofs_per_processor(
- const unsigned int level) const
-{
- Assert(level < this->get_triangulation().n_global_levels(),
- ExcMessage("The given level index exceeds the number of levels "
- "present in the triangulation"));
- Assert(
- mg_number_cache.size() == this->get_triangulation().n_global_levels(),
- ExcMessage(
- "The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return mg_number_cache[level].get_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return mg_number_cache[level].get_locally_owned_dofs_per_processor(
- MPI_COMM_SELF);
-}
-
-
-
template <int dim, int spacedim>
inline const FiniteElement<dim, spacedim> &
DoFHandler<dim, spacedim>::get_fe(const unsigned int index) const
const IndexSet &
locally_owned_dofs() const;
- /**
- * Compute a vector with the locally owned DoFs of each processor.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * If you are only interested in the number of elements each processor owns
- * then compute_n_locally_owned_dofs_per_processor() is a better choice.
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * that equals the IndexSet representing the entire range [0,n_dofs()].
- * (Here, "sequential" means that either the whole program does not use MPI,
- * or that it uses MPI but only uses a single MPI process, or that there are
- * multiple MPI processes but the Triangulation on which this DoFHandler
- * builds works only on one MPI process.)
- */
- std::vector<IndexSet>
- compute_locally_owned_dofs_per_processor() const;
-
- /**
- * Compute a vector with the number of degrees of freedom each
- * processor that participates in this triangulation owns locally. The sum
- * of all these numbers equals the number of degrees of freedom that exist
- * globally, i.e. what n_dofs() returns.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * Each element of the vector returned by this function equals the number of
- * elements of the corresponding sets returned by
- * compute_locally_owned_dofs_per_processor().
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * equal to n_dofs(). (Here, "sequential" means that either the whole
- * program does not use MPI, or that it uses MPI but only uses a single MPI
- * process, or that there are multiple MPI processes but the Triangulation
- * on which this DoFHandler builds works only on one MPI process.)
- */
- std::vector<types::global_dof_index>
- compute_n_locally_owned_dofs_per_processor() const;
-
/**
* Return a vector that stores the locally owned DoFs of each processor.
*
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the index sets of all processors by default any more due to a possibly
* large memory footprint on many processors. As a consequence, this
- * function needs to call compute_locally_owned_dofs_per_processor() upon
- * the first invocation, including global communication. Use
- * compute_locally_owned_dofs_per_processor() instead if using up to a few
- * thousands of MPI ranks or some variant involving local communication with
- * more processors.
+ * function needs to call `Utilities::MPI::all_gather(comm,
+ * locally_owned_dofs())` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.locally_owned_dofs())` instead if using up to a few thousands
+ * of MPI ranks or some variant involving local communication with more
+ * processors.
*/
DEAL_II_DEPRECATED const std::vector<IndexSet> &
locally_owned_dofs_per_processor() const;
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the numbers of dofs of all processors by default any more due to a
* possibly large memory footprint on many processors. As a consequence,
- * this function needs to call compute_n_locally_owned_dofs_per_processor()
- * upon the first invocation, including global communication. Use
- * compute_n_locally_owned_dofs_per_processor() instead if using up to a few
+ * this function needs to call `Utilities::MPI::all_gather(comm,
+ * n_locally_owned_dofs())` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.n_locally_owned_dofs())` instead if using up to a few
* thousands of MPI ranks or some variant involving local communication with
* more processors.
*/
const IndexSet &
locally_owned_mg_dofs(const unsigned int level) const;
- /**
- * Compute a vector with the locally owned DoFs of each processor on
- * the given level @p level for geometric multigrid.
- *
- * This function involves global communication via the @p MPI_Allgather
- * function, so it must be called on all processors participating in the MPI
- * communicator underlying the triangulation.
- *
- * If this is a sequential DoFHandler, then the vector has a single element
- * that equals the IndexSet representing the entire range [0,n_dofs()].
- * (Here, "sequential" means that either the whole program does not use MPI,
- * or that it uses MPI but only uses a single MPI process, or that there are
- * multiple MPI processes but the Triangulation on which this DoFHandler
- * builds works only on one MPI process.)
- */
- std::vector<IndexSet>
- compute_locally_owned_mg_dofs_per_processor(const unsigned int level) const;
-
/**
* Return a vector that stores the locally owned DoFs of each processor on
* the given level @p level.
* @deprecated As of deal.II version 9.2, we do not populate a vector with
* the index sets of all processors by default any more due to a possibly
* large memory footprint on many processors. As a consequence, this
- * function needs to call compute_locally_owned_dofs_mg_per_processor() upon
- * the first invocation, including global communication. Use
- * compute_locally_owned_mg_dofs_per_processor() instead if using up to a
- * few thousands of MPI ranks or some variant involving local communication
- * with more processors.
+ * function needs to call `Utilities::MPI::all_gather(comm,
+ * locally_owned_mg_dofs(level))` upon the first invocation, including global
+ * communication. Use `Utilities::MPI::all_gather(comm,
+ * dof_handler.locally_owned_mg_dofs(level))` instead if using up to a few
+ * thousands of MPI ranks or some variant involving local communication with
+ * more processors.
*/
DEAL_II_DEPRECATED const std::vector<IndexSet> &
locally_owned_mg_dofs_per_processor(const unsigned int level) const;
if (number_cache.n_locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.n_locally_owned_dofs_per_processor =
- compute_n_locally_owned_dofs_per_processor();
+ number_cache.get_n_locally_owned_dofs_per_processor(comm);
}
return number_cache.n_locally_owned_dofs_per_processor;
}
if (number_cache.locally_owned_dofs_per_processor.empty() &&
number_cache.n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
number_cache)
.locally_owned_dofs_per_processor =
- compute_locally_owned_dofs_per_processor();
+ number_cache.get_locally_owned_dofs_per_processor(comm);
}
return number_cache.locally_owned_dofs_per_processor;
}
- template <int dim, int spacedim>
- std::vector<types::global_dof_index>
- DoFHandler<dim, spacedim>::compute_n_locally_owned_dofs_per_processor() const
- {
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return number_cache.get_n_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
- }
-
-
-
- template <int dim, int spacedim>
- std::vector<IndexSet>
- DoFHandler<dim, spacedim>::compute_locally_owned_dofs_per_processor() const
- {
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return number_cache.get_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
- }
-
-
-
template <int dim, int spacedim>
const IndexSet &
DoFHandler<dim, spacedim>::locally_owned_mg_dofs(
if (mg_number_cache[level].locally_owned_dofs_per_processor.empty() &&
mg_number_cache[level].n_global_dofs > 0)
{
+ MPI_Comm comm;
+
+ const parallel::TriangulationBase<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ comm = tr->get_communicator();
+ else
+ comm = MPI_COMM_SELF;
+
const_cast<dealii::internal::DoFHandlerImplementation::NumberCache &>(
mg_number_cache[level])
.locally_owned_dofs_per_processor =
- compute_locally_owned_mg_dofs_per_processor(level);
+ mg_number_cache[level].get_locally_owned_dofs_per_processor(comm);
}
return mg_number_cache[level].locally_owned_dofs_per_processor;
}
- template <int dim, int spacedim>
- std::vector<IndexSet>
- DoFHandler<dim, spacedim>::compute_locally_owned_mg_dofs_per_processor(
- const unsigned int level) const
- {
- Assert(false, ExcNotImplemented());
- (void)level;
- Assert(level < this->get_triangulation().n_global_levels(),
- ExcMessage("The given level index exceeds the number of levels "
- "present in the triangulation"));
- const parallel::TriangulationBase<dim, spacedim> *tr =
- (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &this->get_triangulation()));
- if (tr != nullptr)
- return mg_number_cache[level].get_locally_owned_dofs_per_processor(
- tr->get_communicator());
- else
- return mg_number_cache[level].get_locally_owned_dofs_per_processor(
- MPI_COMM_SELF);
- }
-
-
-
template <int dim, int spacedim>
inline const FiniteElement<dim, spacedim> &
DoFHandler<dim, spacedim>::get_fe(const unsigned int number) const
return number_cache.n_global_active_cells;
}
- template <int dim, int spacedim>
- std::vector<unsigned int>
- TriangulationBase<dim, spacedim>::
- compute_n_locally_owned_active_cells_per_processor() const
- {
- ;
-#ifdef DEAL_II_WITH_MPI
- std::vector<unsigned int> n_locally_owned_active_cells_per_processor(
- Utilities::MPI::n_mpi_processes(this->mpi_communicator), 0);
-
- if (this->n_levels() > 0)
- {
- const int ierr =
- MPI_Allgather(&number_cache.n_locally_owned_active_cells,
- 1,
- MPI_UNSIGNED,
- n_locally_owned_active_cells_per_processor.data(),
- 1,
- MPI_UNSIGNED,
- this->mpi_communicator);
- AssertThrowMPI(ierr);
- }
-
- return n_locally_owned_active_cells_per_processor;
-#else
- return {number_cache.n_locally_owned_active_cells};
-#endif
- }
-
template <int dim, int spacedim>
const MPI_Comm &
TriangulationBase<dim, spacedim>::get_communicator() const
NumberCache::get_n_locally_owned_dofs_per_processor(
const MPI_Comm mpi_communicator) const
{
- const unsigned int n_procs =
- Utilities::MPI::job_supports_mpi() ?
- Utilities::MPI::n_mpi_processes(mpi_communicator) :
- 1;
if (n_global_dofs == 0)
return std::vector<types::global_dof_index>();
else if (n_locally_owned_dofs_per_processor.empty() == false)
{
- AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs);
+ AssertDimension(n_locally_owned_dofs_per_processor.size(),
+ (Utilities::MPI::job_supports_mpi() ?
+ Utilities::MPI::n_mpi_processes(mpi_communicator) :
+ 1));
return n_locally_owned_dofs_per_processor;
}
else
{
- std::vector<types::global_dof_index> result(n_procs,
- n_locally_owned_dofs);
-#ifdef DEAL_II_WITH_MPI
- if (n_procs > 1)
- MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- result.data(),
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- mpi_communicator);
-#endif
- return result;
+ return Utilities::MPI::all_gather(mpi_communicator,
+ n_locally_owned_dofs);
}
}
const MPI_Comm mpi_communicator) const
{
AssertDimension(locally_owned_dofs.size(), n_global_dofs);
- const unsigned int n_procs =
- Utilities::MPI::job_supports_mpi() ?
- Utilities::MPI::n_mpi_processes(mpi_communicator) :
- 1;
if (n_global_dofs == 0)
return std::vector<IndexSet>();
else if (locally_owned_dofs_per_processor.empty() == false)
{
- AssertDimension(locally_owned_dofs_per_processor.size(), n_procs);
+ AssertDimension(locally_owned_dofs_per_processor.size(),
+ (Utilities::MPI::job_supports_mpi() ?
+ Utilities::MPI::n_mpi_processes(mpi_communicator) :
+ 1));
return locally_owned_dofs_per_processor;
}
else
{
- std::vector<IndexSet> locally_owned_dofs_per_processor(
- n_procs, locally_owned_dofs);
-
-#ifdef DEAL_II_WITH_MPI
- if (n_procs > 1)
- {
- // this step is substantially more complicated because indices
- // might be distributed arbitrarily among the processors. Here we
- // have to serialize the IndexSet objects and shop them across the
- // network.
- std::vector<char> my_data;
- {
-# ifdef DEAL_II_WITH_ZLIB
-
- boost::iostreams::filtering_ostream out;
- out.push(boost::iostreams::gzip_compressor(
- boost::iostreams::gzip_params(
- boost::iostreams::gzip::best_speed)));
- out.push(boost::iostreams::back_inserter(my_data));
-
- boost::archive::binary_oarchive archive(out);
-
- archive << locally_owned_dofs;
- out.flush();
-# else
- std::ostringstream out;
- boost::archive::binary_oarchive archive(out);
- archive << locally_owned_dofs;
- const std::string &s = out.str();
- my_data.reserve(s.size());
- my_data.assign(s.begin(), s.end());
-# endif
- }
-
- // determine maximum size of IndexSet
- const unsigned int max_size =
- Utilities::MPI::max(my_data.size(), mpi_communicator);
-
- // as the MPI_Allgather call will be reading max_size elements,
- // and as this may be past the end of my_data, we need to increase
- // the size of the local buffer. This is filled with zeros.
- my_data.resize(max_size);
-
- std::vector<char> buffer(max_size * n_procs);
- const int ierr = MPI_Allgather(my_data.data(),
- max_size,
- MPI_BYTE,
- buffer.data(),
- max_size,
- MPI_BYTE,
- mpi_communicator);
- AssertThrowMPI(ierr);
-
- for (unsigned int i = 0; i < n_procs; ++i)
- if (i == Utilities::MPI::this_mpi_process(mpi_communicator))
- locally_owned_dofs_per_processor[i] = locally_owned_dofs;
- else
- {
- // copy the data previously received into a stringstream
- // object and then read the IndexSet from it
- std::string decompressed_buffer;
-
- // first decompress the buffer
- {
-# ifdef DEAL_II_WITH_ZLIB
-
- boost::iostreams::filtering_ostream decompressing_stream;
- decompressing_stream.push(
- boost::iostreams::gzip_decompressor());
- decompressing_stream.push(
- boost::iostreams::back_inserter(decompressed_buffer));
-
- decompressing_stream.write(&buffer[i * max_size],
- max_size);
-# else
- decompressed_buffer.assign(&buffer[i * max_size],
- max_size);
-# endif
- }
-
- // then restore the object from the buffer
- std::istringstream in(decompressed_buffer);
- boost::archive::binary_iarchive archive(in);
-
- archive >> locally_owned_dofs_per_processor[i];
- }
- }
-#endif
- return locally_owned_dofs_per_processor;
+ return Utilities::MPI::all_gather(mpi_communicator,
+ locally_owned_dofs);
}
}
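// Usage sketch (illustration only): Utilities::MPI::all_gather() serializes
// and deserializes general objects such as IndexSet internally, which is why
// the hand-rolled MPI_Allgather plus boost archive code removed above
// collapses to a single call; `comm` is a placeholder communicator.
const std::vector<IndexSet> owned_dofs_per_proc =
  Utilities::MPI::all_gather(comm, locally_owned_dofs);
// owned_dofs_per_proc[r] is the IndexSet contributed by MPI rank r; the
// entry for the calling rank equals its own locally_owned_dofs.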
AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
- AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
std::vector<types::global_dof_index>(1, N),
ExcInternalError());
- AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
std::vector<IndexSet>(1, all),
ExcInternalError());
}
AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
- AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
std::vector<types::global_dof_index>(1, N),
ExcInternalError());
- AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
std::vector<IndexSet>(1, all),
ExcInternalError());
}
AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
- AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
std::vector<types::global_dof_index>(1, N),
ExcInternalError());
- AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
std::vector<IndexSet>(1, all),
ExcInternalError());
}
Assert(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
Assert(dof_handler.locally_owned_dofs() == all, ExcInternalError());
- Assert(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+ Assert(Utilities::MPI::all_gather(MPI_COMM_SELF,
+ dof_handler.n_locally_owned_dofs()) ==
std::vector<types::global_dof_index>(1, N),
ExcInternalError());
- Assert(dof_handler.compute_locally_owned_dofs_per_processor() ==
+ Assert(Utilities::MPI::all_gather(MPI_COMM_SELF,
+ dof_handler.locally_owned_dofs()) ==
std::vector<IndexSet>(1, all),
ExcInternalError());
dof_handler.clear();
deallog << "those should be zero: " << dof_handler.n_locally_owned_dofs()
<< " "
- << dof_handler.compute_n_locally_owned_dofs_per_processor().size()
+ << Utilities::MPI::all_gather(MPI_COMM_SELF,
+ dof_handler.n_locally_owned_dofs())
+ .size()
<< " " << dof_handler.n_dofs() << std::endl;
}
}
DEAL:1d::50
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
DEAL:1d::82
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
DEAL:1d::90
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
DEAL:1d::90
-DEAL:1d::those should be zero: 0 0 0
+DEAL:1d::those should be zero: 0 1 0
DEAL:2d::816
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
DEAL:2d::1264
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
DEAL:2d::2192
-DEAL:2d::those should be zero: 0 0 0
+DEAL:2d::those should be zero: 0 1 0
DEAL:3d::13524
-DEAL:3d::those should be zero: 0 0 0
+DEAL:3d::those should be zero: 0 1 0
DEAL:3d::42768
-DEAL:3d::those should be zero: 0 0 0
+DEAL:3d::those should be zero: 0 1 0
MPI_COMM_WORLD));
SparsityTools::distribute_sparsity_pattern(
sp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
MPI_COMM_WORLD,
relevant);
sp.compress();
AssertThrow(dof_handler.n_locally_owned_dofs() == N, ExcInternalError());
AssertThrow(dof_handler.locally_owned_dofs() == all, ExcInternalError());
- AssertThrow(dof_handler.compute_n_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.n_locally_owned_dofs()) ==
std::vector<types::global_dof_index>(1, N),
ExcInternalError());
- AssertThrow(dof_handler.compute_locally_owned_dofs_per_processor() ==
+ AssertThrow(Utilities::MPI::all_gather(
+ MPI_COMM_SELF, dof_handler.locally_owned_dofs()) ==
std::vector<IndexSet>(1, all),
ExcInternalError());
}
solver.solve(mf, sol, rhs, PreconditionIdentity());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dof.locally_owned_dofs());
// gather all data at root
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
{
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.refine_global(1);
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.refine_global(1);
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
tr.repartition();
const auto n_locally_owned_active_cells_per_processor =
- tr.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(tr.get_communicator(),
+ tr.n_locally_owned_active_cells());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
for (unsigned int p = 0; p < numproc; ++p)
deallog << "processor " << p << ": "
constraints.print(deallog.get_file_stream());
deallog << "consistent? "
<< constraints.is_consistent_in_parallel(
- dof_handler.compute_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs()),
locally_active_dofs,
MPI_COMM_WORLD,
true)
deallog << N << std::endl;
Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
- for (unsigned int i = 0;
- i < dof_handler.compute_n_locally_owned_dofs_per_processor().size();
- ++i)
- AssertThrow(
- dof_handler.compute_n_locally_owned_dofs_per_processor()[i] <= N,
- ExcInternalError());
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
+ for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
+ ++i)
+ AssertThrow(n_locally_owned_dofs_per_processor[i] <= N,
+ ExcInternalError());
AssertThrow(std::accumulate(n_locally_owned_dofs_per_processor.begin(),
n_locally_owned_dofs_per_processor.end(),
0U) == N,
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N), really_all(N);
// poor man's union operation
for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+ pcout << Utilities::MPI::all_gather(
+ MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
<< '+';
pcout << std::endl;
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+ pcout << Utilities::MPI::all_gather(
+ MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
<< '+';
pcout << std::endl;
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
deallog << "n_locally_owned_dofs_per_processor:" << std::endl;
for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size(); ++i)
deallog << n_locally_owned_dofs_per_processor[i] << std::endl;
deallog << "level " << lvl << ":" << std::endl;
const std::vector<IndexSet> vec =
- dofh.compute_locally_owned_mg_dofs_per_processor(lvl);
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dofh.locally_owned_mg_dofs(lvl));
for (unsigned int i = 0; i < vec.size(); ++i)
deallog << vec[i].n_elements() << std::endl;
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 0)
{
deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
dofh.distribute_dofs(fe);
std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 0)
{
deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
dofh.distribute_dofs(fe);
std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 0)
{
deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
dofh.distribute_dofs(fe);
std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 0)
{
deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
DoFTools::extract_locally_active_dofs(dofh, dof_set);
const std::vector<IndexSet> owned_dofs =
- dofh.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
{
dof_set.print(deallog);
data_out.build_patches();
std::vector<types::global_dof_index> n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 0)
{
for (unsigned int i = 0; i < n_locally_owned_dofs_per_processor.size();
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dofh.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.n_locally_owned_dofs());
if (myid == 1)
{
deallog << "dofh.n_dofs() " << n_locally_owned_dofs_per_processor
constraints.close();
const std::vector<IndexSet> &locally_owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet locally_active_dofs;
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
constraints.close();
const std::vector<IndexSet> &locally_owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet locally_active_dofs;
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
constraints.close();
const std::vector<IndexSet> &locally_owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet locally_active_dofs;
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
constraints.print(deallog.get_file_stream());
const std::vector<IndexSet> locally_owned_dofs_vector =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet locally_active_dofs;
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs_vector,
constraints.close();
const std::vector<IndexSet> &locally_owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet locally_active_dofs;
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
AssertThrow(constraints.is_consistent_in_parallel(locally_owned_dofs,
DoFTools::extract_locally_active_dofs(dof_handler, locally_active_dofs);
const std::vector<IndexSet> locally_owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
std::map<types::global_dof_index, Point<dim>> supportPoints;
DoFTools::map_dofs_to_support_points(MappingQ1<dim>(),
complete_renumbering.begin());
unsigned int offset = renumbering.size();
const std::vector<IndexSet> dofs_per_proc =
- dofh.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
for (unsigned int i = 1; i < nprocs; ++i)
{
if (myid == i)
complete_renumbering.begin());
unsigned int offset = renumbering.size();
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dofh.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD, dofh.locally_owned_dofs());
for (unsigned int i = 1; i < nprocs; ++i)
{
if (myid == i)
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+ pcout << Utilities::MPI::all_gather(
+ MPI_COMM_WORLD, dof_handler.n_locally_owned_dofs())[i]
<< '+';
pcout << std::endl;
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+ pcout << Utilities::MPI::all_gather(
+ mpi_communicator, dof_handler.n_locally_owned_dofs())[i]
<< '+';
pcout << std::endl;
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
<< " ";
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
system_matrix.reinit(
mpi_communicator,
csp,
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
- dof_handler.compute_n_locally_owned_dofs_per_processor(),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
+ Utilities::MPI::all_gather(mpi_communicator,
+ dof_handler.n_locally_owned_dofs()),
Utilities::MPI::this_mpi_process(mpi_communicator));
}
<< triangulation.n_global_active_cells() << std::endl
<< " ";
const auto n_locally_owned_active_cells_per_processor =
- triangulation.compute_n_locally_owned_active_cells_per_processor();
+ Utilities::MPI::all_gather(
+ triangulation.get_communicator(),
+ triangulation.n_locally_owned_active_cells());
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
for (unsigned int i = 0;
i < Utilities::MPI::n_mpi_processes(mpi_communicator);
++i)
- pcout << dof_handler.compute_n_locally_owned_dofs_per_processor()[i]
+ pcout << Utilities::MPI::all_gather(
+ mpi_communicator, dof_handler.n_locally_owned_dofs())[i]
<< '+';
pcout << std::endl;
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
shared_dofs.print(deallog.get_file_stream());
std::vector<IndexSet> shared_dofs_per_proc =
- shared_dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ shared_dof_handler.locally_owned_dofs());
std::vector<IndexSet> distributed_dofs_per_proc =
- distributed_dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ distributed_dof_handler.locally_owned_dofs());
for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
++i)
Assert(shared_dofs_per_proc[i] == distributed_dofs_per_proc[i],
<< dof_handler.n_locally_owned_dofs() << std::endl;
std::vector<IndexSet> shared_dofs_per_proc =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
for (unsigned int i = 0; i < Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
++i)
shared_dofs_per_proc[i].print(deallog.get_file_stream());
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
const std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(dof_handler.n_locally_owned_dofs() ==
n_locally_owned_dofs_per_processor[triangulation
.locally_owned_subdomain()],
ExcInternalError());
const std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
deallog << "n_locally_owned_dofs_per_processor: ";
std::vector<types::global_dof_index> v =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
unsigned int sum = 0;
for (unsigned int i = 0; i < v.size(); ++i)
{
Assert(std::accumulate(v.begin(), v.end(), 0U) == N, ExcInternalError());
std::vector<IndexSet> locally_owned_dofs_per_processor =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < locally_owned_dofs_per_processor.size(); ++i)
{
for (unsigned int lvl = 0; lvl < n_levels; ++lvl)
{
std::vector<IndexSet> dof_index_per_proc =
- dof_handler.compute_locally_owned_mg_dofs_per_processor(lvl);
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_mg_dofs(lvl));
for (unsigned int i = 0; i < dof_index_per_proc.size(); ++i)
dof_index_per_proc[i].print(deallog);
deallog << "n_locally_owned_dofs_per_processor: ";
const std::vector<types::global_dof_index> v =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
unsigned int sum = 0;
for (unsigned int i = 0; i < v.size(); ++i)
{
dof_handler.locally_owned_dofs().write(deallog.get_file_stream());
deallog << std::endl;
- Assert(dof_handler.n_locally_owned_dofs() ==
- dof_handler.compute_n_locally_owned_dofs_per_processor()
- [triangulation.locally_owned_subdomain()],
- ExcInternalError());
+ Assert(
+ dof_handler.n_locally_owned_dofs() ==
+ Utilities::MPI::all_gather(
+ MPI_COMM_WORLD,
+ dof_handler
+ .n_locally_owned_dofs())[triangulation.locally_owned_subdomain()],
+ ExcInternalError());
Assert(dof_handler.n_locally_owned_dofs() ==
dof_handler.locally_owned_dofs().n_elements(),
ExcInternalError());
Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
const std::vector<types::global_dof_index> n_owned_dofs =
- dof_handler.compute_n_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.n_locally_owned_dofs());
Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N,
ExcInternalError());
const std::vector<IndexSet> owned_dofs =
- dof_handler.compute_locally_owned_dofs_per_processor();
+ Utilities::MPI::all_gather(MPI_COMM_WORLD,
+ dof_handler.locally_owned_dofs());
IndexSet all(N);
for (unsigned int i = 0; i < owned_dofs.size(); ++i)
{