Do not store the index sets of all processors on each processor.
Fill them on demand.
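The pattern applied throughout is to replace an MPI_Allgather of all per-process counts, followed by a local partial sum, with a single MPI_Exscan that yields only this process's offset; per-process memory drops from O(#ranks) to O(1). A minimal, self-contained sketch of that pattern (counts and names are illustrative only, not part of the library):

#include <mpi.h>

#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Hypothetical per-process count, e.g. the number of locally owned DoFs.
  unsigned int local_count = 10 * (rank + 1);

  // Exclusive prefix sum: rank r receives the sum of the counts on ranks
  // 0..r-1. Rank 0 receives no contribution, so 'offset' must be
  // initialized to zero beforehand.
  unsigned int offset = 0;
  MPI_Exscan(&local_count, &offset, 1, MPI_UNSIGNED, MPI_SUM, MPI_COMM_WORLD);

  // Each rank now knows its own half-open index range without ever
  // storing one entry per process.
  std::printf("rank %d owns [%u, %u)\n", rank, offset, offset + local_count);

  MPI_Finalize();
}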
* MPI processes but the Triangulation on which this DoFHandler builds works
* only on one MPI process.)
*/
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
locally_owned_dofs_per_processor() const;
/**
* or that there are multiple MPI processes but the Triangulation on which
* this DoFHandler builds works only on one MPI process.)
*/
- const std::vector<types::global_dof_index> &
+ std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor() const;
/**
* MPI processes but the Triangulation on which this DoFHandler builds works
* only on one MPI process.)
*/
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
locally_owned_mg_dofs_per_processor(const unsigned int level) const;
/**
template <int dim, int spacedim>
-const std::vector<types::global_dof_index> &
+std::vector<types::global_dof_index>
DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
{
- return number_cache.n_locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return number_cache.get_n_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
}
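Because these accessors now return by value and may communicate, callers should fetch the array once and reuse it rather than calling the accessor inside a loop (the same pattern the test updates at the end of this patch follow). A sketch under that assumption:

template <int dim>
void print_dof_counts(const DoFHandler<dim> &dof_handler)
{
  // Fetch once: each call may now trigger an MPI_Allgather internally.
  const std::vector<types::global_dof_index> counts =
    dof_handler.n_locally_owned_dofs_per_processor();
  for (const types::global_dof_index c : counts)
    deallog << c << ' ';
  deallog << std::endl;
}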
template <int dim, int spacedim>
-const std::vector<IndexSet> &
+std::vector<IndexSet>
DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
{
- return number_cache.locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return number_cache.get_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
}
template <int dim, int spacedim>
-const std::vector<IndexSet> &
+std::vector<IndexSet>
DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
const unsigned int level) const
{
Assert(mg_number_cache.size() == this->get_triangulation().n_global_levels(),
ExcMessage(
"The level dofs are not set up properly! Did you call distribute_mg_dofs()?"));
- return mg_number_cache[level].locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+ MPI_COMM_SELF);
}
#include <deal.II/base/config.h>
#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
#include <vector>
void
clear();
+ /**
+     * Return a representation of @p n_locally_owned_dofs_per_processor,
+     * either by directly returning the array in case it has been set up,
+     * or by accumulating the information over all processors. The latter
+     * case involves global communication and is typically expensive to set
+     * up because it invokes MPI_Allgather.
+ */
+ std::vector<types::global_dof_index>
+ get_n_locally_owned_dofs_per_processor(
+ const MPI_Comm mpi_communicator) const;
+
+ /**
+     * Return a representation of @p locally_owned_dofs_per_processor,
+     * either by directly returning the array of IndexSet fields in case
+     * it has been set up, or by accumulating the information over all
+     * processors. The latter case involves global communication and is
+     * typically expensive to set up because it invokes MPI_Allgather.
+ */
+ std::vector<IndexSet>
+ get_locally_owned_dofs_per_processor(
+ const MPI_Comm mpi_communicator) const;
+
/**
* Total number of dofs, accumulated over all processors that may
* participate on this mesh.
* processes but the Triangulation on which this DoFHandler builds
* works only on one MPI process.)
*/
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
locally_owned_dofs_per_processor() const;
/**
* process, or that there are multiple MPI processes but the Triangulation
* on which this DoFHandler builds works only on one MPI process.)
*/
- const std::vector<types::global_dof_index> &
+ std::vector<types::global_dof_index>
n_locally_owned_dofs_per_processor() const;
/**
* support multilevel methods yet, this function throws an exception
* ExcNotImplemented() independent of its argument.
*/
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
locally_owned_mg_dofs_per_processor(const unsigned int level) const;
/**
template <int dim, int spacedim>
- const std::vector<types::global_dof_index> &
+ std::vector<types::global_dof_index>
DoFHandler<dim, spacedim>::n_locally_owned_dofs_per_processor() const
{
- return number_cache.n_locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return number_cache.get_n_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return number_cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
}
template <int dim, int spacedim>
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
DoFHandler<dim, spacedim>::locally_owned_dofs_per_processor() const
{
- return number_cache.locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return number_cache.get_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return number_cache.get_locally_owned_dofs_per_processor(MPI_COMM_SELF);
}
template <int dim, int spacedim>
- const std::vector<IndexSet> &
+ std::vector<IndexSet>
DoFHandler<dim, spacedim>::locally_owned_mg_dofs_per_processor(
const unsigned int level) const
{
(void)level;
Assert(level < this->get_triangulation().n_global_levels(),
ExcMessage("invalid level in locally_owned_mg_dofs_per_processor"));
- return mg_number_cache[0].locally_owned_dofs_per_processor;
+ const parallel::Triangulation<dim, spacedim> *tr =
+ (dynamic_cast<const parallel::Triangulation<dim, spacedim> *>(
+ &this->get_triangulation()));
+ if (tr != nullptr)
+ return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+ tr->get_communicator());
+ else
+ return mg_number_cache[level].get_locally_owned_dofs_per_processor(
+ MPI_COMM_SELF);
}
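With a serial Triangulation the accessors above dispatch to MPI_COMM_SELF, so the per-processor arrays collapse to a single entry. A hedged, self-contained sketch of that fallback (serial build or initialized MPI assumed):

#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

#include <cassert>

using namespace dealii;

int main()
{
  Triangulation<2> tria;
  GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  FE_Q<2>       fe(1);
  DoFHandler<2> dof_handler(tria);
  dof_handler.distribute_dofs(fe);

  // One entry only, identical to the locally owned set itself.
  const std::vector<IndexSet> owned =
    dof_handler.locally_owned_dofs_per_processor();
  assert(owned.size() == 1);
  assert(owned[0] == dof_handler.locally_owned_dofs());
}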
const std::string fname_variable =
std::string(filename) + "_variable.data";
- const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
-
MPI_Info info;
int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
// Gather size of data in bytes we want to store from this processor.
const unsigned int size_on_proc = src_data_variable.size();
- // Share information among all processors.
- std::vector<unsigned int> sizes_on_all_procs(n_procs);
- ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
- 1,
- MPI_UNSIGNED,
- sizes_on_all_procs.data(),
- 1,
- MPI_UNSIGNED,
- mpi_communicator);
+      // Compute the exclusive prefix sum of the data sizes over all
+      // processors; it is this processor's offset into the variable-size
+      // data block. Rank 0 receives no contribution from MPI_Exscan, so
+      // prefix_sum keeps its initial value of zero there.
+      unsigned int prefix_sum = 0;
+ ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc),
+ &prefix_sum,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ mpi_communicator);
AssertThrowMPI(ierr);
- // Generate accumulated sum to get an offset for writing variable
- // size data.
- std::partial_sum(sizes_on_all_procs.begin(),
- sizes_on_all_procs.end(),
- sizes_on_all_procs.begin());
-
const char *data = src_data_variable.data();
// Write data consecutively into file.
- ierr = MPI_File_write_at(
- fh,
- offset_variable +
- ((myrank == 0) ?
- 0 :
- sizes_on_all_procs[myrank - 1]), // global position in file
- DEAL_II_MPI_CONST_CAST(data),
- src_data_variable.size(), // local buffer
- MPI_CHAR,
- MPI_STATUS_IGNORE);
+ ierr = MPI_File_write_at(fh,
+ offset_variable +
+ prefix_sum, // global position in file
+ DEAL_II_MPI_CONST_CAST(data),
+ src_data_variable.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);
ierr = MPI_File_close(&fh);
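The Exscan-derived offset feeds straight into the MPI-IO call; a self-contained sketch of the write pattern, with an invented file name and payload:

#include <mpi.h>

#include <string>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Variable-size payload on each rank.
  const std::string payload = "rank " + std::to_string(rank) + "\n";
  unsigned int size_on_proc = payload.size();

  // This rank's offset = sum of the payload sizes on all lower ranks.
  unsigned int prefix_sum = 0;
  MPI_Exscan(&size_on_proc, &prefix_sum, 1, MPI_UNSIGNED, MPI_SUM,
             MPI_COMM_WORLD);

  MPI_File fh;
  MPI_File_open(MPI_COMM_WORLD, "example_variable.data",
                MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
  // All ranks write concurrently, each into its own disjoint region.
  MPI_File_write_at(fh, prefix_sum, payload.data(), size_on_proc, MPI_CHAR,
                    MPI_STATUS_IGNORE);
  MPI_File_close(&fh);

  MPI_Finalize();
}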
const std::string fname_variable =
std::string(filename) + "_variable.data";
- const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
-
MPI_Info info;
int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
dest_sizes_variable.end(),
0);
- // share information among all processors
- std::vector<unsigned int> sizes_on_all_procs(n_procs);
- ierr = MPI_Allgather(DEAL_II_MPI_CONST_CAST(&size_on_proc),
- 1,
- MPI_UNSIGNED,
- sizes_on_all_procs.data(),
- 1,
- MPI_UNSIGNED,
- mpi_communicator);
+      // Compute the exclusive prefix sum of the data sizes to obtain this
+      // processor's read offset (zero on rank 0).
+      unsigned int prefix_sum = 0;
+ ierr = MPI_Exscan(DEAL_II_MPI_CONST_CAST(&size_on_proc),
+ &prefix_sum,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ mpi_communicator);
AssertThrowMPI(ierr);
- // generate accumulated sum
- std::partial_sum(sizes_on_all_procs.begin(),
- sizes_on_all_procs.end(),
- sizes_on_all_procs.begin());
-
dest_data_variable.resize(size_on_proc);
ierr = MPI_File_read_at(fh,
- offset + ((myrank == 0) ?
- 0 :
- sizes_on_all_procs[myrank - 1]),
+ offset + prefix_sum,
dest_data_variable.data(),
dest_data_variable.size(),
MPI_CHAR,
&dof_handler->get_triangulation())));
Assert(triangulation != nullptr, ExcInternalError());
- const unsigned int n_cpus =
- Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
const types::subdomain_id subdomain_id =
triangulation->locally_owned_subdomain();
// --------- Phase 4: shift indices so that each processor has a unique
// range of indices
- std::vector<dealii::types::global_dof_index>
- n_locally_owned_dofs_per_processor(n_cpus);
-
- const int ierr =
- MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- n_locally_owned_dofs_per_processor.data(),
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- triangulation->get_communicator());
+ dealii::types::global_dof_index my_shift = 0;
+ const int ierr =
+ MPI_Exscan(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
+ &my_shift,
+ 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM,
+ triangulation->get_communicator());
AssertThrowMPI(ierr);
- const dealii::types::global_dof_index my_shift =
- std::accumulate(n_locally_owned_dofs_per_processor.begin(),
- n_locally_owned_dofs_per_processor.begin() +
- subdomain_id,
- static_cast<dealii::types::global_dof_index>(0));
-
// make dof indices globally consecutive
for (auto &new_index : renumbering)
if (new_index != numbers::invalid_dof_index)
// now a little bit of housekeeping
const dealii::types::global_dof_index n_global_dofs =
- std::accumulate(n_locally_owned_dofs_per_processor.begin(),
- n_locally_owned_dofs_per_processor.end(),
- dealii::types::global_dof_index(0));
-
- std::vector<IndexSet> locally_owned_dofs_per_processor(
- n_cpus, IndexSet(n_global_dofs));
- {
- dealii::types::global_dof_index current_shift = 0;
- for (unsigned int i = 0; i < n_cpus; ++i)
- {
- locally_owned_dofs_per_processor[i].add_range(
- current_shift,
- current_shift + n_locally_owned_dofs_per_processor[i]);
- current_shift += n_locally_owned_dofs_per_processor[i];
- }
- }
- NumberCache number_cache(locally_owned_dofs_per_processor,
- triangulation->locally_owned_subdomain());
- Assert(number_cache
- .locally_owned_dofs_per_processor
- [triangulation->locally_owned_subdomain()]
- .n_elements() == number_cache.n_locally_owned_dofs,
- ExcInternalError());
- Assert(
- !number_cache
- .locally_owned_dofs_per_processor[triangulation
- ->locally_owned_subdomain()]
- .n_elements() ||
- number_cache
- .locally_owned_dofs_per_processor[triangulation
- ->locally_owned_subdomain()]
- .nth_index_in_set(0) == my_shift,
- ExcInternalError());
+ Utilities::MPI::sum(n_locally_owned_dofs,
+ triangulation->get_communicator());
+
+ NumberCache number_cache;
+ number_cache.n_global_dofs = n_global_dofs;
+ number_cache.n_locally_owned_dofs = n_locally_owned_dofs;
+ number_cache.locally_owned_dofs = IndexSet(n_global_dofs);
+ number_cache.locally_owned_dofs.add_range(my_shift,
+ my_shift +
+ n_locally_owned_dofs);
+ number_cache.locally_owned_dofs.compress();
// this ends the phase where we enumerate degrees of freedom on
// each processor. what is missing is communicating DoF indices
"Triangulation if the flag construct_multigrid_hierarchy "
"is set in the constructor."));
-
- const unsigned int n_cpus =
- Utilities::MPI::n_mpi_processes(triangulation->get_communicator());
-
// loop over all levels that exist globally (across all
// processors), even if the current processor does not in fact
// have any cells on that level or if the local part of the
}
}
- // TODO: make this code simpler with the new constructors of
- // NumberCache make indices consecutive
level_number_cache.n_locally_owned_dofs = 0;
for (types::global_dof_index &index : renumbering)
if (index != numbers::invalid_dof_index)
//* 3. communicate local dofcount and shift ids to make
// them unique
- level_number_cache.n_locally_owned_dofs_per_processor.resize(
- n_cpus);
-
- int ierr = MPI_Allgather(
- &level_number_cache.n_locally_owned_dofs,
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- level_number_cache.n_locally_owned_dofs_per_processor.data(),
- 1,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- triangulation->get_communicator());
+ dealii::types::global_dof_index my_shift = 0;
+ const int ierr =
+ MPI_Exscan(DEAL_II_MPI_CONST_CAST(
+ &level_number_cache.n_locally_owned_dofs),
+ &my_shift,
+ 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM,
+ triangulation->get_communicator());
AssertThrowMPI(ierr);
- const dealii::types::global_dof_index shift = std::accumulate(
- level_number_cache.n_locally_owned_dofs_per_processor.begin(),
- level_number_cache.n_locally_owned_dofs_per_processor.begin() +
- triangulation->locally_owned_subdomain(),
- static_cast<dealii::types::global_dof_index>(0));
for (types::global_dof_index &index : renumbering)
if (index != numbers::invalid_dof_index)
- index += shift;
+ index += my_shift;
// now re-enumerate all dofs to this shifted and condensed
// numbering form. we renumber some dofs as invalid, so
renumbering, IndexSet(0), *dof_handler, level, false);
// now a little bit of housekeeping
- level_number_cache.n_global_dofs = std::accumulate(
- level_number_cache.n_locally_owned_dofs_per_processor.begin(),
- level_number_cache.n_locally_owned_dofs_per_processor.end(),
- static_cast<dealii::types::global_dof_index>(0));
+ level_number_cache.n_global_dofs =
+ Utilities::MPI::sum(level_number_cache.n_locally_owned_dofs,
+ triangulation->get_communicator());
level_number_cache.locally_owned_dofs =
IndexSet(level_number_cache.n_global_dofs);
level_number_cache.locally_owned_dofs.add_range(
- shift, shift + level_number_cache.n_locally_owned_dofs);
+ my_shift, my_shift + level_number_cache.n_locally_owned_dofs);
level_number_cache.locally_owned_dofs.compress();
- // fill global_dof_indexsets
- level_number_cache.locally_owned_dofs_per_processor.resize(n_cpus);
- {
- dealii::types::global_dof_index current_shift = 0;
- for (unsigned int i = 0; i < n_cpus; ++i)
- {
- level_number_cache.locally_owned_dofs_per_processor[i] =
- IndexSet(level_number_cache.n_global_dofs);
- level_number_cache.locally_owned_dofs_per_processor[i]
- .add_range(current_shift,
- current_shift +
- level_number_cache
- .n_locally_owned_dofs_per_processor[i]);
- current_shift +=
- level_number_cache.n_locally_owned_dofs_per_processor[i];
- }
- }
- Assert(level_number_cache
- .locally_owned_dofs_per_processor
- [triangulation->locally_owned_subdomain()]
- .n_elements() == level_number_cache.n_locally_owned_dofs,
- ExcInternalError());
- Assert(!level_number_cache
- .locally_owned_dofs_per_processor
- [triangulation->locally_owned_subdomain()]
- .n_elements() ||
- level_number_cache
- .locally_owned_dofs_per_processor
- [triangulation->locally_owned_subdomain()]
- .nth_index_in_set(0) == shift,
- ExcInternalError());
-
number_caches.emplace_back(level_number_cache);
}
*dof_handler,
/*check_validity=*/false);
- // Since we have not updated the number cache yet, we can use the
- // index sets contained in the DoFHandler at this stage.
- return NumberCache(dof_handler->locally_owned_dofs_per_processor(),
- Utilities::MPI::this_mpi_process(
- triangulation->get_communicator()));
+ NumberCache number_cache;
+ number_cache.locally_owned_dofs = dof_handler->locally_owned_dofs();
+ number_cache.n_global_dofs = dof_handler->n_dofs();
+ number_cache.n_locally_owned_dofs =
+ number_cache.locally_owned_dofs.n_elements();
+ return number_cache;
}
else
{
triangulation->load_user_flags(user_flags);
}
- // the last step is to update the NumberCache, including knowing
- // which processor owns which DoF index. this requires
- // communication.
- //
- // this step is substantially more complicated than it is in
- // distribute_dofs() in case the IndexSets of locally owned DoFs
- // after renumbering are not contiguous any more (which we have done
- // at the top of this function). for distribute_dofs() it was enough
- // to exchange the starting indices for each processor and the
- // global number of DoFs, but here we actually have to serialize the
- // IndexSet objects and shop them across the network.
- const unsigned int n_cpus = Utilities::MPI::n_mpi_processes(
- triangulation->get_communicator());
- std::vector<IndexSet> locally_owned_dofs_per_processor(
- n_cpus, IndexSet(dof_handler->n_dofs()));
- // serialize our own IndexSet
- std::vector<char> my_data;
- {
-# ifdef DEAL_II_WITH_ZLIB
-
- boost::iostreams::filtering_ostream out;
- out.push(
- boost::iostreams::gzip_compressor(boost::iostreams::gzip_params(
- boost::iostreams::gzip::best_compression)));
- out.push(boost::iostreams::back_inserter(my_data));
-
- boost::archive::binary_oarchive archive(out);
-
- archive << my_locally_owned_new_dof_indices;
- out.flush();
-# else
- std::ostringstream out;
- boost::archive::binary_oarchive archive(out);
- archive << my_locally_owned_new_dof_indices;
- const std::string &s = out.str();
- my_data.reserve(s.size());
- my_data.assign(s.begin(), s.end());
-# endif
- }
-
- // determine maximum size of IndexSet
- const unsigned int max_size =
- Utilities::MPI::max(my_data.size(),
- triangulation->get_communicator());
-
- // as the MPI_Allgather call will be reading max_size elements, and
- // as this may be past the end of my_data, we need to increase the
- // size of the local buffer. This is filled with zeros.
- my_data.resize(max_size);
-
- std::vector<char> buffer(max_size * n_cpus);
- const int ierr = MPI_Allgather(my_data.data(),
- max_size,
- MPI_BYTE,
- buffer.data(),
- max_size,
- MPI_BYTE,
- triangulation->get_communicator());
- AssertThrowMPI(ierr);
-
- for (unsigned int i = 0; i < n_cpus; ++i)
- if (i == Utilities::MPI::this_mpi_process(
- triangulation->get_communicator()))
- locally_owned_dofs_per_processor[i] =
- my_locally_owned_new_dof_indices;
- else
- {
- // copy the data previously received into a stringstream
- // object and then read the IndexSet from it
- std::string decompressed_buffer;
-
- // first decompress the buffer
- {
-# ifdef DEAL_II_WITH_ZLIB
-
- boost::iostreams::filtering_ostream decompressing_stream;
- decompressing_stream.push(
- boost::iostreams::gzip_decompressor());
- decompressing_stream.push(
- boost::iostreams::back_inserter(decompressed_buffer));
-
- decompressing_stream.write(&buffer[i * max_size], max_size);
-# else
- decompressed_buffer.assign(&buffer[i * max_size], max_size);
-# endif
- }
-
- // then restore the object from the buffer
- std::istringstream in(decompressed_buffer);
- boost::archive::binary_iarchive archive(in);
-
- archive >> locally_owned_dofs_per_processor[i];
- }
-
- return NumberCache(locally_owned_dofs_per_processor,
- Utilities::MPI::this_mpi_process(
- triangulation->get_communicator()));
+ NumberCache number_cache;
+ number_cache.locally_owned_dofs = my_locally_owned_new_dof_indices;
+ number_cache.n_global_dofs = dof_handler->n_dofs();
+ number_cache.n_locally_owned_dofs =
+ number_cache.locally_owned_dofs.n_elements();
+ return number_cache;
}
#endif
}
// we only implement the case where the multigrid numbers are
// renumbered within the processor's partition, rather than the most
// general case
- const std::vector<IndexSet> &index_sets =
- dof_handler->locally_owned_mg_dofs_per_processor(level);
+ const IndexSet index_set = dof_handler->locally_owned_mg_dofs(level);
constexpr int dim = DoFHandlerType::dimension;
constexpr int spacedim = DoFHandlerType::space_dimension;
# ifdef DEBUG
for (types::global_dof_index i : new_numbers)
{
- Assert(index_sets[my_rank].is_element(i),
+ Assert(index_set.is_element(i),
ExcNotImplemented(
"Renumberings that change the locally owned mg dofs "
"partitioning are currently not implemented for "
std::vector<types::global_dof_index> ghosted_new_numbers(
relevant_dofs.n_elements());
{
- Utilities::MPI::Partitioner partitioner(index_sets[my_rank],
+ Utilities::MPI::Partitioner partitioner(index_set,
relevant_dofs,
tr->get_communicator());
std::vector<types::global_dof_index> temp_array(
Assert(false, ExcNotImplemented());
#endif
- return NumberCache(
- index_sets, Utilities::MPI::this_mpi_process(tr->get_communicator()));
+ NumberCache number_cache;
+ number_cache.locally_owned_dofs = index_set;
+ number_cache.n_global_dofs = dof_handler->n_dofs();
+ number_cache.n_locally_owned_dofs =
+ number_cache.locally_owned_dofs.n_elements();
+ return number_cache;
}
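For reference, the Utilities::MPI::Partitioner used above needs only the locally owned set and the ghost set, not the full per-processor array; a sketch of its construction with invented index ranges:

// Each rank owns a contiguous block of 10 indices and ghosts the first
// index of the next rank's block (wrapping around).
const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
const unsigned int my_rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

IndexSet owned(10 * n_ranks);
owned.add_range(10 * my_rank, 10 * (my_rank + 1));

IndexSet ghosts(10 * n_ranks);
ghosts.add_index((10 * (my_rank + 1)) % (10 * n_ranks));

Utilities::MPI::Partitioner partitioner(owned, ghosts, MPI_COMM_WORLD);

// The partitioner translates between global and processor-local numbering.
const unsigned int local_index = partitioner.global_to_local(10 * my_rank);
// local_index == 0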
} // namespace Policy
} // namespace DoFHandlerImplementation
for (unsigned int c = 0; c < n_buckets; ++c)
local_dof_count[c] = component_to_dof_map[c].size();
-
- // gather information from all CPUs
- std::vector<types::global_dof_index> all_dof_counts(
- fe_collection.n_components() *
- Utilities::MPI::n_mpi_processes(tria->get_communicator()));
-
- const int ierr = MPI_Allgather(local_dof_count.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- all_dof_counts.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
+ const int ierr = MPI_Exscan(local_dof_count.data(),
+ prefix_dof_count.data(),
+ n_buckets,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM,
+ tria->get_communicator());
AssertThrowMPI(ierr);
- for (unsigned int i = 0; i < n_buckets; ++i)
- Assert(
- all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] ==
- local_dof_count[i],
- ExcInternalError());
+ std::vector<types::global_dof_index> global_dof_count(n_buckets);
+ Utilities::MPI::sum(local_dof_count,
+ tria->get_communicator(),
+ global_dof_count);
// calculate shifts
- unsigned int cumulated = 0;
+ types::global_dof_index cumulated = 0;
for (unsigned int c = 0; c < n_buckets; ++c)
{
- shifts[c] = cumulated;
- for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain();
- ++i)
- shifts[c] += all_dof_counts[c + n_buckets * i];
- for (unsigned int i = 0;
- i < Utilities::MPI::n_mpi_processes(tria->get_communicator());
- ++i)
- cumulated += all_dof_counts[c + n_buckets * i];
+ shifts[c] = prefix_dof_count[c] + cumulated;
+ cumulated += global_dof_count[c];
}
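A worked example of the new shift computation, with invented counts for two ranks and two buckets:

// local_dof_count  : rank 0 = {3, 5},  rank 1 = {2, 4}
// prefix_dof_count : rank 0 = {0, 0},  rank 1 = {3, 5}  (MPI_Exscan)
// global_dof_count : {5, 9} on both ranks               (Utilities::MPI::sum)
//
// shifts on rank 0 : {0 + 0, 0 + 5} = {0, 5}
// shifts on rank 1 : {3 + 0, 5 + 5} = {3, 10}
//
// Bucket 0 occupies global indices [0,5), split [0,3) / [3,5) between the
// ranks; bucket 1 occupies [5,14), split [5,10) / [10,14).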
#else
(void)tria;
for (unsigned int c = 0; c < n_buckets; ++c)
local_dof_count[c] = block_to_dof_map[c].size();
-
- // gather information from all CPUs
- std::vector<types::global_dof_index> all_dof_counts(
- fe_collection.n_components() *
- Utilities::MPI::n_mpi_processes(tria->get_communicator()));
-
- const int ierr = MPI_Allgather(local_dof_count.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- all_dof_counts.data(),
- n_buckets,
- DEAL_II_DOF_INDEX_MPI_TYPE,
- tria->get_communicator());
+ std::vector<types::global_dof_index> prefix_dof_count(n_buckets);
+ const int ierr = MPI_Exscan(local_dof_count.data(),
+ prefix_dof_count.data(),
+ n_buckets,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ MPI_SUM,
+ tria->get_communicator());
AssertThrowMPI(ierr);
- for (unsigned int i = 0; i < n_buckets; ++i)
- Assert(
- all_dof_counts[n_buckets * tria->locally_owned_subdomain() + i] ==
- local_dof_count[i],
- ExcInternalError());
+ std::vector<types::global_dof_index> global_dof_count(n_buckets);
+ Utilities::MPI::sum(local_dof_count,
+ tria->get_communicator(),
+ global_dof_count);
// calculate shifts
types::global_dof_index cumulated = 0;
for (unsigned int c = 0; c < n_buckets; ++c)
{
- shifts[c] = cumulated;
- for (types::subdomain_id i = 0; i < tria->locally_owned_subdomain();
- ++i)
- shifts[c] += all_dof_counts[c + n_buckets * i];
- for (unsigned int i = 0;
- i < Utilities::MPI::n_mpi_processes(tria->get_communicator());
- ++i)
- cumulated += all_dof_counts[c + n_buckets * i];
+ shifts[c] = prefix_dof_count[c] + cumulated;
+ cumulated += global_dof_count[c];
}
#else
(void)tria;
// ---------------------------------------------------------------------
#include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/mpi.h>
#include <deal.II/dofs/number_cache.h>
locally_owned_dofs_per_processor.clear();
}
+
+
+ std::vector<types::global_dof_index>
+ NumberCache::get_n_locally_owned_dofs_per_processor(
+ const MPI_Comm mpi_communicator) const
+ {
+ const unsigned int n_procs =
+ Utilities::MPI::job_supports_mpi() ?
+ Utilities::MPI::n_mpi_processes(mpi_communicator) :
+ 1;
+ if (n_global_dofs == 0)
+ return std::vector<types::global_dof_index>();
+ else if (n_locally_owned_dofs_per_processor.empty() == false)
+ {
+ AssertDimension(n_locally_owned_dofs_per_processor.size(), n_procs);
+ return n_locally_owned_dofs_per_processor;
+ }
+ else
+ {
+ std::vector<types::global_dof_index> result(n_procs,
+ n_locally_owned_dofs);
+#ifdef DEAL_II_WITH_MPI
+        if (n_procs > 1)
+          {
+            const int ierr =
+              MPI_Allgather(DEAL_II_MPI_CONST_CAST(&n_locally_owned_dofs),
+                            1,
+                            DEAL_II_DOF_INDEX_MPI_TYPE,
+                            result.data(),
+                            1,
+                            DEAL_II_DOF_INDEX_MPI_TYPE,
+                            mpi_communicator);
+            AssertThrowMPI(ierr);
+          }
+#endif
+ return result;
+ }
+ }
+
+
+
+ std::vector<IndexSet>
+ NumberCache::get_locally_owned_dofs_per_processor(
+ const MPI_Comm mpi_communicator) const
+ {
+ AssertDimension(locally_owned_dofs.size(), n_global_dofs);
+ const unsigned int n_procs =
+ Utilities::MPI::job_supports_mpi() ?
+ Utilities::MPI::n_mpi_processes(mpi_communicator) :
+ 1;
+ if (n_global_dofs == 0)
+ return std::vector<IndexSet>();
+ else if (locally_owned_dofs_per_processor.empty() == false)
+ {
+ AssertDimension(locally_owned_dofs_per_processor.size(), n_procs);
+ return locally_owned_dofs_per_processor;
+ }
+ else
+ {
+ std::vector<IndexSet> locally_owned_dofs_per_processor(
+ n_procs, locally_owned_dofs);
+
+#ifdef DEAL_II_WITH_MPI
+ if (n_procs > 1)
+ {
+ // this step is substantially more complicated because indices
+ // might be distributed arbitrarily among the processors. Here we
+            // have to serialize the IndexSet objects and ship them across the
+ // network.
+ std::vector<char> my_data;
+ {
+# ifdef DEAL_II_WITH_ZLIB
+
+ boost::iostreams::filtering_ostream out;
+ out.push(boost::iostreams::gzip_compressor(
+ boost::iostreams::gzip_params(
+ boost::iostreams::gzip::best_compression)));
+ out.push(boost::iostreams::back_inserter(my_data));
+
+ boost::archive::binary_oarchive archive(out);
+
+ archive << locally_owned_dofs;
+ out.flush();
+# else
+ std::ostringstream out;
+ boost::archive::binary_oarchive archive(out);
+ archive << locally_owned_dofs;
+ const std::string &s = out.str();
+ my_data.reserve(s.size());
+ my_data.assign(s.begin(), s.end());
+# endif
+ }
+
+ // determine maximum size of IndexSet
+ const unsigned int max_size =
+ Utilities::MPI::max(my_data.size(), mpi_communicator);
+
+ // as the MPI_Allgather call will be reading max_size elements,
+ // and as this may be past the end of my_data, we need to increase
+ // the size of the local buffer. This is filled with zeros.
+ my_data.resize(max_size);
+
+ std::vector<char> buffer(max_size * n_procs);
+ const int ierr = MPI_Allgather(my_data.data(),
+ max_size,
+ MPI_BYTE,
+ buffer.data(),
+ max_size,
+ MPI_BYTE,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ for (unsigned int i = 0; i < n_procs; ++i)
+ if (i == Utilities::MPI::this_mpi_process(mpi_communicator))
+ locally_owned_dofs_per_processor[i] = locally_owned_dofs;
+ else
+ {
+ // copy the data previously received into a stringstream
+ // object and then read the IndexSet from it
+ std::string decompressed_buffer;
+
+ // first decompress the buffer
+ {
+# ifdef DEAL_II_WITH_ZLIB
+
+ boost::iostreams::filtering_ostream decompressing_stream;
+ decompressing_stream.push(
+ boost::iostreams::gzip_decompressor());
+ decompressing_stream.push(
+ boost::iostreams::back_inserter(decompressed_buffer));
+
+ decompressing_stream.write(&buffer[i * max_size],
+ max_size);
+# else
+ decompressed_buffer.assign(&buffer[i * max_size],
+ max_size);
+# endif
+ }
+
+ // then restore the object from the buffer
+ std::istringstream in(decompressed_buffer);
+ boost::archive::binary_iarchive archive(in);
+
+ archive >> locally_owned_dofs_per_processor[i];
+ }
+ }
+#endif
+ return locally_owned_dofs_per_processor;
+ }
+ }
+
+
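A hedged usage sketch of the new getters, filling a cache by hand for a single process (namespace qualifiers omitted; values made up):

NumberCache cache;
cache.n_global_dofs        = 42;
cache.n_locally_owned_dofs = 42;
cache.locally_owned_dofs   = complete_index_set(42);

// On one process no communication takes place and the array has one entry.
const std::vector<types::global_dof_index> counts =
  cache.get_n_locally_owned_dofs_per_processor(MPI_COMM_SELF);
// counts == {42}; similarly, get_locally_owned_dofs_per_processor()
// returns a one-element vector holding the complete index set.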
std::size_t
NumberCache::memory_consumption() const
{
// Make indices global by getting the number of vertices owned by each
// processor and shifting the indices accordingly
- const unsigned int n_cpu =
- Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
- std::vector<types::global_vertex_index> indices(n_cpu);
- int ierr = MPI_Allgather(&next_index,
- 1,
- DEAL_II_VERTEX_INDEX_MPI_TYPE,
- indices.data(),
- 1,
- DEAL_II_VERTEX_INDEX_MPI_TYPE,
- triangulation.get_communicator());
+      types::global_vertex_index shift = 0;
+ int ierr = MPI_Exscan(&next_index,
+ &shift,
+ 1,
+ DEAL_II_VERTEX_INDEX_MPI_TYPE,
+ MPI_SUM,
+ triangulation.get_communicator());
AssertThrowMPI(ierr);
- Assert(indices.begin() + triangulation.locally_owned_subdomain() <
- indices.end(),
- ExcInternalError());
- const types::global_vertex_index shift =
- std::accumulate(indices.begin(),
- indices.begin() + triangulation.locally_owned_subdomain(),
- types::global_vertex_index(0));
std::map<unsigned int, types::global_vertex_index>::iterator
global_index_it = local_to_global_vertex_index.begin(),
// The list of neighbors is symmetric (our neighbors have us as a
// neighbor), so we can use it to send and to know how many messages
// we will get.
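+          // Cache the index sets for all levels up front: each call to
+          // locally_owned_mg_dofs_per_processor() may now involve global
+          // communication, so it must not be made inside the loops below.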
+ std::vector<std::vector<IndexSet>>
+ locally_owned_mg_dofs_per_processor;
+ for (unsigned int l = 0; l < tria->n_global_levels(); ++l)
+ locally_owned_mg_dofs_per_processor.push_back(
+ mg_dof.locally_owned_mg_dofs_per_processor(l));
+
const std::set<types::subdomain_id> &neighbors =
tria->level_ghost_owners();
std::map<int, std::vector<DoFPair>> send_data;
std::set<types::subdomain_id>::iterator it;
for (it = neighbors.begin(); it != neighbors.end(); ++it)
{
- if (mg_dof
- .locally_owned_mg_dofs_per_processor(dofpair.level)[*it]
+ if (locally_owned_mg_dofs_per_processor[dofpair.level][*it]
.is_element(dofpair.level_dof_index))
{
send_data[*it].push_back(dofpair);
// Compute # of locally owned MG dofs / processor for distribution
const std::vector<::dealii::IndexSet>
- &locally_owned_mg_dofs_per_processor =
+ locally_owned_mg_dofs_per_processor =
mg_dof.locally_owned_mg_dofs_per_processor(level + 1);
std::vector<::dealii::types::global_dof_index>
n_locally_owned_mg_dofs_per_processor(
<< std::endl;
deallog << "n_locally_owned_dofs_per_processor: ";
- std::vector<types::global_dof_index> v =
+ const std::vector<types::global_dof_index> v =
dof_handler.n_locally_owned_dofs_per_processor();
unsigned int sum = 0;
for (unsigned int i = 0; i < v.size(); ++i)
const unsigned int N = dof_handler.n_dofs();
Assert(dof_handler.n_locally_owned_dofs() <= N, ExcInternalError());
- Assert(
- std::accumulate(dof_handler.n_locally_owned_dofs_per_processor().begin(),
- dof_handler.n_locally_owned_dofs_per_processor().end(),
- 0U) == N,
- ExcInternalError());
+ const std::vector<types::global_dof_index> n_owned_dofs =
+ dof_handler.n_locally_owned_dofs_per_processor();
+ Assert(std::accumulate(n_owned_dofs.begin(), n_owned_dofs.end(), 0U) == N,
+ ExcInternalError());
+ const std::vector<IndexSet> owned_dofs =
+ dof_handler.locally_owned_dofs_per_processor();
IndexSet all(N);
- for (unsigned int i = 0;
- i < dof_handler.locally_owned_dofs_per_processor().size();
- ++i)
+ for (unsigned int i = 0; i < owned_dofs.size(); ++i)
{
- IndexSet intersect =
- all & dof_handler.locally_owned_dofs_per_processor()[i];
+ IndexSet intersect = all & owned_dofs[i];
Assert(intersect.n_elements() == 0, ExcInternalError());
- all.add_indices(dof_handler.locally_owned_dofs_per_processor()[i]);
+ all.add_indices(owned_dofs[i]);
}
Assert(all == complete_index_set(N), ExcInternalError());