*/
static constexpr unsigned int sparsity_factor = 4;
+
+ /**
+ * Constructor. Set up the dictionary by computing the partitioning
+ * from the global size and sending the rank information on locally
+ * owned ranges to the owner of the dictionary part.
+ */
+ Dictionary(const IndexSet &owned_indices, const MPI_Comm comm);
+
/**
* A vector with as many entries as there are dofs in the dictionary
* of the current process, and each entry containing the rank of the
*/
unsigned int stride_small_size;
- /**
- * Set up the dictionary by computing the partitioning from the
- * global size and sending the rank information on locally owned
- * ranges to the owner of the dictionary part.
- */
- void
- reinit(const IndexSet &owned_indices, const MPI_Comm comm);
-
/**
* Translate a global dof index to the MPI rank in the dictionary
* using `dofs_per_process`. We multiply by `stride_small_size` to
*/
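Judging from the comment above, the translation boils down to an integer division by `dofs_per_process` followed by a multiplication by `stride_small_size`. A minimal sketch of such a mapping, assuming that division and multiplication are the whole computation (the member function name below is illustrative, not taken from this patch):

    // Hedged sketch: map a global dof index to the rank owning the
    // corresponding dictionary range, assuming the mapping is exactly
    // (i / dofs_per_process) * stride_small_size as the comment suggests.
    unsigned int
    dof_to_dict_rank(const types::global_dof_index i) const
    {
      return (i / dofs_per_process) * stride_small_size;
    }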
std::vector<unsigned int> &owning_ranks;
+ /**
+ * The dictionary handling the requests.
+ */
+ Dictionary dict;
+
/**
* Keeps track of the origin of the requests. The layout of the data
* structure is as follows: The outermost vector has as many entries
types::global_dof_index>>>>>
requesters;
- /**
- * The dictionary handling the requests.
- */
- Dictionary dict;
-
/**
* Array to collect the indices to look up (first vector) and their
* local index among indices (second vector), sorted by the rank in
- void
- Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
+ Dictionary::Dictionary(const IndexSet &owned_indices,
+ const MPI_Comm comm)
{
// 1) set up the partition
this->partition(owned_indices, comm);
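With reinit() turned into a constructor, any call site that previously default-constructed a Dictionary and then called reinit() has to switch to direct construction. A minimal before/after sketch of such a call site (hypothetical, not taken from this patch):

    // before: two-step setup via reinit()
    //   Dictionary dict;
    //   dict.reinit(owned_indices, comm);
    //
    // after: single-step construction
    Dictionary dict(owned_indices, comm);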
, n_procs(n_mpi_processes(comm))
, track_index_requesters(track_index_requesters)
, owning_ranks(owning_ranks)
- {
- dict.reinit(owned_indices, comm);
- requesters.resize(dict.actually_owning_rank_list.size());
- }
+ , dict(owned_indices, comm)
+ , requesters(dict.actually_owning_rank_list.size())
+ {}
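Constructing `dict` in the member initializer list is what makes `requesters(dict.actually_owning_rank_list.size())` valid here: members are initialized in declaration order, so this relies on `dict` being declared before `requesters`, which is why the member declaration is moved earlier in this patch. A minimal sketch of the pattern with simplified, hypothetical member types:

    // Sketch only: declaration order, not initializer-list order, determines
    // initialization order, so dict is fully constructed before requesters
    // reads dict.actually_owning_rank_list.size().
    struct PayloadSketch
    {
      Dictionary                              dict;       // declared first
      std::vector<std::vector<unsigned int>>  requesters; // simplified type

      PayloadSketch(const IndexSet &owned_indices, const MPI_Comm comm)
        : dict(owned_indices, comm)
        , requesters(dict.actually_owning_rank_list.size())
      {}
    };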