*/
Partitioner(const unsigned int size);
+ /**
+ * Constructor that takes the number of locally-owned degrees of freedom
+ * @p local_size and the number of ghost degrees of freedom @p ghost_size.
+ *
+ * The local index range is translated to global indices in an ascending
+ * and one-to-one fashion, i.e., the indices of process $p$ sit exactly
+ * between the indices of the processes $p-1$ and $p+1$.
+ *
+ * @note Setting the @p ghost_size variable to an appropriate value
+ * provides memory space for the ghost data in a vector's memory
+ * allocation and allows access to it via local_element(). However,
+ * the associated global indices must be handled externally in this
+ * case.
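+ *
+ * As a minimal sketch (the sizes are made up purely for illustration),
+ * a partitioner in which every process owns 4 elements and stores 2
+ * ghost elements can be created via
+ * @code
+ * Utilities::MPI::Partitioner partitioner(4, 2, MPI_COMM_WORLD);
+ * @endcode
+ * in which case process $p$ owns the global index range $[4p, 4p+4)$.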
+ */
+ Partitioner(const types::global_dof_index local_size,
+ const types::global_dof_index ghost_size,
+ const MPI_Comm communicator);
+
/**
* Constructor with index set arguments. This constructor creates a
* distributed layout based on a given communicator, an IndexSet
reinit(
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+ /**
+ * Initialize the vector with @p local_size locally-owned and
+ * @p ghost_size ghost degrees of freedom.
+ *
+ * @note In the created underlying partitioner, the local index range is
+ * translated to global indices in an ascending and one-to-one fashion,
+ * i.e., the indices of process $p$ sit exactly between the indices of
+ * the processes $p-1$ and $p+1$. Setting the
+ * @p ghost_size variable to an appropriate value provides memory space
+ * for the ghost data in a vector's memory allocation and allows
+ * access to it via local_element(). However, the associated global
+ * indices must be handled externally in this case.
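+ *
+ * As a minimal sketch (the sizes are made up purely for illustration),
+ * a vector with 4 locally-owned and 2 ghost elements per process can
+ * be set up via
+ * @code
+ * LinearAlgebra::distributed::Vector<double> vec;
+ * vec.reinit(4, 2, MPI_COMM_WORLD);
+ * @endcode
+ * after which local_element() provides access to all six local
+ * elements, i.e., to local indices 0 through 5.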
+ */
+ void
+ reinit(const types::global_dof_index local_size,
+ const types::global_dof_index ghost_size,
+ const MPI_Comm comm);
+
/**
* Swap the contents of this vector and the other vector @p v. One could
* do this operation with a temporary variable and copying over the data
+ template <typename Number, typename MemorySpaceType>
+ void
+ Vector<Number, MemorySpaceType>::reinit(
+ const types::global_dof_index local_size,
+ const types::global_dof_index ghost_size,
+ const MPI_Comm comm)
+ {
+ clear_mpi_requests();
+
+ // check whether we need to reallocate
+ resize_val(local_size + ghost_size);
+
+ // delete previous content in import data
+ import_data.values.reset();
+ import_data.values_dev.reset();
+
+ // create a partitioner that numbers the locally-owned ranges of all
+ // processes contiguously and in ascending process order
+ partitioner = std::make_shared<Utilities::MPI::Partitioner>(local_size,
+ ghost_size,
+ comm);
+
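+ // initialize all entries to zero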
+ this->operator=(Number());
+ }
+
+
+
template <typename Number, typename MemorySpaceType>
template <typename Number2>
void
+ Partitioner::Partitioner(const types::global_dof_index local_size,
+ const types::global_dof_index ghost_size,
+ const MPI_Comm communicator)
+ : global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
+ communicator))
+ , locally_owned_range_data(global_size)
+ , local_range_data{0, local_size}
+ , n_ghost_indices_data(ghost_size)
+ , n_import_indices_data(0)
+ , n_ghost_indices_in_larger_set(0)
+ , my_pid(Utilities::MPI::this_mpi_process(communicator))
+ , n_procs(Utilities::MPI::n_mpi_processes(communicator))
+ , communicator(communicator)
+ , have_ghost_indices(true)
+ {
+ types::global_dof_index prefix_sum = 0;
+
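+ // The exclusive prefix sum over all processes' local sizes yields, on
+ // process p, the number of elements owned by processes 0,...,p-1 and
+ // therefore the first global index owned by process p. On process 0
+ // (and when deal.II is configured without MPI), prefix_sum simply
+ // retains its initial value of zero.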
+#ifdef DEAL_II_WITH_MPI
+ const int ierr =
+ MPI_Exscan(&local_size,
+ &prefix_sum,
+ 1,
+ Utilities::MPI::internal::mpi_type_id(&prefix_sum),
+ MPI_SUM,
+ communicator);
+ AssertThrowMPI(ierr);
+#endif
+
+ local_range_data = {prefix_sum, prefix_sum + local_size};
+
+ locally_owned_range_data.add_range(prefix_sum, prefix_sum + local_size);
+ locally_owned_range_data.compress();
+ }
+
+
+
Partitioner::Partitioner(const IndexSet &locally_owned_indices,
const IndexSet &ghost_indices_in,
const MPI_Comm communicator_in)