* device_id = my_rank % n_devices;
* cudaSetDevice(device_id);
* </code>
+ *
+ * <h4>MPI-3 shared-memory support</h4>
+ *
+ * In Host mode, this class allows one to use MPI-3 shared-memory features
+ * by providing a separate MPI communicator that consists of processes on
+ * the same shared-memory domain. By calling
+ * <code> vector.shared_vector_data();
+ * </code>
+ * users obtain read-only access to both the locally-owned and the ghost
+ * values of the processes in the shared-memory communicator (@p comm_sm in
+ * reinit()).
+ *
+ * You can create a communicator consisting of all processes on
+ * the same shared-memory domain with:
+ * <code> MPI_Comm comm_sm;
+ * MPI_Comm_split_type(comm, MPI_COMM_TYPE_SHARED, rank, MPI_INFO_NULL,
+ * &comm_sm);
+ * </code>
+ *
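+ * As a minimal usage sketch (with @p partitioner an already set-up
+ * Utilities::MPI::Partitioner and @p comm_sm the communicator created
+ * above; the variable names are only illustrative), one could write:
+ * <code> LinearAlgebra::distributed::Vector<double> vec;
+ * vec.reinit(partitioner, comm_sm);
+ * vec.update_ghost_values();
+ * const auto &data_sm = vec.shared_vector_data();
+ * </code>
+ * Here, <code>data_sm[i]</code> is a read-only ArrayView of the values
+ * (locally-owned and ghost entries) stored by the i-th process of
+ * @p comm_sm.
+ *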
* @see CUDAWrappers
*/
template <typename Number, typename MemorySpace = MemorySpace::Host>
/**
* Create the vector based on the parallel partitioning described in @p
- * partitioner. The input argument is a shared pointer, which store the
- * partitioner data only once and share it between several vectors with
+ * partitioner. The input argument is a shared pointer, which stores the
+ * partitioner data only once and shares it between several vectors with
* the same layout.
*/
/**
- * Initialize the vector given to the parallel partitioning described in
- * @p partitioner. The input argument is a shared pointer, which store
- * the partitioner data only once and share it between several vectors
+ * Initialize the vector given the parallel partitioning described in
+ * @p partitioner. The input argument is a shared pointer, which stores
+ * the partitioner data only once and shares it between several vectors
* with the same layout.
+ *
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, gives users read-only access to both
+ * the locally-owned and the ghost values of the processes in the
+ * shared-memory communicator.
*/
void
reinit(
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner);
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
/**
- * Initialize vector with @p local_size locally-owned and @p ghost_size
- * ghost degrees of freedoms.
+ * Initialize the vector with @p local_size locally-owned and @p ghost_size
+ * ghost degrees of freedom.
*
+ * The optional argument @p comm_sm, which consists of processes on
+ * the same shared-memory domain, gives users read-only access to both
+ * the locally-owned and the ghost values of the processes in the
+ * shared-memory communicator.
+ *
* @note In the created underlying partitioner, the local index range is
* translated to global indices in an ascending and one-to-one fashion,
* i.e., the indices of process $p$ sit exactly between the indices of
void
reinit(const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm comm);
+ const MPI_Comm & comm,
+ const MPI_Comm & comm_sm = MPI_COMM_SELF);
/**
* Swap the contents of this vector and the other vector @p v. One could
void
set_ghost_state(const bool ghosted) const;
+ /**
+ * Get read-only ArrayViews of the locally stored values of all
+ * processes in the same shared-memory domain (one view per process).
+ */
+ const std::vector<ArrayView<const Number>> &
+ shared_vector_data() const;
+
//@}
/**
*/
mutable std::mutex mutex;
+ /**
+ * Communicator to be used for the shared-memory domain.
+ */
+ MPI_Comm comm_sm;
+
/**
* A helper function that clears the compress_requests and
- * update_ghost_values_requests field. Used in reinit() functions.
+ * update_ghost_values_requests fields. Used in reinit() functions.
* A helper function that is used to resize the val array.
*/
void
- resize_val(const size_type new_allocated_size);
+ resize_val(const size_type new_allocated_size,
+ const MPI_Comm &comm_sm = MPI_COMM_SELF);
// Make all other vector types friends.
template <typename Number2, typename MemorySpace2>
+ template <typename Number, typename MemorySpace>
+ const std::vector<ArrayView<const Number>> &
+ Vector<Number, MemorySpace>::shared_vector_data() const
+ {
+ return data.values_sm;
+ }
+
+
+
template <typename Number, typename MemorySpace>
inline Number
Vector<Number, MemorySpace>::operator()(const size_type global_index) const
const types::global_dof_index /*new_alloc_size*/,
types::global_dof_index & /*allocated_size*/,
::dealii::MemorySpace::MemorySpaceData<Number, MemorySpaceType>
- & /*data*/)
+ & /*data*/,
+ const MPI_Comm & /*comm_sm*/)
{}
static void
resize_val(const types::global_dof_index new_alloc_size,
types::global_dof_index & allocated_size,
::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
+ MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data,
+ const MPI_Comm &comm_shared)
{
- if (new_alloc_size > allocated_size)
+ if (comm_shared == MPI_COMM_SELF)
{
- Assert(((allocated_size > 0 && data.values != nullptr) ||
- data.values == nullptr),
- ExcInternalError());
+ if (new_alloc_size > allocated_size)
+ {
+ Assert(((allocated_size > 0 && data.values != nullptr) ||
+ data.values == nullptr),
+ ExcInternalError());
- Number *new_val;
- Utilities::System::posix_memalign(
- reinterpret_cast<void **>(&new_val),
- 64,
- sizeof(Number) * new_alloc_size);
- data.values = {new_val, [](Number *data) { std::free(data); }};
+ Number *new_val;
+ Utilities::System::posix_memalign(
+ reinterpret_cast<void **>(&new_val),
+ 64,
+ sizeof(Number) * new_alloc_size);
+ data.values = {new_val,
+ [](Number *data) { std::free(data); }};
- allocated_size = new_alloc_size;
+ allocated_size = new_alloc_size;
+ }
+ else if (new_alloc_size == 0)
+ {
+ data.values.reset();
+ allocated_size = 0;
+ }
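+ // without a shared-memory communicator, the only locally visible
+ // values are the ones owned (or ghosted) by this process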
+ data.values_sm = {
+ ArrayView<const Number>(data.values.get(), new_alloc_size)};
}
- else if (new_alloc_size == 0)
+ else
{
- data.values.reset();
- allocated_size = 0;
+#ifdef DEAL_II_WITH_MPI
+# if DEAL_II_MPI_VERSION_GTE(3, 0)
+ allocated_size = new_alloc_size;
+
+ const unsigned int size_sm =
+ Utilities::MPI::n_mpi_processes(comm_shared);
+ const unsigned int rank_sm =
+ Utilities::MPI::this_mpi_process(comm_shared);
+
+ MPI_Win mpi_window;
+ Number *data_this;
+
+ std::vector<Number *> others(size_sm);
+
+ MPI_Info info;
+ MPI_Info_create(&info);
+
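+ // hint that the shared segments do not have to be contiguous across
+ // processes, so the MPI implementation may place each segment in
+ // process-local memory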
+ MPI_Info_set(info, "alloc_shared_noncontig", "true");
+
+ const std::size_t align_by = 64;
+
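+ // allocate a few extra bytes (rounded to a multiple of sizeof(Number))
+ // so that the base pointer can later be shifted to a 64-byte boundary
+ // by std::align() without running out of space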
+ std::size_t s =
+ ((new_alloc_size * sizeof(Number) + align_by - 1) /
+ sizeof(Number)) *
+ sizeof(Number);
+
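+ // collectively allocate one shared-memory window over comm_shared;
+ // data_this receives the pointer to this process's own segment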
+ auto ierr = MPI_Win_allocate_shared(
+ s, sizeof(Number), info, comm_shared, &data_this, &mpi_window);
+ AssertThrowMPI(ierr);
+
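+ // query the base address of every process's segment within the
+ // shared-memory window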
+ for (unsigned int i = 0; i < size_sm; i++)
+ {
+ int disp_unit;
+ MPI_Aint ssize;
+ const auto ierr = MPI_Win_shared_query(
+ mpi_window, i, &ssize, &disp_unit, &others[i]);
+ AssertThrowMPI(ierr);
+ }
+
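+ // align the own base pointer to a 64-byte boundary; the resulting
+ // offsets are exchanged below so that the pointer to each process's
+ // segment can be shifted by that process's offset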
+ Number *ptr_unaligned = others[rank_sm];
+ Number *ptr_aligned = ptr_unaligned;
+
+ AssertThrow(std::align(align_by,
+ new_alloc_size * sizeof(Number),
+ reinterpret_cast<void *&>(ptr_aligned),
+ s) != nullptr,
+ ExcNotImplemented());
+
+ unsigned int n_align_local = ptr_aligned - ptr_unaligned;
+ std::vector<unsigned int> n_align_sm(size_sm);
+
+ ierr = MPI_Allgather(&n_align_local,
+ 1,
+ MPI_UNSIGNED,
+ n_align_sm.data(),
+ 1,
+ MPI_UNSIGNED,
+ comm_shared);
+ AssertThrowMPI(ierr);
+
+ for (unsigned int i = 0; i < size_sm; i++)
+ others[i] += n_align_sm[i];
+
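+ // exchange the local array sizes so that every process knows how many
+ // values the other segments hold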
+ std::vector<types::global_dof_index> new_alloc_sizes(size_sm);
+
+ ierr = MPI_Allgather(&new_alloc_size,
+ 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ new_alloc_sizes.data(),
+ 1,
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ comm_shared);
+ AssertThrowMPI(ierr);
+
+ data.values_sm.resize(size_sm);
+ for (unsigned int i = 0; i < size_sm; i++)
+ data.values_sm[i] =
+ ArrayView<const Number>(others[i], new_alloc_sizes[i]);
+
+ data.values = {ptr_aligned, [mpi_window](Number *) mutable {
+ // note: we create a copy of the window here;
+ // other approaches led to segmentation
+ // faults
+ const auto ierr = MPI_Win_free(&mpi_window);
+ AssertThrowMPI(ierr);
+ }};
+# else
+ AssertThrow(false,
+ ExcMessage(
+ "Sorry, this feature requires MPI 3.0 support"));
+# endif
+#else
+ Assert(false, ExcInternalError());
+#endif
}
}
resize_val(const types::global_dof_index new_alloc_size,
types::global_dof_index & allocated_size,
::dealii::MemorySpace::
- MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data)
+ MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data,
+ const MPI_Comm &comm_sm)
{
+ (void)comm_sm;
+
static_assert(
std::is_same<Number, float>::value ||
std::is_same<Number, double>::value,
template <typename Number, typename MemorySpaceType>
void
- Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size)
+ Vector<Number, MemorySpaceType>::resize_val(const size_type new_alloc_size,
+ const MPI_Comm &comm_sm)
{
internal::la_parallel_vector_templates_functions<
Number,
- MemorySpaceType>::resize_val(new_alloc_size, allocated_size, data);
+ MemorySpaceType>::resize_val(new_alloc_size,
+ allocated_size,
+ data,
+ comm_sm);
thread_loop_partitioner =
std::make_shared<::dealii::parallel::internal::TBBPartitioner>();
clear_mpi_requests();
// check whether we need to reallocate
- resize_val(size);
+ resize_val(size, comm_sm);
// delete previous content in import data
import_data.values.reset();
Vector<Number, MemorySpaceType>::reinit(
const types::global_dof_index local_size,
const types::global_dof_index ghost_size,
- const MPI_Comm comm)
+ const MPI_Comm & comm,
+ const MPI_Comm & comm_sm)
{
clear_mpi_requests();
+ this->comm_sm = comm_sm;
+
// check whether we need to reallocate
- resize_val(local_size + ghost_size);
+ resize_val(local_size + ghost_size, comm_sm);
// delete previous content in import data
import_data.values.reset();
clear_mpi_requests();
Assert(v.partitioner.get() != nullptr, ExcNotInitialized());
+ this->comm_sm = v.comm_sm;
+
// check whether the partitioners are
// different (check only if the are allocated
// differently, not if the actual data is
partitioner = v.partitioner;
const size_type new_allocated_size =
partitioner->local_size() + partitioner->n_ghost_indices();
- resize_val(new_allocated_size);
+ resize_val(new_allocated_size, this->comm_sm);
}
if (omit_zeroing_entries == false)
template <typename Number, typename MemorySpaceType>
void
Vector<Number, MemorySpaceType>::reinit(
- const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in)
+ const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_in,
+ const MPI_Comm & comm_sm)
{
clear_mpi_requests();
partitioner = partitioner_in;
+ this->comm_sm = comm_sm;
+
// set vector size and allocate memory
const size_type new_allocated_size =
partitioner->local_size() + partitioner->n_ghost_indices();
- resize_val(new_allocated_size);
+ resize_val(new_allocated_size, comm_sm);
// initialize to zero
*this = Number();
Vector<Number, MemorySpaceType>::Vector()
: partitioner(std::make_shared<Utilities::MPI::Partitioner>())
, allocated_size(0)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(0);
}
: Subscriptor()
, allocated_size(0)
, vector_is_ghosted(false)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(v, true);
const MPI_Comm communicator)
: allocated_size(0)
, vector_is_ghosted(false)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(local_range, ghost_indices, communicator);
}
const MPI_Comm communicator)
: allocated_size(0)
, vector_is_ghosted(false)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(local_range, communicator);
}
Vector<Number, MemorySpaceType>::Vector(const size_type size)
: allocated_size(0)
, vector_is_ghosted(false)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(size, false);
}
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner)
: allocated_size(0)
, vector_is_ghosted(false)
+ , comm_sm(MPI_COMM_SELF)
{
reinit(partitioner);
}
// the same local range but different ghost layout
bool must_update_ghost_values = c.vector_is_ghosted;
+ this->comm_sm = c.comm_sm;
+
// check whether the two vectors use the same parallel partitioner. if
// not, check if all local ranges are the same (that way, we can
// exchange data between different parallel layouts). One variant which