Partitioner(const MPI_Comm &comm,
const MPI_Comm &comm_sm,
const IndexSet &is_locally_owned,
- const IndexSet &is_locally_ghost)
- : comm(comm)
- , comm_sm(comm_sm)
- {
- reinit(is_locally_owned, is_locally_ghost);
- }
+ const IndexSet &is_locally_ghost);
const MPI_Comm &
- get_mpi_communicator() const override
- {
- return comm;
- }
+ get_mpi_communicator() const override;
const MPI_Comm &
- get_sm_mpi_communicator() const
- {
- return comm_sm;
- }
+ get_sm_mpi_communicator() const;
void
reinit(const IndexSet &is_locally_owned,
const IndexSet &is_locally_ghost,
- const MPI_Comm &communicator) override
- {
- Assert(false, ExcNotImplemented());
- (void)is_locally_owned;
- (void)is_locally_ghost;
- (void)communicator;
- }
+ const MPI_Comm &communicator) override;
void
- reinit(const IndexSet &is_locally_owned, const IndexSet &is_locally_ghost)
- {
- this->n_local_elements = is_locally_owned.n_elements();
- this->n_ghost_elements = is_locally_ghost.n_elements();
-
- this->n_mpi_processes_ = Utilities::MPI::n_mpi_processes(comm);
-
- std::vector<unsigned int> sm_ranks(
- Utilities::MPI::n_mpi_processes(comm_sm));
-
- const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
-
- MPI_Allgather(
- &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);
-
- std::vector<unsigned int> owning_ranks_of_ghosts(
- is_locally_ghost.n_elements());
-
- Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
- process(is_locally_owned,
- is_locally_ghost,
- comm,
- owning_ranks_of_ghosts,
- true);
-
- Utilities::MPI::ConsensusAlgorithms::Selector<
- std::pair<types::global_dof_index, types::global_dof_index>,
- unsigned int>
- consensus_algorithm(process, comm);
- consensus_algorithm.run();
-
- {
- std::map<unsigned int, std::vector<types::global_dof_index>>
- rank_to_local_indices;
-
- for (unsigned int i = 0; i < owning_ranks_of_ghosts.size(); i++)
- rank_to_local_indices[owning_ranks_of_ghosts[i]].push_back(i);
-
- unsigned int offset = 0;
-
-
- for (const auto &rank_and_local_indices : rank_to_local_indices)
- {
- const auto ptr = std::find(sm_ranks.begin(),
- sm_ranks.end(),
- rank_and_local_indices.first);
-
- if (ptr == sm_ranks.end())
- {
- // remote process
- recv_remote_ranks.push_back(rank_and_local_indices.first);
- recv_remote_ptr.push_back(
- recv_remote_ptr.back() +
- rank_and_local_indices.second.size());
- }
- else
- {
- // shared process
- recv_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
- recv_sm_ptr.push_back(recv_sm_ptr.back() +
- rank_and_local_indices.second.size());
- recv_sm_offset.push_back(is_locally_owned.n_elements() +
- offset);
- }
- offset += rank_and_local_indices.second.size();
- }
- recv_remote_req.resize(recv_remote_ranks.size());
- recv_sm_req.resize(recv_sm_ranks.size());
-
- recv_sm_indices.resize(recv_sm_ptr.back());
- }
-
- {
- const auto rank_to_global_indices = process.get_requesters();
-
- for (const auto &rank_and_global_indices : rank_to_global_indices)
- {
- const auto ptr = std::find(sm_ranks.begin(),
- sm_ranks.end(),
- rank_and_global_indices.first);
-
- if (ptr == sm_ranks.end())
- {
- // remote process
- send_remote_ranks.push_back(rank_and_global_indices.first);
-
- for (const auto &i : rank_and_global_indices.second)
- send_remote_indices.push_back(
- is_locally_owned.index_within_set(i));
-
- send_remote_ptr.push_back(send_remote_indices.size());
- }
- else
- {
- // shared process
- send_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
-
- for (const auto &i : rank_and_global_indices.second)
- send_sm_indices.push_back(
- is_locally_owned.index_within_set(i));
-
- send_sm_ptr.push_back(send_sm_indices.size());
- }
- }
- send_remote_req.resize(send_remote_ranks.size());
- send_sm_req.resize(send_sm_ranks.size());
- }
-
- {
- for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
- MPI_Isend(send_sm_indices.data() + send_sm_ptr[i],
- send_sm_ptr[i + 1] - send_sm_ptr[i],
- MPI_UNSIGNED,
- send_sm_ranks[i],
- 2,
- comm_sm,
- send_sm_req.data() + i);
-
- for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
- MPI_Irecv(recv_sm_indices.data() + recv_sm_ptr[i],
- recv_sm_ptr[i + 1] - recv_sm_ptr[i],
- MPI_UNSIGNED,
- recv_sm_ranks[i],
- 2,
- comm_sm,
- recv_sm_req.data() + i);
-
- MPI_Waitall(recv_sm_req.size(),
- recv_sm_req.data(),
- MPI_STATUSES_IGNORE);
- MPI_Waitall(send_sm_req.size(),
- send_sm_req.data(),
- MPI_STATUSES_IGNORE);
- }
-
- {
- send_sm_offset.resize(send_sm_ranks.size());
-
- for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
- MPI_Irecv(send_sm_offset.data() + i,
- 1,
- MPI_UNSIGNED,
- send_sm_ranks[i],
- 3,
- comm_sm,
- send_sm_req.data() + i);
-
- for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
- MPI_Isend(recv_sm_offset.data() + i,
- 1,
- MPI_UNSIGNED,
- recv_sm_ranks[i],
- 3,
- comm_sm,
- recv_sm_req.data() + i);
-
- MPI_Waitall(recv_sm_req.size(),
- recv_sm_req.data(),
- MPI_STATUSES_IGNORE);
- MPI_Waitall(send_sm_req.size(),
- send_sm_req.data(),
- MPI_STATUSES_IGNORE);
- }
- }
+ reinit(const IndexSet &is_locally_owned,
+ const IndexSet &is_locally_ghost);
template <typename Number>
void
update_ghost_values_start(
Number * data_this,
std::vector<Number *> & data_others,
dealii::AlignedVector<Number> &buffer,
- const unsigned int communication_channel = 0) const
- {
- (void)data_others;
-
- if (send_remote_ptr.back() != buffer.size())
- buffer.resize(send_remote_ptr.back());
-
- int dummy;
- for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
- MPI_Irecv(&dummy,
- 0,
- MPI_INT,
- recv_sm_ranks[i],
- communication_channel + 2,
- comm_sm,
- recv_sm_req.data() + i);
-
- for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
- MPI_Isend(&dummy,
- 0,
- MPI_INT,
- send_sm_ranks[i],
- communication_channel + 2,
- comm_sm,
- send_sm_req.data() + i);
-
- for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
- MPI_Irecv(data_this + recv_remote_ptr[i] + n_local_elements,
- recv_remote_ptr[i + 1] - recv_remote_ptr[i],
- Utilities::MPI::internal::mpi_type_id(data_this),
- recv_remote_ranks[i],
- communication_channel + 3,
- comm,
- recv_remote_req.data() + i);
-
- for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
- {
- for (unsigned int j = send_remote_ptr[i];
- j < send_remote_ptr[i + 1];
- j++)
- buffer[j] = data_this[send_remote_indices[j]];
-
- MPI_Isend(buffer.data() + send_remote_ptr[i],
- send_remote_ptr[i + 1] - send_remote_ptr[i],
- Utilities::MPI::internal::mpi_type_id(data_this),
- send_remote_ranks[i],
- communication_channel + 3,
- comm,
- send_remote_req.data() + i);
- }
- }
+ const unsigned int communication_channel = 0) const;
template <typename Number>
void
update_ghost_values_finish(Number * data_this,
std::vector<Number *> & data_others,
- dealii::AlignedVector<Number> &buffer) const
- {
- (void)buffer;
-
- for (unsigned int c = 0; c < recv_sm_ranks.size(); c++)
- {
- int i;
- MPI_Waitany(recv_sm_req.size(),
- recv_sm_req.data(),
- &i,
- MPI_STATUS_IGNORE);
-
- const Number *__restrict__ data_others_ptr =
- data_others[recv_sm_ranks[i]];
- Number *__restrict__ data_this_ptr = data_this;
-
- for (unsigned int j = recv_sm_ptr[i], k = recv_sm_offset[i];
- j < recv_sm_ptr[i + 1];
- j++, k++)
- data_this_ptr[k] = data_others_ptr[recv_sm_indices[j]];
- }
-
- MPI_Waitall(send_sm_req.size(),
- send_sm_req.data(),
- MPI_STATUSES_IGNORE);
- MPI_Waitall(send_remote_req.size(),
- send_remote_req.data(),
- MPI_STATUSES_IGNORE);
- MPI_Waitall(recv_remote_req.size(),
- recv_remote_req.data(),
- MPI_STATUSES_IGNORE);
- }
+ dealii::AlignedVector<Number> &buffer) const;
template <typename Number>
void
update_ghost_values(Number * data_this,
std::vector<Number *> & data_others,
- dealii::AlignedVector<Number> &buffer) const
- {
- update_ghost_values_start(data_this, data_others, buffer);
- update_ghost_values_finish(data_this, data_others, buffer);
- }
+ dealii::AlignedVector<Number> &buffer) const;
template <typename Number>
void
compress_start(Number * data_this,
std::vector<Number *> & data_others,
dealii::AlignedVector<Number> &buffer,
- const unsigned int communication_channel = 0) const
- {
- (void)data_others;
-
- if (send_remote_ptr.back() != buffer.size())
- buffer.resize(send_remote_ptr.back());
-
- int dummy;
- for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
- MPI_Isend(&dummy,
- 0,
- MPI_INT,
- recv_sm_ranks[i],
- communication_channel + 1,
- comm_sm,
- recv_sm_req.data() + i);
-
- for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
- MPI_Irecv(&dummy,
- 0,
- MPI_INT,
- send_sm_ranks[i],
- communication_channel + 1,
- comm_sm,
- send_sm_req.data() + i);
-
- for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
- MPI_Isend(data_this + recv_remote_ptr[i] + n_local_elements,
- recv_remote_ptr[i + 1] - recv_remote_ptr[i],
- Utilities::MPI::internal::mpi_type_id(data_this),
- recv_remote_ranks[i],
- communication_channel + 0,
- comm,
- recv_remote_req.data() + i);
-
- for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
- MPI_Irecv(buffer.data() + send_remote_ptr[i],
- send_remote_ptr[i + 1] - send_remote_ptr[i],
- Utilities::MPI::internal::mpi_type_id(data_this),
- send_remote_ranks[i],
- communication_channel + 0,
- comm,
- send_remote_req.data() + i);
- }
+ const unsigned int communication_channel = 0) const;
template <typename Number>
void
compress_finish(Number * data_this,
std::vector<Number *> & data_others,
- dealii::AlignedVector<Number> &buffer) const
- {
- for (unsigned int c = 0; c < send_sm_ranks.size(); c++)
- {
- int i;
- MPI_Waitany(send_sm_req.size(),
- send_sm_req.data(),
- &i,
- MPI_STATUS_IGNORE);
-
- Number *__restrict__ data_others_ptr =
- data_others[send_sm_ranks[i]];
- Number *__restrict__ data_this_ptr = data_this;
-
- for (unsigned int j = send_sm_ptr[i], k = send_sm_offset[i];
- j < send_sm_ptr[i + 1];
- j++, k++)
- {
- data_this_ptr[send_sm_indices[j]] += data_others_ptr[k];
- data_others_ptr[k] = 0.0;
- }
- }
-
- for (unsigned int c = 0; c < send_remote_ranks.size(); c++)
- {
- int i;
- MPI_Waitany(send_remote_req.size(),
- send_remote_req.data(),
- &i,
- MPI_STATUS_IGNORE);
-
- for (unsigned int j = send_remote_ptr[i];
- j < send_remote_ptr[i + 1];
- j++)
- data_this[send_remote_indices[j]] += buffer[j];
- }
-
- MPI_Waitall(recv_sm_req.size(),
- recv_sm_req.data(),
- MPI_STATUSES_IGNORE);
-
- MPI_Waitall(recv_remote_req.size(),
- recv_remote_req.data(),
- MPI_STATUSES_IGNORE);
- }
+ dealii::AlignedVector<Number> &buffer) const;
template <typename Number>
void
compress(Number * data_this,
std::vector<Number *> & data_others,
- dealii::AlignedVector<Number> &buffer) const
- {
- compress_start(data_this, data_others, buffer);
- compress_finish(data_this, data_others, buffer);
- }
+ dealii::AlignedVector<Number> &buffer) const;
std::size_t
- local_size() const
- {
- return n_local_elements;
- }
+ local_size() const;
std::size_t
- n_ghost_indices() const
- {
- return n_ghost_elements;
- }
+ n_ghost_indices() const;
std::size_t
- n_mpi_processes() const
- {
- return n_mpi_processes_;
- }
+ n_mpi_processes() const;
private:
const MPI_Comm &comm;
const unsigned int communication_channel,
::dealii::VectorOperation::values operation)
{
+ (void)operation;
Assert(::dealii::VectorOperation::values::add == operation,
ExcNotImplemented());
Assert(vector_is_ghosted == false,
Vector<Number, MemorySpaceType>::compress_finish(
::dealii::VectorOperation::values operation)
{
+ (void)operation;
Assert(::dealii::VectorOperation::values::add == operation,
ExcNotImplemented());
vector_is_ghosted = false;
la_vector.cc
la_parallel_vector.cc
la_parallel_block_vector.cc
+ la_sm_partitioner.cc
la_sm_vector.cc
matrix_lib.cc
matrix_out.cc
la_vector.inst.in
la_parallel_vector.inst.in
la_parallel_block_vector.inst.in
+ la_sm_partitioner.inst.in
la_sm_vector.inst.in
precondition_block.inst.in
relaxation_block.inst.in
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/la_sm_partitioner.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace LinearAlgebra
+{
+ namespace SharedMPI
+ {
+ Partitioner::Partitioner(const MPI_Comm &comm,
+ const MPI_Comm &comm_sm,
+ const IndexSet &is_locally_owned,
+ const IndexSet &is_locally_ghost)
+ : comm(comm)
+ , comm_sm(comm_sm)
+ {
+ reinit(is_locally_owned, is_locally_ghost);
+ }
+
+ const MPI_Comm &
+ Partitioner::get_mpi_communicator() const
+ {
+ return comm;
+ }
+
+ const MPI_Comm &
+ Partitioner::get_sm_mpi_communicator() const
+ {
+ return comm_sm;
+ }
+
+ void
+ Partitioner::reinit(const IndexSet &is_locally_owned,
+ const IndexSet &is_locally_ghost,
+ const MPI_Comm &communicator)
+ {
+ Assert(false, ExcNotImplemented());
+ (void)is_locally_owned;
+ (void)is_locally_ghost;
+ (void)communicator;
+ }
+
+ void
+ Partitioner::reinit(const IndexSet &is_locally_owned,
+ const IndexSet &is_locally_ghost)
+ {
+ this->n_local_elements = is_locally_owned.n_elements();
+ this->n_ghost_elements = is_locally_ghost.n_elements();
+
+ this->n_mpi_processes_ = Utilities::MPI::n_mpi_processes(comm);
+
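+ // gather the global ranks (within comm) of all processes of the shared-memory
+ // communicator; they are used below to classify communication partners as
+ // node-local or remote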
+ std::vector<unsigned int> sm_ranks(
+ Utilities::MPI::n_mpi_processes(comm_sm));
+
+ const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+
+ MPI_Allgather(
+ &rank, 1, MPI_UNSIGNED, sm_ranks.data(), 1, MPI_UNSIGNED, comm_sm);
+
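+ // determine the owning rank of each ghost index via a consensus algorithm;
+ // the last argument makes the payload record the requesting processes,
+ // which is needed to set up the send side below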
+ std::vector<unsigned int> owning_ranks_of_ghosts(
+ is_locally_ghost.n_elements());
+
+ Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
+ process(is_locally_owned,
+ is_locally_ghost,
+ comm,
+ owning_ranks_of_ghosts,
+ true);
+
+ Utilities::MPI::ConsensusAlgorithms::Selector<
+ std::pair<types::global_dof_index, types::global_dof_index>,
+ unsigned int>
+ consensus_algorithm(process, comm);
+ consensus_algorithm.run();
+
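+ // set up the receive side: group the ghost indices by owning rank and
+ // split the owners into remote ranks (served by MPI messages) and
+ // shared-memory ranks (served by direct reads from the neighbor's array)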
+ {
+ std::map<unsigned int, std::vector<types::global_dof_index>>
+ rank_to_local_indices;
+
+ for (unsigned int i = 0; i < owning_ranks_of_ghosts.size(); i++)
+ rank_to_local_indices[owning_ranks_of_ghosts[i]].push_back(i);
+
+ unsigned int offset = 0;
+
+ for (const auto &rank_and_local_indices : rank_to_local_indices)
+ {
+ const auto ptr = std::find(sm_ranks.begin(),
+ sm_ranks.end(),
+ rank_and_local_indices.first);
+
+ if (ptr == sm_ranks.end())
+ {
+ // remote process
+ recv_remote_ranks.push_back(rank_and_local_indices.first);
+ recv_remote_ptr.push_back(recv_remote_ptr.back() +
+ rank_and_local_indices.second.size());
+ }
+ else
+ {
+ // shared process
+ recv_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+ recv_sm_ptr.push_back(recv_sm_ptr.back() +
+ rank_and_local_indices.second.size());
+ recv_sm_offset.push_back(is_locally_owned.n_elements() +
+ offset);
+ }
+ offset += rank_and_local_indices.second.size();
+ }
+ recv_remote_req.resize(recv_remote_ranks.size());
+ recv_sm_req.resize(recv_sm_ranks.size());
+
+ recv_sm_indices.resize(recv_sm_ptr.back());
+ }
+
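+ // set up the send side from the recorded requesters; indices are stored
+ // as positions within the locally owned index set, again separated into
+ // remote and shared-memory partners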
+ {
+ const auto rank_to_global_indices = process.get_requesters();
+
+ for (const auto &rank_and_global_indices : rank_to_global_indices)
+ {
+ const auto ptr = std::find(sm_ranks.begin(),
+ sm_ranks.end(),
+ rank_and_global_indices.first);
+
+ if (ptr == sm_ranks.end())
+ {
+ // remote process
+ send_remote_ranks.push_back(rank_and_global_indices.first);
+
+ for (const auto &i : rank_and_global_indices.second)
+ send_remote_indices.push_back(
+ is_locally_owned.index_within_set(i));
+
+ send_remote_ptr.push_back(send_remote_indices.size());
+ }
+ else
+ {
+ // shared process
+ send_sm_ranks.push_back(std::distance(sm_ranks.begin(), ptr));
+
+ for (const auto &i : rank_and_global_indices.second)
+ send_sm_indices.push_back(
+ is_locally_owned.index_within_set(i));
+
+ send_sm_ptr.push_back(send_sm_indices.size());
+ }
+ }
+ send_remote_req.resize(send_remote_ranks.size());
+ send_sm_req.resize(send_sm_ranks.size());
+ }
+
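+ // tell each shared-memory partner at which local positions the entries it
+ // requested are stored, so that it can read them directly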
+ {
+ for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+ MPI_Isend(send_sm_indices.data() + send_sm_ptr[i],
+ send_sm_ptr[i + 1] - send_sm_ptr[i],
+ MPI_UNSIGNED,
+ send_sm_ranks[i],
+ 2,
+ comm_sm,
+ send_sm_req.data() + i);
+
+ for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+ MPI_Irecv(recv_sm_indices.data() + recv_sm_ptr[i],
+ recv_sm_ptr[i + 1] - recv_sm_ptr[i],
+ MPI_UNSIGNED,
+ recv_sm_ranks[i],
+ 2,
+ comm_sm,
+ recv_sm_req.data() + i);
+
+ MPI_Waitall(recv_sm_req.size(),
+ recv_sm_req.data(),
+ MPI_STATUSES_IGNORE);
+ MPI_Waitall(send_sm_req.size(),
+ send_sm_req.data(),
+ MPI_STATUSES_IGNORE);
+ }
+
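+ // exchange the ghost-block offsets: each process tells the owner at which
+ // position of the local array the owner's entries are stored as ghosts;
+ // compress() uses this information to read and reset these entries directly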
+ {
+ send_sm_offset.resize(send_sm_ranks.size());
+
+ for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+ MPI_Irecv(send_sm_offset.data() + i,
+ 1,
+ MPI_UNSIGNED,
+ send_sm_ranks[i],
+ 3,
+ comm_sm,
+ send_sm_req.data() + i);
+
+ for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+ MPI_Isend(recv_sm_offset.data() + i,
+ 1,
+ MPI_UNSIGNED,
+ recv_sm_ranks[i],
+ 3,
+ comm_sm,
+ recv_sm_req.data() + i);
+
+ MPI_Waitall(recv_sm_req.size(),
+ recv_sm_req.data(),
+ MPI_STATUSES_IGNORE);
+ MPI_Waitall(send_sm_req.size(),
+ send_sm_req.data(),
+ MPI_STATUSES_IGNORE);
+ }
+ }
+
+ template <typename Number>
+ void
+ Partitioner::update_ghost_values_start(
+ Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer,
+ const unsigned int communication_channel) const
+ {
+ (void)data_others;
+
+ if (send_remote_ptr.back() != buffer.size())
+ buffer.resize(send_remote_ptr.back());
+
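+ // zero-sized messages on the shared-memory communicator act as
+ // notifications: once the receive from an owning neighbor completes, its
+ // array may be read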
+ int dummy;
+ for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+ MPI_Irecv(&dummy,
+ 0,
+ MPI_INT,
+ recv_sm_ranks[i],
+ communication_channel + 2,
+ comm_sm,
+ recv_sm_req.data() + i);
+
+ for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+ MPI_Isend(&dummy,
+ 0,
+ MPI_INT,
+ send_sm_ranks[i],
+ communication_channel + 2,
+ comm_sm,
+ send_sm_req.data() + i);
+
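+ // ghost entries owned by remote processes are received directly into the
+ // ghost range of this vector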
+ for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+ MPI_Irecv(data_this + recv_remote_ptr[i] + n_local_elements,
+ recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(data_this),
+ recv_remote_ranks[i],
+ communication_channel + 3,
+ comm,
+ recv_remote_req.data() + i);
+
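+ // pack the requested locally owned entries into the buffer and send them
+ // to the remote processes that ghost them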
+ for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+ {
+ for (unsigned int j = send_remote_ptr[i]; j < send_remote_ptr[i + 1];
+ j++)
+ buffer[j] = data_this[send_remote_indices[j]];
+
+ MPI_Isend(buffer.data() + send_remote_ptr[i],
+ send_remote_ptr[i + 1] - send_remote_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(data_this),
+ send_remote_ranks[i],
+ communication_channel + 3,
+ comm,
+ send_remote_req.data() + i);
+ }
+ }
+
+ template <typename Number>
+ void
+ Partitioner::update_ghost_values_finish(
+ Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer) const
+ {
+ (void)buffer;
+
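+ // as soon as a shared-memory neighbor has signaled that its data is
+ // ready, copy the requested entries directly from its array into the
+ // local ghost range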
+ for (unsigned int c = 0; c < recv_sm_ranks.size(); c++)
+ {
+ int i;
+ MPI_Waitany(recv_sm_req.size(),
+ recv_sm_req.data(),
+ &i,
+ MPI_STATUS_IGNORE);
+
+ const Number *__restrict__ data_others_ptr =
+ data_others[recv_sm_ranks[i]];
+ Number *__restrict__ data_this_ptr = data_this;
+
+ for (unsigned int j = recv_sm_ptr[i], k = recv_sm_offset[i];
+ j < recv_sm_ptr[i + 1];
+ j++, k++)
+ data_this_ptr[k] = data_others_ptr[recv_sm_indices[j]];
+ }
+
+ MPI_Waitall(send_sm_req.size(), send_sm_req.data(), MPI_STATUSES_IGNORE);
+ MPI_Waitall(send_remote_req.size(),
+ send_remote_req.data(),
+ MPI_STATUSES_IGNORE);
+ MPI_Waitall(recv_remote_req.size(),
+ recv_remote_req.data(),
+ MPI_STATUSES_IGNORE);
+ }
+
+ template <typename Number>
+ void
+ Partitioner::update_ghost_values(
+ Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer) const
+ {
+ update_ghost_values_start(data_this, data_others, buffer);
+ update_ghost_values_finish(data_this, data_others, buffer);
+ }
+
+ template <typename Number>
+ void
+ Partitioner::compress_start(Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer,
+ const unsigned int communication_channel) const
+ {
+ (void)data_others;
+
+ if (send_remote_ptr.back() != buffer.size())
+ buffer.resize(send_remote_ptr.back());
+
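+ // compared to update_ghost_values_start(), the roles of the zero-sized
+ // notification messages are reversed: the owners of the local ghost
+ // entries are informed that these entries may now be read for accumulation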
+ int dummy;
+ for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+ MPI_Isend(&dummy,
+ 0,
+ MPI_INT,
+ recv_sm_ranks[i],
+ communication_channel + 1,
+ comm_sm,
+ recv_sm_req.data() + i);
+
+ for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+ MPI_Irecv(&dummy,
+ 0,
+ MPI_INT,
+ send_sm_ranks[i],
+ communication_channel + 1,
+ comm_sm,
+ send_sm_req.data() + i);
+
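+ // ghost entries owned by remote processes are sent back to their owners
+ // straight out of the ghost range, while the contributions for locally
+ // owned entries are received into the buffer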
+ for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+ MPI_Isend(data_this + recv_remote_ptr[i] + n_local_elements,
+ recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(data_this),
+ recv_remote_ranks[i],
+ communication_channel + 0,
+ comm,
+ recv_remote_req.data() + i);
+
+ for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+ MPI_Irecv(buffer.data() + send_remote_ptr[i],
+ send_remote_ptr[i + 1] - send_remote_ptr[i],
+ Utilities::MPI::internal::mpi_type_id(data_this),
+ send_remote_ranks[i],
+ communication_channel + 0,
+ comm,
+ send_remote_req.data() + i);
+ }
+
+ template <typename Number>
+ void
+ Partitioner::compress_finish(Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer) const
+ {
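+ // accumulate contributions: first from shared-memory partners, whose
+ // ghost regions are read and reset directly, then from remote partners
+ // via the receive buffer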
+ for (unsigned int c = 0; c < send_sm_ranks.size(); c++)
+ {
+ int i;
+ MPI_Waitany(send_sm_req.size(),
+ send_sm_req.data(),
+ &i,
+ MPI_STATUS_IGNORE);
+
+ Number *__restrict__ data_others_ptr = data_others[send_sm_ranks[i]];
+ Number *__restrict__ data_this_ptr = data_this;
+
+ for (unsigned int j = send_sm_ptr[i], k = send_sm_offset[i];
+ j < send_sm_ptr[i + 1];
+ j++, k++)
+ {
+ data_this_ptr[send_sm_indices[j]] += data_others_ptr[k];
+ data_others_ptr[k] = 0.0;
+ }
+ }
+
+ for (unsigned int c = 0; c < send_remote_ranks.size(); c++)
+ {
+ int i;
+ MPI_Waitany(send_remote_req.size(),
+ send_remote_req.data(),
+ &i,
+ MPI_STATUS_IGNORE);
+
+ for (unsigned int j = send_remote_ptr[i]; j < send_remote_ptr[i + 1];
+ j++)
+ data_this[send_remote_indices[j]] += buffer[j];
+ }
+
+ MPI_Waitall(recv_sm_req.size(), recv_sm_req.data(), MPI_STATUSES_IGNORE);
+
+ MPI_Waitall(recv_remote_req.size(),
+ recv_remote_req.data(),
+ MPI_STATUSES_IGNORE);
+ }
+
+ template <typename Number>
+ void
+ Partitioner::compress(Number * data_this,
+ std::vector<Number *> & data_others,
+ dealii::AlignedVector<Number> &buffer) const
+ {
+ compress_start(data_this, data_others, buffer);
+ compress_finish(data_this, data_others, buffer);
+ }
+
+ std::size_t
+ Partitioner::local_size() const
+ {
+ return n_local_elements;
+ }
+
+ std::size_t
+ Partitioner::n_ghost_indices() const
+ {
+ return n_ghost_elements;
+ }
+
+ std::size_t
+ Partitioner::n_mpi_processes() const
+ {
+ return n_mpi_processes_;
+ }
+
+ } // end of namespace SharedMPI
+} // end of namespace LinearAlgebra
+
+#include "la_sm_partitioner.inst"
+
+
+DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (SCALAR : REAL_SCALARS)
+ {
+ namespace LinearAlgebra
+ \{
+ namespace SharedMPI
+ \{
+ template void
+ Partitioner::update_ghost_values_start(
+ SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer,
+ const unsigned int communication_channel) const;
+
+ template void
+ Partitioner::update_ghost_values_finish(
+ SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer) const;
+
+ template void
+ Partitioner::update_ghost_values(
+ SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer) const;
+
+ template void
+ Partitioner::compress_start(
+ SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer,
+ const unsigned int communication_channel) const;
+
+ template void
+ Partitioner::compress_finish(
+ SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer) const;
+
+ template void
+ Partitioner::compress(SCALAR * data_this,
+ std::vector<SCALAR *> & data_others,
+ dealii::AlignedVector<SCALAR> &buffer) const;
+ \}
+ \}
+ }