From 11f2a1cc33ccd861d42c1ce50bcc2e1a3fa71589 Mon Sep 17 00:00:00 2001
From: Peter Munch
Date: Sat, 21 Mar 2020 20:20:55 +0100
Subject: [PATCH] Add update_ghost_values and compress

---
 include/deal.II/lac/la_sm_partitioner.h      | 231 +++++++++++++++++--
 include/deal.II/lac/la_sm_vector.h           |   5 +-
 include/deal.II/lac/la_sm_vector.templates.h |  24 +-
 include/deal.II/matrix_free/matrix_free.h    |  45 ++++
 tests/sm/vector_sm_01.cc                     |  10 +-
 5 files changed, 280 insertions(+), 35 deletions(-)

diff --git a/include/deal.II/lac/la_sm_partitioner.h b/include/deal.II/lac/la_sm_partitioner.h
index aac7cc57e7..2ad2dcea57 100644
--- a/include/deal.II/lac/la_sm_partitioner.h
+++ b/include/deal.II/lac/la_sm_partitioner.h
@@ -232,34 +232,227 @@ namespace LinearAlgebra
         buffer.resize(send_remote_ptr.back());
       }
 
+      void
+      update_ghost_values_start(
+        Number *               data_this,
+        std::vector<Number *> &data_others,
+        const unsigned int     communication_channel = 0) const
+      {
+        (void)data_others;
+
+        int dummy;
+        for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    recv_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    recv_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    send_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    send_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+          MPI_Irecv(data_this + recv_remote_ptr[i] + n_local_elements,
+                    recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    recv_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    recv_remote_req.data() + i);
+
+        for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+          {
+            for (unsigned int j = send_remote_ptr[i];
+                 j < send_remote_ptr[i + 1];
+                 j++)
+              buffer[j] = data_this[send_remote_indices[j]];
+
+            MPI_Isend(buffer.data() + send_remote_ptr[i],
+                      send_remote_ptr[i + 1] - send_remote_ptr[i],
+                      Utilities::MPI::internal::mpi_type_id(data_this),
+                      send_remote_ranks[i],
+                      communication_channel + 0,
+                      comm,
+                      send_remote_req.data() + i);
+          }
+      }
+
+      void
+      update_ghost_values_finish(Number *               data_this,
+                                 std::vector<Number *> &data_others) const
+      {
+        for (unsigned int c = 0; c < recv_sm_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(recv_sm_req.size(),
+                        recv_sm_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const Number *__restrict__ data_others_ptr =
+              data_others[recv_sm_ranks[i]];
+            Number *__restrict__ data_this_ptr = data_this;
+
+            for (unsigned int j = recv_sm_ptr[i], k = recv_sm_offset[i];
+                 j < recv_sm_ptr[i + 1];
+                 j++, k++)
+              data_this_ptr[k] = data_others_ptr[recv_sm_indices[j]];
+          }
+
+        MPI_Waitall(send_sm_req.size(),
+                    send_sm_req.data(),
+                    MPI_STATUSES_IGNORE);
+        MPI_Waitall(send_remote_req.size(),
+                    send_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+        MPI_Waitall(recv_remote_req.size(),
+                    recv_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+      }
+
+      void
+      update_ghost_values(Number *               data_this,
+                          std::vector<Number *> &data_others) const
+      {
+        update_ghost_values_start(data_this, data_others);
+        update_ghost_values_finish(data_this, data_others);
+      }
+
+      void
+      compress_start(Number *               data_this,
+                     std::vector<Number *> &data_others,
+                     const unsigned int     communication_channel = 0) const
+      {
+        (void)data_others;
+
+        int dummy;
+        for (unsigned int i = 0; i < recv_sm_ranks.size(); i++)
+          MPI_Isend(&dummy,
+                    0,
+                    MPI_INT,
+                    recv_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    recv_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < send_sm_ranks.size(); i++)
+          MPI_Irecv(&dummy,
+                    0,
+                    MPI_INT,
+                    send_sm_ranks[i],
+                    communication_channel + 1,
+                    comm_sm,
+                    send_sm_req.data() + i);
+
+        for (unsigned int i = 0; i < recv_remote_ranks.size(); i++)
+          MPI_Isend(data_this + recv_remote_ptr[i] + n_local_elements,
+                    recv_remote_ptr[i + 1] - recv_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    recv_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    recv_remote_req.data() + i);
+
+        for (unsigned int i = 0; i < send_remote_ranks.size(); i++)
+          MPI_Irecv(buffer.data() + send_remote_ptr[i],
+                    send_remote_ptr[i + 1] - send_remote_ptr[i],
+                    Utilities::MPI::internal::mpi_type_id(data_this),
+                    send_remote_ranks[i],
+                    communication_channel + 0,
+                    comm,
+                    send_remote_req.data() + i);
+      }
+
+      void
+      compress_finish(Number *               data_this,
+                      std::vector<Number *> &data_others) const
+      {
+        for (unsigned int c = 0; c < send_sm_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(send_sm_req.size(),
+                        send_sm_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            const Number *__restrict__ data_others_ptr =
+              data_others[send_sm_ranks[i]];
+            Number *__restrict__ data_this_ptr = data_this;
+
+            for (unsigned int j = send_sm_ptr[i], k = send_sm_offset[i];
+                 j < send_sm_ptr[i + 1];
+                 j++, k++)
+              {
+                data_this_ptr[send_sm_indices[j]] += data_others_ptr[k];
+              }
+          }
+
+        for (unsigned int c = 0; c < send_remote_ranks.size(); c++)
+          {
+            int i;
+            MPI_Waitany(send_remote_req.size(),
+                        send_remote_req.data(),
+                        &i,
+                        MPI_STATUS_IGNORE);
+
+            for (unsigned int j = send_remote_ptr[i];
+                 j < send_remote_ptr[i + 1];
+                 j++)
+              data_this[send_remote_indices[j]] += buffer[j];
+          }
+
+        MPI_Waitall(recv_sm_req.size(),
+                    recv_sm_req.data(),
+                    MPI_STATUSES_IGNORE);
+
+        MPI_Waitall(recv_remote_req.size(),
+                    recv_remote_req.data(),
+                    MPI_STATUSES_IGNORE);
+      }
+
+      void
+      compress(Number *data_this, std::vector<Number *> &data_others) const
+      {
+        compress_start(data_this, data_others);
+        compress_finish(data_this, data_others);
+      }
+
     private:
       const MPI_Comm &comm;
       const MPI_Comm &comm_sm;
 
       unsigned int n_local_elements;
 
-      AlignedVector<Number> buffer;
+      mutable AlignedVector<Number> buffer;
 
       std::vector<unsigned int> recv_remote_ranks;
       std::vector<unsigned int> recv_remote_ptr = {0};
-      std::vector<MPI_Request>  recv_remote_req;
-
-      std::vector<unsigned int> recv_sm_ranks;
-      std::vector<unsigned int> recv_sm_ptr = {0};
-      std::vector<MPI_Request>  recv_sm_req;
-      std::vector<unsigned int> recv_sm_indices;
-      std::vector<unsigned int> recv_sm_offset;
-
-      std::vector<unsigned int> send_remote_ranks;
-      std::vector<unsigned int> send_remote_ptr = {0};
-      std::vector<unsigned int> send_remote_indices;
-      std::vector<MPI_Request>  send_remote_req;
-
-      std::vector<unsigned int> send_sm_ranks;
-      std::vector<unsigned int> send_sm_ptr = {0};
-      std::vector<unsigned int> send_sm_indices;
-      std::vector<MPI_Request>  send_sm_req;
-      std::vector<unsigned int> send_sm_offset;
+      mutable std::vector<MPI_Request> recv_remote_req;
+
+      std::vector<unsigned int>        recv_sm_ranks;
+      std::vector<unsigned int>        recv_sm_ptr = {0};
+      mutable std::vector<MPI_Request> recv_sm_req;
+      std::vector<unsigned int>        recv_sm_indices;
+      std::vector<unsigned int>        recv_sm_offset;
+
+      std::vector<unsigned int>        send_remote_ranks;
+      std::vector<unsigned int>        send_remote_ptr = {0};
+      std::vector<unsigned int>        send_remote_indices;
+      mutable std::vector<MPI_Request> send_remote_req;
+
+      std::vector<unsigned int>        send_sm_ranks;
+      std::vector<unsigned int>        send_sm_ptr = {0};
+      std::vector<unsigned int>        send_sm_indices;
+      mutable std::vector<MPI_Request> send_sm_req;
+      std::vector<unsigned int>        send_sm_offset;
     };
 
   } // end of namespace SharedMPI
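Note on the exchange above: two tag channels are used deliberately. Off-node neighbors receive packed payload through `buffer` with tag `communication_channel + 0` on `comm`, while node-local neighbors never ship payload at all: `update_ghost_values_finish` reads their data directly out of the MPI shared-memory window, so the messages with tag `communication_channel + 1` on `comm_sm` are zero-byte handshakes that only signal that the sender's window segment is ready to be read. The `compress_*` pair runs the same channels with send and receive roles swapped and accumulates instead of copies. A minimal, self-contained sketch of the zero-byte handshake pattern follows; the cyclic neighbor choice is invented for illustration, and this is plain MPI, not deal.II code:

// Zero-byte handshake between neighbors: no payload moves through MPI,
// a matched empty message only signals readiness.
#include <mpi.h>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // illustrative neighbor choice: a cyclic left/right pairing
  const int left  = (rank + size - 1) % size;
  const int right = (rank + 1) % size;

  int         dummy = 0;
  MPI_Request requests[2];

  // count = 0: only the (source, tag) match matters, as in the patch above
  MPI_Irecv(&dummy, 0, MPI_INT, left, 1, MPI_COMM_WORLD, &requests[0]);
  MPI_Isend(&dummy, 0, MPI_INT, right, 1, MPI_COMM_WORLD, &requests[1]);
  MPI_Waitall(2, requests, MPI_STATUSES_IGNORE);

  // the left neighbor has announced readiness, so reading its shared
  // window segment would now be safe

  MPI_Finalize();
}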
diff --git a/include/deal.II/lac/la_sm_vector.h b/include/deal.II/lac/la_sm_vector.h
index 02c8508a29..79105114ee 100644
--- a/include/deal.II/lac/la_sm_vector.h
+++ b/include/deal.II/lac/la_sm_vector.h
@@ -98,6 +98,8 @@ namespace LinearAlgebra
         std::unique_ptr<Number[], std::function<void(Number *)>> values;
         MPI_Win *values_win = nullptr;
 
+        std::vector<Number *> others;
+
         std::unique_ptr<Number[], void (*)(Number *)> values_dev;
       };
@@ -539,8 +541,7 @@ namespace LinearAlgebra
     inline bool
     Vector<Number, MemorySpace>::has_ghost_elements() const
     {
-      Assert(false, ExcNotImplemented());
-      return false;
+      return vector_is_ghosted;
     }
diff --git a/include/deal.II/lac/la_sm_vector.templates.h b/include/deal.II/lac/la_sm_vector.templates.h
index 62826b57a5..91b9e9dcdc 100644
--- a/include/deal.II/lac/la_sm_vector.templates.h
+++ b/include/deal.II/lac/la_sm_vector.templates.h
@@ -56,12 +56,9 @@ namespace LinearAlgebra
 
           allocated_size = new_alloc_size;
 
-          // TODO
-          std::vector<Number *> data_others;
-
           data.values_win   = new MPI_Win;
           Number *data_this = (Number *)malloc(0);
-          data_others.resize(Utilities::MPI::n_mpi_processes(comm_shared));
+          data.others.resize(Utilities::MPI::n_mpi_processes(comm_shared));
 
           MPI_Info info;
@@ -82,11 +79,11 @@ namespace LinearAlgebra
               int      disp_unit;
               MPI_Aint ssize;
               MPI_Win_shared_query(
-                *data.values_win, i, &ssize, &disp_unit, &data_others[i]);
+                *data.values_win, i, &ssize, &disp_unit, &data.others[i]);
             }
 
           data.values = {
-            data_others[Utilities::MPI::this_mpi_process(comm_shared)],
+            data.others[Utilities::MPI::this_mpi_process(comm_shared)],
             [&data](Number *&) { MPI_Win_free(data.values_win); }};
         }
       };
@@ -380,9 +377,10 @@ namespace LinearAlgebra
       const unsigned int                communication_channel,
       ::dealii::VectorOperation::values operation)
     {
-      Assert(false, ExcNotImplemented());
-      (void)communication_channel;
       (void)operation;
+      partitioner_sm->compress_start(data.values.get(),
+                                     data.others,
+                                     communication_channel);
     }
 
@@ -392,8 +390,8 @@ namespace LinearAlgebra
     Vector<Number, MemorySpace>::compress_finish(
       ::dealii::VectorOperation::values operation)
     {
-      Assert(false, ExcNotImplemented());
       (void)operation;
+      partitioner_sm->compress_finish(data.values.get(), data.others);
     }
 
@@ -403,8 +401,9 @@ namespace LinearAlgebra
     Vector<Number, MemorySpace>::update_ghost_values_start(
      const unsigned int communication_channel) const
     {
-      Assert(false, ExcNotImplemented());
-      (void)communication_channel;
+      partitioner_sm->update_ghost_values_start(data.values.get(),
+                                                data.others,
+                                                communication_channel);
     }
 
@@ -413,7 +412,8 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpace>::update_ghost_values_finish() const
    {
-      Assert(false, ExcNotImplemented());
+      partitioner_sm->update_ghost_values_finish(data.values.get(),
+                                                 data.others);
     }
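The change in la_sm_vector.templates.h above promotes the function-local `data_others` to the member `data.others`, so the base pointers into every node-local rank's segment of the MPI-3 shared-memory window stay available for the partitioner calls added below. For readers unfamiliar with such windows, here is the allocate/query pattern this setup follows, as a self-contained sketch with an invented slice size, not the deal.II allocation path itself:

#include <mpi.h>
#include <vector>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  // all ranks that share memory, comparable to comm_sm in the patch
  MPI_Comm comm_sm;
  MPI_Comm_split_type(
    MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_sm);

  int size;
  MPI_Comm_size(comm_sm, &size);

  // every rank contributes one slice of a node-local shared window
  double *       ptr;
  MPI_Win        win;
  const MPI_Aint local_size = 1024 * sizeof(double);
  MPI_Win_allocate_shared(
    local_size, sizeof(double), MPI_INFO_NULL, comm_sm, &ptr, &win);

  // cache the base pointer of every rank's slice (cf. data.others);
  // the entry for this rank equals ptr
  std::vector<double *> others(size);
  for (int i = 0; i < size; ++i)
    {
      MPI_Aint ssize;
      int      disp_unit;
      MPI_Win_shared_query(win, i, &ssize, &disp_unit, &others[i]);
    }

  MPI_Win_free(&win);
  MPI_Comm_free(&comm_sm);
  MPI_Finalize();
}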
diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index 4f1f67e5d4..b4509d9c67 100644
--- a/include/deal.II/matrix_free/matrix_free.h
+++ b/include/deal.II/matrix_free/matrix_free.h
@@ -3765,6 +3765,51 @@ namespace internal
         }
     }
 
+    void
+    reset_ghost_values(
+      const LinearAlgebra::SharedMPI::Vector<Number> &vec) const
+    {
+      if (ghosts_were_set == true)
+        return;
+
+      if (vector_face_access ==
+            dealii::MatrixFree<dim, Number, VectorizedArrayType>::
+              DataAccessOnFaces::unspecified ||
+          vec.size() == 0)
+        vec.zero_out_ghosts();
+      else
+        {
+#  ifdef DEAL_II_WITH_MPI
+          AssertDimension(requests.size(), tmp_data.size());
+
+          const unsigned int mf_component = find_vector_in_mf(vec);
+          const Utilities::MPI::Partitioner &part =
+            get_partitioner(mf_component);
+          if (&part ==
+              matrix_free.get_dof_info(mf_component).vector_partitioner.get())
+            vec.zero_out_ghosts();
+          else if (part.n_ghost_indices() > 0)
+            {
+              for (std::vector<std::pair<unsigned int, unsigned int>>::
+                     const_iterator my_ghosts =
+                       part.ghost_indices_within_larger_ghost_set().begin();
+                   my_ghosts !=
+                   part.ghost_indices_within_larger_ghost_set().end();
+                   ++my_ghosts)
+                for (unsigned int j = my_ghosts->first; j < my_ghosts->second;
+                     j++)
+                  {
+                    const_cast<LinearAlgebra::SharedMPI::Vector<Number> &>(vec)
+                      .local_element(j + part.local_size()) = 0.;
+                  }
+            }
+
+          // let vector know that it's not ghosted anymore
+          vec.set_ghost_state(false);
+#  endif
+        }
+    }
+
+
     /**
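`reset_ghost_values` deliberately zeroes only the ghost entries that were actually imported: with a `DataAccessOnFaces` setting tighter than `unspecified`, the partitioner used for the loop can cover a subset of the vector's full ghost set, and `ghost_indices_within_larger_ghost_set()` describes that subset as half-open ranges relative to the start of the ghost region. The zeroing step in isolation, as a sketch with illustrative container names:

#include <utility>
#include <vector>

// Zero exactly the imported ghost entries; "values" holds the locally
// owned entries first, followed by the vector's ghost region.
void
zero_ghost_ranges(
  std::vector<double> &values,
  const unsigned int   local_size,
  const std::vector<std::pair<unsigned int, unsigned int>> &ranges)
{
  for (const auto &r : ranges)
    for (unsigned int j = r.first; j < r.second; ++j)
      values[j + local_size] = 0.;
}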
diff --git a/tests/sm/vector_sm_01.cc b/tests/sm/vector_sm_01.cc
index ec3622a90a..ed2ebca093 100644
--- a/tests/sm/vector_sm_01.cc
+++ b/tests/sm/vector_sm_01.cc
@@ -67,8 +67,14 @@ test(const int n_refinements, const int degree, const int group_size)
   MPI_Comm comm_sm;
   MPI_Comm_split(comm, rank / group_size, rank, &comm_sm);
 
-  LinearAlgebra::SharedMPI::Vector<Number> vec;
-  matrix_free.initialize_dof_vector(vec, comm_sm);
+  using VectorType = LinearAlgebra::SharedMPI::Vector<Number>;
+
+  LinearAlgebra::SharedMPI::Vector<Number> dst, src;
+  matrix_free.initialize_dof_vector(dst, comm_sm);
+  matrix_free.initialize_dof_vector(src, comm_sm);
+
+  matrix_free.template cell_loop<VectorType, VectorType>(
+    [](const auto &, auto &, const auto &, const auto) {}, dst, src);
 }
 
 int
-- 
2.39.5
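The reworked test drives an empty `cell_loop`, which is enough to smoke-test the new communication path end to end: MatrixFree updates the ghost values of `src` before the loop and compresses `dst` afterwards, so both new partitioner code paths run. For reference, the owner-side reduction that `compress_finish` performs for off-node contributions looks like the following in isolation; a sketch with invented names, not the deal.II implementation:

#include <vector>

// Add contributions received into a scratch buffer back into the owned
// entries they belong to; "indices" maps each buffer slot to its target
// owned entry, in the role of send_remote_indices in the partitioner.
void
accumulate_remote(std::vector<double> &            owned,
                  const std::vector<double> &      buffer,
                  const std::vector<unsigned int> &indices)
{
  for (unsigned int j = 0; j < buffer.size(); ++j)
    owned[indices[j]] += buffer[j];
}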