From: Peter Munch
Date: Fri, 23 Oct 2020 11:58:33 +0000 (+0200)
Subject: MatrixFree: add DoFInfo::dof_indices_contiguous_sm
X-Git-Tag: v9.3.0-rc1~943^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=ebd81677f95597493449c6208de461ea6172a987;p=dealii.git

MatrixFree: add DoFInfo::dof_indices_contiguous_sm
---

diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h
index 551d6fe4e6..c51c6f2072 100644
--- a/include/deal.II/matrix_free/dof_info.h
+++ b/include/deal.II/matrix_free/dof_info.h
@@ -242,6 +242,16 @@ namespace internal
         const std::vector<FaceToCellTopology<1>> &ghosted_faces,
         const bool                                fill_cell_centric);
 
+      /**
+       * Given @p cell_indices_contiguous_sm, which contains for the cells
+       * of the interior and exterior macro faces as well as for the macro
+       * cells themselves the pair of shared-memory rank and local cell
+       * index, compute dof_indices_contiguous_sm.
+       */
+      void
+      compute_shared_memory_contiguous_indices(
+        std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+          &cell_indices_contiguous_sm);
+
       /**
        * Compute a renumbering of the degrees of freedom to improve the data
        * access patterns for this class that can be utilized by the categories
@@ -488,6 +498,17 @@ namespace internal
        */
       std::array<std::vector<unsigned int>, 3> dof_indices_contiguous;
 
+      /**
+       * The same as above but for shared-memory usage. The first value of
+       * each pair identifies the owning process and the second the index
+       * within that process's locally-owned data.
+       *
+       * @note This data structure is only set up if all entries in
+       * index_storage_variants[2] are IndexStorageVariants::contiguous.
+       */
+      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+        dof_indices_contiguous_sm;
+
       /**
        * Compressed index storage for faster access than through @p
        * dof_indices used according to the description in IndexStorageVariants.
diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h
index 7d7c189904..d436118394 100644
--- a/include/deal.II/matrix_free/dof_info.templates.h
+++ b/include/deal.II/matrix_free/dof_info.templates.h
@@ -1483,6 +1483,33 @@ namespace internal
 
 
 
+    void
+    DoFInfo::compute_shared_memory_contiguous_indices(
+      std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+        &cell_indices_contiguous_sm)
+    {
+      AssertDimension(dofs_per_cell.size(), 1);
+
+      for (unsigned int i = 0; i < 3; ++i)
+        {
+          dof_indices_contiguous_sm[i].resize(
+            cell_indices_contiguous_sm[i].size());
+
+          for (unsigned int j = 0; j < cell_indices_contiguous_sm[i].size();
+               ++j)
+            if (cell_indices_contiguous_sm[i][j].first !=
+                numbers::invalid_unsigned_int)
+              dof_indices_contiguous_sm[i][j] = {
+                cell_indices_contiguous_sm[i][j].first,
+                cell_indices_contiguous_sm[i][j].second * dofs_per_cell[0]};
+            else
+              dof_indices_contiguous_sm[i][j] = {numbers::invalid_unsigned_int,
+                                                 numbers::invalid_unsigned_int};
+        }
+    }
+
+
+
     template <int length>
     void
     DoFInfo::compute_vector_zero_access_pattern(
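To illustrate the mapping computed by compute_shared_memory_contiguous_indices above, a small sketch with made-up numbers (not part of the patch): assuming dofs_per_cell[0] == 4, a cell stored at position 7 in the locally-owned data of shared-memory rank 2 is translated into the offset of its first degree of freedom:

    // hypothetical entries for some index i, assuming dofs_per_cell[0] == 4
    cell_indices_contiguous_sm[2][i] = {2, 7};  // (sm rank, local cell index)
    dof_indices_contiguous_sm[2][i]  = {2, 28}; // (sm rank, 7 * 4 = first dof)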
diff --git a/include/deal.II/matrix_free/matrix_free.h b/include/deal.II/matrix_free/matrix_free.h
index 9240fa77b6..f7433f60e6 100644
--- a/include/deal.II/matrix_free/matrix_free.h
+++ b/include/deal.II/matrix_free/matrix_free.h
@@ -243,6 +243,7 @@ public:
     , hold_all_faces_to_owned_cells(hold_all_faces_to_owned_cells)
     , cell_vectorization_categories_strict(
         cell_vectorization_categories_strict)
+    , communicator_sm(MPI_COMM_SELF)
   {}
 
   /**
@@ -268,6 +269,7 @@ public:
     , cell_vectorization_category(other.cell_vectorization_category)
     , cell_vectorization_categories_strict(
         other.cell_vectorization_categories_strict)
+    , communicator_sm(other.communicator_sm)
   {}
 
   // remove with level_mg_handler
@@ -297,6 +299,7 @@ public:
     cell_vectorization_category = other.cell_vectorization_category;
     cell_vectorization_categories_strict =
       other.cell_vectorization_categories_strict;
+    communicator_sm = other.communicator_sm;
     return *this;
   }
 
   /**
@@ -529,6 +532,11 @@ public:
      * them in a single vectorized array.
      */
     bool cell_vectorization_categories_strict;
+
+    /**
+     * Shared-memory MPI communicator. Default: MPI_COMM_SELF.
+     */
+    MPI_Comm communicator_sm;
   };
 
   /**
@@ -2170,7 +2178,7 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_dof_vector(
   const unsigned int comp) const
 {
   AssertIndexRange(comp, n_components());
-  vec.reinit(dof_info[comp].vector_partitioner);
+  vec.reinit(dof_info[comp].vector_partitioner, task_info.communicator_sm);
 }
diff --git a/include/deal.II/matrix_free/matrix_free.templates.h b/include/deal.II/matrix_free/matrix_free.templates.h
index 95a08c5d8e..6933925da7 100644
--- a/include/deal.II/matrix_free/matrix_free.templates.h
+++ b/include/deal.II/matrix_free/matrix_free.templates.h
@@ -21,6 +21,7 @@
 #include <...>
 #include <...>
+#include <deal.II/base/mpi_consensus_algorithms.h>
 #include <...>
 #include <...>
 #include <...>
@@ -337,12 +338,14 @@ MatrixFree<dim, Number, VectorizedArrayType>::internal_reinit(
         Utilities::MPI::this_mpi_process(task_info.communicator);
       task_info.n_procs =
         Utilities::MPI::n_mpi_processes(task_info.communicator);
+      task_info.communicator_sm = additional_data.communicator_sm;
     }
   else
     {
-      task_info.communicator = MPI_COMM_SELF;
-      task_info.my_pid       = 0;
-      task_info.n_procs      = 1;
+      task_info.communicator    = MPI_COMM_SELF;
+      task_info.communicator_sm = MPI_COMM_SELF;
+      task_info.my_pid          = 0;
+      task_info.n_procs         = 1;
     }
 
   initialize_dof_handlers(dof_handler, additional_data);
@@ -949,8 +952,6 @@ namespace internal
     {
       const bool strict_categories =
         cell_vectorization_categories_strict || hp_functionality_enabled;
-      // Use max dofs per cell in create_blocks_serial. This is necessary
-      // to use FE_Nothing.
       unsigned int max_dofs_per_cell = 0;
       for (const auto &info : dof_info)
         for (const auto &dofs : info.dofs_per_cell)
@@ -1474,6 +1475,231 @@ MatrixFree<dim, Number, VectorizedArrayType>::initialize_indices(
   for (auto &di : dof_info)
     di.compute_vector_zero_access_pattern(task_info, face_info.faces);
 
+#ifdef DEAL_II_WITH_MPI
+  {
+    // non-buffering mode is only supported if the indices of all cells are
+    // contiguous for all dof_info objects.
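+    // In that case, cell and face values of neighbors located on the same
+    // shared-memory node can be read directly from the neighbor's
+    // locally-owned vector data via the (rank, offset) pairs computed
+    // below; only the values of remote ghost cells still have to be
+    // exchanged via communication buffers.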
+    bool is_non_buffering_sm_supported = true;
+    for (const auto &di : dof_info)
+      for (const auto &v : di.index_storage_variants[2])
+        is_non_buffering_sm_supported &=
+          (v == internal::MatrixFreeFunctions::DoFInfo::IndexStorageVariants::
+                  contiguous);
+
+    is_non_buffering_sm_supported =
+      Utilities::MPI::min(static_cast<unsigned int>(
+                            is_non_buffering_sm_supported),
+                          task_info.communicator);
+
+    const MPI_Comm communicator_sm = this->task_info.communicator_sm;
+
+    if (is_non_buffering_sm_supported)
+      {
+        // TODO: move into Utilities::MPI
+        const auto procs_of_sm = [](const MPI_Comm &comm,
+                                    const MPI_Comm &comm_shared) {
+          if (Utilities::MPI::job_supports_mpi() == false)
+            return std::vector<unsigned int>{0};
+
+          const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+          const unsigned int size =
+            Utilities::MPI::n_mpi_processes(comm_shared);
+
+          std::vector<unsigned int> ranks(size);
+          MPI_Allgather(
+            &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_shared);
+
+          return ranks;
+        };
+
+        // gather the ranks of the shared-memory domain
+        const std::vector<unsigned int> sm_procs =
+          procs_of_sm(task_info.communicator, communicator_sm);
+
+        // get my rank within the shared-memory domain
+        const unsigned int my_sm_pid = std::distance(
+          sm_procs.begin(),
+          std::find(sm_procs.begin(), sm_procs.end(), task_info.my_pid));
+
+        // process any dof_info
+        const auto  di_index = 0;
+        const auto &di       = dof_info[di_index];
+
+        std::array<std::vector<std::pair<unsigned int, unsigned int>>, 3>
+          cell_indices_contiguous_sm;
+
+        // collect for each cell: sm rank + offset
+        cell_indices_contiguous_sm[2] = [&]() {
+          // result
+          std::vector<std::pair<unsigned int, unsigned int>> cells(
+            di.dof_indices_contiguous[2].size(),
+            {numbers::invalid_unsigned_int, numbers::invalid_unsigned_int});
+
+          // locally-owned cells
+          std::vector<std::pair<unsigned int, types::global_cell_index>>
+            cells_locally_owned;
+          cells_locally_owned.reserve(n_cell_batches() * n_lanes);
+
+          // shared-ghost cells (categorized according to sm-rank)
+          std::vector<
+            std::vector<std::pair<unsigned int, types::global_cell_index>>>
+            cells_shared_ghosts(sm_procs.size());
+
+          for (unsigned int cell = 0;
+               cell < n_cell_batches() + n_ghost_cell_batches();
+               ++cell)
+            for (unsigned int v = 0; v < this->n_components_filled(cell); ++v)
+              {
+                const unsigned int index = cell * n_lanes + v;
+
+                // determine global cell index
+                const unsigned int local_dof_index =
+                  di.dof_indices_contiguous[2][index];
+                const types::global_cell_index global_cell_index =
+                  di.vector_partitioner->local_to_global(local_dof_index) /
+                  this->get_dofs_per_cell(di_index);
+
+                // determine sm rank
+                unsigned int sm_rank = my_sm_pid;
+
+                if (cell < n_cell_batches())
+                  {
+                    // locally-owned cell
+                    cells_locally_owned.emplace_back(index, global_cell_index);
+                  }
+                else
+                  {
+                    // ghost cell
+                    const auto ptr = std::find(
+                      sm_procs.begin(),
+                      sm_procs.end(),
+                      additional_data.mg_level ==
+                          numbers::invalid_unsigned_int ?
+                        get_cell_iterator(cell, v, di_index)->subdomain_id() :
+                        get_cell_iterator(cell, v, di_index)
+                          ->level_subdomain_id());
+
+                    if (ptr != sm_procs.end())
+                      {
+                        // shared ghost cell:
+                        sm_rank = std::distance(sm_procs.begin(), ptr);
+                        cells_shared_ghosts[sm_rank].emplace_back(
+                          index, global_cell_index);
+                      }
+                    else
+                      {
+                        // remote ghost cell: nothing to do since the values
+                        // are communicated (i.e. locally available)
+                      }
+                  }
+
+                // write back result
+                cells[cell * n_lanes + v] = {
+                  sm_rank, local_dof_index / this->get_dofs_per_cell(di_index)};
+              }
+
+          std::sort(cells_locally_owned.begin(),
+                    cells_locally_owned.end(),
+                    [](const auto &a, const auto &b) {
+                      return a.second < b.second;
+                    });
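+          // both the locally-owned cells (sorted above) and the requested
+          // global indices (sorted on the sending side) are ordered by
+          // global cell index, so the answering process below can resolve
+          // all requests in a single forward sweep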
+          // get offsets of shared cells
+          Utilities::MPI::ConsensusAlgorithms::AnonymousProcess<
+            dealii::types::global_dof_index,
+            unsigned int>
+            process(
+              [&]() {
+                std::vector<unsigned int> targets;
+                for (unsigned int i = 0; i < cells_shared_ghosts.size(); ++i)
+                  if (cells_shared_ghosts[i].size() > 0)
+                    targets.push_back(i);
+                return targets;
+              },
+              [&](const auto other_rank, auto &send_buffer) {
+                auto &source = cells_shared_ghosts[other_rank];
+                std::sort(source.begin(),
+                          source.end(),
+                          [](const auto &a, const auto &b) {
+                            return a.second < b.second;
+                          });
+
+                send_buffer.reserve(source.size());
+                for (const auto &i : source)
+                  send_buffer.push_back(i.second);
+              },
+              [&](const auto &other_rank,
+                  const auto &buffer_recv,
+                  auto &      request_buffer) {
+                (void)other_rank;
+
+                request_buffer.reserve(buffer_recv.size());
+
+                unsigned int j = 0;
+
+                for (unsigned int i = 0; i < buffer_recv.size(); ++i)
+                  {
+                    for (; j < cells_locally_owned.size(); ++j)
+                      if (cells_locally_owned[j].second == buffer_recv[i])
+                        {
+                          request_buffer.push_back(
+                            cells_locally_owned[j].first);
+                          break;
+                        }
+                  }
+
+                AssertDimension(request_buffer.size(), buffer_recv.size());
+              },
+              [&](const auto other_rank, auto &recv_buffer) {
+                recv_buffer.resize(cells_shared_ghosts[other_rank].size());
+              },
+              [&](const auto other_rank, const auto &recv_buffer) {
+                for (unsigned int i = 0; i < recv_buffer.size(); ++i)
+                  {
+                    cells[cells_shared_ghosts[other_rank][i].first] = {
+                      other_rank, recv_buffer[i]};
+                  }
+              });
+
+          Utilities::MPI::ConsensusAlgorithms::Selector<
+            dealii::types::global_dof_index,
+            unsigned int>(process, communicator_sm)
+            .run();
+
+          return cells;
+        }();
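+        // the face data reuses the cell table: each interior/exterior face
+        // lane stores the index of its adjacent cell, whose (rank, offset)
+        // pair has just been computed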
+        // process faces (0: interior; 1: exterior)
+        for (unsigned int i = 0; i < 2; ++i)
+          {
+            cell_indices_contiguous_sm[i].assign(
+              di.dof_indices_contiguous[i].size(),
+              {numbers::invalid_unsigned_int, numbers::invalid_unsigned_int});
+
+            for (unsigned int face_index = 0;
+                 face_index < cell_indices_contiguous_sm[i].size();
+                 ++face_index)
+              {
+                const unsigned int cell_index =
+                  (i == 0) ? get_face_info(face_index / n_lanes)
+                               .cells_interior[face_index % n_lanes] :
+                             get_face_info(face_index / n_lanes)
+                               .cells_exterior[face_index % n_lanes];
+
+                if (cell_index == numbers::invalid_unsigned_int)
+                  continue;
+
+                cell_indices_contiguous_sm[i][face_index] =
+                  cell_indices_contiguous_sm[2][cell_index];
+              }
+          }
+
+        for (auto &di : dof_info)
+          di.compute_shared_memory_contiguous_indices(
+            cell_indices_contiguous_sm);
+      }
+  }
+#endif
+
   indices_are_initialized = true;
 }
diff --git a/include/deal.II/matrix_free/task_info.h b/include/deal.II/matrix_free/task_info.h
index e7fe06ab1e..322b863ddb 100644
--- a/include/deal.II/matrix_free/task_info.h
+++ b/include/deal.II/matrix_free/task_info.h
@@ -565,6 +565,11 @@ namespace internal
        */
       MPI_Comm communicator;
 
+      /**
+       * Shared-memory MPI communicator
+       */
+      MPI_Comm communicator_sm;
+
       /**
        * Rank of MPI process
        */
diff --git a/source/base/mpi_consensus_algorithms.cc b/source/base/mpi_consensus_algorithms.cc
index c135143a0a..1eebaa29ea 100644
--- a/source/base/mpi_consensus_algorithms.cc
+++ b/source/base/mpi_consensus_algorithms.cc
@@ -48,6 +48,17 @@ namespace Utilities
         std::pair<types::global_dof_index, types::global_dof_index>,
         unsigned int>;
 
+#ifdef DEAL_II_WITH_64BIT_INDICES
+      template class Process<types::global_dof_index, unsigned int>;
+
+      template class NBX<types::global_dof_index, unsigned int>;
+
+      template class PEX<types::global_dof_index, unsigned int>;
+
+      template class Selector<types::global_dof_index, unsigned int>;
+#endif
+
+
     } // namespace ConsensusAlgorithms
   } // end of namespace MPI
 } // end of namespace Utilities
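A minimal usage sketch of the new shared-memory path (an illustration, not part of the patch; the variable names are made up). MPI_Comm_split_type() with MPI_COMM_TYPE_SHARED is the standard MPI-3 way to obtain the node-local communicator expected by AdditionalData::communicator_sm:

    // communicator spanning the processes of one shared-memory node
    MPI_Comm comm_sm;
    MPI_Comm_split_type(
      MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0, MPI_INFO_NULL, &comm_sm);

    typename MatrixFree<dim, double>::AdditionalData additional_data;
    additional_data.communicator_sm = comm_sm; // default: MPI_COMM_SELF

    MatrixFree<dim, double> matrix_free;
    matrix_free.reinit(
      mapping, dof_handler, constraints, quadrature, additional_data);

    // vectors set up this way are partitioned with comm_sm, since
    // initialize_dof_vector() now forwards task_info.communicator_sm
    LinearAlgebra::distributed::Vector<double> vec;
    matrix_free.initialize_dof_vector(vec);

    // ... matrix-free loops ...
    MPI_Comm_free(&comm_sm);

Since communicator_sm defaults to MPI_COMM_SELF, existing code that does not set it keeps the buffered communication path unchanged.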