From 5223a20fb081e5b0fd81281692278e6ba3efa210 Mon Sep 17 00:00:00 2001
From: Bruno Turcksin
Date: Wed, 15 Apr 2020 02:08:59 +0000
Subject: [PATCH] Overlap communication and computation

---
 .../deal.II/matrix_free/cuda_matrix_free.h    |  44 ++++-
 .../matrix_free/cuda_matrix_free.templates.h  | 186 ++++++++++++++----
 2 files changed, 182 insertions(+), 48 deletions(-)

diff --git a/include/deal.II/matrix_free/cuda_matrix_free.h b/include/deal.II/matrix_free/cuda_matrix_free.h
index 7b93e43eeb..7b64f5d673 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.h
@@ -106,18 +106,27 @@ namespace CUDAWrappers
       const ParallelizationScheme parallelization_scheme = parallel_in_elem,
       const UpdateFlags           mapping_update_flags   = update_gradients |
                                                            update_JxW_values,
-      const bool use_coloring = false,
-      const bool n_colors     = 1)
+      const bool         use_coloring                      = false,
+      const unsigned int n_colors                          = 1,
+      const bool         overlap_communication_computation = false)
       : parallelization_scheme(parallelization_scheme)
       , mapping_update_flags(mapping_update_flags)
       , use_coloring(use_coloring)
       , n_colors(n_colors)
-    {}
-
-    /**
-     * Number of colors created by the graph coloring algorithm.
-     */
-    unsigned int n_colors;
+      , overlap_communication_computation(overlap_communication_computation)
+    {
+#  ifndef DEAL_II_MPI_WITH_CUDA_SUPPORT
+      AssertThrow(
+        overlap_communication_computation == false,
+        ExcMessage(
+          "Overlapping communication and computation requires CUDA-aware "
+          "MPI."));
+#  endif
+      if (overlap_communication_computation)
+        AssertThrow(
+          use_coloring == false,
+          ExcMessage(
+            "Overlapping communication and computation is incompatible "
+            "with coloring. Only one of the two options can be enabled."));
+    }
 
     /**
      * Parallelization scheme used, parallelization over degrees of freedom or
      * over cells.
@@ -140,6 +149,17 @@ namespace CUDAWrappers
      * newer architectures.
      */
     bool use_coloring;
+
+    /**
+     * Number of colors created by the graph coloring algorithm.
+     */
+    unsigned int n_colors;
+
+    /**
+     * Overlap MPI communications with computation. This requires CUDA-aware
+     * MPI and use_coloring must be false.
+     */
+    bool overlap_communication_computation;
   };
 
   /**
@@ -442,6 +462,12 @@ namespace CUDAWrappers
      */
     bool use_coloring;
 
+    /**
+     * Overlap MPI communications with computation. This requires CUDA-aware
+     * MPI and use_coloring must be false.
+     */
+    bool overlap_communication_computation;
+
     /**
      * Total number of degrees of freedom.
      */
@@ -507,7 +533,7 @@ namespace CUDAWrappers
     types::global_dof_index *constrained_dofs;
 
     /**
-     * Mask deciding where constraints are set on a given cell.
+     * Mask deciding where constraints are set on a given cell.
      */
     std::vector<unsigned int> constraint_mask;

diff --git a/include/deal.II/matrix_free/cuda_matrix_free.templates.h b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
index 83a74c0c2c..2be0707491 100644
--- a/include/deal.II/matrix_free/cuda_matrix_free.templates.h
+++ b/include/deal.II/matrix_free/cuda_matrix_free.templates.h
@@ -38,6 +38,7 @@
 #  include <cuda_runtime_api.h>
 
+#  include <algorithm>
 #  include <functional>
 
@@ -277,7 +278,9 @@ namespace CUDAWrappers
     void
     ReinitHelper<dim, Number>::setup_color_arrays(const unsigned int n_colors)
     {
-      data->n_cells.resize(n_colors);
+      // We need at least three colors when we are using CUDA-aware MPI and
+      // overlapping communication and computation.
+      data->n_cells.resize(std::max(n_colors, 3U), 0);
       data->grid_dim.resize(n_colors);
       data->block_dim.resize(n_colors);
       data->local_to_global.resize(n_colors);
@@ -785,11 +788,12 @@ namespace CUDAWrappers
   MatrixFree<dim, Number>::evaluate_coefficients(Functor func) const
   {
     for (unsigned int i = 0; i < n_colors; ++i)
-      {
-        internal::evaluate_coeff<dim, Number, Functor>
-          <<<grid_dim[i], block_dim[i]>>>(func, get_data(i));
-        AssertCudaKernel();
-      }
+      if (n_cells[i] > 0)
+        {
+          internal::evaluate_coeff<dim, Number, Functor>
+            <<<grid_dim[i], block_dim[i]>>>(func, get_data(i));
+          AssertCudaKernel();
+        }
   }
 
@@ -840,6 +844,8 @@ namespace CUDAWrappers
       this->parallelization_scheme = additional_data.parallelization_scheme;
       this->use_coloring           = additional_data.use_coloring;
+      this->overlap_communication_computation =
+        additional_data.overlap_communication_computation;
 
       // TODO: only free if we actually need arrays of different length
       free();
@@ -929,11 +935,61 @@ namespace CUDAWrappers
         }
       else
         {
-          // If we are not using coloring, all the cells belong to the same
-          // color.
+          if (additional_data.overlap_communication_computation)
+            {
+              // We create one color (1) with the cells on the boundary of
+              // the local domain and two colors (0 and 2) with the interior
+              // cells.
+              graph.resize(3, std::vector<CellFilter>());
+
+              std::vector<bool> ghost_vertices(
+                dof_handler.get_triangulation().n_vertices(), false);
+
+              for (const auto cell :
+                   dof_handler.get_triangulation().active_cell_iterators())
+                if (cell->is_ghost())
+                  for (unsigned int i = 0;
+                       i < GeometryInfo<dim>::vertices_per_cell;
+                       ++i)
+                    ghost_vertices[cell->vertex_index(i)] = true;
+
+              std::vector<FilteredIterator<TriaActiveIterator<
+                DoFCellAccessor<DoFHandler<dim>, false>>>>
+                inner_cells;
+
+              for (auto cell = begin; cell != end; ++cell)
+                {
+                  bool ghost_vertex = false;
+
+                  for (unsigned int i = 0;
+                       i < GeometryInfo<dim>::vertices_per_cell;
+                       ++i)
+                    if (ghost_vertices[cell->vertex_index(i)])
+                      {
+                        ghost_vertex = true;
+                        break;
+                      }
+
+                  if (ghost_vertex)
+                    graph[1].emplace_back(cell);
+                  else
+                    inner_cells.emplace_back(cell);
+                }
+              // Split the interior cells evenly between colors 0 and 2.
+              for (unsigned int i = 0; i < inner_cells.size(); ++i)
+                if (i < inner_cells.size() / 2)
+                  graph[0].emplace_back(inner_cells[i]);
+                else
+                  graph[2].emplace_back(inner_cells[i]);
+            }
+          else
+            {
+              // If we are not using coloring, all the cells belong to the
+              // same color.
+              graph.resize(1, std::vector<CellFilter>());
+              for (auto cell = begin; cell != end; ++cell)
+                graph[0].emplace_back(cell);
+            }
         }
     }
   n_colors = graph.size();
@@ -994,8 +1050,8 @@ namespace CUDAWrappers
       unsigned int i_constraint = 0;
       for (unsigned int i = 0; i < n_local_dofs; ++i)
         {
-          // is_constrained uses a global dof id but constrained_dofs_host
-          // works on the local id
+          // is_constrained uses a global dof id but
+          // constrained_dofs_host works on the local id
           if (constraints.is_constrained(partitioner->local_to_global(i)))
             {
               constrained_dofs_host[i_constraint] = i;
@@ -1042,14 +1098,15 @@ namespace CUDAWrappers
     {
       // Execute the loop on the cells
       for (unsigned int i = 0; i < n_colors; ++i)
-        {
-          internal::apply_kernel_shmem<dim, Number, Functor>
-            <<<grid_dim[i], block_dim[i]>>>(func,
-                                            get_data(i),
-                                            src.get_values(),
-                                            dst.get_values());
-          AssertCudaKernel();
-        }
+        if (n_cells[i] > 0)
+          {
+            internal::apply_kernel_shmem<dim, Number, Functor>
+              <<<grid_dim[i], block_dim[i]>>>(func,
+                                              get_data(i),
+                                              src.get_values(),
+                                              dst.get_values());
+            AssertCudaKernel();
+          }
     }
 
@@ -1067,19 +1124,69 @@ namespace CUDAWrappers
       if (src.get_partitioner().get() == partitioner.get() &&
           dst.get_partitioner().get() == partitioner.get())
         {
-          src.update_ghost_values();
+          // This code is inspired by the code in TaskInfo::loop.
+          if (overlap_communication_computation)
+            {
+              src.update_ghost_values_start(0);
+              // In parallel, it is possible that some processors do not own
+              // any cells.
+              if (n_cells[0] > 0)
+                {
+                  internal::apply_kernel_shmem<dim, Number, Functor>
+                    <<<grid_dim[0], block_dim[0]>>>(func,
+                                                    get_data(0),
+                                                    src.get_values(),
+                                                    dst.get_values());
+                  AssertCudaKernel();
+                }
+              src.update_ghost_values_finish();
 
-          // Execute the loop on the cells
-          for (unsigned int i = 0; i < n_colors; ++i)
+              // In serial this color does not exist because there are no
+              // ghost cells.
+              if (n_cells[1] > 0)
+                {
+                  internal::apply_kernel_shmem<dim, Number, Functor>
+                    <<<grid_dim[1], block_dim[1]>>>(func,
+                                                    get_data(1),
+                                                    src.get_values(),
+                                                    dst.get_values());
+                  AssertCudaKernel();
+                  // We need a synchronization point because we don't want
+                  // CUDA-aware MPI to start the MPI communication until the
+                  // kernel is done.
+                  cudaDeviceSynchronize();
+                }
+
+              dst.compress_start(0, VectorOperation::add);
+              // When the mesh is coarse, it is possible that some processors
+              // do not own any cells.
+              if (n_cells[2] > 0)
+                {
+                  internal::apply_kernel_shmem<dim, Number, Functor>
+                    <<<grid_dim[2], block_dim[2]>>>(func,
+                                                    get_data(2),
+                                                    src.get_values(),
+                                                    dst.get_values());
+                  AssertCudaKernel();
+                }
+              dst.compress_finish(VectorOperation::add);
+            }
+          else
             {
-              internal::apply_kernel_shmem<dim, Number, Functor>
-                <<<grid_dim[i], block_dim[i]>>>(func,
-                                                get_data(i),
-                                                src.get_values(),
-                                                dst.get_values());
-              AssertCudaKernel();
+              src.update_ghost_values();
+
+              // Execute the loop on the cells
+              for (unsigned int i = 0; i < n_colors; ++i)
+                if (n_cells[i] > 0)
+                  {
+                    internal::apply_kernel_shmem<dim, Number, Functor>
+                      <<<grid_dim[i], block_dim[i]>>>(func,
+                                                      get_data(i),
+                                                      src.get_values(),
+                                                      dst.get_values());
+                    AssertCudaKernel();
+                  }
+              dst.compress(VectorOperation::add);
             }
-          dst.compress(VectorOperation::add);
           src.zero_out_ghosts();
         }
       else
@@ -1093,14 +1200,15 @@ namespace CUDAWrappers
 
           // Execute the loop on the cells
           for (unsigned int i = 0; i < n_colors; ++i)
-            {
-              internal::apply_kernel_shmem<dim, Number, Functor>
-                <<<grid_dim[i], block_dim[i]>>>(func,
-                                                get_data(i),
-                                                ghosted_src.get_values(),
-                                                ghosted_dst.get_values());
-              AssertCudaKernel();
-            }
+            if (n_cells[i] > 0)
+              {
+                internal::apply_kernel_shmem<dim, Number, Functor>
+                  <<<grid_dim[i], block_dim[i]>>>(func,
+                                                  get_data(i),
+                                                  ghosted_src.get_values(),
+                                                  ghosted_dst.get_values());
+                AssertCudaKernel();
+              }
 
           // Add the ghosted values
           ghosted_dst.compress(VectorOperation::add);
-- 
2.39.5
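
Note for reviewers: below is a minimal usage sketch of the new option, not part of the patch. The functor LocalOperator, the mesh/constraint objects, and the choice dim = 3, Number = double are illustrative assumptions; only the AdditionalData flags come from the patch itself. With overlap_communication_computation enabled, cell_loop launches color 0 (half of the interior cells) while the ghost exchange for src is in flight, runs color 1 (the cells touching a ghost vertex) after update_ghost_values_finish(), and overlaps color 2 with the compress() on dst.

    #include <deal.II/lac/affine_constraints.h>
    #include <deal.II/lac/la_parallel_vector.h>
    #include <deal.II/matrix_free/cuda_matrix_free.h>

    using namespace dealii;

    template <int dim, typename Number>
    void
    apply_operator(
      const Mapping<dim> &             mapping,
      const DoFHandler<dim> &          dof_handler,
      const AffineConstraints<Number> &constraints,
      const Quadrature<1> &            quad,
      LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst,
      const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>
        &src)
    {
      using MF = CUDAWrappers::MatrixFree<dim, Number>;

      // Overlap requires CUDA-aware MPI and is incompatible with coloring,
      // so use_coloring must stay false.
      typename MF::AdditionalData additional_data(
        MF::parallel_in_elem,
        update_gradients | update_JxW_values,
        /*use_coloring=*/false,
        /*n_colors=*/1,
        /*overlap_communication_computation=*/true);

      MF mf_data;
      mf_data.reinit(mapping, dof_handler, constraints, quad, additional_data);

      // LocalOperator is a user-provided (here hypothetical) functor with the
      // usual CUDAWrappers interface: a __device__ operator() that evaluates
      // one cell, plus the static n_dofs_1d/n_local_dofs/n_q_points members.
      LocalOperator<dim, Number> local_operator;
      mf_data.cell_loop(local_operator, src, dst);
    }

The three-color split only changes the schedule, not the result: dst receives the same contributions as in the single-color path, since compress(VectorOperation::add) is merely split into compress_start()/compress_finish() around the second interior color.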