https://gitweb.dealii.org/ - dealii.git/commitdiff
Let Partitioner have a MemorySpace template parameter
author    Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
Wed, 31 Oct 2018 18:38:05 +0000 (19:38 +0100)
committer Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
Fri, 4 Jan 2019 13:26:47 +0000 (14:26 +0100)
include/deal.II/base/partitioner.h
include/deal.II/base/partitioner.templates.h
source/base/CMakeLists.txt
source/base/partitioner.cc
source/base/partitioner.cu [new file with mode: 0644]
source/base/partitioner.cuda.inst.in [new file with mode: 0644]
source/base/partitioner.inst.in

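Note (not part of the commit message): export_to_ghosted_array_start() and
import_from_ghosted_array_finish() in Utilities::MPI::Partitioner gain a second
template parameter MemorySpaceType, defaulted to MemorySpace::Host, so existing
callers keep compiling while LinearAlgebra::distributed::Vector can drive the
ghost exchange on CUDA device memory when a CUDA-aware MPI library is
available. A minimal sketch of how a caller would select the memory space; the
function and variable names below are illustrative and do not appear in the
commit:

    #include <deal.II/base/partitioner.h>

    #include <vector>

    // Hypothetical caller, only to show how the new template parameter is
    // chosen; a real caller would pick exactly one of the two variants,
    // depending on where the vector stores its data.
    void start_ghost_update(const dealii::Utilities::MPI::Partitioner &part,
                            const dealii::ArrayView<const double>     &owned,
                            const dealii::ArrayView<double>           &tmp,
                            const dealii::ArrayView<double>           &ghosts,
                            std::vector<MPI_Request>                  &requests)
    {
      // Host path: MemorySpaceType defaults to MemorySpace::Host.
      part.export_to_ghosted_array_start<double>(0, owned, tmp, ghosts, requests);

      // Device path: the ArrayViews must point to CUDA device memory and the
      // MPI implementation must be CUDA-aware.
      part.export_to_ghosted_array_start<double, dealii::MemorySpace::CUDA>(
        0, owned, tmp, ghosts, requests);
    }
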
diff --git a/include/deal.II/base/partitioner.h b/include/deal.II/base/partitioner.h
index 1c2f6efeacf8d27766954c386d4b09aecc365a1a..e44238ebb09464e4b39c41c78aede8761964f3e6 100644
@@ -21,6 +21,7 @@
 #include <deal.II/base/array_view.h>
 #include <deal.II/base/index_set.h>
 #include <deal.II/base/memory_consumption.h>
+#include <deal.II/base/memory_space.h>
 #include <deal.II/base/mpi.h>
 #include <deal.II/base/types.h>
 #include <deal.II/base/utilities.h>
@@ -419,7 +420,7 @@ namespace Utilities
        * This functionality is used in
        * LinearAlgebra::distributed::Vector::update_ghost_values().
        */
-      template <typename Number>
+      template <typename Number, typename MemorySpaceType = MemorySpace::Host>
       void
       export_to_ghosted_array_start(
         const unsigned int             communication_channel,
@@ -529,7 +530,7 @@ namespace Utilities
        * This functionality is used in
        * LinearAlgebra::distributed::Vector::compress().
        */
-      template <typename Number>
+      template <typename Number, typename MemorySpaceType = MemorySpace::Host>
       void
       import_from_ghosted_array_finish(
         const VectorOperation::values  vector_operation,
diff --git a/include/deal.II/base/partitioner.templates.h b/include/deal.II/base/partitioner.templates.h
index 09e9c73175cdc8eb13d7b20f44f91ae5a1fcbbae..62d1cd30ae6f5dfd7188af5083430934a5ebe43b 100644
 
 #include <deal.II/base/config.h>
 
+#include <deal.II/base/cuda_size.h>
 #include <deal.II/base/partitioner.h>
 
+#include <deal.II/lac/cuda_kernels.templates.h>
 #include <deal.II/lac/la_parallel_vector.h>
 
 #include <type_traits>
@@ -35,7 +37,7 @@ namespace Utilities
 
 #  ifdef DEAL_II_WITH_MPI
 
-    template <typename Number>
+    template <typename Number, typename MemorySpaceType>
     void
     Partitioner::export_to_ghosted_array_start(
       const unsigned int             communication_channel,
@@ -92,7 +94,7 @@ namespace Utilities
                       communicator,
                       &requests[i]);
           AssertThrowMPI(ierr);
-          ghost_array_ptr += ghost_targets()[i].second;
+          ghost_array_ptr += ghost_targets_data[i].second;
         }
 
       Number *temp_array_ptr = temporary_storage.data();
@@ -106,9 +108,30 @@ namespace Utilities
                              import_indices_chunks_by_rank_data[i + 1];
           unsigned int index = 0;
           for (; my_imports != end_my_imports; ++my_imports)
-            for (unsigned int j = my_imports->first; j < my_imports->second;
-                 j++)
-              temp_array_ptr[index++] = locally_owned_array[j];
+            {
+              const unsigned int chunk_size =
+                my_imports->second - my_imports->first;
+#    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+              if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+                {
+                  const cudaError_t cuda_error_code =
+                    cudaMemcpy(temp_array_ptr + index,
+                               locally_owned_array.data() + my_imports->first,
+                               chunk_size * sizeof(Number),
+                               cudaMemcpyDeviceToDevice);
+                  AssertCuda(cuda_error_code);
+                }
+              else
+#    endif
+                {
+                  std::memcpy(temp_array_ptr + index,
+                              locally_owned_array.data() + my_imports->first,
+                              chunk_size * sizeof(Number));
+                }
+              index += chunk_size;
+            }
+
           AssertDimension(index, import_targets_data[i].second);
 
           // start the send operations
@@ -370,7 +393,7 @@ namespace Utilities
 
 
 
-    template <typename Number>
+    template <typename Number, typename MemorySpaceType>
     void
     Partitioner::import_from_ghosted_array_finish(
       const VectorOperation::values  vector_operation,
@@ -423,7 +446,6 @@ namespace Utilities
 
       if (vector_operation != dealii::VectorOperation::insert)
         AssertDimension(n_ghost_targets + n_import_targets, requests.size());
-
       // first wait for the receive to complete
       if (requests.size() > 0 && n_import_targets > 0)
         {
@@ -433,21 +455,20 @@ namespace Utilities
           AssertThrowMPI(ierr);
 
           const Number *read_position = temporary_storage.data();
-          std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
-            my_imports = import_indices_data.begin();
-
+#    if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+          defined(DEAL_II_WITH_CUDA_AWARE_MPI))
           // If the operation is no insertion, add the imported data to the
           // local values. For insert, nothing is done here (but in debug mode
           // we assert that the specified value is either zero or matches with
           // the ones already present
           if (vector_operation == dealii::VectorOperation::add)
-            for (; my_imports != import_indices_data.end(); ++my_imports)
-              for (unsigned int j = my_imports->first; j < my_imports->second;
+            for (const auto &import_range : import_indices_data)
+              for (unsigned int j = import_range.first; j < import_range.second;
                    j++)
                 locally_owned_array[j] += *read_position++;
           else if (vector_operation == dealii::VectorOperation::min)
-            for (; my_imports != import_indices_data.end(); ++my_imports)
-              for (unsigned int j = my_imports->first; j < my_imports->second;
+            for (const auto &import_range : import_indices_data)
+              for (unsigned int j = import_range.first; j < import_range.second;
                    j++)
                 {
                   locally_owned_array[j] =
@@ -455,8 +476,8 @@ namespace Utilities
                   read_position++;
                 }
           else if (vector_operation == dealii::VectorOperation::max)
-            for (; my_imports != import_indices_data.end(); ++my_imports)
-              for (unsigned int j = my_imports->first; j < my_imports->second;
+            for (const auto &import_range : import_indices_data)
+              for (unsigned int j = import_range.first; j < import_range.second;
                    j++)
                 {
                   locally_owned_array[j] =
@@ -464,8 +485,8 @@ namespace Utilities
                   read_position++;
                 }
           else
-            for (; my_imports != import_indices_data.end(); ++my_imports)
-              for (unsigned int j = my_imports->first; j < my_imports->second;
+            for (const auto &import_range : import_indices_data)
+              for (unsigned int j = import_range.first; j < import_range.second;
                    j++, read_position++)
                 // Below we use relatively large precision in units in the last
                 // place (ULP) as this Assert can be easily triggered in
@@ -485,6 +506,47 @@ namespace Utilities
                          Number>::ExcNonMatchingElements(*read_position,
                                                          locally_owned_array[j],
                                                          my_pid));
+#    else
+          if (vector_operation == dealii::VectorOperation::add)
+            {
+              for (const auto &import_range : import_indices_data)
+                {
+                  const auto chunk_size =
+                    import_range.second - import_range.first;
+                  const int n_blocks =
+                    1 + (chunk_size - 1) / (::dealii::CUDAWrappers::chunk_size *
+                                            ::dealii::CUDAWrappers::block_size);
+                  dealii::LinearAlgebra::CUDAWrappers::kernel::vector_bin_op<
+                    Number,
+                    dealii::LinearAlgebra::CUDAWrappers::kernel::Binop_Addition>
+                    <<<n_blocks, dealii::CUDAWrappers::block_size>>>(
+                      locally_owned_array.data() + import_range.first,
+                      read_position,
+                      chunk_size);
+                  read_position += chunk_size;
+                }
+            }
+          else
+            for (const auto &import_range : import_indices_data)
+              {
+                const auto chunk_size =
+                  import_range.second - import_range.first;
+                const cudaError_t cuda_error_code =
+                  cudaMemcpy(locally_owned_array.data() + import_range.first,
+                             read_position,
+                             chunk_size * sizeof(Number),
+                             cudaMemcpyDeviceToDevice);
+                AssertCuda(cuda_error_code);
+                read_position += chunk_size;
+              }
+
+          static_assert(
+            std::is_same<MemorySpaceType, MemorySpace::CUDA>::value,
+            "If we are using the CPU implementation, we should not trigger the restriction");
+          Assert(vector_operation == dealii::VectorOperation::insert ||
+                   vector_operation == dealii::VectorOperation::add,
+                 ExcNotImplemented());
+#    endif
           AssertDimension(read_position - temporary_storage.data(),
                           n_import_indices());
         }
@@ -505,18 +567,34 @@ namespace Utilities
       if (ghost_array.size() > 0)
         {
           Assert(ghost_array.begin() != nullptr, ExcInternalError());
+
+#    if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
+      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
+          if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+            {
+              Assert(std::is_trivial<Number>::value, ExcNotImplemented());
+              cudaMemset(ghost_array.data(),
+                         0,
+                         sizeof(Number) * n_ghost_indices());
+            }
+          else
+#    endif
+            {
 #    ifdef DEAL_II_WITH_CXX17
-          if constexpr (std::is_trivial<Number>::value)
+              if constexpr (std::is_trivial<Number>::value)
 #    else
-          if (std::is_trivial<Number>::value)
+            if (std::is_trivial<Number>::value)
 #    endif
-            std::memset(ghost_array.data(),
-                        0,
-                        sizeof(Number) * n_ghost_indices());
-          else
-            std::fill(ghost_array.data(),
-                      ghost_array.data() + n_ghost_indices(),
-                      0);
+                {
+                  std::memset(ghost_array.data(),
+                              0,
+                              sizeof(Number) * n_ghost_indices());
+                }
+              else
+                std::fill(ghost_array.data(),
+                          ghost_array.data() + n_ghost_indices(),
+                          0);
+            }
         }
 
       // clear the compress requests
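Note on the hunks above (not part of the commit): when both
DEAL_II_COMPILER_CUDA_AWARE and DEAL_II_WITH_CUDA_AWARE_MPI are defined and
MemorySpaceType is MemorySpace::CUDA, each contiguous range of import indices
is packed into the temporary MPI buffer with a device-to-device cudaMemcpy, so
the CUDA-aware MPI library can send directly from device memory; otherwise the
host path uses std::memcpy. On the receive side, VectorOperation::add is
applied with the existing LinearAlgebra::CUDAWrappers vector_bin_op kernel, and
only insert and add are implemented for the device (hence the Assert at the end
of the new block). The compile-time branch follows the idiom sketched below;
copy_chunk is a hypothetical helper written for illustration, not code from the
commit, and is meant to be compiled as part of a .cu translation unit:

    #include <deal.II/base/exceptions.h>
    #include <deal.II/base/memory_space.h>

    #include <cstring>
    #include <type_traits>

    // Copy one contiguous chunk either on the device or on the host; the
    // branch is selected at compile time through the MemorySpaceType tag.
    template <typename Number, typename MemorySpaceType>
    void copy_chunk(Number *dst, const Number *src, const std::size_t n_elements)
    {
    #if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
      defined(DEAL_II_WITH_CUDA_AWARE_MPI)
      if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
        {
          const cudaError_t error_code = cudaMemcpy(
            dst, src, n_elements * sizeof(Number), cudaMemcpyDeviceToDevice);
          AssertCuda(error_code);
        }
      else
    #endif
        {
          // Host memory: a plain memcpy of the contiguous chunk suffices.
          std::memcpy(dst, src, n_elements * sizeof(Number));
        }
    }
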
diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt
index 8e03e013c53f756bf0d3fa22911dce0f0b1b2ba1..1379d6b6e226b749d38f7e85ac2d0592382900f6 100644
@@ -96,6 +96,7 @@ IF(DEAL_II_WITH_CUDA)
   SET(_separate_src
     ${_separate_src}
     cuda.cu
+    partitioner.cu
   )
 ENDIF()
 
@@ -117,6 +118,7 @@ SET(_inst
   geometric_utilities.inst.in
   mpi.inst.in
   partitioner.inst.in
+  partitioner.cuda.inst.in
   polynomials_rannacher_turek.inst.in
   symmetric_tensor.inst.in
   tensor.inst.in
diff --git a/source/base/partitioner.cc b/source/base/partitioner.cc
index 75a45a9df7f20a323674ed22292ba844733d6527..40a1e9cd6c015bf3583d1c1d9856fbdf0355c9a4 100644
@@ -320,7 +320,8 @@ namespace Utilities
         n_import_indices_data);
       {
         unsigned int             current_index_start = 0;
-        std::vector<MPI_Request> import_requests(import_targets_data.size());
+        std::vector<MPI_Request> import_requests(import_targets_data.size() +
+                                                 n_ghost_targets);
         for (unsigned int i = 0; i < import_targets_data.size(); i++)
           {
             const int ierr =
@@ -336,17 +337,19 @@ namespace Utilities
           }
         AssertDimension(current_index_start, n_import_indices_data);
 
-        // use blocking send for ghost indices stored in expanded_ghost_indices
+        // use non-blocking send for ghost indices stored in
+        // expanded_ghost_indices
         current_index_start = 0;
         for (unsigned int i = 0; i < n_ghost_targets; i++)
           {
             const int ierr =
-              MPI_Send(&expanded_ghost_indices[current_index_start],
-                       ghost_targets_data[i].second,
-                       DEAL_II_DOF_INDEX_MPI_TYPE,
-                       ghost_targets_data[i].first,
-                       my_pid,
-                       communicator);
+              MPI_Isend(&expanded_ghost_indices[current_index_start],
+                        ghost_targets_data[i].second,
+                        DEAL_II_DOF_INDEX_MPI_TYPE,
+                        ghost_targets_data[i].first,
+                        my_pid,
+                        communicator,
+                        &import_requests[import_targets_data.size() + i]);
             AssertThrowMPI(ierr);
             current_index_start += ghost_targets_data[i].second;
           }
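Note on the hunks above (not part of the commit): the ghost-index sends during
partitioner set-up now use MPI_Isend instead of the previous blocking MPI_Send,
and their requests are stored behind the MPI_Irecv requests in the same,
enlarged request vector. The completion call is not visible in this diff, so
its placement is an assumption; the overall pattern is sketched below with
illustrative names that do not appear in the commit:

    #include <mpi.h>

    #include <vector>

    // All receives are posted first, then the sends, and a single MPI_Waitall
    // completes both groups before any of the buffers may be reused.
    void exchange_indices(const std::vector<std::vector<unsigned int>> &send_buffers,
                          std::vector<std::vector<unsigned int>>       &recv_buffers,
                          const std::vector<int>                       &send_ranks,
                          const std::vector<int>                       &recv_ranks,
                          const MPI_Comm                                comm)
    {
      std::vector<MPI_Request> requests(recv_ranks.size() + send_ranks.size());

      for (unsigned int i = 0; i < recv_ranks.size(); ++i)
        MPI_Irecv(recv_buffers[i].data(),
                  recv_buffers[i].size(),
                  MPI_UNSIGNED,
                  recv_ranks[i],
                  /*tag*/ 0,
                  comm,
                  &requests[i]);

      for (unsigned int i = 0; i < send_ranks.size(); ++i)
        MPI_Isend(send_buffers[i].data(),
                  send_buffers[i].size(),
                  MPI_UNSIGNED,
                  send_ranks[i],
                  /*tag*/ 0,
                  comm,
                  &requests[recv_ranks.size() + i]);

      MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
    }
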
diff --git a/source/base/partitioner.cu b/source/base/partitioner.cu
new file mode 100644
index 0000000..ed02193
--- /dev/null
@@ -0,0 +1,24 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 1999 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/partitioner.h>
+#include <deal.II/base/partitioner.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+// explicit instantiations from .templates.h file
+#include "partitioner.cuda.inst"
+
+DEAL_II_NAMESPACE_CLOSE
diff --git a/source/base/partitioner.cuda.inst.in b/source/base/partitioner.cuda.inst.in
new file mode 100644
index 0000000..c590984
--- /dev/null
@@ -0,0 +1,36 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (SCALAR : MPI_SCALARS)
+  {
+#ifdef DEAL_II_WITH_MPI
+    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
+      SCALAR,
+      dealii::MemorySpace::CUDA>(const unsigned int,
+                                 const ArrayView<const SCALAR> &,
+                                 const ArrayView<SCALAR> &,
+                                 const ArrayView<SCALAR> &,
+                                 std::vector<MPI_Request> &) const;
+    template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
+      SCALAR,
+      MemorySpace::CUDA>(const VectorOperation::values,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
+#endif
+  }
diff --git a/source/base/partitioner.inst.in b/source/base/partitioner.inst.in
index 5e74ab99c4958dc1e442b3db6433103edf151608..f20ad8a6a516f7e10de97b0a0a17a096e8a4adea 100644
@@ -19,11 +19,12 @@ for (SCALAR : MPI_SCALARS)
   {
 #ifdef DEAL_II_WITH_MPI
     template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
-      SCALAR>(const unsigned int,
-              const ArrayView<const SCALAR> &,
-              const ArrayView<SCALAR> &,
-              const ArrayView<SCALAR> &,
-              std::vector<MPI_Request> &) const;
+      SCALAR,
+      MemorySpace::Host>(const unsigned int,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::export_to_ghosted_array_finish<
       SCALAR>(const ArrayView<SCALAR> &, std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::import_from_ghosted_array_start<
@@ -33,10 +34,11 @@ for (SCALAR : MPI_SCALARS)
               const ArrayView<SCALAR> &,
               std::vector<MPI_Request> &) const;
     template void Utilities::MPI::Partitioner::import_from_ghosted_array_finish<
-      SCALAR>(const VectorOperation::values,
-              const ArrayView<const SCALAR> &,
-              const ArrayView<SCALAR> &,
-              const ArrayView<SCALAR> &,
-              std::vector<MPI_Request> &) const;
+      SCALAR,
+      MemorySpace::Host>(const VectorOperation::values,
+                         const ArrayView<const SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         const ArrayView<SCALAR> &,
+                         std::vector<MPI_Request> &) const;
 #endif
   }
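Note (not part of the commit): the for (SCALAR : MPI_SCALARS) blocks above use
deal.II's instantiation-file syntax; at build time the build system replaces
SCALAR by every scalar type in the MPI_SCALARS list. For the host file, the
expansion for double would be expected to read roughly as follows (illustration
only, not text generated by this commit):

    template void Utilities::MPI::Partitioner::export_to_ghosted_array_start<
      double,
      MemorySpace::Host>(const unsigned int,
                         const ArrayView<const double> &,
                         const ArrayView<double> &,
                         const ArrayView<double> &,
                         std::vector<MPI_Request> &) const;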
