https://gitweb.dealii.org/ - dealii.git/commitdiff
Convert distributed::Vector to Kokkos
authorDaniel Arndt <arndtd@ornl.gov>
Tue, 6 Dec 2022 14:21:50 +0000 (14:21 +0000)
committerDaniel Arndt <arndtd@ornl.gov>
Thu, 8 Dec 2022 22:13:15 +0000 (17:13 -0500)
include/deal.II/lac/la_parallel_vector.templates.h
source/grid/grid_tools.cc
source/lac/vector_memory.cc
source/lac/vector_memory_cuda.cc
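
The conversion follows the same pattern in every hunk below: raw cudaMalloc/cudaMemcpy calls and hand-written <<<blocks, threads>>> kernel launches are replaced by Kokkos::View allocations, Kokkos::deep_copy, and Kokkos::parallel_for/parallel_reduce on the memory space's execution space. The following is a minimal, self-contained sketch of that pattern; it is not deal.II code and all names in it are illustrative.

#include <Kokkos_Core.hpp>

// Illustrative only: scatter host data to permuted positions in a device
// array, the way the old CUDA code used cudaMalloc/cudaMemcpy plus a
// set_permutated<<<blocks, threads>>>() launch.
void scatter_to_device(const double *host_values,
                       const int *   host_indices,
                       const int     n,
                       Kokkos::View<double *> device_dst)
{
  // Wrap the host arrays in unmanaged host views.
  Kokkos::View<const double *, Kokkos::HostSpace> values_host(host_values, n);
  Kokkos::View<const int *, Kokkos::HostSpace>    indices_host(host_indices, n);

  // Allocate device storage and copy the data over; this replaces
  // cudaMalloc() + cudaMemcpy(..., cudaMemcpyHostToDevice).
  Kokkos::View<double *> values_dev("values_dev", n);
  Kokkos::View<int *>    indices_dev("indices_dev", n);
  Kokkos::deep_copy(values_dev, values_host);
  Kokkos::deep_copy(indices_dev, indices_host);

  // This replaces the hand-written kernel launch; the backend (CUDA, HIP,
  // SYCL, OpenMP, ...) is whatever Kokkos was configured with.
  Kokkos::parallel_for(
    "scatter", Kokkos::RangePolicy<>(0, n),
    KOKKOS_LAMBDA(const int i) { device_dst(indices_dev(i)) = values_dev(i); });
  Kokkos::fence();
}
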

index 21b101d18b6dbdb8390532ef5d5bb5e153d69a80..179a15d8b3da1719f44bbc4905d21fd5161067f3 100644 (file)
@@ -87,7 +87,7 @@ namespace LinearAlgebra
       struct la_parallel_vector_templates_functions
       {
         static_assert(std::is_same<MemorySpaceType, MemorySpace::Host>::value ||
-                        std::is_same<MemorySpaceType, MemorySpace::CUDA>::value,
+                        std::is_same<MemorySpaceType, MemorySpace::Device>::value,
                       "MemorySpace should be Host or CUDA");
 
         static void
@@ -316,10 +316,9 @@ namespace LinearAlgebra
         }
       };
 
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
       template <typename Number>
       struct la_parallel_vector_templates_functions<Number,
-                                                    ::dealii::MemorySpace::CUDA>
+                                                    ::dealii::MemorySpace::Device>
       {
         using size_type = types::global_dof_index;
 
@@ -327,7 +326,7 @@ namespace LinearAlgebra
         resize_val(const types::global_dof_index new_alloc_size,
                    types::global_dof_index &     allocated_size,
                    ::dealii::MemorySpace::
-                     MemorySpaceData<Number, ::dealii::MemorySpace::CUDA> &data,
+                     MemorySpaceData<Number, ::dealii::MemorySpace::Device> &data,
                    const MPI_Comm &comm_sm)
         {
           (void)comm_sm;
@@ -362,7 +361,7 @@ namespace LinearAlgebra
                           communication_pattern,
           const IndexSet &locally_owned_elem,
           ::dealii::MemorySpace::MemorySpaceData<Number,
-                                                 ::dealii::MemorySpace::CUDA>
+                                                 ::dealii::MemorySpace::Device>
             &data)
         {
           Assert(
@@ -372,7 +371,7 @@ namespace LinearAlgebra
               "Only VectorOperation::add and VectorOperation::insert are allowed"));
 
           ::dealii::LinearAlgebra::distributed::
-            Vector<Number, ::dealii::MemorySpace::CUDA>
+            Vector<Number, ::dealii::MemorySpace::Device>
               tmp_vector(communication_pattern);
 
           // fill entries from ReadWriteVector into the distributed vector,
@@ -381,99 +380,93 @@ namespace LinearAlgebra
           // and once for operator() of tmp_vector
           const IndexSet &       v_stored   = V.get_stored_elements();
           const size_type        n_elements = v_stored.n_elements();
-          std::vector<size_type> indices(n_elements);
-          for (size_type i = 0; i < n_elements; ++i)
+          Kokkos::DefaultHostExecutionSpace host_exec;
+          Kokkos::View<size_type *, Kokkos::HostSpace> indices("indices",
+                                                               n_elements);
+          Kokkos::parallel_for(
+            Kokkos::RangePolicy<Kokkos::DefaultHostExecutionSpace>(
+              host_exec, 0, n_elements),
+            KOKKOS_LAMBDA(size_type i) {
            indices[i] = communication_pattern->global_to_local(
              v_stored.nth_index_in_set(i));
+            });
+          host_exec.fence();
+
           // Move the indices to the device
-          size_type *indices_dev;
-          ::dealii::Utilities::CUDA::malloc(indices_dev, n_elements);
-          ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev);
+          ::dealii::MemorySpace::Device::kokkos_space::execution_space exec;
+          Kokkos::View<size_type *, ::dealii::MemorySpace::Device::kokkos_space>
+            indices_dev("indices_dev", n_elements);
+          Kokkos::deep_copy(exec, indices_dev, indices);
+          exec.fence();
+
           // Move the data to the device
-          Number *V_dev;
-          ::dealii::Utilities::CUDA::malloc(V_dev, n_elements);
-          cudaError_t cuda_error_code = cudaMemcpy(V_dev,
-                                                   V.begin(),
-                                                   n_elements * sizeof(Number),
-                                                   cudaMemcpyHostToDevice);
-          AssertCuda(cuda_error_code);
+          Kokkos::View<Number *, ::dealii::MemorySpace::Device::kokkos_space>
+            V_dev("V_dev", n_elements);
+          Kokkos::deep_copy(
+            exec,
+            V_dev,
+            Kokkos::View<const Number *, Kokkos::HostSpace>(V.begin(),
+                                                            n_elements));
+          exec.fence();
 
           // Set the values in tmp_vector
-          const int n_blocks =
-            1 + n_elements / (::dealii::CUDAWrappers::chunk_size *
-                              ::dealii::CUDAWrappers::block_size);
-          ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<Number>
-            <<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
-              indices_dev, tmp_vector.begin(), V_dev, n_elements);
+          // Capture a raw pointer to the (device) storage of tmp_vector:
+          // the KOKKOS_LAMBDA must not copy the Vector object itself.
+          Number *const tmp_vector_ptr = tmp_vector.begin();
+          Kokkos::parallel_for(
+            Kokkos::RangePolicy<
+              ::dealii::MemorySpace::Device::kokkos_space::execution_space>(
+              exec, 0, n_elements),
+            KOKKOS_LAMBDA(size_type i) {
+              tmp_vector_ptr[indices_dev(i)] = V_dev(i);
+            });
+          exec.fence();
 
           tmp_vector.compress(operation);
 
           // Copy the local elements of tmp_vector to the right place in val
           IndexSet        tmp_index_set  = tmp_vector.locally_owned_elements();
           const size_type tmp_n_elements = tmp_index_set.n_elements();
-          indices.resize(tmp_n_elements);
-          for (size_type i = 0; i < tmp_n_elements; ++i)
-            indices[i] = locally_owned_elem.index_within_set(
+          Kokkos::realloc(indices, tmp_n_elements);
+          Kokkos::parallel_for(
+            Kokkos::RangePolicy<Kokkos::DefaultHostExecutionSpace>(
+              host_exec, 0, tmp_n_elements),
+            KOKKOS_LAMBDA(size_type i) {
+              indices[i] = locally_owned_elem.index_within_set(
               tmp_index_set.nth_index_in_set(i));
-          ::dealii::Utilities::CUDA::free(indices_dev);
-          ::dealii::Utilities::CUDA::malloc(indices_dev, tmp_n_elements);
-          ::dealii::Utilities::CUDA::copy_to_dev(indices, indices_dev);
-
-          if (operation == VectorOperation::add)
-            ::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated<
-              Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
-              indices_dev,
-              data.values.data(),
-              tmp_vector.begin(),
-              tmp_n_elements);
+            });
+          host_exec.fence();
+          Kokkos::realloc(indices_dev, tmp_n_elements);
+          Kokkos::deep_copy(
+            indices_dev,
+            Kokkos::View<size_type *, Kokkos::HostSpace>(indices.data(),
+                                                         tmp_n_elements));
+
+          if (operation == VectorOperation::add)
+            Kokkos::parallel_for(
+              Kokkos::RangePolicy<
+                ::dealii::MemorySpace::Device::kokkos_space::execution_space>(
+                exec, 0, tmp_n_elements),
+              KOKKOS_LAMBDA(size_type i) {
+                data.values(indices_dev(i)) += tmp_vector_ptr[i];
+              });
           else
-            ::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<
-              Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
-              indices_dev,
-              data.values.data(),
-              tmp_vector.begin(),
-              tmp_n_elements);
-
-          ::dealii::Utilities::CUDA::free(indices_dev);
-          ::dealii::Utilities::CUDA::free(V_dev);
+            Kokkos::parallel_for(
+              Kokkos::RangePolicy<
+                ::dealii::MemorySpace::Device::kokkos_space::execution_space>(
+                exec, 0, tmp_n_elements),
+              KOKKOS_LAMBDA(size_type i) {
+                data.values(indices_dev(i)) = tmp_vector_ptr[i];
+              });
+          exec.fence();
         }
 
         template <typename RealType>
         static void
         linfty_norm_local(const ::dealii::MemorySpace::MemorySpaceData<
                             Number,
-                            ::dealii::MemorySpace::CUDA> &data,
+                            ::dealii::MemorySpace::Device> &data,
                           const unsigned int              size,
                           RealType &                      result)
         {
           static_assert(std::is_same<Number, RealType>::value,
                         "RealType should be the same type as Number");
 
-          Number *    result_device;
-          cudaError_t error_code = cudaMalloc(&result_device, sizeof(Number));
-          AssertCuda(error_code);
-          error_code = cudaMemset(result_device, 0, sizeof(Number));
-
-          const int n_blocks = 1 + size / (::dealii::CUDAWrappers::chunk_size *
-                                           ::dealii::CUDAWrappers::block_size);
-          ::dealii::LinearAlgebra::CUDAWrappers::kernel::reduction<
-            Number,
-            ::dealii::LinearAlgebra::CUDAWrappers::kernel::LInfty<Number>>
-            <<<dim3(n_blocks, 1), dim3(::dealii::CUDAWrappers::block_size)>>>(
-              result_device, data.values.data(), size);
-
-          // Copy the result back to the host
-          error_code = cudaMemcpy(&result,
-                                  result_device,
-                                  sizeof(Number),
-                                  cudaMemcpyDeviceToHost);
-          AssertCuda(error_code);
-          // Free the memory on the device
-          error_code = cudaFree(result_device);
-          AssertCuda(error_code);
+          typename ::dealii::MemorySpace::Device::kokkos_space::execution_space
+            exec;
+          Kokkos::parallel_reduce(
+            Kokkos::RangePolicy<
+              ::dealii::MemorySpace::Device::kokkos_space::execution_space>(
+              exec, 0, size),
+            KOKKOS_LAMBDA(size_type i, RealType & update) {
+              // linfty norm: take the maximum of |values(i)|, not a sum
+              if (update < Kokkos::abs(data.values(i)))
+                update = Kokkos::abs(data.values(i));
+            },
+            Kokkos::Max<RealType>(result));
         }
       };
-#endif
     } // namespace internal
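
A note on the linfty_norm_local() hunk above: the old code used the dedicated LInfty reduction kernel, i.e. a maximum over |values(i)|, so the Kokkos replacement has to use a Max reducer rather than the default sum. A minimal standalone sketch of that reduction idiom (illustrative, not the deal.II code):

#include <Kokkos_Core.hpp>

// Illustrative: compute max_i |v(i)| over a device view and return the
// result on the host through the Kokkos::Max reducer.
double linfty_norm(const Kokkos::View<const double *> &v)
{
  double result = 0.;
  Kokkos::parallel_reduce(
    "linfty_norm", Kokkos::RangePolicy<>(0, v.extent(0)),
    KOKKOS_LAMBDA(const int i, double &partial_max) {
      const double a = Kokkos::abs(v(i));
      if (partial_max < a)
        partial_max = a;
    },
    Kokkos::Max<double>(result)); // join with max, not the default sum
  return result;
}
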
 
 
@@ -930,27 +923,13 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::zero_out_ghost_values() const
     {
-      if (data.values.size() != 0)
-        {
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
-          if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
-            {
-              const cudaError_t cuda_error_code =
-                cudaMemset(data.values.data() +
-                             partitioner->locally_owned_size(),
-                           0,
-                           partitioner->n_ghost_indices() * sizeof(Number));
-              AssertCuda(cuda_error_code);
-            }
-          else
-#endif
-            {
-              std::fill_n(data.values.data() +
-                            partitioner->locally_owned_size(),
-                          partitioner->n_ghost_indices(),
-                          Number());
-            }
-        }
+      Kokkos::pair<size_type, size_type> range(
+        partitioner->locally_owned_size(),
+        partitioner->locally_owned_size() + partitioner->n_ghost_indices());
+      if (data.values_host_buffer.size() > 0)
+        Kokkos::deep_copy(Kokkos::subview(data.values_host_buffer, range), 0);
+      if (data.values.size() > 0)
+        Kokkos::deep_copy(Kokkos::subview(data.values, range), 0);
 
       vector_is_ghosted = false;
     }
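
The zero_out_ghost_values() hunk above collapses the cudaMemset/std::fill_n branches into a single deep_copy onto a subview, which works for any memory space. A minimal sketch of that idiom (names and the double value type are illustrative):

#include <Kokkos_Core.hpp>
#include <cstddef>

// Illustrative: zero the trailing ghost range [owned, owned + n_ghosts)
// of a vector's storage, regardless of where the view lives.
void zero_ghost_range(Kokkos::View<double *> values,
                      const std::size_t      owned,
                      const std::size_t      n_ghosts)
{
  const auto range = Kokkos::make_pair(owned, owned + n_ghosts);
  // deep_copy with a scalar source fills the subview; no cudaMemset or
  // std::fill_n branch is needed any more.
  Kokkos::deep_copy(Kokkos::subview(values, range), 0.);
}
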
@@ -974,9 +953,8 @@ namespace LinearAlgebra
       // allocate import_data in case it is not set up yet
       if (partitioner->n_import_indices() > 0)
         {
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-          if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+          if (std::is_same<MemorySpaceType, dealii::MemorySpace::Device>::value)
             {
               if (import_data.values_host_buffer.size() == 0)
                 Kokkos::resize(import_data.values_host_buffer,
@@ -985,21 +963,14 @@ namespace LinearAlgebra
           else
 #  endif
             {
-#  if !defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-              static_assert(
-                std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
-                "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
-#  endif
               if (import_data.values.size() == 0)
                 Kokkos::resize(import_data.values,
                                partitioner->n_import_indices());
             }
         }
 
-#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-      if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, dealii::MemorySpace::Device>::value)
         {
           // Move the data to the host and then move it back to the
           // device. We use values to store the elements because the function
@@ -1055,9 +1026,8 @@ namespace LinearAlgebra
 
       // make this function thread safe
       std::lock_guard<std::mutex> lock(mutex);
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, MemorySpace::Device>::value)
         {
           Assert(partitioner->n_import_indices() == 0 ||
                    import_data.values_host_buffer.size() != 0,
@@ -1079,12 +1049,7 @@ namespace LinearAlgebra
 
           // The communication is done on the host, so we need to
           // move the data back to the device.
-          cudaError_t cuda_error_code =
-            cudaMemcpy(data.values.data(),
-                       data.values_host_buffer.data(),
-                       allocated_size * sizeof(Number),
-                       cudaMemcpyHostToDevice);
-          AssertCuda(cuda_error_code);
+          Kokkos::deep_copy(data.values, data.values_host_buffer);
 
           Kokkos::resize(data.values_host_buffer, 0);
         }
@@ -1132,9 +1097,8 @@ namespace LinearAlgebra
       // allocate import_data in case it is not set up yet
       if (partitioner->n_import_indices() > 0)
         {
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-          if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+          if (std::is_same<MemorySpaceType, MemorySpace::Device>::value)
             {
               if (import_data.values_host_buffer.size() == 0)
                 Kokkos::resize(import_data.values_host_buffer,
@@ -1154,9 +1118,8 @@ namespace LinearAlgebra
             }
         }
 
-#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+      if (std::is_same<MemorySpaceType, MemorySpace::Device>::value)
         {
           // Move the data to the host and then move it back to the
           // device. We use values to store the elements because the function
@@ -1217,9 +1180,8 @@ namespace LinearAlgebra
           // make this function thread safe
           std::lock_guard<std::mutex> lock(mutex);
 
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-          if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
+#  if !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+          if (std::is_same<MemorySpaceType, MemorySpace::Device>::value)
             {
               partitioner->export_to_ghosted_array_finish(
                 ArrayView<Number, MemorySpace::Host>(
@@ -1230,14 +1192,11 @@ namespace LinearAlgebra
 
               // The communication is done on the host, so we need to
               // move the data back to the device.
-              cudaError_t cuda_error_code =
-                cudaMemcpy(data.values.data() +
-                             partitioner->locally_owned_size(),
-                           data.values_host_buffer.data() +
-                             partitioner->locally_owned_size(),
-                           partitioner->n_ghost_indices() * sizeof(Number),
-                           cudaMemcpyHostToDevice);
-              AssertCuda(cuda_error_code);
+              const auto range = Kokkos::make_pair(
+                partitioner->locally_owned_size(),
+                partitioner->locally_owned_size() +
+                  partitioner->n_ghost_indices());
+              Kokkos::deep_copy(
+                Kokkos::subview(data.values, range),
+                Kokkos::subview(data.values_host_buffer, range));
 
               Kokkos::resize(data.values_host_buffer, 0);
             }
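
The compress_*/update_ghost_values_* hunks above keep a host staging path for the case in which MPI is not device aware (!defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)): the relevant part of values is copied into values_host_buffer, the exchange runs on host memory, and the result is copied back. A rough standalone sketch of that staging pattern; exchange_on_host() is a hypothetical stand-in for the partitioner's export/import calls:

#include <Kokkos_Core.hpp>

// Hypothetical stand-in for the host-side MPI exchange performed by the
// partitioner (export_to_ghosted_array_* / import_from_ghosted_array_*).
void exchange_on_host(Kokkos::View<double *, Kokkos::HostSpace> host_buffer)
{
  // ... MPI_Isend/MPI_Irecv on host_buffer.data() would go here ...
  (void)host_buffer;
}

// Illustrative staging pattern for non-device-aware MPI:
// device -> host, communicate, host -> device, then drop the host buffer.
void communicate_via_host(Kokkos::View<double *> device_values)
{
  Kokkos::View<double *, Kokkos::HostSpace> host_buffer(
    "host_buffer", device_values.extent(0));

  Kokkos::deep_copy(host_buffer, device_values); // device -> host
  exchange_on_host(host_buffer);                 // MPI only sees host memory
  Kokkos::deep_copy(device_values, host_buffer); // host -> device

  // The patch keeps the buffer in MemorySpaceData and shrinks it with
  // Kokkos::resize(values_host_buffer, 0) once the exchange is done.
}
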
index dc2aa6242ae79d109a3b25b279ca7d145c9e66cd..c32cc0758e5fe55cb148c8a97a4d3b93e7923468 100644 (file)
@@ -2973,10 +2973,11 @@ namespace GridTools
                 else
                   marked = [](const ValueType &) -> bool { return true; };
 
+                auto nearest_predicate = boost::geometry::index::nearest(p, 1);
+                auto satisfies_predicate =
+                  boost::geometry::index::satisfies(marked);
                 std::vector<std::pair<Point<spacedim>, unsigned int>> res;
                 used_vertices_rtree.query(
-                  boost::geometry::index::nearest(p, 1) &&
-                    boost::geometry::index::satisfies(marked),
+                  nearest_predicate && satisfies_predicate,
                   std::back_inserter(res));
 
                 // Searching for a point which is located outside the
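
The grid_tools.cc hunk above only pulls the two rtree query predicates into named local variables (presumably a workaround for a compiler issue); the query itself is unchanged. For reference, a small self-contained example of this nearest-plus-filter query with boost::geometry, using made-up vertex data:

#include <boost/geometry.hpp>
#include <boost/geometry/index/rtree.hpp>
#include <iostream>
#include <iterator>
#include <utility>
#include <vector>

namespace bg  = boost::geometry;
namespace bgi = boost::geometry::index;

using Point2 = bg::model::point<double, 2, bg::cs::cartesian>;
using Value  = std::pair<Point2, unsigned int>; // vertex position + index

int main()
{
  // Illustrative data: a few "used vertices" with their indices.
  bgi::rtree<Value, bgi::linear<16>> rtree;
  rtree.insert(std::make_pair(Point2(0., 0.), 0u));
  rtree.insert(std::make_pair(Point2(1., 0.), 1u));
  rtree.insert(std::make_pair(Point2(0., 1.), 2u));

  // Only accept vertices that are "marked", mirroring the lambda in the patch.
  const auto marked = [](const Value &v) { return v.second != 1; };

  // Nearest marked vertex to the query point.
  std::vector<Value> res;
  rtree.query(bgi::nearest(Point2(0.9, 0.1), 1) && bgi::satisfies(marked),
              std::back_inserter(res));

  if (!res.empty())
    std::cout << "closest marked vertex: " << res.front().second << '\n';
}
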
index 461c1cf50c28440efbc24a35a712f2d5bede00bd..98d71dae83a989b98ea96978360c70c45c70726c 100644 (file)
 DEAL_II_NAMESPACE_OPEN
 
 #include "vector_memory.inst"
+template class VectorMemory<
+  LinearAlgebra::distributed::Vector<float, MemorySpace::Device>>;
+template class VectorMemory<
+  LinearAlgebra::distributed::Vector<double, MemorySpace::Device>>;
+template class GrowingVectorMemory<
+  LinearAlgebra::distributed::Vector<float, MemorySpace::Device>>;
+template class GrowingVectorMemory<
+  LinearAlgebra::distributed::Vector<double, MemorySpace::Device>>;
 
 namespace internal
 {
@@ -44,6 +52,12 @@ namespace internal
     release_all_unused_memory()
     {
 #include "vector_memory_release.inst"
+      dealii::GrowingVectorMemory<
+        dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::Device>>::
+        release_unused_memory();
+      dealii::GrowingVectorMemory<
+        dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::Device>>::
+        release_unused_memory();
 #ifdef DEAL_II_WITH_CUDA
       release_all_unused_cuda_memory();
 #endif
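
vector_memory.cc now always provides the VectorMemory/GrowingVectorMemory instantiations for MemorySpace::Device vectors (previously they were only built in vector_memory_cuda.cc, removed below), and release_all_unused_memory() also drains the corresponding pools. As a reminder of what these instantiations are for, a minimal usage sketch of the pool; the vector size is arbitrary:

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>

using namespace dealii;

// Illustrative: GrowingVectorMemory hands out scratch vectors from a static
// pool and keeps them allocated for reuse; release_unused_memory() is what
// finally frees them, which is why release_all_unused_memory() above has to
// call it for the Device instantiations as well.
void use_device_vector_pool()
{
  using VectorType =
    LinearAlgebra::distributed::Vector<double, MemorySpace::Device>;

  GrowingVectorMemory<VectorType>   pool;
  VectorMemory<VectorType>::Pointer tmp(pool);
  tmp->reinit(100); // scratch vector; returned to the pool when tmp dies

  // Explicitly drop everything the pool still holds.
  GrowingVectorMemory<VectorType>::release_unused_memory();
}
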
index 147c6ac8c536039ea5e91b5eb2d22c31e7af588e..c8f05f8381faf52c4f797ee7b45447812645e7e5 100644 (file)
@@ -24,14 +24,6 @@ template class VectorMemory<LinearAlgebra::CUDAWrappers::Vector<float>>;
 template class VectorMemory<LinearAlgebra::CUDAWrappers::Vector<double>>;
 template class GrowingVectorMemory<LinearAlgebra::CUDAWrappers::Vector<float>>;
 template class GrowingVectorMemory<LinearAlgebra::CUDAWrappers::Vector<double>>;
-template class VectorMemory<
-  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>;
-template class VectorMemory<
-  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>;
-template class GrowingVectorMemory<
-  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>;
-template class GrowingVectorMemory<
-  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>;
 
 namespace internal
 {
@@ -44,12 +36,6 @@ namespace internal
         float>>::release_unused_memory();
       dealii::GrowingVectorMemory<dealii::LinearAlgebra::CUDAWrappers::Vector<
         double>>::release_unused_memory();
-      dealii::GrowingVectorMemory<
-        dealii::LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
-        release_unused_memory();
-      dealii::GrowingVectorMemory<
-        dealii::LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
-        release_unused_memory();
     }
   } // namespace GrowingVectorMemoryImplementation
 } // namespace internal
