https://gitweb.dealii.org/ - dealii.git/commitdiff
Rename variables
author    Daniel Arndt <arndtd@ornl.gov>
          Thu, 1 Dec 2022 22:15:38 +0000 (22:15 +0000)
committer Daniel Arndt <arndtd@ornl.gov>
          Thu, 8 Dec 2022 22:13:15 +0000 (17:13 -0500)
include/deal.II/base/memory_space_data.h
include/deal.II/lac/la_parallel_vector.h
include/deal.II/lac/la_parallel_vector.templates.h
include/deal.II/lac/vector_operations_internal.h
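
The commit renames the host-side Kokkos::View member of MemorySpace::MemorySpaceData: the Host specialization's storage "values" becomes "values_dev", and the Device specialization's host staging view "values" becomes "values_host_buffer"; all call sites in the three vector headers are updated accordingly. Below is a minimal, compilable sketch of the resulting member layout, condensed from the memory_space_data.h hunks that follow (the struct and stand-in names here are illustrative; only the member names and types follow the diff):

#include <Kokkos_Core.hpp>
#include <memory>

struct HostLike // stand-in for dealii::MemorySpace::Host
{
  using kokkos_space = Kokkos::HostSpace;
};

template <typename T, typename MemorySpace>
struct MemorySpaceDataSketch
{
  // Host staging buffer for MPI communication (formerly "values" in the
  // Device specialization); unused when the memory space is the host.
  Kokkos::View<T *, Kokkos::HostSpace> values_host_buffer;

  // The vector's actual storage in the chosen memory space
  // (formerly "values" in the Host specialization).
  Kokkos::View<T *, typename MemorySpace::kokkos_space> values_dev;

  // Shared-memory pointer, untouched by this commit.
  std::shared_ptr<T> values_sm_ptr;
};

int main()
{
  Kokkos::initialize();
  {
    MemorySpaceDataSketch<double, HostLike> data;
    data.values_dev = Kokkos::View<double *, Kokkos::HostSpace>("data", 4);
    data.values_dev(0) = 1.0; // direct element access is fine on the host
  }
  Kokkos::finalize();
}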

index 6daecffd5ac465f141394880e3cd7e964352256a..cc6d03a182f05b0242793978b2236d58f3de933e 100644 (file)
@@ -121,14 +121,14 @@ namespace MemorySpace
     using MemorySpace = Host;
 
     MemorySpaceData()
-    : values((dealii::Impl::ensure_kokkos_initialized(),
+    : values_dev((dealii::Impl::ensure_kokkos_initialized(),
               Kokkos::View<T *, Kokkos::HostSpace>("host data", 0)))
     {}
 
     void
     copy_to(T *begin, std::size_t n_elements)
     {
-      Assert(n_elements <= values.extent(0),
+      Assert(n_elements <= values_dev.extent(0),
              ExcMessage("n_elements greater than the size of values."));
       using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;
       Kokkos::
@@ -137,14 +137,14 @@ namespace MemorySpace
       Kokkos::deep_copy(
         ExecutionSpace{},
         begin_view,
-        Kokkos::subview(values, Kokkos::make_pair(std::size_t(0), n_elements)));
+        Kokkos::subview(values_dev, Kokkos::make_pair(std::size_t(0), n_elements)));
       ExecutionSpace{}.fence();
     }
 
     void
     copy_from(T *begin, std::size_t n_elements)
     {
-      Assert(n_elements <= values.extent(0),
+      Assert(n_elements <= values_dev.extent(0),
              ExcMessage("n_elements greater than the size of values."));
       using ExecutionSpace = typename MemorySpace::kokkos_space::execution_space;
       Kokkos::View<const T *,
@@ -153,14 +153,14 @@ namespace MemorySpace
         begin_view(begin, n_elements);
       Kokkos::deep_copy(
         ExecutionSpace{},
-        Kokkos::subview(values, Kokkos::make_pair(std::size_t(0), n_elements)),
+        Kokkos::subview(values_dev, Kokkos::make_pair(std::size_t(0), n_elements)),
         begin_view);
       ExecutionSpace{}.fence();
     }
 
-    Kokkos::View<T *, Kokkos::HostSpace> values;
-
     // unused
+    Kokkos::View<T *, Kokkos::HostSpace> values_host_buffer;
+
     Kokkos::View<T *, typename MemorySpace::kokkos_space> values_dev;
 
     std::shared_ptr<T> values_sm_ptr;
@@ -186,7 +186,7 @@ namespace MemorySpace
     using MemorySpace = Device;
 
     MemorySpaceData()
-    : values((dealii::Impl::ensure_kokkos_initialized(),
+    : values_host_buffer((dealii::Impl::ensure_kokkos_initialized(),
               Kokkos::View<T *, Kokkos::HostSpace>("host data", 0)))
     , values_dev(Kokkos::View<T *, typename MemorySpace::kokkos_space>(
         "memoryspace data",
@@ -226,7 +226,7 @@ namespace MemorySpace
       ExecutionSpace{}.fence();
     }
 
-    Kokkos::View<T *, Kokkos::HostSpace> values;
+    Kokkos::View<T *, Kokkos::HostSpace> values_host_buffer;
     
     Kokkos::View<T *, typename MemorySpace::kokkos_space> values_dev;
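
copy_to() and copy_from() above keep their structure and only swap the view they read from or write to. A self-contained sketch of that Kokkos pattern, outside dealii (the buffer and variable names mirror the diff; the rest is illustrative):

#include <Kokkos_Core.hpp>
#include <cstddef>
#include <vector>

int main()
{
  Kokkos::initialize();
  {
    // Stand-in for the renamed values_dev member of the Host specialization.
    Kokkos::View<double *, Kokkos::HostSpace> values_dev("host data", 16);

    std::vector<double> buffer(8, 3.14);
    const std::size_t   n_elements = buffer.size();

    // Wrap the raw pointer in an unmanaged view, as copy_from() does.
    Kokkos::View<double *,
                 Kokkos::HostSpace,
                 Kokkos::MemoryTraits<Kokkos::Unmanaged>>
      begin_view(buffer.data(), n_elements);

    // Copy into the first n_elements entries of values_dev.
    Kokkos::deep_copy(Kokkos::subview(values_dev,
                                      Kokkos::make_pair(std::size_t(0),
                                                        n_elements)),
                      begin_view);
  }
  Kokkos::finalize();
}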
     
index 6b7e23f468a74315a436e92af9e89e2befeee18a..09bcefad0d8b61d00d124fe7c739c66f1d553106 100644 (file)
@@ -1474,7 +1474,7 @@ namespace LinearAlgebra
           begin(::dealii::MemorySpace::
                   MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
         {
-          return data.values.data();
+          return data.values_dev.data();
         }
 
         static inline
@@ -1482,14 +1482,14 @@ namespace LinearAlgebra
           begin(const ::dealii::MemorySpace::
                   MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
         {
-          return data.values.data();
+          return data.values_dev.data();
         }
 
         static inline Number *
         get_values(::dealii::MemorySpace::
                      MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
         {
-          return data.values.data();
+          return data.values_dev.data();
         }
       };
 
@@ -1652,7 +1652,7 @@ namespace LinearAlgebra
                vector_is_ghosted == true,
              ExcMessage("You tried to read a ghost element of this vector, "
                         "but it has not imported its ghost values."));
-      return data.values[partitioner->global_to_local(global_index)];
+      return data.values_dev[partitioner->global_to_local(global_index)];
     }
 
 
@@ -1679,7 +1679,7 @@ namespace LinearAlgebra
       // (then, the compiler picks this method according to the C++ rule book
       // even if a human would pick the const method when this subsequent use
       // is just a read)
-      return data.values[partitioner->global_to_local(global_index)];
+      return data.values_dev[partitioner->global_to_local(global_index)];
     }
 
 
@@ -1718,7 +1718,7 @@ namespace LinearAlgebra
              ExcMessage("You tried to read a ghost element of this vector, "
                         "but it has not imported its ghost values."));
 
-      return data.values[local_index];
+      return data.values_dev[local_index];
     }
 
 
@@ -1735,7 +1735,7 @@ namespace LinearAlgebra
                        partitioner->locally_owned_size() +
                          partitioner->n_ghost_indices());
 
-      return data.values[local_index];
+      return data.values_dev[local_index];
     }
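
Only inline accessors change in la_parallel_vector.h; the public interface is unaffected. A short usage sketch of the accessors edited above (serial run, assuming a deal.II build that contains this commit):

#include <deal.II/base/mpi.h>
#include <deal.II/lac/la_parallel_vector.h>

int main(int argc, char *argv[])
{
  using namespace dealii;
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // A small serial vector; its storage now lives in data.values_dev.
  LinearAlgebra::distributed::Vector<double, MemorySpace::Host> v(10);

  v.local_element(3) = 2.0;       // writes through data.values_dev
  const double a     = v(3);      // global access to the same storage
  const double b     = *(v.begin() + 3);

  return (a == 2.0 && b == 2.0) ? 0 : 1;
}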
 
 
index b6fd2efb7b35b7483db8197e31131e055f8d1c7d..78298e8e5bb258d45ef8bcbe7df46ca01c9d73f3 100644 (file)
@@ -135,12 +135,12 @@ namespace LinearAlgebra
         {
           if (comm_shared == MPI_COMM_SELF)
             {
-             Kokkos::resize(data.values, new_alloc_size);          
+             Kokkos::resize(data.values_dev, new_alloc_size);              
 
               allocated_size = new_alloc_size;
 
               data.values_sm = {
-                ArrayView<const Number>(data.values.data(), new_alloc_size)};
+                ArrayView<const Number>(data.values_dev.data(), new_alloc_size)};
             }
           else
             {
@@ -224,7 +224,7 @@ namespace LinearAlgebra
                 data.values_sm[i] =
                   ArrayView<const Number>(others[i], new_alloc_sizes[i]);
 
-              data.values =
+              data.values_dev =
                 Kokkos::View<Number *,
                              Kokkos::HostSpace,
                              Kokkos::MemoryTraits<Kokkos::Unmanaged>>(
@@ -312,7 +312,7 @@ namespace LinearAlgebra
         {
           for (size_type i = 0; i < size; ++i)
             max =
-              std::max(numbers::NumberTraits<Number>::abs(data.values[i]), max);
+              std::max(numbers::NumberTraits<Number>::abs(data.values_dev[i]), max);
         }
       };
 
@@ -528,7 +528,7 @@ namespace LinearAlgebra
       resize_val(size, comm_sm);
 
       // delete previous content in import data
-      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_host_buffer, 0);
       Kokkos::resize(import_data.values_dev, 0);
 
       // set partitioner to serial version
@@ -559,7 +559,7 @@ namespace LinearAlgebra
       resize_val(local_size + ghost_size, comm_sm);
 
       // delete previous content in import data
-      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_host_buffer, 0);
       Kokkos::resize(import_data.values_dev, 0);
 
       // create partitioner
@@ -605,7 +605,7 @@ namespace LinearAlgebra
       // is only used as temporary storage for compress() and
       // update_ghost_values, and we might have vectors where we never
       // call these methods and hence do not need to have the storage.
-      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_host_buffer, 0);
       Kokkos::resize(import_data.values_dev, 0);
 
       thread_loop_partitioner = v.thread_loop_partitioner;
@@ -668,7 +668,7 @@ namespace LinearAlgebra
       // is only used as temporary storage for compress() and
       // update_ghost_values, and we might have vectors where we never
       // call these methods and hence do not need to have the storage.
-      Kokkos::resize(import_data.values, 0);
+      Kokkos::resize(import_data.values_host_buffer, 0);
       Kokkos::resize(import_data.values_dev, 0);
 
       vector_is_ghosted = false;
@@ -930,12 +930,10 @@ namespace LinearAlgebra
     void
     Vector<Number, MemorySpaceType>::zero_out_ghost_values() const
     {
-      if (data.values.size() != 0)
-        std::fill_n(data.values.data() + partitioner->locally_owned_size(),
-                    partitioner->n_ghost_indices(),
-                    Number());
-#ifdef DEAL_II_COMPILER_CUDA_AWARE
       if (data.values_dev.size() != 0)
+      {
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+        if (std::is_same_v<MemorySpaceType, MemorySpace::CUDA>)
         {
           const cudaError_t cuda_error_code =
             cudaMemset(data.values_dev.data() +
@@ -944,7 +942,14 @@ namespace LinearAlgebra
                        partitioner->n_ghost_indices() * sizeof(Number));
           AssertCuda(cuda_error_code);
         }
+       else
 #endif
+       {
+          std::fill_n(data.values_dev.data() + partitioner->locally_owned_size(),
+                    partitioner->n_ghost_indices(),
+                    Number());
+        }
+      }
 
       vector_is_ghosted = false;
     }
@@ -969,11 +974,11 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
           if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
             {
-              if (import_data.values_dev.size() == 0)
-                Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
+              if (import_data.values_host_buffer.size() == 0)
+                Kokkos::resize(import_data.values_host_buffer, partitioner->n_import_indices());
             }
           else
 #  endif
@@ -984,8 +989,8 @@ namespace LinearAlgebra
                 std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
                 "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 #  endif
-              if (import_data.values.size() == 0)
-                Kokkos::resize(import_data.values, partitioner->n_import_indices());
+              if (import_data.values_dev.size() == 0)
+                Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
             }
         }
 
@@ -997,22 +1002,15 @@ namespace LinearAlgebra
           // device. We use values to store the elements because the function
           // uses a view of the array and thus we need the data on the host to
           // outlive the scope of the function.
-          data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
-        }
-#  endif
-
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-      if (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value)
-        {
+          data.values_host_buffer = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
           partitioner->import_from_ghosted_array_start(
             operation,
             communication_channel,
-            ArrayView<Number, MemorySpace::CUDA>(
-              data.values_dev.data() + partitioner->locally_owned_size(),
+            ArrayView<Number, MemorySpace::Host>(
+              data.values_host_buffer.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
-            ArrayView<Number, MemorySpace::CUDA>(
-              import_data.values_dev.data(), partitioner->n_import_indices()),
+            ArrayView<Number, MemorySpace::Host>(
+              import_data.values_host_buffer.data(), partitioner->n_import_indices()),
             compress_requests);
         }
       else
@@ -1021,11 +1019,11 @@ namespace LinearAlgebra
           partitioner->import_from_ghosted_array_start(
             operation,
             communication_channel,
-            ArrayView<Number, MemorySpace::Host>(
-              data.values.data() + partitioner->locally_owned_size(),
+            ArrayView<Number, MemorySpaceType>(
+              data.values_dev.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
-            ArrayView<Number, MemorySpace::Host>(
-              import_data.values.data(), partitioner->n_import_indices()),
+            ArrayView<Number, MemorySpaceType>(
+              import_data.values_dev.data(), partitioner->n_import_indices()),
             compress_requests);
         }
 #else
@@ -1051,59 +1049,54 @@ namespace LinearAlgebra
       // make this function thread safe
       std::lock_guard<std::mutex> lock(mutex);
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
       if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
         {
           Assert(partitioner->n_import_indices() == 0 ||
-                   import_data.values_dev.size() != 0,
-                 ExcNotInitialized());
-          partitioner
-            ->import_from_ghosted_array_finish<Number, MemorySpace::CUDA>(
-              operation,
-              ArrayView<const Number, MemorySpace::CUDA>(
-                import_data.values_dev.data(), partitioner->n_import_indices()),
-              ArrayView<Number, MemorySpace::CUDA>(
-                data.values_dev.data(), partitioner->locally_owned_size()),
-              ArrayView<Number, MemorySpace::CUDA>(
-                data.values_dev.data() + partitioner->locally_owned_size(),
-                partitioner->n_ghost_indices()),
-              compress_requests);
-        }
-      else
-#  endif
-        {
-          Assert(partitioner->n_import_indices() == 0 ||
-                   import_data.values.size() != 0,
+                   import_data.values_host_buffer.size() != 0,
                  ExcNotInitialized());
           partitioner
             ->import_from_ghosted_array_finish<Number, MemorySpace::Host>(
               operation,
               ArrayView<const Number, MemorySpace::Host>(
-                import_data.values.data(), partitioner->n_import_indices()),
+                import_data.values_host_buffer.data(), partitioner->n_import_indices()),
               ArrayView<Number, MemorySpace::Host>(
-                data.values.data(), partitioner->locally_owned_size()),
+                data.values_host_buffer.data(), partitioner->locally_owned_size()),
               ArrayView<Number, MemorySpace::Host>(
-                data.values.data() + partitioner->locally_owned_size(),
+                data.values_host_buffer.data() + partitioner->locally_owned_size(),
                 partitioner->n_ghost_indices()),
               compress_requests);
-        }
 
-#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined  DEAL_II_MPI_WITH_CUDA_SUPPORT
-      // The communication is done on the host, so we need to
-      // move the data back to the device.
-      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
-        {
-          cudaError_t cuda_error_code =
+         // The communication is done on the host, so we need to
+          // move the data back to the device.
+         cudaError_t cuda_error_code =
             cudaMemcpy(data.values_dev.data(),
-                       data.values.data(),
+                       data.values_host_buffer.data(),
                        allocated_size * sizeof(Number),
                        cudaMemcpyHostToDevice);
           AssertCuda(cuda_error_code);
 
-         Kokkos::resize(data.values, 0);
+          Kokkos::resize(data.values_host_buffer, 0);
         }
+      else
 #  endif
+        {
+          Assert(partitioner->n_import_indices() == 0 ||
+                   import_data.values_dev.size() != 0,
+                 ExcNotInitialized());
+          partitioner
+            ->import_from_ghosted_array_finish<Number, MemorySpaceType>(
+              operation,
+              ArrayView<const Number, MemorySpaceType>(
+                import_data.values_dev.data(), partitioner->n_import_indices()),
+              ArrayView<Number, MemorySpaceType>(
+                data.values_dev.data(), partitioner->locally_owned_size()),
+              ArrayView<Number, MemorySpaceType>(
+                data.values_dev.data() + partitioner->locally_owned_size(),
+                partitioner->n_ghost_indices()),
+              compress_requests);
+        }
+
 #else
       (void)operation;
 #endif
@@ -1130,22 +1123,22 @@ namespace LinearAlgebra
       if (partitioner->n_import_indices() > 0)
         {
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-    defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-               Assert(
-            (std::is_same<MemorySpaceType, dealii::MemorySpace::CUDA>::value),
-            ExcMessage(
-              "Using MemorySpace::CUDA only allowed if the code is compiled with a CUDA compiler!"));
-          if (import_data.values_dev.size() == 0)
-            Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
-#  else
+    !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+                    if (std::is_same_v<MemorySpaceType, MemorySpace::CUDA>) {
+          if (import_data.values_host_buffer.size() == 0)
+            Kokkos::resize(import_data.values_host_buffer, partitioner->n_import_indices());
+                    }
+                    else
+#  endif
+                    {
 #    ifdef DEAL_II_MPI_WITH_CUDA_SUPPORT
           static_assert(
             std::is_same<MemorySpaceType, dealii::MemorySpace::Host>::value,
             "This code path should only be compiled for CUDA-aware-MPI for MemorySpace::Host!");
 #    endif
-          if (import_data.values.size() == 0)
-            Kokkos::resize(import_data.values, partitioner->n_import_indices());
-#  endif
+          if (import_data.values_dev.size() == 0)
+            Kokkos::resize(import_data.values_dev, partitioner->n_import_indices());
+                    }
         }
 
 #  if defined DEAL_II_COMPILER_CUDA_AWARE && \
@@ -1155,38 +1148,32 @@ namespace LinearAlgebra
        // device. We use values to store the elements because the function
        // uses a view of the array and thus we need the data on the host to
        // outlive the scope of the function.
-       data.values = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
-     }
-#  endif
-
-#  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
-          if (std::is_same_v<MemorySpaceType, MemorySpace::CUDA>) {
- partitioner->export_to_ghosted_array_start<Number, MemorySpace::CUDA>(
+       data.values_host_buffer = Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, data.values_dev);
+       
+       partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
         communication_channel,
-        ArrayView<const Number, MemorySpace::CUDA>(
-          data.values_dev.data(), partitioner->locally_owned_size()),
-        ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.data(),
+        ArrayView<const Number, MemorySpace::Host>(
+          data.values_host_buffer.data(), partitioner->locally_owned_size()),
+        ArrayView<Number, MemorySpace::Host>(import_data.values_host_buffer.data(),
                                              partitioner->n_import_indices()),
-        ArrayView<Number, MemorySpace::CUDA>(
-          data.values_dev.data() + partitioner->locally_owned_size(),
+        ArrayView<Number, MemorySpace::Host>(
+          data.values_host_buffer.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
     } else
-#else
+#endif
  {
-      partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
+      partitioner->export_to_ghosted_array_start<Number, MemorySpaceType>(
         communication_channel,
-        ArrayView<const Number, MemorySpace::Host>(
-          data.values.data(), partitioner->locally_owned_size()),
-        ArrayView<Number, MemorySpace::Host>(import_data.values.data(),
+        ArrayView<const Number, MemorySpaceType>(
+          data.values_dev.data(), partitioner->locally_owned_size()),
+        ArrayView<Number, MemorySpaceType>(import_data.values_dev.data(),
                                              partitioner->n_import_indices()),
-        ArrayView<Number, MemorySpace::Host>(
-          data.values.data() + partitioner->locally_owned_size(),
+        ArrayView<Number, MemorySpaceType>(
+          data.values_dev.data() + partitioner->locally_owned_size(),
           partitioner->n_ghost_indices()),
         update_ghost_values_requests);
  }
-#  endif
 
 #else
       (void)communication_channel;
@@ -1211,43 +1198,36 @@ namespace LinearAlgebra
           std::lock_guard<std::mutex> lock(mutex);
 
 #  if defined(DEAL_II_COMPILER_CUDA_AWARE) && \
-        defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
+        !defined(DEAL_II_MPI_WITH_CUDA_SUPPORT)
                if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
                {
           partitioner->export_to_ghosted_array_finish(
-            ArrayView<Number, MemorySpace::CUDA>(
-              data.values_dev.data() + partitioner->locally_owned_size(),
-              partitioner->n_ghost_indices()),
-            update_ghost_values_requests);     
-               } else
-#else
-               {
-          partitioner->export_to_ghosted_array_finish(
             ArrayView<Number, MemorySpace::Host>(
-              data.values.data() + partitioner->locally_owned_size(),
+              data.values_host_buffer.data() + partitioner->locally_owned_size(),
               partitioner->n_ghost_indices()),
             update_ghost_values_requests);
-               }
-#  endif
-        }
 
-#  if defined DEAL_II_COMPILER_CUDA_AWARE && \
-    !defined  DEAL_II_MPI_WITH_CUDA_SUPPORT
-      // The communication is done on the host, so we need to
+// The communication is done on the host, so we need to
       // move the data back to the device.
-      if (std::is_same<MemorySpaceType, MemorySpace::CUDA>::value)
-        {
-          cudaError_t cuda_error_code =
+ cudaError_t cuda_error_code =
             cudaMemcpy(data.values_dev.data() +
                          partitioner->locally_owned_size(),
-                       data.values.data() + partitioner->locally_owned_size(),
+                       data.values_host_buffer.data() + partitioner->locally_owned_size(),
                        partitioner->n_ghost_indices() * sizeof(Number),
                        cudaMemcpyHostToDevice);
           AssertCuda(cuda_error_code);
 
-          Kokkos::resize(data.values, 0);
+          Kokkos::resize(data.values_host_buffer, 0);     
+               } else
+#endif
+               {
+          partitioner->export_to_ghosted_array_finish(
+            ArrayView<Number, MemorySpaceType>(
+              data.values_dev.data() + partitioner->locally_owned_size(),
+              partitioner->n_ghost_indices()),
+            update_ghost_values_requests);
+               }
         }
-#  endif
 
 #endif
       vector_is_ghosted = true;
@@ -2053,7 +2033,7 @@ namespace LinearAlgebra
       if (partitioner.use_count() > 0)
         memory +=
           partitioner->memory_consumption() / partitioner.use_count() + 1;
-      if (import_data.values.size() != 0 || import_data.values_dev.size() != 0)
+      if (import_data.values_host_buffer.size() != 0 || import_data.values_dev.size() != 0)
         memory += (static_cast<std::size_t>(partitioner->n_import_indices()) *
                    sizeof(Number));
       return memory;
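
Most of the churn in la_parallel_vector.templates.h is in the path where MPI is not CUDA-aware: compress() and update_ghost_values() mirror the device data into the renamed values_host_buffer, communicate on the host, and copy the result back before releasing the buffer. A stand-alone sketch of that staging pattern (plain Kokkos plus the CUDA runtime, outside dealii; it assumes Kokkos was built with the CUDA backend, and the MPI exchange itself is elided):

#include <Kokkos_Core.hpp>
#include <cuda_runtime.h>

int main()
{
  Kokkos::initialize();
  {
    // Device storage, standing in for data.values_dev.
    Kokkos::View<double *, Kokkos::CudaSpace> values_dev("memoryspace data", 32);

    // 1. Mirror the device data into a host buffer (data.values_host_buffer).
    auto values_host_buffer =
      Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, values_dev);

    // 2. ... exchange values_host_buffer.data() with MPI on the host ...

    // 3. Copy the received values back to the device.
    cudaError_t ierr = cudaMemcpy(values_dev.data(),
                                  values_host_buffer.data(),
                                  values_dev.extent(0) * sizeof(double),
                                  cudaMemcpyHostToDevice);
    (void)ierr;

    // 4. Release the temporary host buffer, as the diff does.
    Kokkos::resize(values_host_buffer, 0);
  }
  Kokkos::finalize();
}
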
index 20cb26af29f7e4412da36f952c7f9c3fe3f22df3..80754c2412513d6dc8fb09e9bda19228d0cfd572 100644 (file)
@@ -1734,8 +1734,8 @@ namespace internal
                                                   ::dealii::MemorySpace::Host>
              &data)
       {
-        Vector_copy<Number, Number2> copier(v_data.values.data(),
-                                            data.values.data());
+        Vector_copy<Number, Number2> copier(v_data.values_dev.data(),
+                                            data.values_dev.data());
         parallel_for(copier, 0, size, thread_loop_partitioner);
       }
 
@@ -1748,7 +1748,7 @@ namespace internal
                                                  ::dealii::MemorySpace::Host>
             &data)
       {
-        Vector_set<Number> setter(s, data.values.data());
+        Vector_set<Number> setter(s, data.values_dev.data());
         parallel_for(setter, 0, size, thread_loop_partitioner);
       }
 
@@ -1763,8 +1763,8 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_add_v<Number> vector_add(data.values.data(),
-                                               v_data.values.data());
+        Vectorization_add_v<Number> vector_add(data.values_dev.data(),
+                                               v_data.values_dev.data());
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }
 
@@ -1779,8 +1779,8 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_subtract_v<Number> vector_subtract(data.values.data(),
-                                                         v_data.values.data());
+        Vectorization_subtract_v<Number> vector_subtract(data.values_dev.data(),
+                                                         v_data.values_dev.data());
         parallel_for(vector_subtract, 0, size, thread_loop_partitioner);
       }
 
@@ -1794,7 +1794,7 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_add_factor<Number> vector_add(data.values.data(), a);
+        Vectorization_add_factor<Number> vector_add(data.values_dev.data(), a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }
 
@@ -1809,8 +1809,8 @@ namespace internal
                                                     ::dealii::MemorySpace::Host>
                &data)
       {
-        Vectorization_add_av<Number> vector_add(data.values.data(),
-                                                v_data.values.data(),
+        Vectorization_add_av<Number> vector_add(data.values_dev.data(),
+                                                v_data.values_dev.data(),
                                                 a);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }
@@ -1831,7 +1831,7 @@ namespace internal
           &data)
       {
         Vectorization_add_avpbw<Number> vector_add(
-          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), a, b);
         parallel_for(vector_add, 0, size, thread_loop_partitioner);
       }
 
@@ -1847,8 +1847,8 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_sadd_xv<Number> vector_sadd(data.values.data(),
-                                                  v_data.values.data(),
+        Vectorization_sadd_xv<Number> vector_sadd(data.values_dev.data(),
+                                                  v_data.values_dev.data(),
                                                   x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
       }
@@ -1866,8 +1866,8 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_sadd_xav<Number> vector_sadd(data.values.data(),
-                                                   v_data.values.data(),
+        Vectorization_sadd_xav<Number> vector_sadd(data.values_dev.data(),
+                                                   v_data.values_dev.data(),
                                                    a,
                                                    x);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
@@ -1890,7 +1890,7 @@ namespace internal
           &data)
       {
         Vectorization_sadd_xavbw<Number> vector_sadd(
-          data.values.data(), v_data.values.data(), w_data.values.data(), x, a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), x, a, b);
         parallel_for(vector_sadd, 0, size, thread_loop_partitioner);
       }
 
@@ -1904,7 +1904,7 @@ namespace internal
                                                ::dealii::MemorySpace::Host>
           &data)
       {
-        Vectorization_multiply_factor<Number> vector_multiply(data.values.data(),
+        Vectorization_multiply_factor<Number> vector_multiply(data.values_dev.data(),
                                                               factor);
         parallel_for(vector_multiply, 0, size, thread_loop_partitioner);
       }
@@ -1919,8 +1919,8 @@ namespace internal
                                                    ::dealii::MemorySpace::Host>
               &data)
       {
-        Vectorization_scale<Number> vector_scale(data.values.data(),
-                                                 v_data.values.data());
+        Vectorization_scale<Number> vector_scale(data.values_dev.data(),
+                                                 v_data.values_dev.data());
         parallel_for(vector_scale, 0, size, thread_loop_partitioner);
       }
 
@@ -1935,8 +1935,8 @@ namespace internal
                                                     ::dealii::MemorySpace::Host>
                &data)
       {
-        Vectorization_equ_au<Number> vector_equ(data.values.data(),
-                                                v_data.values.data(),
+        Vectorization_equ_au<Number> vector_equ(data.values_dev.data(),
+                                                v_data.values_dev.data(),
                                                 a);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
       }
@@ -1957,7 +1957,7 @@ namespace internal
           &data)
       {
         Vectorization_equ_aubv<Number> vector_equ(
-          data.values.data(), v_data.values.data(), w_data.values.data(), a, b);
+          data.values_dev.data(), v_data.values_dev.data(), w_data.values_dev.data(), a, b);
         parallel_for(vector_equ, 0, size, thread_loop_partitioner);
       }
 
@@ -1973,7 +1973,7 @@ namespace internal
       {
         Number                                                   sum;
         dealii::internal::VectorOperations::Dot<Number, Number2> dot(
-          data.values.data(), v_data.values.data());
+          data.values_dev.data(), v_data.values_dev.data());
         dealii::internal::VectorOperations::parallel_reduce(
           dot, 0, size, sum, thread_loop_partitioner);
         AssertIsFinite(sum);
@@ -1991,7 +1991,7 @@ namespace internal
                                                     ::dealii::MemorySpace::Host>
                &data)
       {
-        Norm2<Number, real_type> norm2(data.values.data());
+        Norm2<Number, real_type> norm2(data.values_dev.data());
         parallel_reduce(norm2, 0, size, sum, thread_loop_partitioner);
       }
 
@@ -2004,7 +2004,7 @@ namespace internal
           MemorySpaceData<Number, ::dealii::MemorySpace::Host> &data)
       {
         Number            sum;
-        MeanValue<Number> mean(data.values.data());
+        MeanValue<Number> mean(data.values_dev.data());
         parallel_reduce(mean, 0, size, sum, thread_loop_partitioner);
 
         return sum;
@@ -2020,7 +2020,7 @@ namespace internal
                                                     ::dealii::MemorySpace::Host>
                &data)
       {
-        Norm1<Number, real_type> norm1(data.values.data());
+        Norm1<Number, real_type> norm1(data.values_dev.data());
         parallel_reduce(norm1, 0, size, sum, thread_loop_partitioner);
       }
 
@@ -2035,7 +2035,7 @@ namespace internal
                                                     ::dealii::MemorySpace::Host>
                &data)
       {
-        NormP<Number, real_type> normp(data.values.data(), p);
+        NormP<Number, real_type> normp(data.values_dev.data(), p);
         parallel_reduce(normp, 0, size, sum, thread_loop_partitioner);
       }
 
@@ -2054,9 +2054,9 @@ namespace internal
           &data)
       {
         Number            sum;
-        AddAndDot<Number> adder(data.values.data(),
-                                v_data.values.data(),
-                                w_data.values.data(),
+        AddAndDot<Number> adder(data.values_dev.data(),
+                                v_data.values_dev.data(),
+                                w_data.values_dev.data(),
                                 a);
         parallel_reduce(adder, 0, size, sum, thread_loop_partitioner);
 
@@ -2112,7 +2112,7 @@ namespace internal
       {
         if (operation == VectorOperation::insert)
           {
-            cudaError_t cuda_error_code = cudaMemcpy(data.values.data(),
+            cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
                                                      v_data.values_dev.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyDeviceToHost);
@@ -2630,7 +2630,7 @@ namespace internal
         if (operation == VectorOperation::insert)
           {
             cudaError_t cuda_error_code = cudaMemcpy(data.values_dev.data(),
-                                                     v_data.values.data(),
+                                                     v_data.values_dev.data(),
                                                      size * sizeof(Number),
                                                      cudaMemcpyHostToDevice);
             AssertCuda(cuda_error_code);
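
The vector_operations_internal.h hunks are mechanical: every Host-space kernel (Vector_copy, Vector_set, the Vectorization_* functors and the reductions) now takes its raw pointer from data.values_dev.data() instead of data.values.data(). A toy analogue of that pattern (the _sketch names are not dealii's; only the pointer-from-view idea matches the diff):

#include <Kokkos_Core.hpp>
#include <cstddef>

// Toy analogue of dealii's Vector_set functor: fill a raw pointer range.
template <typename Number>
struct Vector_set_sketch
{
  Number  value;
  Number *dst;

  void operator()(const std::size_t begin, const std::size_t end) const
  {
    for (std::size_t i = begin; i < end; ++i)
      dst[i] = value;
  }
};

int main()
{
  Kokkos::initialize();
  {
    Kokkos::View<double *, Kokkos::HostSpace> values_dev("host data", 8);

    // The raw pointer handed to the functor now comes from values_dev.
    Vector_set_sketch<double> setter{1.5, values_dev.data()};
    setter(0, values_dev.extent(0)); // dealii would dispatch this via parallel_for
  }
  Kokkos::finalize();
}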
