https://gitweb.dealii.org/ - dealii.git/commitdiff
Check result (8338/head)
author Daniel Arndt <arndtd@ornl.gov>
Mon, 8 Jul 2019 19:22:45 +0000 (15:22 -0400)
committer Daniel Arndt <arndtd@ornl.gov>
Tue, 9 Jul 2019 01:29:17 +0000 (21:29 -0400)
include/deal.II/base/tensor.h
tests/cuda/cuda_tensor_02.cu

include/deal.II/base/tensor.h
index 5754cbc7f230afd6d1e03fe63b08a42989c246b4..65088e7a9ca63184eefb7213afa58e650015f633 100644 (file)
@@ -323,11 +323,9 @@ public:
    * Return the Frobenius-norm of a tensor, i.e. the square root of the sum of
    * the absolute squares of all entries. For the present case of rank-1
    * tensors, this equals the usual <tt>l<sub>2</sub></tt> norm of the vector.
-   *
-   * @note This function can also be used in CUDA device code.
    */
-  DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV real_type
-                                          norm() const;
+  DEAL_II_CONSTEXPR real_type
+                    norm() const;
 
   /**
    * Return the square of the Frobenius-norm of a tensor, i.e. the sum of the
@@ -1036,14 +1034,11 @@ Tensor<0, dim, Number>::operator-() const
 
 
 template <int dim, typename Number>
-DEAL_II_CONSTEXPR DEAL_II_CUDA_HOST_DEV inline
-  typename Tensor<0, dim, Number>::real_type
-  Tensor<0, dim, Number>::norm() const
+DEAL_II_CONSTEXPR inline typename Tensor<0, dim, Number>::real_type
+Tensor<0, dim, Number>::norm() const
 {
-#ifndef __CUDA_ARCH__
   Assert(dim != 0,
          ExcMessage("Cannot access an object of type Tensor<0,0,Number>"));
-#endif
   return numbers::NumberTraits<Number>::abs(value);
 }
 
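As an aside (not part of the commit): the Frobenius norm described in the tensor.h comment above is the square root of the sum of the absolute squares of all entries, and norm_square() is that sum before taking the root. A minimal stand-alone sketch for a rank-1 tensor of real entries; the name frobenius_norm and the raw-array signature are placeholders for this illustration only, not deal.II code:

  #include <cmath>

  // For real entries v[0..dim-1]:
  //   norm_square(v) = |v[0]|^2 + ... + |v[dim-1]|^2
  //   norm(v)        = sqrt(norm_square(v)), i.e. the usual l2 norm
  template <int dim>
  double frobenius_norm(const double (&v)[dim])
  {
    double sum = 0.;
    for (int i = 0; i < dim; ++i)
      sum += std::abs(v[i]) * std::abs(v[i]); // sum of absolute squares
    return std::sqrt(sum);                    // square root of that sum
  }
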
tests/cuda/cuda_tensor_02.cu
index 83864d20f18c1fe6d32717750bb04e844b8c265c..be392d5583d6576416c48f1a2ad0019870205698 100644 (file)
 
 template <int rank, int dim, typename Number>
 __global__ void
-miscellaneous_kernel()
+miscellaneous_kernel(Number *check_1,
+                     Number *check_2,
+                     Number *check_3,
+                     Number *check_4,
+                     Number *check_5)
 {
   // constructors
   typename Tensor<rank, dim, Number>::array_type array{};
   Tensor<rank, dim, Number>                      dummy_1(array);
-  Tensor<rank, dim, Number>                      dummy_2;
-  Tensor<rank, dim, Number>                      dummy_3 = dummy_2;
+  *check_1 = dummy_1.norm_square();
+  Tensor<rank, dim, Number> dummy_2;
+  *check_2                          = dummy_2.norm_square();
+  Tensor<rank, dim, Number> dummy_3 = dummy_2;
+  *check_3                          = dummy_3.norm_square();
 
   // access
-  Tensor<rank + 1, dim, Number> initializer_1;
-  const auto                    dummy_5 = initializer_1[0];
+  Tensor<rank + 1, dim, Number>   initializer_1;
+  const Tensor<rank, dim, Number> dummy_5 = initializer_1[0];
+  *check_4                                = dummy_5.norm_square();
 
   // assignment
-  dummy_2 = dummy_3;
+  dummy_2  = dummy_3;
+  *check_5 = dummy_2.norm_square();
 }
 
 template <int rank, int dim, typename Number>
@@ -224,9 +233,6 @@ test_gpu()
   AssertThrow((t2_host - reference_host).norm() < tolerance,
               ExcInternalError());
 
-  // Miscellaneous
-  miscellaneous_kernel<rank, dim, Number><<<1, 1>>>();
-
   // Free memory
   cuda_error = cudaFree(t_dev);
   AssertCuda(cuda_error);
@@ -234,6 +240,74 @@ test_gpu()
   AssertCuda(cuda_error);
   cuda_error = cudaFree(t2_dev);
   AssertCuda(cuda_error);
+
+  // Miscellaneous
+  {
+    Number *check_1;
+    Number *check_2;
+    Number *check_3;
+    Number *check_4;
+    Number *check_5;
+
+    cuda_error = cudaMalloc(&check_1, sizeof(Number));
+    AssertCuda(cuda_error);
+    cuda_error = cudaMalloc(&check_2, sizeof(Number));
+    AssertCuda(cuda_error);
+    cuda_error = cudaMalloc(&check_3, sizeof(Number));
+    AssertCuda(cuda_error);
+    cuda_error = cudaMalloc(&check_4, sizeof(Number));
+    AssertCuda(cuda_error);
+    cuda_error = cudaMalloc(&check_5, sizeof(Number));
+    AssertCuda(cuda_error);
+
+    miscellaneous_kernel<rank, dim, Number>
+      <<<1, 1>>>(check_1, check_2, check_3, check_4, check_5);
+
+    Number check_1_host, check_2_host, check_3_host, check_4_host, check_5_host;
+
+    cuda_error = cudaMemcpy(&check_1_host,
+                            check_1,
+                            sizeof(Number),
+                            cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error);
+    cuda_error = cudaMemcpy(&check_2_host,
+                            check_2,
+                            sizeof(Number),
+                            cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error);
+    cuda_error = cudaMemcpy(&check_3_host,
+                            check_3,
+                            sizeof(Number),
+                            cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error);
+    cuda_error = cudaMemcpy(&check_4_host,
+                            check_4,
+                            sizeof(Number),
+                            cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error);
+    cuda_error = cudaMemcpy(&check_5_host,
+                            check_5,
+                            sizeof(Number),
+                            cudaMemcpyDeviceToHost);
+    AssertCuda(cuda_error);
+
+    AssertThrow(std::abs(check_1_host) < tolerance, ExcInternalError());
+    AssertThrow(std::abs(check_2_host) < tolerance, ExcInternalError());
+    AssertThrow(std::abs(check_3_host) < tolerance, ExcInternalError());
+    AssertThrow(std::abs(check_4_host) < tolerance, ExcInternalError());
+    AssertThrow(std::abs(check_5_host) < tolerance, ExcInternalError());
+
+    cuda_error = cudaFree(check_1);
+    AssertCuda(cuda_error);
+    cuda_error = cudaFree(check_2);
+    AssertCuda(cuda_error);
+    cuda_error = cudaFree(check_3);
+    AssertCuda(cuda_error);
+    cuda_error = cudaFree(check_4);
+    AssertCuda(cuda_error);
+    cuda_error = cudaFree(check_5);
+    AssertCuda(cuda_error);
+  }
 }
 
 int

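The restructured test follows the usual pattern for verifying device-side results: the kernel writes each value to global memory, the host copies it back and asserts on it, so a wrong result now fails the test instead of being silently discarded. A minimal stand-alone sketch of that pattern, with the CUDA error checks omitted for brevity; write_norm, result_dev, and the 3-4-5 example values are placeholders for this illustration only, not part of the test:

  #include <cassert>
  #include <cmath>

  __global__ void write_norm(double *result)
  {
    const double v[2] = {3., 4.};
    *result = sqrt(v[0] * v[0] + v[1] * v[1]); // device-side computation
  }

  int main()
  {
    double *result_dev;
    cudaMalloc(&result_dev, sizeof(double));    // storage for the check value
    write_norm<<<1, 1>>>(result_dev);           // kernel writes its result
    double result_host;
    cudaMemcpy(&result_host,
               result_dev,
               sizeof(double),
               cudaMemcpyDeviceToHost);         // copy the result back to the host
    assert(std::abs(result_host - 5.) < 1e-12); // verify on the host
    cudaFree(result_dev);
    return 0;
  }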