From 59f97604ce6012b4bca1613684f8ac8dcf23f849 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Fri, 30 Dec 2022 18:00:15 +0100
Subject: [PATCH] Move CUDA tests

---
 .../{cuda => base}/array_view_access_data.cc  |  16 +-
 .../array_view_access_data.debug.output       |   0
 .../cuda_point.cc => base/kokkos_point.cc}    | 137 ++++--
 .../kokkos_point.output}                      |   0
 tests/base/kokkos_tensor_01.cc                |  91 +++++
 .../kokkos_tensor_01.output}                  |   0
 tests/base/kokkos_tensor_02.cc                | 220 ++++++++++++
 .../kokkos_tensor_02.output}                  |   0
 tests/cuda/array_view_wrong_memory.cc         |  70 ----
 .../cuda/array_view_wrong_memory.debug.output |  29 --
 tests/cuda/cuda_tensor_01.cc                  | 117 -------
 tests/cuda/cuda_tensor_02.cc                  | 329 ------------------
 .../affine_constraints_set_zero.cc            |  44 +--
 ...ffine_constraints_set_zero.mpirun=2.output |   0
 tests/{cuda => lac}/vector_memory_01.cc       |   0
 tests/{cuda => lac}/vector_memory_01.output   |   0
 tests/{cuda => lac}/vector_memory_02.cc       |   0
 .../vector_memory_02.debug.output             |   0
 .../vector_reinit_04.cc}                      |   1 -
 .../vector_reinit_04.output}                  |   0
 20 files changed, 405 insertions(+), 649 deletions(-)
 rename tests/{cuda => base}/array_view_access_data.cc (67%)
 rename tests/{cuda => base}/array_view_access_data.debug.output (100%)
 rename tests/{cuda/cuda_point.cc => base/kokkos_point.cc} (53%)
 rename tests/{cuda/cuda_point.output => base/kokkos_point.output} (100%)
 create mode 100644 tests/base/kokkos_tensor_01.cc
 rename tests/{cuda/cuda_tensor_01.output => base/kokkos_tensor_01.output} (100%)
 create mode 100644 tests/base/kokkos_tensor_02.cc
 rename tests/{cuda/cuda_tensor_02.output => base/kokkos_tensor_02.output} (100%)
 delete mode 100644 tests/cuda/array_view_wrong_memory.cc
 delete mode 100644 tests/cuda/array_view_wrong_memory.debug.output
 delete mode 100644 tests/cuda/cuda_tensor_01.cc
 delete mode 100644 tests/cuda/cuda_tensor_02.cc
 rename tests/{cuda => lac}/affine_constraints_set_zero.cc (74%)
 rename tests/{cuda => lac}/affine_constraints_set_zero.mpirun=2.output (100%)
 rename tests/{cuda => lac}/vector_memory_01.cc (100%)
 rename tests/{cuda => lac}/vector_memory_01.output (100%)
 rename tests/{cuda => lac}/vector_memory_02.cc (100%)
 rename tests/{cuda => lac}/vector_memory_02.debug.output (100%)
 rename tests/{cuda/vector_reinit_01.cc => lac/vector_reinit_04.cc} (99%)
 rename tests/{cuda/vector_reinit_01.output => lac/vector_reinit_04.output} (100%)

diff --git a/tests/cuda/array_view_access_data.cc b/tests/base/array_view_access_data.cc
similarity index 67%
rename from tests/cuda/array_view_access_data.cc
rename to tests/base/array_view_access_data.cc
index d8f708db83..ea48eba175 100644
--- a/tests/cuda/array_view_access_data.cc
+++ b/tests/base/array_view_access_data.cc
@@ -14,8 +14,8 @@
 // ---------------------------------------------------------------------
 
 
-// check that we detect that accessing CUDA memory in an ArrayView object
-// is not allowed.
+// check that we detect that accessing memory in MemorySpace::Default using an
+// ArrayView object is not allowed.
#include @@ -28,16 +28,14 @@ main(int argc, char **argv) initlog(); - init_cuda(); - - std::unique_ptr dummy_cuda( - Utilities::CUDA::allocate_device_data(2), - Utilities::CUDA::delete_device_data); + Kokkos::ScopeGuard guard; + Kokkos::View dummy( + "dummy", 2); try { - ArrayView view(dummy_cuda.get(), 2); - const auto dummy = view[0]; + ArrayView view(dummy.data(), 2); + const auto dummy = view[0]; } catch (const ExceptionBase &exc) { diff --git a/tests/cuda/array_view_access_data.debug.output b/tests/base/array_view_access_data.debug.output similarity index 100% rename from tests/cuda/array_view_access_data.debug.output rename to tests/base/array_view_access_data.debug.output diff --git a/tests/cuda/cuda_point.cc b/tests/base/kokkos_point.cc similarity index 53% rename from tests/cuda/cuda_point.cc rename to tests/base/kokkos_point.cc index 871cf72318..093069fc00 100644 --- a/tests/cuda/cuda_point.cc +++ b/tests/base/kokkos_point.cc @@ -19,84 +19,74 @@ #include "../tests.h" -template -__global__ void -miscellaneous_kernel(Number check[16]) -{ - Point p_1; - check[0] = p_1.norm_square(); - Point p_2(Tensor<1, dim, Number>{}); - check[1] = p_2.norm_square(); - if (dim == 1) - { - Point p(1.); - check[2] = p.norm_square(); - } - if (dim == 2) - { - Point p(.6, .8); - check[2] = p.norm_square(); - } - if (dim == 3) - { - Point p(.48, .64, .6); - check[2] = p.norm_square(); - } - - auto p_3 = Point::unit_vector(0); - check[3] = p_3.norm_square(); - - auto entry_1 = p_1(0); - check[4] = entry_1; - p_1(0) = Number{1.}; - check[5] = p_1.norm_square(); - auto p_4 = p_1 + Tensor<1, dim, Number>{}; - check[6] = p_4.norm_square(); - auto p_5 = p_1 - Tensor<1, dim, Number>{}; - check[7] = p_5.norm_square(); - auto t_1 = p_1 - p_2; - check[8] = t_1.norm_square(); - auto p_6 = -p_3; - check[9] = p_6.norm_square(); - auto p_7 = p_4 / 2.; - check[10] = p_7.norm_square(); - auto p_8 = p_7 * 5.; - check[11] = p_8.norm_square(); - - auto s_1 = p_1 * t_1; - check[12] = s_1; - auto s_2 = p_2.square(); - check[13] = s_2; - auto s_3 = p_3.distance(p_5); - check[14] = s_3; - auto s_4 = p_4.distance_square(p_1); - check[15] = s_4; -} - template void test_gpu() { - Number * check; const unsigned int n_tests = 16; - auto cuda_error = cudaMalloc(&check, n_tests * sizeof(Number)); - AssertCuda(cuda_error); + Kokkos::View check("check", + n_tests); // Miscellaneous - miscellaneous_kernel<<<1, 1>>>(check); - // Check that the kernel was launched correctly - AssertCuda(cudaPeekAtLastError()); - // Check that there was no problem during the execution of the kernel - AssertCuda(cudaDeviceSynchronize()); - - std::vector check_host(n_tests); - - cuda_error = cudaMemcpy(check_host.data(), - check, - n_tests * sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); + using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space; + ExecutionSpace exec; + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + Point p_1; + check[0] = p_1.norm_square(); + Point p_2(Tensor<1, dim, Number>{}); + check[1] = p_2.norm_square(); + if (dim == 1) + { + Point p(1.); + check[2] = p.norm_square(); + } + if (dim == 2) + { + Point p(.6, .8); + check[2] = p.norm_square(); + } + if (dim == 3) + { + Point p(.48, .64, .6); + check[2] = p.norm_square(); + } + + auto p_3 = Point::unit_vector(0); + check[3] = p_3.norm_square(); + + auto entry_1 = p_1(0); + check[4] = entry_1; + p_1(0) = Number{1.}; + check[5] = p_1.norm_square(); + auto p_4 = p_1 + Tensor<1, dim, Number>{}; + check[6] = 
p_4.norm_square(); + auto p_5 = p_1 - Tensor<1, dim, Number>{}; + check[7] = p_5.norm_square(); + auto t_1 = p_1 - p_2; + check[8] = t_1.norm_square(); + auto p_6 = -p_3; + check[9] = p_6.norm_square(); + auto p_7 = p_4 / 2.; + check[10] = p_7.norm_square(); + auto p_8 = p_7 * 5.; + check[11] = p_8.norm_square(); + + auto s_1 = p_1 * t_1; + check[12] = s_1; + auto s_2 = p_2.square(); + check[13] = s_2; + auto s_3 = p_3.distance(p_5); + check[14] = s_3; + auto s_4 = p_4.distance_square(p_1); + check[15] = s_4; + }); + + exec.fence(); + + auto check_host = + Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, check); const double tolerance = 1.e-8; AssertThrow(std::abs(check_host[0] - 0.) < tolerance, ExcInternalError()); @@ -116,9 +106,6 @@ test_gpu() AssertThrow(std::abs(check_host[14] - 0.) < tolerance, ExcInternalError()); AssertThrow(std::abs(check_host[15] - 0.) < tolerance, ExcInternalError()); - cuda_error = cudaFree(check); - AssertCuda(cuda_error); - deallog << "OK" << std::endl; } @@ -127,7 +114,7 @@ main() { initlog(); - init_cuda(); + Kokkos::initialize(); test_gpu<1, double>(); test_gpu<2, double>(); @@ -135,4 +122,6 @@ main() test_gpu<1, float>(); test_gpu<2, float>(); test_gpu<3, float>(); + + Kokkos::finalize(); } diff --git a/tests/cuda/cuda_point.output b/tests/base/kokkos_point.output similarity index 100% rename from tests/cuda/cuda_point.output rename to tests/base/kokkos_point.output diff --git a/tests/base/kokkos_tensor_01.cc b/tests/base/kokkos_tensor_01.cc new file mode 100644 index 0000000000..db135575ff --- /dev/null +++ b/tests/base/kokkos_tensor_01.cc @@ -0,0 +1,91 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// Test operator[], norm and norm_square of cuda_tensor. + +#include + +#include "../tests.h" + +void +test_cpu() +{ + double a[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; + const unsigned int dim = 3; + Tensor<2, dim> t; + for (unsigned int i = 0; i < dim; ++i) + for (unsigned int j = 0; j < dim; ++j) + t[i][j] = a[i][j]; + + deallog.push("values"); + for (unsigned int i = 0; i < dim; ++i) + for (unsigned int j = 0; j < dim; ++j) + deallog << t[i][j] << std::endl; + deallog.pop(); + + deallog << "norm: " << t.norm() << std::endl; + deallog << "norm_square: " << t.norm_square() << std::endl; +} + +void +test_gpu() +{ + const unsigned int dim = 3; + Kokkos::View norm_dev("norm_dev"); + double norm_host; + Kokkos::View norm_square_dev( + "norm_square_dev"); + double norm_square_host; + Kokkos::View, MemorySpace::Default::kokkos_space> t_dev( + "t_dev"); + + using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space; + ExecutionSpace exec; + + // Launch the kernels. 
+ Kokkos::parallel_for( + Kokkos::MDRangePolicy>(exec, + {{0, 0}}, + {{dim, dim}}), + KOKKOS_LAMBDA(int i, int j) { t_dev()[i][j] = j + i * dim + 1.; }); + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + norm_dev() = t_dev().norm(); + norm_square_dev() = t_dev().norm_square(); + }); + exec.fence(); + + // Copy the result to the host + Kokkos::deep_copy(norm_host, norm_dev); + Kokkos::deep_copy(norm_square_host, norm_square_dev); + + // Output result + deallog << "norm GPU: " << norm_host << std::endl; + deallog << "norm_square GPU: " << norm_square_host << std::endl; +} + +int +main() +{ + initlog(); + + Kokkos::initialize(); + + test_cpu(); + + test_gpu(); + + Kokkos::finalize(); +} diff --git a/tests/cuda/cuda_tensor_01.output b/tests/base/kokkos_tensor_01.output similarity index 100% rename from tests/cuda/cuda_tensor_01.output rename to tests/base/kokkos_tensor_01.output diff --git a/tests/base/kokkos_tensor_02.cc b/tests/base/kokkos_tensor_02.cc new file mode 100644 index 0000000000..7581ecb970 --- /dev/null +++ b/tests/base/kokkos_tensor_02.cc @@ -0,0 +1,220 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2016 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +// Test operator[] and norm_square of cuda_tensor. + +#include + +#include "../tests.h" + +template +struct InitFunctor; + +template +struct InitFunctor<0, dim, Number> +{ + Kokkos::View, MemorySpace::Default::kokkos_space> t; + + KOKKOS_FUNCTION void + operator()(int k) const + { + t() = 1.; + } +}; + +template +struct InitFunctor<1, dim, Number> +{ + Kokkos::View, MemorySpace::Default::kokkos_space> t; + + KOKKOS_FUNCTION void + operator()(int k) const + { + t()[k] = k + 1.; + } +}; + +template +struct InitFunctor<2, dim, Number> +{ + Kokkos::View, MemorySpace::Default::kokkos_space> t; + + KOKKOS_FUNCTION void + operator()(int k) const + { + int i = k / dim; + int j = k % dim; + t()[i][j] = k + 1.; + } +}; + + +template +void +test_gpu() +{ + const double tolerance = 1.e-8; + + Kokkos::View, MemorySpace::Default::kokkos_space> + t_dev("t_dev"); + Kokkos::View, MemorySpace::Default::kokkos_space> + t1_dev("t1_dev"); + Kokkos::View, MemorySpace::Default::kokkos_space> + t2_dev("t2_dev"); + + Tensor t_host; + Tensor t1_host; + Tensor t2_host; + + Tensor reference_host; + + using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space; + ExecutionSpace exec; + + // Initialize + Kokkos::parallel_for(Kokkos::RangePolicy( + exec, 0, Utilities::fixed_power(rank)), + InitFunctor{t_dev}); + Kokkos::deep_copy(reference_host, t_dev); + + // Test multiplication. + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + t1_dev() = t_dev() * Number(2.); + t2_dev() = Number(2.) 
* t_dev(); + t_dev() *= 2.; + }); + Kokkos::deep_copy(t_host, t_dev); + Kokkos::deep_copy(t1_host, t1_dev); + Kokkos::deep_copy(t2_host, t2_dev); + + reference_host *= 2; + AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError()); + AssertThrow((t1_host - reference_host).norm() < tolerance, + ExcInternalError()); + AssertThrow((t2_host - reference_host).norm() < tolerance, + ExcInternalError()); + + deallog << "multiplication OK" << std::endl; + + // Test division. + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + t1_dev() = t_dev() / Number(2.); + t_dev() /= 2.; + t2_dev() = t1_dev(); + }); + Kokkos::deep_copy(t_host, t_dev); + Kokkos::deep_copy(t1_host, t1_dev); + + reference_host /= 2.; + AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError()); + AssertThrow((t1_host - reference_host).norm() < tolerance, + ExcInternalError()); + + deallog << "division OK" << std::endl; + + // Test summation + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + t2_dev() += t_dev(); + t1_dev() = t1_dev() + t_dev(); + }); + Kokkos::deep_copy(t1_host, t1_dev); + Kokkos::deep_copy(t2_host, t2_dev); + + reference_host *= 2.; + AssertThrow((t1_host - reference_host).norm() < tolerance, + ExcInternalError()); + AssertThrow((t2_host - reference_host).norm() < tolerance, + ExcInternalError()); + + + // Test subtraction + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + t2_dev() -= t_dev(); + t1_dev() = t1_dev() - t_dev(); + }); + Kokkos::deep_copy(t1_host, t1_dev); + Kokkos::deep_copy(t2_host, t2_dev); + + reference_host /= 2.; + AssertThrow((t1_host - reference_host).norm() < tolerance, + ExcInternalError()); + AssertThrow((t2_host - reference_host).norm() < tolerance, + ExcInternalError()); + + // Miscellaneous + { + Kokkos::View check_1("check_1"); + Kokkos::View check_2("check_2"); + Kokkos::View check_3("check_3"); + Kokkos::View check_4("check_4"); + Kokkos::View check_5("check_5"); + + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, 1), KOKKOS_LAMBDA(int) { + // constructors + typename Tensor::array_type array{}; + Tensor dummy_1(array); + check_1() = dummy_1.norm_square(); + Tensor dummy_2; + check_2() = dummy_2.norm_square(); + Tensor dummy_3 = dummy_2; + check_3() = dummy_3.norm_square(); + + // access + Tensor initializer_1; + const Tensor dummy_5 = initializer_1[0]; + check_4() = dummy_5.norm_square(); + + // assignment + dummy_2 = dummy_3; + check_5() = dummy_2.norm_square(); + }); + + Number check_1_host, check_2_host, check_3_host, check_4_host, check_5_host; + + Kokkos::deep_copy(check_1_host, check_1); + Kokkos::deep_copy(check_2_host, check_2); + Kokkos::deep_copy(check_3_host, check_3); + Kokkos::deep_copy(check_4_host, check_4); + Kokkos::deep_copy(check_5_host, check_5); + + AssertThrow(std::abs(check_1_host) < tolerance, ExcInternalError()); + AssertThrow(std::abs(check_2_host) < tolerance, ExcInternalError()); + AssertThrow(std::abs(check_3_host) < tolerance, ExcInternalError()); + AssertThrow(std::abs(check_4_host) < tolerance, ExcInternalError()); + AssertThrow(std::abs(check_5_host) < tolerance, ExcInternalError()); + } +} + +int +main() +{ + initlog(); + + Kokkos::initialize(); + + test_gpu<0, 3, double>(); + test_gpu<1, 3, double>(); + test_gpu<2, 3, double>(); + test_gpu<0, 3, float>(); + test_gpu<1, 3, float>(); + test_gpu<2, 3, float>(); + + Kokkos::finalize(); +} diff --git a/tests/cuda/cuda_tensor_02.output b/tests/base/kokkos_tensor_02.output 
similarity index 100% rename from tests/cuda/cuda_tensor_02.output rename to tests/base/kokkos_tensor_02.output diff --git a/tests/cuda/array_view_wrong_memory.cc b/tests/cuda/array_view_wrong_memory.cc deleted file mode 100644 index a8a58e858c..0000000000 --- a/tests/cuda/array_view_wrong_memory.cc +++ /dev/null @@ -1,70 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2018 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - - -// check that we detect creating ArrayView objects with the wrong memory space. - -#include - -#include "../tests.h" - -int -main(int argc, char **argv) -{ - deal_II_exceptions::disable_abort_on_exception(); - - initlog(); - - init_cuda(); - - std::vector dummy_host(2); - std::unique_ptr dummy_cuda( - Utilities::CUDA::allocate_device_data(2), - Utilities::CUDA::delete_device_data); - - deallog << "Testing host ArrayView with host memory" << std::endl; - ArrayView view_1(dummy_host); - - deallog << "Testing device ArrayView with host memory" << std::endl; - try - { - ArrayView view_2(dummy_host); - } - catch (const ExceptionBase &exc) - { - deallog << exc.what() << std::endl; - } - - deallog << "Testing host ArrayView with device memory" << std::endl; - try - { - ArrayView view_3(dummy_cuda.get(), 2); - } - catch (const ExceptionBase &exc) - { - deallog << exc.what() << std::endl; - } - - deallog << "Testing device ArrayView with device memory" << std::endl; - ArrayView view_4(dummy_cuda.get(), 2); - - deallog << "Testing host ArrayView to a nullptr with length 0" << std::endl; - ArrayView view_5(nullptr, 0); - - deallog << "Testing device ArrayView to a nullptr with length 0" << std::endl; - ArrayView view_6(nullptr, 0); - - return 0; -} diff --git a/tests/cuda/array_view_wrong_memory.debug.output b/tests/cuda/array_view_wrong_memory.debug.output deleted file mode 100644 index 4659b1156a..0000000000 --- a/tests/cuda/array_view_wrong_memory.debug.output +++ /dev/null @@ -1,29 +0,0 @@ - -DEAL::Testing host ArrayView with host memory -DEAL::Testing device ArrayView with host memory -DEAL:: --------------------------------------------------------- -An error occurred in file in function - dealii::ArrayView::ArrayView(dealii::ArrayView::value_type*, std::size_t) [with ElementType = unsigned int; MemorySpaceType = dealii::MemorySpace::Default; dealii::ArrayView::value_type = unsigned int; std::size_t = long unsigned int] -The violated condition was: - n_elements == 0 || internal::ArrayViewHelper::is_in_correct_memory_space( starting_element) -Additional information: - The memory space indicated by the template parameter and the one - derived from the pointer value do not match! 
--------------------------------------------------------- - -DEAL::Testing host ArrayView with device memory -DEAL:: --------------------------------------------------------- -An error occurred in file in function - dealii::ArrayView::ArrayView(dealii::ArrayView::value_type*, std::size_t) [with ElementType = unsigned int; MemorySpaceType = dealii::MemorySpace::Host; dealii::ArrayView::value_type = unsigned int; std::size_t = long unsigned int] -The violated condition was: - n_elements == 0 || internal::ArrayViewHelper::is_in_correct_memory_space( starting_element) -Additional information: - The memory space indicated by the template parameter and the one - derived from the pointer value do not match! --------------------------------------------------------- - -DEAL::Testing device ArrayView with device memory -DEAL::Testing host ArrayView to a nullptr with length 0 -DEAL::Testing device ArrayView to a nullptr with length 0 diff --git a/tests/cuda/cuda_tensor_01.cc b/tests/cuda/cuda_tensor_01.cc deleted file mode 100644 index 68e1f279f5..0000000000 --- a/tests/cuda/cuda_tensor_01.cc +++ /dev/null @@ -1,117 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2016 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -// Test operator[], norm and norm_square of cuda_tensor. - -#include - -#include "../tests.h" - -void -test_cpu() -{ - double a[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}; - const unsigned int dim = 3; - Tensor<2, dim> t; - for (unsigned int i = 0; i < dim; ++i) - for (unsigned int j = 0; j < dim; ++j) - t[i][j] = a[i][j]; - - deallog.push("values"); - for (unsigned int i = 0; i < dim; ++i) - for (unsigned int j = 0; j < dim; ++j) - deallog << t[i][j] << std::endl; - deallog.pop(); - - deallog << "norm: " << t.norm() << std::endl; - deallog << "norm_square: " << t.norm_square() << std::endl; -} - -__global__ void -init_kernel(Tensor<2, 3> *t, const unsigned int N) -{ - const unsigned int i = threadIdx.y; - const unsigned int j = threadIdx.x; - if ((i < N) && (j < N)) - (*t)[i][j] = j + i * N + 1.; -} - -__global__ void -norm_kernel(Tensor<2, 3> *t, double *norm, double *norm_square) -{ - if (threadIdx.x == 0) - { - *norm = t->norm(); - *norm_square = t->norm_square(); - } -} - -void -test_gpu() -{ - const unsigned int dim = 3; - double * norm_dev; - double norm_host; - double * norm_square_dev; - double norm_square_host; - Tensor<2, dim> * t_dev; - - // Allocate objects on the device - cudaError_t cuda_error = cudaMalloc(&t_dev, sizeof(Tensor<2, dim>)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&norm_dev, sizeof(double)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&norm_square_dev, sizeof(double)); - AssertCuda(cuda_error); - - // Launch the kernels. 
- dim3 block_dim(dim, dim); - init_kernel<<<1, block_dim>>>(t_dev, dim); - norm_kernel<<<1, 1>>>(t_dev, norm_dev, norm_square_dev); - - // Copy the result to the device - cuda_error = - cudaMemcpy(&norm_host, norm_dev, sizeof(double), cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&norm_square_host, - norm_square_dev, - sizeof(double), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - // Free memory - cuda_error = cudaFree(t_dev); - AssertCuda(cuda_error); - cuda_error = cudaFree(norm_dev); - AssertCuda(cuda_error); - cuda_error = cudaFree(norm_square_dev); - AssertCuda(cuda_error); - - // Output result - deallog << "norm GPU: " << norm_host << std::endl; - deallog << "norm_square GPU: " << norm_square_host << std::endl; -} - -int -main() -{ - initlog(); - - init_cuda(); - - test_cpu(); - - test_gpu(); -} diff --git a/tests/cuda/cuda_tensor_02.cc b/tests/cuda/cuda_tensor_02.cc deleted file mode 100644 index e3a5226bb7..0000000000 --- a/tests/cuda/cuda_tensor_02.cc +++ /dev/null @@ -1,329 +0,0 @@ -// --------------------------------------------------------------------- -// -// Copyright (C) 2016 by the deal.II authors -// -// This file is part of the deal.II library. -// -// The deal.II library is free software; you can use it, redistribute -// it, and/or modify it under the terms of the GNU Lesser General -// Public License as published by the Free Software Foundation; either -// version 2.1 of the License, or (at your option) any later version. -// The full text of the license can be found in the file LICENSE.md at -// the top level directory of deal.II. -// -// --------------------------------------------------------------------- - -// Test operator[] and norm_square of cuda_tensor. - -#include - -#include "../tests.h" - -template -__global__ void -miscellaneous_kernel(Number *check_1, - Number *check_2, - Number *check_3, - Number *check_4, - Number *check_5) -{ - // constructors - typename Tensor::array_type array{}; - Tensor dummy_1(array); - *check_1 = dummy_1.norm_square(); - Tensor dummy_2; - *check_2 = dummy_2.norm_square(); - Tensor dummy_3 = dummy_2; - *check_3 = dummy_3.norm_square(); - - // access - Tensor initializer_1; - const Tensor dummy_5 = initializer_1[0]; - *check_4 = dummy_5.norm_square(); - - // assignment - dummy_2 = dummy_3; - *check_5 = dummy_2.norm_square(); -} - -template -__global__ void -summation_kernel(Tensor *t, - Tensor *t1, - Tensor *t2) -{ - *t2 += *t; - *t1 = *t1 + *t; -} - -template -__global__ void -subtraction_kernel(Tensor *t, - Tensor *t1, - Tensor *t2) -{ - *t2 -= *t; - *t1 = *t1 - *t; -} - -template -__global__ void -multiplication_kernel(Tensor *t, - Tensor *t1, - Tensor *t2) -{ - *t1 = *t * Number(2.); - *t2 = Number(2.) 
* *t; - *t *= 2.; -} - -template -__global__ void -division_kernel(Tensor *t, - Tensor *t1, - Tensor *t2) -{ - *t1 = *t / Number(2.); - *t /= 2.; - *t2 = *t1; -} - -template -__global__ void -init_kernel(Tensor<0, dim, Number> *t) -{ - if (threadIdx.x == 0) - *t = 1.; -} - -template -__global__ void -init_kernel(Tensor<1, dim, Number> *t) -{ - const unsigned int i = threadIdx.x; - if (i < dim) - (*t)[i] = i + 1.; -} - -template -__global__ void -init_kernel(Tensor<2, dim, Number> *t) -{ - const unsigned int i = threadIdx.y; - const unsigned int j = threadIdx.x; - if ((i < dim) && (j < dim)) - (*t)[i][j] = j + i * dim + 1.; -} - - -template -void -test_gpu() -{ - const double tolerance = 1.e-8; - - Tensor *t_dev; - Tensor *t1_dev; - Tensor *t2_dev; - - Tensor t_host; - Tensor t1_host; - Tensor t2_host; - - Tensor reference_host; - - // Allocate objects on the device - cudaError_t cuda_error = - cudaMalloc(&t_dev, sizeof(Tensor)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&t1_dev, sizeof(Tensor)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&t2_dev, sizeof(Tensor)); - AssertCuda(cuda_error); - - // Initialize - dim3 block_dim(dim, dim); - init_kernel<<<1, block_dim>>>(t_dev); - cuda_error = cudaMemcpy(&reference_host, - t_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - // Test multiplication. - multiplication_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev); - - cuda_error = cudaMemcpy(&t_host, - t_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&t1_host, - t1_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&t2_host, - t2_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - reference_host *= 2; - AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError()); - AssertThrow((t1_host - reference_host).norm() < tolerance, - ExcInternalError()); - AssertThrow((t2_host - reference_host).norm() < tolerance, - ExcInternalError()); - - deallog << "multiplication OK" << std::endl; - - // Test division. 
- division_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev); - cuda_error = cudaMemcpy(&t_host, - t_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&t1_host, - t1_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - reference_host /= 2.; - AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError()); - AssertThrow((t1_host - reference_host).norm() < tolerance, - ExcInternalError()); - - deallog << "division OK" << std::endl; - - // Test summation - summation_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev); - cuda_error = cudaMemcpy(&t1_host, - t1_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&t2_host, - t2_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - reference_host *= 2.; - AssertThrow((t1_host - reference_host).norm() < tolerance, - ExcInternalError()); - AssertThrow((t2_host - reference_host).norm() < tolerance, - ExcInternalError()); - - - // Test subtraction - subtraction_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev); - cuda_error = cudaMemcpy(&t1_host, - t1_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&t2_host, - t2_dev, - sizeof(Tensor), - cudaMemcpyDeviceToHost); - - reference_host /= 2.; - AssertThrow((t1_host - reference_host).norm() < tolerance, - ExcInternalError()); - AssertThrow((t2_host - reference_host).norm() < tolerance, - ExcInternalError()); - - // Free memory - cuda_error = cudaFree(t_dev); - AssertCuda(cuda_error); - cuda_error = cudaFree(t1_dev); - AssertCuda(cuda_error); - cuda_error = cudaFree(t2_dev); - AssertCuda(cuda_error); - - // Miscellaneous - { - Number *check_1; - Number *check_2; - Number *check_3; - Number *check_4; - Number *check_5; - - cuda_error = cudaMalloc(&check_1, sizeof(Number)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&check_2, sizeof(Number)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&check_3, sizeof(Number)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&check_4, sizeof(Number)); - AssertCuda(cuda_error); - cuda_error = cudaMalloc(&check_5, sizeof(Number)); - AssertCuda(cuda_error); - - miscellaneous_kernel - <<<1, 1>>>(check_1, check_2, check_3, check_4, check_5); - - Number check_1_host, check_2_host, check_3_host, check_4_host, check_5_host; - - cuda_error = cudaMemcpy(&check_1_host, - check_1, - sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&check_2_host, - check_2, - sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&check_3_host, - check_3, - sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&check_4_host, - check_4, - sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - cuda_error = cudaMemcpy(&check_5_host, - check_5, - sizeof(Number), - cudaMemcpyDeviceToHost); - AssertCuda(cuda_error); - - AssertThrow(std::abs(check_1_host) < tolerance, ExcInternalError()); - AssertThrow(std::abs(check_2_host) < tolerance, ExcInternalError()); - AssertThrow(std::abs(check_3_host) < tolerance, ExcInternalError()); - AssertThrow(std::abs(check_4_host) < tolerance, ExcInternalError()); - AssertThrow(std::abs(check_5_host) < tolerance, ExcInternalError()); - - cuda_error = cudaFree(check_1); - AssertCuda(cuda_error); - cuda_error = cudaFree(check_2); - AssertCuda(cuda_error); - cuda_error = cudaFree(check_3); - AssertCuda(cuda_error); - cuda_error = 
cudaFree(check_4); - AssertCuda(cuda_error); - cuda_error = cudaFree(check_5); - AssertCuda(cuda_error); - } -} - -int -main() -{ - initlog(); - - init_cuda(); - - test_gpu<0, 3, double>(); - test_gpu<1, 3, double>(); - test_gpu<2, 3, double>(); - test_gpu<0, 3, float>(); - test_gpu<1, 3, float>(); - test_gpu<2, 3, float>(); -} diff --git a/tests/cuda/affine_constraints_set_zero.cc b/tests/lac/affine_constraints_set_zero.cc similarity index 74% rename from tests/cuda/affine_constraints_set_zero.cc rename to tests/lac/affine_constraints_set_zero.cc index 9b497f2fb8..e64475d284 100644 --- a/tests/cuda/affine_constraints_set_zero.cc +++ b/tests/lac/affine_constraints_set_zero.cc @@ -29,15 +29,6 @@ #include "../tests.h" -__global__ void -initialize_vector(double *vector, int local_size, int offset) -{ - const int index = threadIdx.x + blockIdx.x * blockDim.x; - if (index < local_size) - vector[index] = 1.0 + index + offset; -} - - void test() { @@ -56,15 +47,23 @@ test() deallog << "CM:" << std::endl; cm.print(deallog.get_file_stream()); - LinearAlgebra::distributed::Vector ghosted; + using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space; + ExecutionSpace exec; + + LinearAlgebra::distributed::Vector ghosted; { ghosted.reinit(local_active, complete_index_set(2 * numproc), MPI_COMM_WORLD); - - const int n_blocks = 1 + ghosted.size() / CUDAWrappers::block_size; - initialize_vector<<>>( - ghosted.get_values(), numproc, myid * numproc); + auto ghosted_values = ghosted.get_values(); + + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, numproc), + KOKKOS_LAMBDA(int i) { + int offset = myid * numproc; + ghosted_values[i] = 1.0 + i + offset; + }); + exec.fence(); ghosted.compress(VectorOperation::insert); deallog << "ghosted vector before:" << std::endl; @@ -76,15 +75,22 @@ test() ghosted.print(deallog.get_file_stream()); } - LinearAlgebra::distributed::Vector distributed; + LinearAlgebra::distributed::Vector distributed; { distributed.reinit(local_active, complete_index_set(2 * numproc), MPI_COMM_WORLD); - const int n_blocks = 1 + distributed.size() / CUDAWrappers::block_size; - initialize_vector<<>>( - distributed.get_values(), numproc, myid * numproc); + auto distributed_values = distributed.get_values(); + + Kokkos::parallel_for( + Kokkos::RangePolicy(exec, 0, numproc), + KOKKOS_LAMBDA(int i) { + int offset = myid * numproc; + distributed_values[i] = 1.0 + i + offset; + }); + exec.fence(); + distributed.compress(VectorOperation::insert); deallog << "distributed vector before:" << std::endl; @@ -106,8 +112,6 @@ main(int argc, char *argv[]) Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); MPILogInitAll log; - init_cuda(); - test(); return 0; } diff --git a/tests/cuda/affine_constraints_set_zero.mpirun=2.output b/tests/lac/affine_constraints_set_zero.mpirun=2.output similarity index 100% rename from tests/cuda/affine_constraints_set_zero.mpirun=2.output rename to tests/lac/affine_constraints_set_zero.mpirun=2.output diff --git a/tests/cuda/vector_memory_01.cc b/tests/lac/vector_memory_01.cc similarity index 100% rename from tests/cuda/vector_memory_01.cc rename to tests/lac/vector_memory_01.cc diff --git a/tests/cuda/vector_memory_01.output b/tests/lac/vector_memory_01.output similarity index 100% rename from tests/cuda/vector_memory_01.output rename to tests/lac/vector_memory_01.output diff --git a/tests/cuda/vector_memory_02.cc b/tests/lac/vector_memory_02.cc similarity index 100% rename from tests/cuda/vector_memory_02.cc rename to 
tests/lac/vector_memory_02.cc
diff --git a/tests/cuda/vector_memory_02.debug.output b/tests/lac/vector_memory_02.debug.output
similarity index 100%
rename from tests/cuda/vector_memory_02.debug.output
rename to tests/lac/vector_memory_02.debug.output
diff --git a/tests/cuda/vector_reinit_01.cc b/tests/lac/vector_reinit_04.cc
similarity index 99%
rename from tests/cuda/vector_reinit_01.cc
rename to tests/lac/vector_reinit_04.cc
index 1bdfed9dc1..88b3ccdf1d 100644
--- a/tests/cuda/vector_reinit_01.cc
+++ b/tests/lac/vector_reinit_04.cc
@@ -112,7 +112,6 @@ main(int argc, char **argv)
     argc, argv, testing_max_num_threads());
   initlog();
-  init_cuda();
 
   do_test<
     LinearAlgebra::distributed::Vector>();
 
diff --git a/tests/cuda/vector_reinit_01.output b/tests/lac/vector_reinit_04.output
similarity index 100%
rename from tests/cuda/vector_reinit_01.output
rename to tests/lac/vector_reinit_04.output
-- 
2.39.5
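
Note (not part of the patch): every file touched above follows the same migration pattern. A raw cudaMalloc/cudaFree pair becomes a Kokkos::View, a __global__ kernel launched with <<<...>>> becomes a Kokkos::parallel_for with a KOKKOS_LAMBDA, cudaDeviceSynchronize() becomes a fence on the execution-space instance, and cudaMemcpy back to the host becomes Kokkos::deep_copy or Kokkos::create_mirror_view_and_copy. The sketch below shows that pattern in plain Kokkos, outside the deal.II test harness; the view name, the problem size, and the printed output are illustrative only and are not taken from the patch.

// Minimal standalone sketch of the CUDA-to-Kokkos pattern used in this patch.
// Build against a Kokkos installation; no deal.II headers are required here.
#include <Kokkos_Core.hpp>

#include <cstdio>

int main(int argc, char *argv[])
{
  // Initializes and finalizes Kokkos for the scope of main(),
  // replacing the explicit init_cuda()/Kokkos::initialize() calls.
  Kokkos::ScopeGuard guard(argc, argv);

  using ExecutionSpace    = Kokkos::DefaultExecutionSpace;
  using DeviceMemorySpace = ExecutionSpace::memory_space;

  const int n = 16;

  // Device allocation: replaces cudaMalloc/cudaFree (memory is freed
  // automatically when the View goes out of scope).
  Kokkos::View<double *, DeviceMemorySpace> check("check", n);

  // Device kernel: replaces a <<<blocks, threads>>> launch.
  ExecutionSpace exec;
  Kokkos::parallel_for(
    Kokkos::RangePolicy<ExecutionSpace>(exec, 0, n),
    KOKKOS_LAMBDA(const int i) { check(i) = 2.0 * i; });

  // Replaces cudaDeviceSynchronize().
  exec.fence();

  // Copy back to the host: replaces cudaMemcpy(..., cudaMemcpyDeviceToHost).
  auto check_host =
    Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, check);

  for (int i = 0; i < n; ++i)
    std::printf("check[%d] = %f\n", i, check_host(i));

  return 0;
}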