// ---------------------------------------------------------------------
-// check that we detect that accessing CUDA memory in an ArrayView object
-// is not allowed.
+// check that we detect that accessing memory in MemorySpace::Default using an
+// ArrayView object is not allowed.
#include <deal.II/base/array_view.h>
initlog();
- init_cuda();
-
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> dummy_cuda(
- Utilities::CUDA::allocate_device_data<unsigned int>(2),
- Utilities::CUDA::delete_device_data<unsigned int>);
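+ // The ScopeGuard initializes Kokkos here and finalizes it automatically when main() exits.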
+ Kokkos::ScopeGuard guard;
+ Kokkos::View<unsigned int *, MemorySpace::Default::kokkos_space> dummy(
+ "dummy", 2);
try
{
- ArrayView<unsigned int, MemorySpace::CUDA> view(dummy_cuda.get(), 2);
- const auto dummy = view[0];
+ ArrayView<unsigned int, MemorySpace::Default> view(dummy.data(), 2);
+ const auto dummy = view[0];
}
catch (const ExceptionBase &exc)
{
#include "../tests.h"
-template <int dim, typename Number>
-__global__ void
-miscellaneous_kernel(Number check[16])
-{
- Point<dim, Number> p_1;
- check[0] = p_1.norm_square();
- Point<dim, Number> p_2(Tensor<1, dim, Number>{});
- check[1] = p_2.norm_square();
- if (dim == 1)
- {
- Point<dim, Number> p(1.);
- check[2] = p.norm_square();
- }
- if (dim == 2)
- {
- Point<dim, Number> p(.6, .8);
- check[2] = p.norm_square();
- }
- if (dim == 3)
- {
- Point<dim, Number> p(.48, .64, .6);
- check[2] = p.norm_square();
- }
-
- auto p_3 = Point<dim, Number>::unit_vector(0);
- check[3] = p_3.norm_square();
-
- auto entry_1 = p_1(0);
- check[4] = entry_1;
- p_1(0) = Number{1.};
- check[5] = p_1.norm_square();
- auto p_4 = p_1 + Tensor<1, dim, Number>{};
- check[6] = p_4.norm_square();
- auto p_5 = p_1 - Tensor<1, dim, Number>{};
- check[7] = p_5.norm_square();
- auto t_1 = p_1 - p_2;
- check[8] = t_1.norm_square();
- auto p_6 = -p_3;
- check[9] = p_6.norm_square();
- auto p_7 = p_4 / 2.;
- check[10] = p_7.norm_square();
- auto p_8 = p_7 * 5.;
- check[11] = p_8.norm_square();
-
- auto s_1 = p_1 * t_1;
- check[12] = s_1;
- auto s_2 = p_2.square();
- check[13] = s_2;
- auto s_3 = p_3.distance(p_5);
- check[14] = s_3;
- auto s_4 = p_4.distance_square(p_1);
- check[15] = s_4;
-}
-
template <int dim, typename Number>
void
test_gpu()
{
- Number * check;
const unsigned int n_tests = 16;
- auto cuda_error = cudaMalloc(&check, n_tests * sizeof(Number));
- AssertCuda(cuda_error);
+ Kokkos::View<Number *, MemorySpace::Default::kokkos_space> check("check",
+ n_tests);
// Miscellaneous
- miscellaneous_kernel<dim, Number><<<1, 1>>>(check);
- // Check that the kernel was launched correctly
- AssertCuda(cudaPeekAtLastError());
- // Check that there was no problem during the execution of the kernel
- AssertCuda(cudaDeviceSynchronize());
-
- std::vector<Number> check_host(n_tests);
-
- cuda_error = cudaMemcpy(check_host.data(),
- check,
- n_tests * sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
+ using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space;
+ ExecutionSpace exec;
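+ // Run all Point operations in a single device thread and record each result in check.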
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ Point<dim, Number> p_1;
+ check[0] = p_1.norm_square();
+ Point<dim, Number> p_2(Tensor<1, dim, Number>{});
+ check[1] = p_2.norm_square();
+ if (dim == 1)
+ {
+ Point<dim, Number> p(1.);
+ check[2] = p.norm_square();
+ }
+ if (dim == 2)
+ {
+ Point<dim, Number> p(.6, .8);
+ check[2] = p.norm_square();
+ }
+ if (dim == 3)
+ {
+ Point<dim, Number> p(.48, .64, .6);
+ check[2] = p.norm_square();
+ }
+
+ auto p_3 = Point<dim, Number>::unit_vector(0);
+ check[3] = p_3.norm_square();
+
+ auto entry_1 = p_1(0);
+ check[4] = entry_1;
+ p_1(0) = Number{1.};
+ check[5] = p_1.norm_square();
+ auto p_4 = p_1 + Tensor<1, dim, Number>{};
+ check[6] = p_4.norm_square();
+ auto p_5 = p_1 - Tensor<1, dim, Number>{};
+ check[7] = p_5.norm_square();
+ auto t_1 = p_1 - p_2;
+ check[8] = t_1.norm_square();
+ auto p_6 = -p_3;
+ check[9] = p_6.norm_square();
+ auto p_7 = p_4 / 2.;
+ check[10] = p_7.norm_square();
+ auto p_8 = p_7 * 5.;
+ check[11] = p_8.norm_square();
+
+ auto s_1 = p_1 * t_1;
+ check[12] = s_1;
+ auto s_2 = p_2.square();
+ check[13] = s_2;
+ auto s_3 = p_3.distance(p_5);
+ check[14] = s_3;
+ auto s_4 = p_4.distance_square(p_1);
+ check[15] = s_4;
+ });
+
+ exec.fence();
+
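+ // Copy the results back to the host for verification.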
+ auto check_host =
+ Kokkos::create_mirror_view_and_copy(Kokkos::HostSpace{}, check);
const double tolerance = 1.e-8;
AssertThrow(std::abs(check_host[0] - 0.) < tolerance, ExcInternalError());
AssertThrow(std::abs(check_host[14] - 0.) < tolerance, ExcInternalError());
AssertThrow(std::abs(check_host[15] - 0.) < tolerance, ExcInternalError());
- cuda_error = cudaFree(check);
- AssertCuda(cuda_error);
-
deallog << "OK" << std::endl;
}
{
initlog();
- init_cuda();
+ Kokkos::initialize();
test_gpu<1, double>();
test_gpu<2, double>();
test_gpu<1, float>();
test_gpu<2, float>();
test_gpu<3, float>();
+
+ Kokkos::finalize();
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test operator[], norm and norm_square of Tensor on the device.
+
+#include <deal.II/base/tensor.h>
+
+#include "../tests.h"
+
+void
+test_cpu()
+{
+ double a[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
+ const unsigned int dim = 3;
+ Tensor<2, dim> t;
+ for (unsigned int i = 0; i < dim; ++i)
+ for (unsigned int j = 0; j < dim; ++j)
+ t[i][j] = a[i][j];
+
+ deallog.push("values");
+ for (unsigned int i = 0; i < dim; ++i)
+ for (unsigned int j = 0; j < dim; ++j)
+ deallog << t[i][j] << std::endl;
+ deallog.pop();
+
+ deallog << "norm: " << t.norm() << std::endl;
+ deallog << "norm_square: " << t.norm_square() << std::endl;
+}
+
+void
+test_gpu()
+{
+ const unsigned int dim = 3;
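+ // The tensor and the two scalar results are stored in rank-0 device Views.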
+ Kokkos::View<double, MemorySpace::Default::kokkos_space> norm_dev("norm_dev");
+ double norm_host;
+ Kokkos::View<double, MemorySpace::Default::kokkos_space> norm_square_dev(
+ "norm_square_dev");
+ double norm_square_host;
+ Kokkos::View<Tensor<2, dim>, MemorySpace::Default::kokkos_space> t_dev(
+ "t_dev");
+
+ using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space;
+ ExecutionSpace exec;
+
+ // Launch the kernels.
+ Kokkos::parallel_for(
+ Kokkos::MDRangePolicy<ExecutionSpace, Kokkos::Rank<2>>(exec,
+ {{0, 0}},
+ {{dim, dim}}),
+ KOKKOS_LAMBDA(int i, int j) { t_dev()[i][j] = j + i * dim + 1.; });
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ norm_dev() = t_dev().norm();
+ norm_square_dev() = t_dev().norm_square();
+ });
+ exec.fence();
+
+ // Copy the result to the host
+ Kokkos::deep_copy(norm_host, norm_dev);
+ Kokkos::deep_copy(norm_square_host, norm_square_dev);
+
+ // Output result
+ deallog << "norm GPU: " << norm_host << std::endl;
+ deallog << "norm_square GPU: " << norm_square_host << std::endl;
+}
+
+int
+main()
+{
+ initlog();
+
+ Kokkos::initialize();
+
+ test_cpu();
+
+ test_gpu();
+
+ Kokkos::finalize();
+}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test arithmetic operations (multiplication, division, summation,
+// subtraction) as well as constructors, access, and assignment of Tensor
+// on the device.
+
+#include <deal.II/base/tensor.h>
+
+#include "../tests.h"
+
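+// Helper functors that fill a device-side Tensor entry by entry; the flat
+// index k is mapped to the tensor indices according to the rank.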
+template <int rank, int dim, typename Number>
+struct InitFunctor;
+
+template <int dim, typename Number>
+struct InitFunctor<0, dim, Number>
+{
+ Kokkos::View<Tensor<0, dim, Number>, MemorySpace::Default::kokkos_space> t;
+
+ KOKKOS_FUNCTION void
+ operator()(int /*k*/) const
+ {
+ t() = 1.;
+ }
+};
+
+template <int dim, typename Number>
+struct InitFunctor<1, dim, Number>
+{
+ Kokkos::View<Tensor<1, dim, Number>, MemorySpace::Default::kokkos_space> t;
+
+ KOKKOS_FUNCTION void
+ operator()(int k) const
+ {
+ t()[k] = k + 1.;
+ }
+};
+
+template <int dim, typename Number>
+struct InitFunctor<2, dim, Number>
+{
+ Kokkos::View<Tensor<2, dim, Number>, MemorySpace::Default::kokkos_space> t;
+
+ KOKKOS_FUNCTION void
+ operator()(int k) const
+ {
+ int i = k / dim;
+ int j = k % dim;
+ t()[i][j] = k + 1.;
+ }
+};
+
+
+template <int rank, int dim, typename Number>
+void
+test_gpu()
+{
+ const double tolerance = 1.e-8;
+
+ Kokkos::View<Tensor<rank, dim, Number>, MemorySpace::Default::kokkos_space>
+ t_dev("t_dev");
+ Kokkos::View<Tensor<rank, dim, Number>, MemorySpace::Default::kokkos_space>
+ t1_dev("t1_dev");
+ Kokkos::View<Tensor<rank, dim, Number>, MemorySpace::Default::kokkos_space>
+ t2_dev("t2_dev");
+
+ Tensor<rank, dim, Number> t_host;
+ Tensor<rank, dim, Number> t1_host;
+ Tensor<rank, dim, Number> t2_host;
+
+ Tensor<rank, dim, Number> reference_host;
+
+ using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space;
+ ExecutionSpace exec;
+
+ // Initialize
+ Kokkos::parallel_for(Kokkos::RangePolicy<ExecutionSpace>(
+ exec, 0, Utilities::fixed_power<rank>(dim)),
+ InitFunctor<rank, dim, Number>{t_dev});
+ Kokkos::deep_copy(reference_host, t_dev);
+
+ // Test multiplication.
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ t1_dev() = t_dev() * Number(2.);
+ t2_dev() = Number(2.) * t_dev();
+ t_dev() *= 2.;
+ });
+ Kokkos::deep_copy(t_host, t_dev);
+ Kokkos::deep_copy(t1_host, t1_dev);
+ Kokkos::deep_copy(t2_host, t2_dev);
+
+ reference_host *= 2;
+ AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError());
+ AssertThrow((t1_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+ AssertThrow((t2_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+
+ deallog << "multiplication OK" << std::endl;
+
+ // Test division.
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ t1_dev() = t_dev() / Number(2.);
+ t_dev() /= 2.;
+ t2_dev() = t1_dev();
+ });
+ Kokkos::deep_copy(t_host, t_dev);
+ Kokkos::deep_copy(t1_host, t1_dev);
+
+ reference_host /= 2.;
+ AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError());
+ AssertThrow((t1_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+
+ deallog << "division OK" << std::endl;
+
+ // Test summation
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ t2_dev() += t_dev();
+ t1_dev() = t1_dev() + t_dev();
+ });
+ Kokkos::deep_copy(t1_host, t1_dev);
+ Kokkos::deep_copy(t2_host, t2_dev);
+
+ reference_host *= 2.;
+ AssertThrow((t1_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+ AssertThrow((t2_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+
+
+ // Test subtraction
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ t2_dev() -= t_dev();
+ t1_dev() = t1_dev() - t_dev();
+ });
+ Kokkos::deep_copy(t1_host, t1_dev);
+ Kokkos::deep_copy(t2_host, t2_dev);
+
+ reference_host /= 2.;
+ AssertThrow((t1_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+ AssertThrow((t2_host - reference_host).norm() < tolerance,
+ ExcInternalError());
+
+ // Miscellaneous
+ {
+ Kokkos::View<Number, MemorySpace::Default::kokkos_space> check_1("check_1");
+ Kokkos::View<Number, MemorySpace::Default::kokkos_space> check_2("check_2");
+ Kokkos::View<Number, MemorySpace::Default::kokkos_space> check_3("check_3");
+ Kokkos::View<Number, MemorySpace::Default::kokkos_space> check_4("check_4");
+ Kokkos::View<Number, MemorySpace::Default::kokkos_space> check_5("check_5");
+
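+    // Exercise constructors, element access, and assignment on the device;
+    // every recorded norm_square() is expected to be zero.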
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, 1), KOKKOS_LAMBDA(int) {
+ // constructors
+ typename Tensor<rank, dim, Number>::array_type array{};
+ Tensor<rank, dim, Number> dummy_1(array);
+ check_1() = dummy_1.norm_square();
+ Tensor<rank, dim, Number> dummy_2;
+ check_2() = dummy_2.norm_square();
+ Tensor<rank, dim, Number> dummy_3 = dummy_2;
+ check_3() = dummy_3.norm_square();
+
+ // access
+ Tensor<rank + 1, dim, Number> initializer_1;
+ const Tensor<rank, dim, Number> dummy_5 = initializer_1[0];
+ check_4() = dummy_5.norm_square();
+
+ // assignment
+ dummy_2 = dummy_3;
+ check_5() = dummy_2.norm_square();
+ });
+
+ Number check_1_host, check_2_host, check_3_host, check_4_host, check_5_host;
+
+ Kokkos::deep_copy(check_1_host, check_1);
+ Kokkos::deep_copy(check_2_host, check_2);
+ Kokkos::deep_copy(check_3_host, check_3);
+ Kokkos::deep_copy(check_4_host, check_4);
+ Kokkos::deep_copy(check_5_host, check_5);
+
+ AssertThrow(std::abs(check_1_host) < tolerance, ExcInternalError());
+ AssertThrow(std::abs(check_2_host) < tolerance, ExcInternalError());
+ AssertThrow(std::abs(check_3_host) < tolerance, ExcInternalError());
+ AssertThrow(std::abs(check_4_host) < tolerance, ExcInternalError());
+ AssertThrow(std::abs(check_5_host) < tolerance, ExcInternalError());
+ }
+}
+
+int
+main()
+{
+ initlog();
+
+ Kokkos::initialize();
+
+ test_gpu<0, 3, double>();
+ test_gpu<1, 3, double>();
+ test_gpu<2, 3, double>();
+ test_gpu<0, 3, float>();
+ test_gpu<1, 3, float>();
+ test_gpu<2, 3, float>();
+
+ Kokkos::finalize();
+}
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2018 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-
-// check that we detect creating ArrayView objects with the wrong memory space.
-
-#include <deal.II/base/array_view.h>
-
-#include "../tests.h"
-
-int
-main(int argc, char **argv)
-{
- deal_II_exceptions::disable_abort_on_exception();
-
- initlog();
-
- init_cuda();
-
- std::vector<unsigned int> dummy_host(2);
- std::unique_ptr<unsigned int[], void (*)(unsigned int *)> dummy_cuda(
- Utilities::CUDA::allocate_device_data<unsigned int>(2),
- Utilities::CUDA::delete_device_data<unsigned int>);
-
- deallog << "Testing host ArrayView with host memory" << std::endl;
- ArrayView<unsigned int, MemorySpace::Host> view_1(dummy_host);
-
- deallog << "Testing device ArrayView with host memory" << std::endl;
- try
- {
- ArrayView<unsigned int, MemorySpace::CUDA> view_2(dummy_host);
- }
- catch (const ExceptionBase &exc)
- {
- deallog << exc.what() << std::endl;
- }
-
- deallog << "Testing host ArrayView with device memory" << std::endl;
- try
- {
- ArrayView<unsigned int, MemorySpace::Host> view_3(dummy_cuda.get(), 2);
- }
- catch (const ExceptionBase &exc)
- {
- deallog << exc.what() << std::endl;
- }
-
- deallog << "Testing device ArrayView with device memory" << std::endl;
- ArrayView<unsigned int, MemorySpace::CUDA> view_4(dummy_cuda.get(), 2);
-
- deallog << "Testing host ArrayView to a nullptr with length 0" << std::endl;
- ArrayView<unsigned int, MemorySpace::Host> view_5(nullptr, 0);
-
- deallog << "Testing device ArrayView to a nullptr with length 0" << std::endl;
- ArrayView<unsigned int, MemorySpace::CUDA> view_6(nullptr, 0);
-
- return 0;
-}
+++ /dev/null
-
-DEAL::Testing host ArrayView with host memory
-DEAL::Testing device ArrayView with host memory
-DEAL::
---------------------------------------------------------
-An error occurred in file <array_view.h> in function
- dealii::ArrayView<ElementType, MemorySpace>::ArrayView(dealii::ArrayView<ElementType, MemorySpace>::value_type*, std::size_t) [with ElementType = unsigned int; MemorySpaceType = dealii::MemorySpace::Default; dealii::ArrayView<ElementType, MemorySpace>::value_type = unsigned int; std::size_t = long unsigned int]
-The violated condition was:
- n_elements == 0 || internal::ArrayViewHelper::is_in_correct_memory_space<MemorySpaceType>( starting_element)
-Additional information:
- The memory space indicated by the template parameter and the one
- derived from the pointer value do not match!
---------------------------------------------------------
-
-DEAL::Testing host ArrayView with device memory
-DEAL::
---------------------------------------------------------
-An error occurred in file <array_view.h> in function
- dealii::ArrayView<ElementType, MemorySpace>::ArrayView(dealii::ArrayView<ElementType, MemorySpace>::value_type*, std::size_t) [with ElementType = unsigned int; MemorySpaceType = dealii::MemorySpace::Host; dealii::ArrayView<ElementType, MemorySpace>::value_type = unsigned int; std::size_t = long unsigned int]
-The violated condition was:
- n_elements == 0 || internal::ArrayViewHelper::is_in_correct_memory_space<MemorySpaceType>( starting_element)
-Additional information:
- The memory space indicated by the template parameter and the one
- derived from the pointer value do not match!
---------------------------------------------------------
-
-DEAL::Testing device ArrayView with device memory
-DEAL::Testing host ArrayView to a nullptr with length 0
-DEAL::Testing device ArrayView to a nullptr with length 0
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2016 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-// Test operator[], norm and norm_square of cuda_tensor.
-
-#include <deal.II/base/tensor.h>
-
-#include "../tests.h"
-
-void
-test_cpu()
-{
- double a[3][3] = {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}};
- const unsigned int dim = 3;
- Tensor<2, dim> t;
- for (unsigned int i = 0; i < dim; ++i)
- for (unsigned int j = 0; j < dim; ++j)
- t[i][j] = a[i][j];
-
- deallog.push("values");
- for (unsigned int i = 0; i < dim; ++i)
- for (unsigned int j = 0; j < dim; ++j)
- deallog << t[i][j] << std::endl;
- deallog.pop();
-
- deallog << "norm: " << t.norm() << std::endl;
- deallog << "norm_square: " << t.norm_square() << std::endl;
-}
-
-__global__ void
-init_kernel(Tensor<2, 3> *t, const unsigned int N)
-{
- const unsigned int i = threadIdx.y;
- const unsigned int j = threadIdx.x;
- if ((i < N) && (j < N))
- (*t)[i][j] = j + i * N + 1.;
-}
-
-__global__ void
-norm_kernel(Tensor<2, 3> *t, double *norm, double *norm_square)
-{
- if (threadIdx.x == 0)
- {
- *norm = t->norm();
- *norm_square = t->norm_square();
- }
-}
-
-void
-test_gpu()
-{
- const unsigned int dim = 3;
- double * norm_dev;
- double norm_host;
- double * norm_square_dev;
- double norm_square_host;
- Tensor<2, dim> * t_dev;
-
- // Allocate objects on the device
- cudaError_t cuda_error = cudaMalloc(&t_dev, sizeof(Tensor<2, dim>));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&norm_dev, sizeof(double));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&norm_square_dev, sizeof(double));
- AssertCuda(cuda_error);
-
- // Launch the kernels.
- dim3 block_dim(dim, dim);
- init_kernel<<<1, block_dim>>>(t_dev, dim);
- norm_kernel<<<1, 1>>>(t_dev, norm_dev, norm_square_dev);
-
- // Copy the result to the device
- cuda_error =
- cudaMemcpy(&norm_host, norm_dev, sizeof(double), cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&norm_square_host,
- norm_square_dev,
- sizeof(double),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- // Free memory
- cuda_error = cudaFree(t_dev);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(norm_dev);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(norm_square_dev);
- AssertCuda(cuda_error);
-
- // Output result
- deallog << "norm GPU: " << norm_host << std::endl;
- deallog << "norm_square GPU: " << norm_square_host << std::endl;
-}
-
-int
-main()
-{
- initlog();
-
- init_cuda();
-
- test_cpu();
-
- test_gpu();
-}
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2016 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE.md at
-// the top level directory of deal.II.
-//
-// ---------------------------------------------------------------------
-
-// Test operator[] and norm_square of cuda_tensor.
-
-#include <deal.II/base/tensor.h>
-
-#include "../tests.h"
-
-template <int rank, int dim, typename Number>
-__global__ void
-miscellaneous_kernel(Number *check_1,
- Number *check_2,
- Number *check_3,
- Number *check_4,
- Number *check_5)
-{
- // constructors
- typename Tensor<rank, dim, Number>::array_type array{};
- Tensor<rank, dim, Number> dummy_1(array);
- *check_1 = dummy_1.norm_square();
- Tensor<rank, dim, Number> dummy_2;
- *check_2 = dummy_2.norm_square();
- Tensor<rank, dim, Number> dummy_3 = dummy_2;
- *check_3 = dummy_3.norm_square();
-
- // access
- Tensor<rank + 1, dim, Number> initializer_1;
- const Tensor<rank, dim, Number> dummy_5 = initializer_1[0];
- *check_4 = dummy_5.norm_square();
-
- // assignment
- dummy_2 = dummy_3;
- *check_5 = dummy_2.norm_square();
-}
-
-template <int rank, int dim, typename Number>
-__global__ void
-summation_kernel(Tensor<rank, dim, Number> *t,
- Tensor<rank, dim, Number> *t1,
- Tensor<rank, dim, Number> *t2)
-{
- *t2 += *t;
- *t1 = *t1 + *t;
-}
-
-template <int rank, int dim, typename Number>
-__global__ void
-subtraction_kernel(Tensor<rank, dim, Number> *t,
- Tensor<rank, dim, Number> *t1,
- Tensor<rank, dim, Number> *t2)
-{
- *t2 -= *t;
- *t1 = *t1 - *t;
-}
-
-template <int rank, int dim, typename Number>
-__global__ void
-multiplication_kernel(Tensor<rank, dim, Number> *t,
- Tensor<rank, dim, Number> *t1,
- Tensor<rank, dim, Number> *t2)
-{
- *t1 = *t * Number(2.);
- *t2 = Number(2.) * *t;
- *t *= 2.;
-}
-
-template <int rank, int dim, typename Number>
-__global__ void
-division_kernel(Tensor<rank, dim, Number> *t,
- Tensor<rank, dim, Number> *t1,
- Tensor<rank, dim, Number> *t2)
-{
- *t1 = *t / Number(2.);
- *t /= 2.;
- *t2 = *t1;
-}
-
-template <int dim, typename Number>
-__global__ void
-init_kernel(Tensor<0, dim, Number> *t)
-{
- if (threadIdx.x == 0)
- *t = 1.;
-}
-
-template <int dim, typename Number>
-__global__ void
-init_kernel(Tensor<1, dim, Number> *t)
-{
- const unsigned int i = threadIdx.x;
- if (i < dim)
- (*t)[i] = i + 1.;
-}
-
-template <int dim, typename Number>
-__global__ void
-init_kernel(Tensor<2, dim, Number> *t)
-{
- const unsigned int i = threadIdx.y;
- const unsigned int j = threadIdx.x;
- if ((i < dim) && (j < dim))
- (*t)[i][j] = j + i * dim + 1.;
-}
-
-
-template <int rank, int dim, typename Number>
-void
-test_gpu()
-{
- const double tolerance = 1.e-8;
-
- Tensor<rank, dim, Number> *t_dev;
- Tensor<rank, dim, Number> *t1_dev;
- Tensor<rank, dim, Number> *t2_dev;
-
- Tensor<rank, dim, Number> t_host;
- Tensor<rank, dim, Number> t1_host;
- Tensor<rank, dim, Number> t2_host;
-
- Tensor<rank, dim, Number> reference_host;
-
- // Allocate objects on the device
- cudaError_t cuda_error =
- cudaMalloc(&t_dev, sizeof(Tensor<rank, dim, Number>));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&t1_dev, sizeof(Tensor<rank, dim, Number>));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&t2_dev, sizeof(Tensor<rank, dim, Number>));
- AssertCuda(cuda_error);
-
- // Initialize
- dim3 block_dim(dim, dim);
- init_kernel<<<1, block_dim>>>(t_dev);
- cuda_error = cudaMemcpy(&reference_host,
- t_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- // Test multiplication.
- multiplication_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev);
-
- cuda_error = cudaMemcpy(&t_host,
- t_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&t1_host,
- t1_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&t2_host,
- t2_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- reference_host *= 2;
- AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError());
- AssertThrow((t1_host - reference_host).norm() < tolerance,
- ExcInternalError());
- AssertThrow((t2_host - reference_host).norm() < tolerance,
- ExcInternalError());
-
- deallog << "multiplication OK" << std::endl;
-
- // Test division.
- division_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev);
- cuda_error = cudaMemcpy(&t_host,
- t_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&t1_host,
- t1_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- reference_host /= 2.;
- AssertThrow((t_host - reference_host).norm() < tolerance, ExcInternalError());
- AssertThrow((t1_host - reference_host).norm() < tolerance,
- ExcInternalError());
-
- deallog << "division OK" << std::endl;
-
- // Test summation
- summation_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev);
- cuda_error = cudaMemcpy(&t1_host,
- t1_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&t2_host,
- t2_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- reference_host *= 2.;
- AssertThrow((t1_host - reference_host).norm() < tolerance,
- ExcInternalError());
- AssertThrow((t2_host - reference_host).norm() < tolerance,
- ExcInternalError());
-
-
- // Test subtraction
- subtraction_kernel<<<1, 1>>>(t_dev, t1_dev, t2_dev);
- cuda_error = cudaMemcpy(&t1_host,
- t1_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&t2_host,
- t2_dev,
- sizeof(Tensor<rank, dim, Number>),
- cudaMemcpyDeviceToHost);
-
- reference_host /= 2.;
- AssertThrow((t1_host - reference_host).norm() < tolerance,
- ExcInternalError());
- AssertThrow((t2_host - reference_host).norm() < tolerance,
- ExcInternalError());
-
- // Free memory
- cuda_error = cudaFree(t_dev);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(t1_dev);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(t2_dev);
- AssertCuda(cuda_error);
-
- // Miscellaneous
- {
- Number *check_1;
- Number *check_2;
- Number *check_3;
- Number *check_4;
- Number *check_5;
-
- cuda_error = cudaMalloc(&check_1, sizeof(Number));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&check_2, sizeof(Number));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&check_3, sizeof(Number));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&check_4, sizeof(Number));
- AssertCuda(cuda_error);
- cuda_error = cudaMalloc(&check_5, sizeof(Number));
- AssertCuda(cuda_error);
-
- miscellaneous_kernel<rank, dim, Number>
- <<<1, 1>>>(check_1, check_2, check_3, check_4, check_5);
-
- Number check_1_host, check_2_host, check_3_host, check_4_host, check_5_host;
-
- cuda_error = cudaMemcpy(&check_1_host,
- check_1,
- sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&check_2_host,
- check_2,
- sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&check_3_host,
- check_3,
- sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&check_4_host,
- check_4,
- sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
- cuda_error = cudaMemcpy(&check_5_host,
- check_5,
- sizeof(Number),
- cudaMemcpyDeviceToHost);
- AssertCuda(cuda_error);
-
- AssertThrow(std::abs(check_1_host) < tolerance, ExcInternalError());
- AssertThrow(std::abs(check_2_host) < tolerance, ExcInternalError());
- AssertThrow(std::abs(check_3_host) < tolerance, ExcInternalError());
- AssertThrow(std::abs(check_4_host) < tolerance, ExcInternalError());
- AssertThrow(std::abs(check_5_host) < tolerance, ExcInternalError());
-
- cuda_error = cudaFree(check_1);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(check_2);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(check_3);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(check_4);
- AssertCuda(cuda_error);
- cuda_error = cudaFree(check_5);
- AssertCuda(cuda_error);
- }
-}
-
-int
-main()
-{
- initlog();
-
- init_cuda();
-
- test_gpu<0, 3, double>();
- test_gpu<1, 3, double>();
- test_gpu<2, 3, double>();
- test_gpu<0, 3, float>();
- test_gpu<1, 3, float>();
- test_gpu<2, 3, float>();
-}
#include "../tests.h"
-__global__ void
-initialize_vector(double *vector, int local_size, int offset)
-{
- const int index = threadIdx.x + blockIdx.x * blockDim.x;
- if (index < local_size)
- vector[index] = 1.0 + index + offset;
-}
-
-
void
test()
{
deallog << "CM:" << std::endl;
cm.print(deallog.get_file_stream());
- LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> ghosted;
+ using ExecutionSpace = MemorySpace::Default::kokkos_space::execution_space;
+ ExecutionSpace exec;
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::Default> ghosted;
{
ghosted.reinit(local_active,
complete_index_set(2 * numproc),
MPI_COMM_WORLD);
-
- const int n_blocks = 1 + ghosted.size() / CUDAWrappers::block_size;
- initialize_vector<<<n_blocks, CUDAWrappers::block_size>>>(
- ghosted.get_values(), numproc, myid * numproc);
+ auto ghosted_values = ghosted.get_values();
+
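+ // Fill the local entries on the device; each rank writes values shifted by its offset (myid * numproc).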
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, numproc),
+ KOKKOS_LAMBDA(int i) {
+ int offset = myid * numproc;
+ ghosted_values[i] = 1.0 + i + offset;
+ });
+ exec.fence();
ghosted.compress(VectorOperation::insert);
deallog << "ghosted vector before:" << std::endl;
ghosted.print(deallog.get_file_stream());
}
- LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> distributed;
+ LinearAlgebra::distributed::Vector<double, MemorySpace::Default> distributed;
{
distributed.reinit(local_active,
complete_index_set(2 * numproc),
MPI_COMM_WORLD);
- const int n_blocks = 1 + distributed.size() / CUDAWrappers::block_size;
- initialize_vector<<<n_blocks, CUDAWrappers::block_size>>>(
- distributed.get_values(), numproc, myid * numproc);
+ auto distributed_values = distributed.get_values();
+
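+ // Initialize the non-ghosted vector on the device in the same way.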
+ Kokkos::parallel_for(
+ Kokkos::RangePolicy<ExecutionSpace>(exec, 0, numproc),
+ KOKKOS_LAMBDA(int i) {
+ int offset = myid * numproc;
+ distributed_values[i] = 1.0 + i + offset;
+ });
+ exec.fence();
+
distributed.compress(VectorOperation::insert);
deallog << "distributed vector before:" << std::endl;
Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
MPILogInitAll log;
- init_cuda();
-
test();
return 0;
}
argc, argv, testing_max_num_threads());
initlog();
- init_cuda();
do_test<
LinearAlgebra::distributed::Vector<double, dealii::MemorySpace::CUDA>>();