From: Daniel Arndt
Date: Tue, 30 Oct 2018 14:36:20 +0000 (+0100)
Subject: Introduce init_cuda
X-Git-Tag: v9.1.0-rc1~577^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=51f0eaba96767fe1eff7de286fed7fe960e5006e;p=dealii.git

Introduce init_cuda
---

diff --git a/tests/cuda/cuda_evaluate_1d_shape.cu b/tests/cuda/cuda_evaluate_1d_shape.cu
index bb9129e4da..e2e4323c06 100644
--- a/tests/cuda/cuda_evaluate_1d_shape.cu
+++ b/tests/cuda/cuda_evaluate_1d_shape.cu
@@ -159,6 +159,8 @@ main()
   std::ofstream logfile("output");
   deallog.attach(logfile);
 
+  init_cuda();
+
   deallog.push("values");
   test<4, 4, 0, false>();
   test<3, 3, 0, false>();
diff --git a/tests/cuda/cuda_evaluate_2d_shape.cu b/tests/cuda/cuda_evaluate_2d_shape.cu
index 2c353ffabe..db81805e4d 100644
--- a/tests/cuda/cuda_evaluate_2d_shape.cu
+++ b/tests/cuda/cuda_evaluate_2d_shape.cu
@@ -184,6 +184,8 @@ main()
   std::ofstream logfile("output");
   deallog.attach(logfile);
 
+  init_cuda();
+
   deallog.push("values");
   test<4, 4, 0, false>();
   test<3, 3, 0, false>();
diff --git a/tests/cuda/cuda_tensor_01.cu b/tests/cuda/cuda_tensor_01.cu
index 615b2da0ad..d530992a4d 100644
--- a/tests/cuda/cuda_tensor_01.cu
+++ b/tests/cuda/cuda_tensor_01.cu
@@ -101,6 +101,8 @@ main()
   deallog << std::setprecision(5);
   deallog.attach(logfile);
 
+  init_cuda();
+
   test_cpu();
   test_gpu();
 
diff --git a/tests/cuda/cuda_vector_01.cu b/tests/cuda/cuda_vector_01.cu
index b44eb96bf1..cd042f05a6 100644
--- a/tests/cuda/cuda_vector_01.cu
+++ b/tests/cuda/cuda_vector_01.cu
@@ -115,6 +115,8 @@ main(int argc, char **argv)
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   test();
 
   deallog << "OK" << std::endl;
diff --git a/tests/cuda/cuda_vector_02.cu b/tests/cuda/cuda_vector_02.cu
index b2ca855ab2..ed240cea52 100644
--- a/tests/cuda/cuda_vector_02.cu
+++ b/tests/cuda/cuda_vector_02.cu
@@ -111,6 +111,8 @@ main(int argc, char **argv)
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   test();
 
   deallog << "OK" << std::endl;
diff --git a/tests/cuda/cuda_vector_03.cu b/tests/cuda/cuda_vector_03.cu
index 0722524769..772928010e 100644
--- a/tests/cuda/cuda_vector_03.cu
+++ b/tests/cuda/cuda_vector_03.cu
@@ -41,6 +41,8 @@ main(int argc, char **argv)
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   test();
 
   deallog << "OK" << std::endl;
diff --git a/tests/cuda/cuda_vector_04.cu b/tests/cuda/cuda_vector_04.cu
index c97ee95031..e54a829a64 100644
--- a/tests/cuda/cuda_vector_04.cu
+++ b/tests/cuda/cuda_vector_04.cu
@@ -49,6 +49,8 @@ main(int argc, char **argv)
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   test();
 
   deallog << "OK" << std::endl;
diff --git a/tests/cuda/matrix_vector_common.h b/tests/cuda/matrix_vector_common.h
index a0f673a844..ee06444eb2 100644
--- a/tests/cuda/matrix_vector_common.h
+++ b/tests/cuda/matrix_vector_common.h
@@ -159,6 +159,8 @@ main()
 
   deallog << std::setprecision(3);
 
+  init_cuda();
+
   {
     deallog.push("2d");
     test<2, 1>();
diff --git a/tests/cuda/parallel_vector_01.cu b/tests/cuda/parallel_vector_01.cu
index 9e53fb11e9..5021d1aa16 100644
--- a/tests/cuda/parallel_vector_01.cu
+++ b/tests/cuda/parallel_vector_01.cu
@@ -85,18 +85,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
-
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_02.cu b/tests/cuda/parallel_vector_02.cu
index 7c1d0ba0bc..e7664f4e1c 100644
--- a/tests/cuda/parallel_vector_02.cu
+++ b/tests/cuda/parallel_vector_02.cu
@@ -97,17 +97,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_03.cu b/tests/cuda/parallel_vector_03.cu
index 1dfb25ceb2..0e34c7ed54 100644
--- a/tests/cuda/parallel_vector_03.cu
+++ b/tests/cuda/parallel_vector_03.cu
@@ -102,17 +102,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_03a.cu b/tests/cuda/parallel_vector_03a.cu
index ddd3eac749..458b900a40 100644
--- a/tests/cuda/parallel_vector_03a.cu
+++ b/tests/cuda/parallel_vector_03a.cu
@@ -155,17 +155,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_04.cu b/tests/cuda/parallel_vector_04.cu
index 94731f7fd1..e928ed32b1 100644
--- a/tests/cuda/parallel_vector_04.cu
+++ b/tests/cuda/parallel_vector_04.cu
@@ -153,22 +153,11 @@ main(int argc, char **argv)
   Utilities::MPI::MPI_InitFinalize mpi_initialization(
     argc, argv, testing_max_num_threads());
 
+  init_cuda(true);
+
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
-
-
   if (myid == 0)
     {
       initlog();
diff --git a/tests/cuda/parallel_vector_05.cu b/tests/cuda/parallel_vector_05.cu
index 206c6c468e..1519bd97e6 100644
--- a/tests/cuda/parallel_vector_05.cu
+++ b/tests/cuda/parallel_vector_05.cu
@@ -98,17 +98,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_06.cu b/tests/cuda/parallel_vector_06.cu
index a1ec4beb9b..bfe06fffae 100644
--- a/tests/cuda/parallel_vector_06.cu
+++ b/tests/cuda/parallel_vector_06.cu
@@ -143,18 +143,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
-
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_08.cu b/tests/cuda/parallel_vector_08.cu
index 993a32e55c..f0c4716232 100644
--- a/tests/cuda/parallel_vector_08.cu
+++ b/tests/cuda/parallel_vector_08.cu
@@ -126,17 +126,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_10.cu b/tests/cuda/parallel_vector_10.cu
index 4f08b45640..6b250c3292 100644
--- a/tests/cuda/parallel_vector_10.cu
+++ b/tests/cuda/parallel_vector_10.cu
@@ -92,17 +92,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda();
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_11.cu b/tests/cuda/parallel_vector_11.cu
index 64840468e6..20997bf51f 100644
--- a/tests/cuda/parallel_vector_11.cu
+++ b/tests/cuda/parallel_vector_11.cu
@@ -233,18 +233,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
-
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_12.cu b/tests/cuda/parallel_vector_12.cu
index 29e5c4da36..9103d9ef77 100644
--- a/tests/cuda/parallel_vector_12.cu
+++ b/tests/cuda/parallel_vector_12.cu
@@ -216,18 +216,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
-
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_13.cu b/tests/cuda/parallel_vector_13.cu
index 097ef2ed9d..c157244982 100644
--- a/tests/cuda/parallel_vector_13.cu
+++ b/tests/cuda/parallel_vector_13.cu
@@ -113,17 +113,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_14.cu b/tests/cuda/parallel_vector_14.cu
index 955b562131..c3fab28bad 100644
--- a/tests/cuda/parallel_vector_14.cu
+++ b/tests/cuda/parallel_vector_14.cu
@@ -161,17 +161,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_15.cu b/tests/cuda/parallel_vector_15.cu
index 973b0313ed..3a77b32fb9 100644
--- a/tests/cuda/parallel_vector_15.cu
+++ b/tests/cuda/parallel_vector_15.cu
@@ -111,17 +111,7 @@ main(int argc, char **argv)
   unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
   deallog.push(Utilities::int_to_string(myid));
 
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = myid % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   if (myid == 0)
     {
diff --git a/tests/cuda/parallel_vector_21.cu b/tests/cuda/parallel_vector_21.cu
index f8e75de788..99f867327d 100644
--- a/tests/cuda/parallel_vector_21.cu
+++ b/tests/cuda/parallel_vector_21.cu
@@ -86,18 +86,7 @@ main(int argc, char **argv)
   Utilities::MPI::MPI_InitFinalize mpi_initialization(
     argc, argv, testing_max_num_threads());
 
-  unsigned int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
-  Utilities::CUDA::Handle cuda_handle;
-  // By default, all the ranks will try to access the device 0. This is fine if
-  // we have one rank per node _and_ one gpu per node. If we have multiple GPUs
-  // on one node, we need each process to access a different GPU. We assume that
-  // each node has the same number of GPUs.
-  int n_devices = 0;
-  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
-  AssertCuda(cuda_error_code);
-  int device_id = my_id % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
-  AssertCuda(cuda_error_code);
+  init_cuda(true);
 
   MPILogInitAll log;
   test();
diff --git a/tests/cuda/precondition_01.cu b/tests/cuda/precondition_01.cu
index 9de6715e61..78569e9ced 100644
--- a/tests/cuda/precondition_01.cu
+++ b/tests/cuda/precondition_01.cu
@@ -74,6 +74,8 @@ main()
   deallog << std::setprecision(10);
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   deallog << "Testing float" << std::endl;
   test(cuda_handle);
diff --git a/tests/cuda/precondition_02.cu b/tests/cuda/precondition_02.cu
index edfea90e81..e8b2099f66 100644
--- a/tests/cuda/precondition_02.cu
+++ b/tests/cuda/precondition_02.cu
@@ -74,6 +74,8 @@ main()
   deallog << std::setprecision(10);
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   deallog << "Testing float" << std::endl;
   test(cuda_handle);
diff --git a/tests/cuda/solver_01.cu b/tests/cuda/solver_01.cu
index 69b8c32743..58d8ace477 100644
--- a/tests/cuda/solver_01.cu
+++ b/tests/cuda/solver_01.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_02.cu b/tests/cuda/solver_02.cu
index cdf9d8a8dd..67333dfc01 100644
--- a/tests/cuda/solver_02.cu
+++ b/tests/cuda/solver_02.cu
@@ -107,6 +107,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_03.cu b/tests/cuda/solver_03.cu
index 89250f5110..cc5c7dec4a 100644
--- a/tests/cuda/solver_03.cu
+++ b/tests/cuda/solver_03.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_04.cu b/tests/cuda/solver_04.cu
index 0aba76d39b..eb50a3bc14 100644
--- a/tests/cuda/solver_04.cu
+++ b/tests/cuda/solver_04.cu
@@ -77,6 +77,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_05.cu b/tests/cuda/solver_05.cu
index 1a4bb7f48a..f66ffbfad2 100644
--- a/tests/cuda/solver_05.cu
+++ b/tests/cuda/solver_05.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_06.cu b/tests/cuda/solver_06.cu
index e8aa593338..b8cd854b47 100644
--- a/tests/cuda/solver_06.cu
+++ b/tests/cuda/solver_06.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_07.cu b/tests/cuda/solver_07.cu
index ea8561c29a..c6b99396c7 100644
--- a/tests/cuda/solver_07.cu
+++ b/tests/cuda/solver_07.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_08.cu b/tests/cuda/solver_08.cu
index 0cd8caab52..fe381d2d10 100644
--- a/tests/cuda/solver_08.cu
+++ b/tests/cuda/solver_08.cu
@@ -76,6 +76,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_09.cu b/tests/cuda/solver_09.cu
index 742924bcbf..8aa91ff363 100644
--- a/tests/cuda/solver_09.cu
+++ b/tests/cuda/solver_09.cu
@@ -105,6 +105,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/solver_10.cu b/tests/cuda/solver_10.cu
index f38f500402..396ef2ac47 100644
--- a/tests/cuda/solver_10.cu
+++ b/tests/cuda/solver_10.cu
@@ -125,6 +125,8 @@ main()
   initlog();
   deallog.depth_console(10);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/sparse_matrix_01.cu b/tests/cuda/sparse_matrix_01.cu
index 1d7db49acb..4ca3de628a 100644
--- a/tests/cuda/sparse_matrix_01.cu
+++ b/tests/cuda/sparse_matrix_01.cu
@@ -213,6 +213,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/sparse_matrix_02.cu b/tests/cuda/sparse_matrix_02.cu
index bec9672194..377789dd16 100644
--- a/tests/cuda/sparse_matrix_02.cu
+++ b/tests/cuda/sparse_matrix_02.cu
@@ -52,6 +52,8 @@ main()
   initlog();
   deallog.depth_console(0);
 
+  init_cuda();
+
   Utilities::CUDA::Handle cuda_handle;
   test(cuda_handle);
 
diff --git a/tests/cuda/vector_reinit_01.cu b/tests/cuda/vector_reinit_01.cu
index df834427b8..1bdfed9dc1 100644
--- a/tests/cuda/vector_reinit_01.cu
+++ b/tests/cuda/vector_reinit_01.cu
@@ -110,9 +110,9 @@ main(int argc, char **argv)
 {
   Utilities::MPI::MPI_InitFinalize mpi_initialization(
     argc, argv, testing_max_num_threads());
-  Utilities::CUDA::Handle cuda_handle;
 
   initlog();
+  init_cuda();
 
   do_test<
     LinearAlgebra::distributed::Vector>();
diff --git a/tests/tests.h b/tests/tests.h
index 25ccf10ff5..6f564b5351 100644
--- a/tests/tests.h
+++ b/tests/tests.h
@@ -20,6 +20,7 @@
 #include 
 
+#include 
 #include 
 #include 
 #include 
@@ -591,6 +592,42 @@ struct MPILogInitAll
 };
 
 
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
+// By default, all the ranks will try to access the device 0. We can distribute
+// the load better by using a random device which is done in this function. In
+// case tests use MPI, we make sure to set subsequent devices. MPI needs to be
+// initialized before using this function.
+// Also initialize a dummy handle that makes sure that unused memory is released
+// before the device shuts down.
+void
+init_cuda(bool use_mpi = false)
+{
+  static Utilities::CUDA::Handle cuda_handle;
+# ifndef DEAL_II_WITH_MPI
+  Assert(use_mpi == false, ExcInternalError());
+# endif
+  const unsigned int my_id =
+    use_mpi ? Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) : 0;
+  int device_id = 0;
+  int n_devices = 0;
+  cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+  if (my_id == 0)
+    {
+      Testing::srand(std::time(nullptr));
+      AssertCuda(cuda_error_code);
+      device_id = Testing::rand() % n_devices;
+    }
+# ifdef DEAL_II_WITH_MPI
+  if (use_mpi)
+    MPI_Bcast(&device_id, 1, MPI_INT, 0, MPI_COMM_WORLD);
+# endif
+  device_id = (device_id + my_id) % n_devices;
+  cuda_error_code = cudaSetDevice(device_id);
+  AssertCuda(cuda_error_code);
+}
+#endif
+
+
 
 /* Override the tbb assertion handler in order to print a stacktrace:*/
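
For reference, a minimal sketch of how a serial CUDA test is expected to set itself up after this change, following the pattern visible in the hunks above. The test body and the explicit cuda.h include are illustrative placeholders and not part of the commit; the MPI-parallel tests (parallel_vector_*) instead call init_cuda(true) after constructing MPI_InitFinalize so that the ranks are spread across the available devices.

#include "../tests.h"

#include <deal.II/base/cuda.h>

// Placeholder GPU test body; a real test would exercise device code here.
void
test(Utilities::CUDA::Handle &cuda_handle)
{
  deallog << "OK" << std::endl;
}

int
main()
{
  initlog();
  deallog.depth_console(0);

  // Select the CUDA device (and create the static handle held by tests.h)
  // before any other CUDA object is constructed.
  init_cuda();

  Utilities::CUDA::Handle cuda_handle;
  test(cuda_handle);

  return 0;
}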