From e307a34230257c636f5c1f617a84378fa187e4ec Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Thu, 8 Nov 2018 17:13:45 +0100
Subject: [PATCH] Derandomize CUDA devices used in test

---
 tests/tests.h | 52 +++++++++++++++++++++++++++++++++++---------------
 1 file changed, 37 insertions(+), 15 deletions(-)

diff --git a/tests/tests.h b/tests/tests.h
index 2c27ab7d32..d798e0236e 100644
--- a/tests/tests.h
+++ b/tests/tests.h
@@ -593,10 +593,12 @@ struct MPILogInitAll
 
 
 #ifdef DEAL_II_COMPILER_CUDA_AWARE
-// By default, all the ranks will try to access the device 0. We can distribute
-// the load better by using a random device which is done in this function. In
-// case tests use MPI, we make sure to set subsequent devices. MPI needs to be
-// initialized before using this function.
+// By default, all the ranks will try to access device 0.
+// If we are running with MPI support, it is better to address different
+// graphics cards for different processes even if only one node is used. The
+// choice below is based on the MPI process id.
+// MPI needs to be initialized before using this function.
+//
 // Also initialize a dummy handle that makes sure that unused memory is released
 // before the device shuts down.
 void
@@ -613,18 +615,38 @@ init_cuda(const bool use_mpi = false)
   int         n_devices       = 0;
   cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
   AssertCuda(cuda_error_code);
-  if (my_id == 0)
-    {
-      Testing::srand(std::time(nullptr));
-      device_id = Testing::rand() % n_devices;
-    }
-# ifdef DEAL_II_WITH_MPI
-  if (use_mpi)
-    MPI_Bcast(&device_id, 1, MPI_INT, 0, MPI_COMM_WORLD);
-# endif
-  device_id       = (device_id + my_id) % n_devices;
-  cuda_error_code = cudaSetDevice(device_id);
+  const int device_id = my_id % n_devices;
+  cuda_error_code     = cudaSetDevice(device_id);
   AssertCuda(cuda_error_code);
+
+  // In principle, we should be able to distribute the load better by
+  // choosing a random graphics card. For some reason, this produces timeouts
+  // on the tester we use mainly for the CUDA tests, so we don't use the
+  // following optimization by default.
+
+  /*
+  # ifndef DEAL_II_WITH_MPI
+    Assert(use_mpi == false, ExcInternalError());
+  # endif
+    const unsigned int my_id =
+      use_mpi ? Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) : 0;
+    int         device_id       = 0;
+    int         n_devices       = 0;
+    cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+    AssertCuda(cuda_error_code);
+    if (my_id == 0)
+      {
+        Testing::srand(std::time(nullptr));
+        device_id = Testing::rand() % n_devices;
+      }
+  # ifdef DEAL_II_WITH_MPI
+    if (use_mpi)
+      MPI_Bcast(&device_id, 1, MPI_INT, 0, MPI_COMM_WORLD);
+  # endif
+    device_id       = (device_id + my_id) % n_devices;
+    cuda_error_code = cudaSetDevice(device_id);
+    AssertCuda(cuda_error_code);
+  */
 }
 #endif
 
-- 
2.39.5
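
For reference, here is a minimal standalone sketch of the deterministic device selection the patch introduces: each MPI rank picks a CUDA device in round-robin fashion via "rank % n_devices" instead of a randomized choice. This is not part of the patch or of deal.II's init_cuda(); the main() scaffold, error handling, and printout below are illustrative assumptions.

// Hypothetical standalone example, not part of tests/tests.h.
// Assigns each MPI rank to a CUDA device in round-robin fashion,
// mirroring the "my_id % n_devices" choice made by the patched init_cuda().
#include <cuda_runtime.h>
#include <mpi.h>

#include <cstdio>

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int         n_devices  = 0;
  cudaError_t error_code = cudaGetDeviceCount(&n_devices);
  if (error_code != cudaSuccess || n_devices == 0)
    {
      std::fprintf(stderr, "Rank %d: no CUDA device available\n", rank);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

  // Deterministic choice: rank 0 -> device 0, rank 1 -> device 1, ...,
  // wrapping around once all devices on the node have been used.
  const int device_id = rank % n_devices;
  error_code          = cudaSetDevice(device_id);
  if (error_code != cudaSuccess)
    {
      std::fprintf(stderr, "Rank %d: cudaSetDevice(%d) failed\n", rank, device_id);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }

  std::printf("Rank %d uses device %d of %d\n", rank, device_id, n_devices);

  MPI_Finalize();
  return 0;
}

Because the mapping depends only on the rank and the device count, every run assigns the same devices to the same ranks, which avoids the test timeouts the patch attributes to the randomized choice while still spreading ranks across the graphics cards of a node.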