::dealii::LinearAlgebra::CUDAWrappers::kernel::
gather<<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
temp_array_ptr,
- locally_owned_array.data(),
import_indices_plain_dev[i].first.get(),
+ locally_owned_array.data(),
import_indices_plain_dev[i].second);
}
else
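With the reorder, the index array is passed directly before the array it subscripts. A minimal sketch of the contract the call above relies on, assuming the usual deal.II semantics of this kernel:

// gather(val, indices, v, N):  val[j] = v[indices[j]]  for all j < N,
// so each imported entry is read from locally_owned_array through the
// plain-index array and written contiguously into temp_array_ptr.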
*
* @ingroup CUDAWrappers
*/
- template <typename Number>
+ template <typename Number, typename IndexType>
__global__ void
- set_permutated(Number * val,
+ set_permutated(const IndexType *indices,
+ Number * val,
const Number * v,
- const size_type *indices,
- const size_type N);
+ const IndexType N);
template <typename Number, typename IndexType>
__global__ void
gather(Number * val,
- const Number * v,
const IndexType *indices,
+ const Number * v,
const IndexType N);
*/
template <typename Number>
__global__ void
- add_permutated(Number * val,
+ add_permutated(const size_type *indices,
+ Number * val,
const Number * v,
- const size_type *indices,
const size_type N);
} // namespace kernel
} // namespace CUDAWrappers
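Taken together, the three declarations above follow one convention after the reorder: the index array immediately precedes the array it subscripts. Assuming the usual semantics of these kernels, that reads as:

// set_permutated(indices, val, v, N):  val[indices[j]]  = v[j]   (scatter)
// add_permutated(indices, val, v, N):  val[indices[j]] += v[j]   (scatter-add)
// gather(val, indices, v, N):          val[j] = v[indices[j]]    (gather)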
- template <typename Number>
+ template <typename Number, typename IndexType>
__global__ void
- set_permutated(Number * val,
+ set_permutated(const IndexType *indices,
+ Number * val,
const Number * v,
- const size_type *indices,
- const size_type N)
+ const IndexType N)
{
- const size_type idx_base =
+ const IndexType idx_base =
threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
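The rest of this body lies outside the hunk; a sketch of its presumable shape, assuming the chunked loop used throughout these kernels (chunk_size elements per thread, stride block_size):

  for (IndexType i = 0; i < chunk_size; ++i)
    {
      const IndexType idx = idx_base + i * block_size;
      if (idx < N)
        val[indices[idx]] = v[idx]; // scatter v through indices into val
    }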
template <typename Number, typename IndexType>
__global__ void
gather(Number * val,
- const Number * v,
const IndexType *indices,
+ const Number * v,
const IndexType N)
{
const IndexType idx_base =
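The hunk ends mid-statement; the remainder presumably mirrors set_permutated, but reads through the index array instead of writing through it:

    threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
  for (IndexType i = 0; i < chunk_size; ++i)
    {
      const IndexType idx = idx_base + i * block_size;
      if (idx < N)
        val[idx] = v[indices[idx]]; // gather from v through indices
    }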
template <typename Number>
__global__ void
- add_permutated(Number * val,
+ add_permutated(const size_type *indices,
+ Number * val,
const Number * v,
- const size_type *indices,
const size_type N)
{
const size_type idx_base =
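Likewise cut short here; a sketch of the remainder, assuming each index occurs at most once so a plain += (no atomicAdd) suffices:

    threadIdx.x + blockIdx.x * (blockDim.x * chunk_size);
  for (size_type i = 0; i < chunk_size; ++i)
    {
      const size_type idx = idx_base + i * block_size;
      if (idx < N)
        val[indices[idx]] += v[idx]; // scatter-add; indices assumed unique
    }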
::dealii::CUDAWrappers::block_size);
::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<Number>
<<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
- tmp_vector.begin(), V_dev, indices_dev, n_elements);
+ indices_dev, tmp_vector.begin(), V_dev, n_elements);
tmp_vector.compress(operation);
if (operation == VectorOperation::add)
::dealii::LinearAlgebra::CUDAWrappers::kernel::add_permutated<
Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+ indices_dev,
data.values_dev.get(),
tmp_vector.begin(),
- indices_dev,
tmp_n_elements);
else
::dealii::LinearAlgebra::CUDAWrappers::kernel::set_permutated<
Number><<<n_blocks, ::dealii::CUDAWrappers::block_size>>>(
+ indices_dev,
data.values_dev.get(),
tmp_vector.begin(),
- indices_dev,
tmp_n_elements);
::dealii::Utilities::CUDA::free(indices_dev);
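In reference form, what the reordered sequence above computes (a sketch of the semantics only; the MPI exchange behind compress() is elided):

// tmp[indices[j]] = V[j]                              (set_permutated)
// tmp.compress(operation)                             (exchange/accumulate ghosts)
// values[indices[j]] += tmp[j]  if operation == add   (add_permutated)
// values[indices[j]]  = tmp[j]  otherwise             (set_permutated)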
const float * V_val,
const size_type N);
template __global__ void
- equ(float * val,
- const float a,
- const float * V_val,
- const float b,
- const float * W_val,
- const size_type N);
+ equ<float>(float * val,
+ const float a,
+ const float * V_val,
+ const float b,
+ const float * W_val,
+ const size_type N);
template __global__ void
add_and_dot<float>(float * res,
float * v1,
template __global__ void
set<float>(float *val, const float s, const size_type N);
template __global__ void
- set_permutated<float>(float * val,
- const float * v,
- const size_type *indices,
- const size_type N);
+ set_permutated<float, size_type>(const size_type *indices,
+ float * val,
+ const float * v,
+ const size_type N);
template __global__ void
- gather(float * val,
- const float * v,
- const size_type *indices,
- const size_type N);
+ gather<float, size_type>(float * val,
+ const size_type *indices,
+ const float * v,
+ const size_type N);
template __global__ void
- add_permutated<float>(float * val,
+ add_permutated<float>(const size_type *indices,
+ float * val,
const float * v,
- const size_type *indices,
const size_type N);
const double * V_val,
const size_type N);
template __global__ void
- equ(double * val,
- const double a,
- const double * V_val,
- const double b,
- const double * W_val,
- const size_type N);
+ equ<double>(double * val,
+ const double a,
+ const double * V_val,
+ const double b,
+ const double * W_val,
+ const size_type N);
template __global__ void
add_and_dot<double>(double * res,
double * v1,
template __global__ void
set<double>(double *val, const double s, const size_type N);
template __global__ void
- set_permutated<double>(double * val,
- const double * v,
- const size_type *indices,
- const size_type N);
+ set_permutated<double, size_type>(const size_type *indices,
+ double * val,
+ const double * v,
+ const size_type N);
template __global__ void
- gather(double * val,
- const double * v,
- const size_type *indices,
- const size_type N);
+ gather<double, size_type>(double * val,
+ const size_type *indices,
+ const double * v,
+ const size_type N);
template __global__ void
- add_permutated<double>(double * val,
+ add_permutated<double>(const size_type *indices,
+ double * val,
const double * v,
- const size_type *indices,
const size_type N);
} // namespace kernel
} // namespace CUDAWrappers
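Finally, a standalone smoke test of the new argument order; a sketch independent of deal.II (the kernels are re-declared locally, one element per thread instead of the chunked grid, and size_type is assumed to be unsigned int):

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

using size_type = unsigned int;

template <typename Number, typename IndexType>
__global__ void
set_permutated(const IndexType *indices,
               Number *         val,
               const Number *   v,
               const IndexType  N)
{
  const IndexType i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < N)
    val[indices[i]] = v[i]; // scatter
}

template <typename Number, typename IndexType>
__global__ void
gather(Number *         val,
       const IndexType *indices,
       const Number *   v,
       const IndexType  N)
{
  const IndexType i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < N)
    val[i] = v[indices[i]]; // gather
}

int main()
{
  const size_type        N = 1024;
  std::vector<double>    h_v(N);
  std::vector<size_type> h_idx(N);
  for (size_type i = 0; i < N; ++i)
    {
      h_v[i]   = 1.0 * i;
      h_idx[i] = N - 1 - i; // a simple permutation: reversal
    }

  double *   d_v = nullptr, *d_tmp = nullptr, *d_out = nullptr;
  size_type *d_idx = nullptr;
  cudaMalloc(&d_v, N * sizeof(double));
  cudaMalloc(&d_tmp, N * sizeof(double));
  cudaMalloc(&d_out, N * sizeof(double));
  cudaMalloc(&d_idx, N * sizeof(size_type));
  cudaMemcpy(d_v, h_v.data(), N * sizeof(double), cudaMemcpyHostToDevice);
  cudaMemcpy(d_idx, h_idx.data(), N * sizeof(size_type), cudaMemcpyHostToDevice);

  const unsigned int block_size = 256;
  const unsigned int n_blocks   = (N + block_size - 1) / block_size;

  // Scatter v through idx into tmp, then gather back: out must equal v.
  set_permutated<double><<<n_blocks, block_size>>>(d_idx, d_tmp, d_v, N);
  gather<double><<<n_blocks, block_size>>>(d_out, d_idx, d_tmp, N);
  cudaDeviceSynchronize();

  std::vector<double> h_out(N);
  cudaMemcpy(h_out.data(), d_out, N * sizeof(double), cudaMemcpyDeviceToHost);

  bool ok = true;
  for (size_type i = 0; i < N; ++i)
    ok = ok && (h_out[i] == h_v[i]);
  std::printf("round trip %s\n", ok ? "ok" : "FAILED");

  cudaFree(d_v);
  cudaFree(d_tmp);
  cudaFree(d_out);
  cudaFree(d_idx);
  return ok ? 0 : 1;
}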