#ifndef dealii_affine_constraints_templates_h
#define dealii_affine_constraints_templates_h
+#include <deal.II/base/cuda_size.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/table.h>
#include <deal.II/base/thread_local_storage.h>
vec.zero_out_ghosts();
}
+#ifdef DEAL_II_COMPILER_CUDA_AWARE
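+ // Kernel that sets to zero the vector entries whose local indices are
+ // listed in constrained_dofs (one thread per constrained DoF).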
+ template <typename Number>
+ __global__ void
+ set_zero_kernel(const size_type * constrained_dofs,
+ const unsigned int n_constrained_dofs,
+ Number * dst)
+ {
+ const unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
+ if (index < n_constrained_dofs)
+ dst[constrained_dofs[index]] = 0;
+ }
+
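+ // Overload of set_zero_parallel() for distributed vectors with CUDA memory
+ // space: translate the global indices of the constrained DoFs to local
+ // ones, copy them to the device, and zero the corresponding entries with
+ // the kernel above.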
+ template <typename number>
+ void
+ set_zero_parallel(
+ const std::vector<size_type> & cm,
+ LinearAlgebra::distributed::Vector<number, MemorySpace::CUDA> &vec,
+ size_type shift = 0)
+ {
+ Assert(shift == 0, ExcNotImplemented());
+ const int n_constraints = cm.size();
+ std::vector<size_type> constrained_local_dofs_host(n_constraints);
+
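+ // The kernel works on indices into the locally stored vector data, so map
+ // each global DoF index to its local index first.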
+ std::transform(cm.begin(),
+ cm.end(),
+ constrained_local_dofs_host.begin(),
+ [&vec](const size_type global_index) {
+ return vec.get_partitioner()->global_to_local(
+ global_index);
+ });
+
+ size_type *constrained_local_dofs_device;
+ Utilities::CUDA::malloc(constrained_local_dofs_device, n_constraints);
+ Utilities::CUDA::copy_to_dev(constrained_local_dofs_host,
+ constrained_local_dofs_device);
+
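+ // Launch one thread per constrained DoF, rounding the number of blocks up.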
+ const int n_blocks = 1 + (n_constraints - 1) / CUDAWrappers::block_size;
+ set_zero_kernel<<<n_blocks, CUDAWrappers::block_size>>>(
+ constrained_local_dofs_device, n_constraints, vec.get_values());
+# ifdef DEBUG
+ // Check that the kernel was launched correctly
+ AssertCuda(cudaGetLastError());
+ // Check that there was no problem during the execution of the kernel
+ AssertCuda(cudaDeviceSynchronize());
+# endif
+
+ Utilities::CUDA::free(constrained_local_dofs_device);
+
+ vec.zero_out_ghosts();
+ }
+#endif
+
template <class VectorType>
void
set_zero_in_parallel(const std::vector<size_type> &cm,
void
AffineConstraints<number>::set_zero(VectorType &vec) const
{
- // since we lines is a private member, we cannot pass it to the functions
+ // since lines is a private member, we cannot pass it to the functions
// above. therefore, copy the content which is cheap
std::vector<size_type> constrained_lines(lines.size());
for (unsigned int i = 0; i < lines.size(); ++i)
// this is an operation that is different for all vector types and so we
// need a few overloads
#ifdef DEAL_II_WITH_TRILINOS
- void
+ inline void
import_vector_with_ghost_elements(
const TrilinosWrappers::MPI::Vector &vec,
const IndexSet & /*locally_owned_elements*/,
#endif
#ifdef DEAL_II_WITH_PETSC
- void
+ inline void
import_vector_with_ghost_elements(
const PETScWrappers::MPI::Vector &vec,
const IndexSet & locally_owned_elements,
SET(_separate_src
${_separate_src}
vector_memory.cu
+ affine_constraints.cu
)
ENDIF()
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/affine_constraints.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
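+// Explicit instantiations of AffineConstraints::set_zero() for vectors with
+// CUDA memory space. These live in a .cu file so that the kernels defined in
+// the templates header are compiled for the device.
+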
+template void
+AffineConstraints<float>::set_zero<
+ LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>(
+ LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA> &) const;
+
+template void
+AffineConstraints<double>::set_zero<
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>(
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &) const;
+
+DEAL_II_NAMESPACE_CLOSE
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// check AffineConstraints<double>::set_zero() for a parallel distributed
+// vector with CUDA memory space
+
+#include <deal.II/base/cuda_size.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/affine_constraints.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include "../tests.h"
+
+
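+// Kernel that fills the locally owned entries of the vector with the values
+// offset + 1, offset + 2, ... (one thread per entry).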
+__global__ void
+initialize_vector(double *vector, int local_size, int offset)
+{
+ const int index = threadIdx.x + blockIdx.x * blockDim.x;
+ if (index < local_size)
+ vector[index] = 1.0 + index + offset;
+}
+
+
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+
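+ // Each process owns numproc consecutive entries of a vector of global size
+ // 2 * numproc and ghosts all remaining entries (the test is run with two
+ // MPI ranks).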
+ IndexSet local_active;
+ local_active.set_size(2 * numproc);
+ local_active.add_range(myid * numproc, (myid + 1) * numproc);
+
+ LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> v;
+ v.reinit(local_active, complete_index_set(2 * numproc), MPI_COMM_WORLD);
+
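+ // Initialize the locally owned entries on the device.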
+ const int n_blocks = 1 + v.size() / CUDAWrappers::block_size;
+ initialize_vector<<<n_blocks, CUDAWrappers::block_size>>>(v.get_values(),
+ numproc,
+ myid * numproc);
+ v.compress(VectorOperation::insert);
+
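+ // Constrain the second locally owned DoF of each process to zero.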
+ AffineConstraints<double> cm;
+ cm.add_line(numproc * myid + 1);
+ cm.close();
+
+ deallog << "vector before:" << std::endl;
+ v.print(deallog.get_file_stream());
+
+ deallog << std::endl;
+ deallog << "CM:" << std::endl;
+ cm.print(deallog.get_file_stream());
+
+ cm.set_zero(v);
+
+ deallog << "vector after:" << std::endl;
+ v.print(deallog.get_file_stream());
+
+ deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ init_cuda();
+
+ test();
+ return 0;
+}
--- /dev/null
+
+DEAL:0::vector before:
+Process #0
+Local range: [0, 2), global size: 4
+Vector data:
+1.000e+00 2.000e+00
+DEAL:0::
+DEAL:0::CM:
+ 1 = 0
+DEAL:0::vector after:
+Process #0
+Local range: [0, 2), global size: 4
+Vector data:
+1.000e+00 0.000e+00
+DEAL:0::OK
+
+DEAL:1::vector before:
+Process #1
+Local range: [2, 4), global size: 4
+Vector data:
+3.000e+00 4.000e+00
+DEAL:1::
+DEAL:1::CM:
+ 3 = 0
+DEAL:1::vector after:
+Process #1
+Local range: [2, 4), global size: 4
+Vector data:
+3.000e+00 0.000e+00
+DEAL:1::OK
+