gitweb.dealii.org / dealii.git / commitdiff
Add support for distributed CUDA MatrixFree
author    Bruno Turcksin <bruno.turcksin@gmail.com>
          Tue, 20 Nov 2018 22:31:19 +0000 (22:31 +0000)
committer Bruno Turcksin <bruno.turcksin@gmail.com>
          Mon, 26 Nov 2018 14:40:14 +0000 (14:40 +0000)
include/deal.II/matrix_free/cuda_hanging_nodes_internal.h
include/deal.II/matrix_free/cuda_matrix_free.h
include/deal.II/matrix_free/cuda_matrix_free.templates.h

index 75a2696808832bceb31ba56b563ed6f40dd2289a..4a4df3be8e28df1afc1289d7c083c39648aab858 100644
@@ -58,9 +58,11 @@ namespace CUDAWrappers
        */
       template <typename CellIterator>
       void
-      setup_constraints(std::vector<types::global_dof_index> &dof_indices,
-                        const CellIterator &                  cell,
-                        unsigned int &                        mask) const;
+      setup_constraints(
+        std::vector<types::global_dof_index> &                    dof_indices,
+        const CellIterator &                                      cell,
+        const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner,
+        unsigned int &                                            mask) const;
 
     private:
       /**
@@ -272,9 +274,10 @@ namespace CUDAWrappers
     template <typename CellIterator>
     void
     HangingNodes<dim>::setup_constraints(
-      std::vector<types::global_dof_index> &dof_indices,
-      const CellIterator &                  cell,
-      unsigned int &                        mask) const
+      std::vector<types::global_dof_index> &                    dof_indices,
+      const CellIterator &                                      cell,
+      const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner,
+      unsigned int &                                            mask) const
     {
       mask                         = 0;
       const unsigned int n_dofs_1d = fe_degree + 1;
@@ -311,6 +314,11 @@ namespace CUDAWrappers
 
                   // Get indices to read
                   neighbor->face(neighbor_face)->get_dof_indices(neighbor_dofs);
+                  // If the vector is distributed, we need to transform the
+                  // global indices to local ones.
+                  if (partitioner)
+                    for (auto &index : neighbor_dofs)
+                      index = partitioner->global_to_local(index);
 
                   if (dim == 2)
                     {
@@ -554,6 +562,11 @@ namespace CUDAWrappers
                           neighbor_dofs.resize(n_dofs_1d * n_dofs_1d *
                                                n_dofs_1d);
                           neighbor_cell->get_dof_indices(neighbor_dofs);
+                          // If the vector is distributed, we need to transform
+                          // the global indices to local ones.
+                          if (partitioner)
+                            for (auto &index : neighbor_dofs)
+                              index = partitioner->global_to_local(index);
 
                           for (unsigned int i = 0; i < n_dofs_1d; ++i)
                             {
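
For context, the index translation these hunks rely on can be sketched as
follows (an illustrative snippet, not part of the patch): a
Utilities::MPI::Partitioner numbers the locally owned dofs first and the
ghost dofs after them, so global_to_local() yields an index that is valid
for addressing a ghosted vector on the current process.

    IndexSet owned = dof_handler.locally_owned_dofs();
    IndexSet relevant;
    DoFTools::extract_locally_relevant_dofs(dof_handler, relevant);
    Utilities::MPI::Partitioner partitioner(owned, relevant, MPI_COMM_WORLD);

    // Locally owned dofs map into [0, local_size()); ghost dofs map into
    // [local_size(), local_size() + n_ghost_indices()).
    const types::global_dof_index first_owned_dof = *owned.begin();
    const unsigned int local_index =
      partitioner.global_to_local(first_owned_dof);
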
index b75ff1f9cd02c85807d94fd38d7e6492be01fb7d..75af6c004ef23c6c7ffe78af41c923ab192fb243 100644
@@ -21,6 +21,7 @@
 
 #ifdef DEAL_II_COMPILER_CUDA_AWARE
 
+#  include <deal.II/base/mpi.h>
 #  include <deal.II/base/quadrature.h>
 #  include <deal.II/base/tensor.h>
 
@@ -32,6 +33,7 @@
 
 #  include <deal.II/lac/affine_constraints.h>
 #  include <deal.II/lac/cuda_vector.h>
+#  include <deal.II/lac/la_parallel_vector.h>
 
 
 DEAL_II_NAMESPACE_OPEN
@@ -142,6 +144,9 @@ namespace CUDAWrappers
      */
     MatrixFree();
 
+    /**
+     * Return the length of the padding.
+     */
     unsigned int
     get_padding_length() const;
 
@@ -151,7 +156,35 @@ namespace CUDAWrappers
      * degrees of freedom, the DoFHandler and the mapping describe the
      * transformation from unit to real cell, and the finite element
      * underlying the DoFHandler together with the quadrature formula
-     * describe the local operations.
+     * describe the local operations. This function supports distributed
+     * computation (MPI).
+     */
+    void
+    reinit(const Mapping<dim> &             mapping,
+           const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           const MPI_Comm &                 comm,
+           const AdditionalData             additional_data = AdditionalData());
+
+    /**
+     * Initializes the data structures. Same as above but using a Q1 mapping.
+     */
+    void
+    reinit(const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           const MPI_Comm &                 comm,
+           const AdditionalData             additional_data = AdditionalData());
+
+    /**
+     * Extracts the information needed to perform loops over cells. The
+     * DoFHandler and AffineConstraints objects describe the layout of
+     * degrees of freedom, the DoFHandler and the mapping describe the
+     * transformation from unit to real cell, and the finite element
+     * underlying the DoFHandler together with the quadrature formula
+     * describe the local operations. This function does not support distributed
+     * computation.
      */
     void
     reinit(const Mapping<dim> &             mapping,
@@ -185,10 +218,19 @@ namespace CUDAWrappers
               const VectorType &src,
               VectorType &      dst) const;
 
+    /**
+     * Copy the values of the constrained entries from @p src to @p dst. This is
+     * used to impose zero Dirichlet boundary conditions.
+     */
     template <typename VectorType>
     void
     copy_constrained_values(const VectorType &src, VectorType &dst) const;
 
+    /**
+     * Set the entries in @p dst corresponding to constrained values to @p val.
+     * The main purpose of this function is to set the constrained entries of
+     * the source vector used in cell_loop() to zero.
+     */
     template <typename VectorType>
     void
     set_constrained_values(const Number val, VectorType &dst) const;
@@ -206,6 +248,107 @@ namespace CUDAWrappers
     memory_consumption() const;
 
   private:
+    /**
+     * Initializes the data structures.
+     */
+    void
+    reinit(const Mapping<dim> &             mapping,
+           const DoFHandler<dim> &          dof_handler,
+           const AffineConstraints<Number> &constraints,
+           const Quadrature<1> &            quad,
+           std::shared_ptr<const MPI_Comm>  comm,
+           const AdditionalData             additional_data);
+
+    /**
+     * Helper function. Loop over all the cells and apply the functor on each
+     * element in parallel. This function is used when MPI is not used.
+     */
+    template <typename functor, typename VectorType>
+    void
+    serial_cell_loop(const functor &   func,
+                     const VectorType &src,
+                     VectorType &      dst) const;
+
+    /**
+     * Helper function. Loop over all the cells and apply the functor on each
+     * element in parallel. This function is used when MPI is used.
+     */
+    template <typename functor>
+    void
+    distributed_cell_loop(
+      const functor &                                                      func,
+      const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because cell_loop needs
+     * distributed_cell_loop() to exist for LinearAlgebra::CUDAWrappers::Vector.
+     */
+    template <typename functor>
+    void
+    distributed_cell_loop(
+      const functor &                                    func,
+      const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &      dst) const;
+
+    /**
+     * Helper function. Copy the values of the constrained entries of @p src to
+     * @p dst. This function is used when MPI is not used.
+     */
+    template <typename VectorType>
+    void
+    serial_copy_constrained_values(const VectorType &src,
+                                   VectorType &      dst) const;
+
+    /**
+     * Helper function. Copy the values of the constrained entries of @p src to
+     * @p dst. This function is used when MPI is used.
+     */
+    void
+    distributed_copy_constrained_values(
+      const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because copy_constrained_values needs
+     * distributed_copy_constrained_values() to exist for
+     * LinearAlgebra::CUDAWrappers::Vector.
+     */
+    void
+    distributed_copy_constrained_values(
+      const LinearAlgebra::CUDAWrappers::Vector<Number> &src,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &      dst) const;
+
+    /**
+     * Helper function. Set the constrained entries of @p dst to @p val. This
+     * function is used when MPI is not used.
+     */
+    template <typename VectorType>
+    void
+    serial_set_constrained_values(const Number val, VectorType &dst) const;
+
+    /**
+     * Helper function. Set the constrained entries of @p dst to @p val. This
+     * function is used when MPI is used.
+     */
+    void
+    distributed_set_constrained_values(
+      const Number                                                   val,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const;
+
+    /**
+     * This function should never be called. Calling it results in an internal
+     * error. This function exists only because set_constrained_values needs
+     * distributed_set_constrained_values() to exist for
+     * LinearAlgebra::CUDAWrappers::Vector.
+     */
+    void
+    distributed_set_constrained_values(
+      const Number                                 val,
+      LinearAlgebra::CUDAWrappers::Vector<Number> &dst) const;
+
     /**
      * Parallelization scheme used, parallelization over degrees of freedom or
      * over cells.
@@ -284,7 +427,13 @@ namespace CUDAWrappers
      */
     std::vector<dim3> block_dim;
 
-    // Parallelization parameter
+    /**
+     * Unique pointer to a Partitioner for the distributed vectors used in
+     * cell_loop(). When MPI is not used, the pointer is null.
+     */
+    std::unique_ptr<const Utilities::MPI::Partitioner> partitioner;
+
+    // Parallelization parameters
     unsigned int cells_per_block;
     dim3         constraint_grid_dim;
     dim3         constraint_block_dim;
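
A minimal usage sketch of the new distributed interface (illustrative
only: LocalOperator stands for a user-provided functor implementing the
cell-local operation, and fe_degree is assumed to be known at compile
time):

    CUDAWrappers::MatrixFree<dim, double> mf_data;
    typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
      additional_data;
    mf_data.reinit(mapping,
                   dof_handler,
                   constraints,
                   QGauss<1>(fe_degree + 1),
                   MPI_COMM_WORLD,
                   additional_data);

    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> src, dst;
    src.reinit(dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
    dst.reinit(src);

    LocalOperator<dim, fe_degree> local_operator;
    // Ghost exchange and compress() happen inside cell_loop() when the
    // MatrixFree object was initialized with a communicator.
    mf_data.cell_loop(local_operator, src, dst);
    mf_data.copy_constrained_values(src, dst);
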
index 435b53b6a01129becc7391f5ec16f37d4697fd2a..89625a885cd5e5b5019bdeb29b63e5049ff51951 100644
@@ -23,6 +23,9 @@
 
 #  include <deal.II/base/cuda_size.h>
 #  include <deal.II/base/graph_coloring.h>
+#  include <deal.II/base/std_cxx14/memory.h>
+
+#  include <deal.II/dofs/dof_tools.h>
 
 #  include <deal.II/fe/fe_values.h>
 
@@ -103,6 +106,10 @@ namespace CUDAWrappers
       cudaError_t error_code = cudaMalloc(array_device, n * sizeof(Number1));
       AssertCuda(error_code);
 
+      // TODO: this is dangerous because we do a memcpy between different
+      // data types. It is, however, very useful to move Point objects to
+      // the device, where they are stored as Tensor. To make this function
+      // safer, we would need Point to be available on the device.
       error_code = cudaMemcpy(*array_device,
                               array_host.data(),
                               n * sizeof(Number1),
@@ -137,7 +144,10 @@ namespace CUDAWrappers
 
       template <typename CellFilter>
       void
-      get_cell_data(const CellFilter &cell, const unsigned int cell_id);
+      get_cell_data(
+        const CellFilter &                                        cell,
+        const unsigned int                                        cell_id,
+        const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner);
 
       void
       alloc_and_copy_arrays(const unsigned int cell);
@@ -274,17 +284,25 @@ namespace CUDAWrappers
     template <int dim, typename Number>
     template <typename CellFilter>
     void
-    ReinitHelper<dim, Number>::get_cell_data(const CellFilter & cell,
-                                             const unsigned int cell_id)
+    ReinitHelper<dim, Number>::get_cell_data(
+      const CellFilter &                                        cell,
+      const unsigned int                                        cell_id,
+      const std::unique_ptr<const Utilities::MPI::Partitioner> &partitioner)
     {
       cell->get_dof_indices(local_dof_indices);
-
+      // When using MPI, local_dof_indices contains global dof indices;
+      // transform them into dof indices local to the current MPI
+      // process.
+      if (partitioner)
+        for (auto &index : local_dof_indices)
+          index = partitioner->global_to_local(index);
 
       for (unsigned int i = 0; i < dofs_per_cell; ++i)
         lexicographic_dof_indices[i] = local_dof_indices[lexicographic_inv[i]];
 
       hanging_nodes.setup_constraints(lexicographic_dof_indices,
                                       cell,
+                                      partitioner,
                                       constraint_mask_host[cell_id]);
 
       memcpy(&local_to_global_host[cell_id * padding_length],
@@ -414,12 +432,16 @@ namespace CUDAWrappers
     copy_constrained_dofs(
       const dealii::types::global_dof_index *constrained_dofs,
       const unsigned int                     n_constrained_dofs,
+      const unsigned int                     size,
       const Number *                         src,
       Number *                               dst)
     {
       const unsigned int dof =
         threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
-      if (dof < n_constrained_dofs)
+      // The constrained dofs are computed on ghosted vectors, but here we
+      // copy between non-ghosted vectors whose owned entries occupy the
+      // local index range [0, size); the guard skips ghost entries.
+      if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
         dst[constrained_dofs[dof]] = src[constrained_dofs[dof]];
     }
 
@@ -430,12 +452,16 @@ namespace CUDAWrappers
     set_constrained_dofs(
       const dealii::types::global_dof_index *constrained_dofs,
       const unsigned int                     n_constrained_dofs,
+      const unsigned int                     size,
       Number                                 val,
       Number *                               dst)
     {
       const unsigned int dof =
         threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
-      if (dof < n_constrained_dofs)
+      // The constrained dofs are computed on ghosted vectors, but here we
+      // set entries of a non-ghosted vector whose owned entries occupy the
+      // local index range [0, size); the guard skips ghost entries.
+      if ((dof < n_constrained_dofs) && (constrained_dofs[dof] < size))
         dst[constrained_dofs[dof]] = val;
     }
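
The guards above are the device-side version of the following host-side
sketch (illustrative; dst_values and local_size are assumed to describe
the locally owned part of a non-ghosted vector): because the owned
entries occupy local indices [0, local_size), the size check filters out
constrained ghost dofs.

    for (unsigned int i = 0; i < n_constrained_dofs; ++i)
      if (constrained_dofs_host[i] < local_size)
        dst_values[constrained_dofs_host[i]] = val;
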
 
@@ -491,6 +517,247 @@ namespace CUDAWrappers
                                   const DoFHandler<dim> &          dof_handler,
                                   const AffineConstraints<Number> &constraints,
                                   const Quadrature<1> &            quad,
+                                  const MPI_Comm &                 comm,
+                                  const AdditionalData additional_data)
+  {
+    reinit(mapping,
+           dof_handler,
+           constraints,
+           quad,
+           std::make_shared<const MPI_Comm>(comm),
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const MPI_Comm &                 comm,
+                                  const AdditionalData additional_data)
+  {
+    reinit(StaticMappingQ1<dim>::mapping,
+           dof_handler,
+           constraints,
+           quad,
+           std::make_shared<const MPI_Comm>(comm),
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const Mapping<dim> &             mapping,
+                                  const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const AdditionalData additional_data)
+  {
+    reinit(mapping, dof_handler, constraints, quad, nullptr, additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  const AdditionalData additional_data)
+  {
+    reinit(StaticMappingQ1<dim>::mapping,
+           dof_handler,
+           constraints,
+           quad,
+           nullptr,
+           additional_data);
+  }
+
+
+
+  template <int dim, typename Number>
+  MatrixFree<dim, Number>::Data
+  MatrixFree<dim, Number>::get_data(unsigned int color) const
+  {
+    Data data_copy;
+    data_copy.q_points        = q_points[color];
+    data_copy.local_to_global = local_to_global[color];
+    data_copy.inv_jacobian    = inv_jacobian[color];
+    data_copy.JxW             = JxW[color];
+    data_copy.constraint_mask = constraint_mask[color];
+    data_copy.n_cells         = n_cells[color];
+    data_copy.padding_length  = padding_length;
+    data_copy.row_start       = row_start[color];
+
+    return data_copy;
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::free()
+  {
+    for (unsigned int i = 0; i < q_points.size(); ++i)
+      {
+        if (q_points[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(q_points[i]);
+            AssertCuda(cuda_error);
+            q_points[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < local_to_global.size(); ++i)
+      {
+        if (local_to_global[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(local_to_global[i]);
+            AssertCuda(cuda_error);
+            local_to_global[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
+      {
+        if (inv_jacobian[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
+            AssertCuda(cuda_error);
+            inv_jacobian[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < JxW.size(); ++i)
+      {
+        if (JxW[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(JxW[i]);
+            AssertCuda(cuda_error);
+            JxW[i] = nullptr;
+          }
+      }
+
+    for (unsigned int i = 0; i < constraint_mask.size(); ++i)
+      {
+        if (constraint_mask[i] != nullptr)
+          {
+            cudaError_t cuda_error = cudaFree(constraint_mask[i]);
+            AssertCuda(cuda_error);
+            constraint_mask[i] = nullptr;
+          }
+      }
+
+
+    q_points.clear();
+    local_to_global.clear();
+    inv_jacobian.clear();
+    JxW.clear();
+    constraint_mask.clear();
+
+    if (constrained_dofs != nullptr)
+      {
+        cudaError_t cuda_error = cudaFree(constrained_dofs);
+        AssertCuda(cuda_error);
+        constrained_dofs = nullptr;
+      }
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename VectorType>
+  void
+  MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
+                                                   VectorType &      dst) const
+  {
+    static_assert(
+      std::is_same<Number, typename VectorType::value_type>::value,
+      "VectorType::value_type and Number should be of the same type.");
+    if (partitioner)
+      distributed_copy_constrained_values(src, dst);
+    else
+      serial_copy_constrained_values(src, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename VectorType>
+  void
+  MatrixFree<dim, Number>::set_constrained_values(Number      val,
+                                                  VectorType &dst) const
+  {
+    static_assert(
+      std::is_same<Number, typename VectorType::value_type>::value,
+      "VectorType::value_type and Number should be of the same type.");
+    if (partitioner)
+      distributed_set_constrained_values(val, dst);
+    else
+      serial_set_constrained_values(val, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  unsigned int
+  MatrixFree<dim, Number>::get_padding_length() const
+  {
+    return padding_length;
+  }
+
+
+
+  template <int dim, typename Number>
+  template <typename functor, typename VectorType>
+  void
+  MatrixFree<dim, Number>::cell_loop(const functor &   func,
+                                     const VectorType &src,
+                                     VectorType &      dst) const
+  {
+    if (partitioner)
+      distributed_cell_loop(func, src, dst);
+    else
+      serial_cell_loop(func, src, dst);
+  }
+
+
+
+  template <int dim, typename Number>
+  std::size_t
+  MatrixFree<dim, Number>::memory_consumption() const
+  {
+    // First compute the size of n_cells, row_starts, kernel launch parameters,
+    // and constrained_dofs
+    std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
+                        2 * n_colors * sizeof(dim3) +
+                        n_constrained_dofs * sizeof(unsigned int);
+
+    // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
+    for (unsigned int i = 0; i < n_colors; ++i)
+      {
+        bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
+                 n_cells[i] * padding_length * dim * dim * sizeof(Number) +
+                 n_cells[i] * padding_length * sizeof(Number) +
+                 n_cells[i] * padding_length * sizeof(point_type) +
+                 n_cells[i] * sizeof(unsigned int);
+      }
+
+    return bytes;
+  }
+
+
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::reinit(const Mapping<dim> &             mapping,
+                                  const DoFHandler<dim> &          dof_handler,
+                                  const AffineConstraints<Number> &constraints,
+                                  const Quadrature<1> &            quad,
+                                  std::shared_ptr<const MPI_Comm>  comm,
                                   const AdditionalData additional_data)
   {
     if (typeid(Number) == typeid(double))
@@ -568,6 +835,15 @@ namespace CUDAWrappers
     n_colors = graph.size();
 
     helper.setup_color_arrays(n_colors);
+
+    IndexSet locally_relevant_dofs;
+    if (comm)
+      {
+        DoFTools::extract_locally_relevant_dofs(dof_handler,
+                                                locally_relevant_dofs);
+        partitioner = std_cxx14::make_unique<Utilities::MPI::Partitioner>(
+          dof_handler.locally_owned_dofs(), locally_relevant_dofs, *comm);
+      }
     for (unsigned int i = 0; i < n_colors; ++i)
       {
         n_cells[i] = graph[i].size();
@@ -575,7 +851,7 @@ namespace CUDAWrappers
         typename std::vector<CellFilter>::iterator cell     = graph[i].begin(),
                                                    end_cell = graph[i].end();
         for (unsigned int cell_id = 0; cell != end_cell; ++cell, ++cell_id)
-          helper.get_cell_data(*cell, cell_id);
+          helper.get_cell_data(*cell, cell_id, partitioner);
 
         helper.alloc_and_copy_arrays(i);
       }
@@ -606,14 +882,33 @@ namespace CUDAWrappers
         std::vector<dealii::types::global_dof_index> constrained_dofs_host(
           n_constrained_dofs);
 
-        unsigned int       i_constraint = 0;
-        const unsigned int n_dofs       = dof_handler.n_dofs();
-        for (unsigned int i = 0; i < n_dofs; ++i)
+        if (partitioner)
+          {
+            const unsigned int n_local_dofs =
+              locally_relevant_dofs.n_elements();
+            unsigned int i_constraint = 0;
+            for (unsigned int i = 0; i < n_local_dofs; ++i)
+              {
+                // is_constrained uses a global dof id but constrained_dofs_host
+                // works on the local id
+                if (constraints.is_constrained(partitioner->local_to_global(i)))
+                  {
+                    constrained_dofs_host[i_constraint] = i;
+                    ++i_constraint;
+                  }
+              }
+          }
+        else
           {
-            if (constraints.is_constrained(i))
+            const unsigned int n_local_dofs = dof_handler.n_dofs();
+            unsigned int       i_constraint = 0;
+            for (unsigned int i = 0; i < n_local_dofs; ++i)
               {
-                constrained_dofs_host[i_constraint] = i;
-                ++i_constraint;
+                if (constraints.is_constrained(i))
+                  {
+                    constrained_dofs_host[i_constraint] = i;
+                    ++i_constraint;
+                  }
               }
           }
 
@@ -634,91 +929,62 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  MatrixFree<dim, Number>::Data
-  MatrixFree<dim, Number>::get_data(unsigned int color) const
+  template <typename functor, typename VectorType>
+  void
+  MatrixFree<dim, Number>::serial_cell_loop(const functor &   func,
+                                            const VectorType &src,
+                                            VectorType &      dst) const
   {
-    Data data_copy;
-    data_copy.q_points        = q_points[color];
-    data_copy.local_to_global = local_to_global[color];
-    data_copy.inv_jacobian    = inv_jacobian[color];
-    data_copy.JxW             = JxW[color];
-    data_copy.constraint_mask = constraint_mask[color];
-    data_copy.n_cells         = n_cells[color];
-    data_copy.padding_length  = padding_length;
-    data_copy.row_start       = row_start[color];
-
-    return data_copy;
+    // Execute the loop on the cells
+    for (unsigned int i = 0; i < n_colors; ++i)
+      internal::apply_kernel_shmem<dim, Number, functor>
+        <<<grid_dim[i], block_dim[i]>>>(func,
+                                        get_data(i),
+                                        src.get_values(),
+                                        dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
+  template <typename functor>
   void
-  MatrixFree<dim, Number>::free()
+  MatrixFree<dim, Number>::distributed_cell_loop(
+    const functor &                                                      func,
+    const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
   {
-    for (unsigned int i = 0; i < q_points.size(); ++i)
-      {
-        if (q_points[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(q_points[i]);
-            AssertCuda(cuda_error);
-            q_points[i] = nullptr;
-          }
-      }
-
-    for (unsigned int i = 0; i < local_to_global.size(); ++i)
-      {
-        if (local_to_global[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(local_to_global[i]);
-            AssertCuda(cuda_error);
-            local_to_global[i] = nullptr;
-          }
-      }
-
-    for (unsigned int i = 0; i < inv_jacobian.size(); ++i)
-      {
-        if (inv_jacobian[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(inv_jacobian[i]);
-            AssertCuda(cuda_error);
-            inv_jacobian[i] = nullptr;
-          }
-      }
-
-    for (unsigned int i = 0; i < JxW.size(); ++i)
-      {
-        if (JxW[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(JxW[i]);
-            AssertCuda(cuda_error);
-            JxW[i] = nullptr;
-          }
-      }
+    // Create the ghosted source and the ghosted destination
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> ghosted_src(
+      partitioner);
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> ghosted_dst(
+      ghosted_src);
+    ghosted_src = src;
+
+    // Execute the loop on the cells
+    for (unsigned int i = 0; i < n_colors; ++i)
+      internal::apply_kernel_shmem<dim, Number, functor>
+        <<<grid_dim[i], block_dim[i]>>>(func,
+                                        get_data(i),
+                                        ghosted_src.get_values(),
+                                        ghosted_dst.get_values());
 
-    for (unsigned int i = 0; i < constraint_mask.size(); ++i)
-      {
-        if (constraint_mask[i] != nullptr)
-          {
-            cudaError_t cuda_error = cudaFree(constraint_mask[i]);
-            AssertCuda(cuda_error);
-            constraint_mask[i] = nullptr;
-          }
-      }
+    // Add the contributions made to ghost entries to their owning processes
+    ghosted_dst.compress(VectorOperation::add);
+    dst = ghosted_dst;
+  }
 
 
-    q_points.clear();
-    local_to_global.clear();
-    inv_jacobian.clear();
-    JxW.clear();
-    constraint_mask.clear();
 
-    if (constrained_dofs != nullptr)
-      {
-        cudaError_t cuda_error = cudaFree(constrained_dofs);
-        AssertCuda(cuda_error);
-        constrained_dofs = nullptr;
-      }
+  template <int dim, typename Number>
+  template <typename functor>
+  void
+  MatrixFree<dim, Number>::distributed_cell_loop(
+    const functor &,
+    const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+  {
+    Assert(false, ExcInternalError());
   }
 
 
@@ -726,15 +992,15 @@ namespace CUDAWrappers
   template <int dim, typename Number>
   template <typename VectorType>
   void
-  MatrixFree<dim, Number>::copy_constrained_values(const VectorType &src,
-                                                   VectorType &      dst) const
+  MatrixFree<dim, Number>::serial_copy_constrained_values(const VectorType &src,
+                                                          VectorType &dst) const
   {
-    static_assert(
-      std::is_same<Number, typename VectorType::value_type>::value,
-      "VectorType::value_type and Number should be of the same type.");
+    Assert(src.size() == dst.size(),
+           ExcMessage("src and dst vectors have different size."));
     internal::copy_constrained_dofs<Number>
       <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
                                                       n_constrained_dofs,
+                                                      src.size(),
                                                       src.get_values(),
                                                       dst.get_values());
   }
@@ -742,70 +1008,73 @@ namespace CUDAWrappers
 
 
   template <int dim, typename Number>
-  template <typename VectorType>
   void
-  MatrixFree<dim, Number>::set_constrained_values(Number      val,
-                                                  VectorType &dst) const
+  MatrixFree<dim, Number>::distributed_copy_constrained_values(
+    const LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &src,
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
   {
-    static_assert(
-      std::is_same<Number, typename VectorType::value_type>::value,
-      "VectorType::value_type and Number should be of the same type.");
-    internal::set_constrained_dofs<Number>
+    Assert(src.size() == dst.size(),
+           ExcMessage("src and dst vectors have different local size."));
+    internal::copy_constrained_dofs<Number>
       <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
                                                       n_constrained_dofs,
-                                                      val,
+                                                      src.local_size(),
+                                                      src.get_values(),
                                                       dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
-  unsigned int
-  MatrixFree<dim, Number>::get_padding_length() const
+  void
+  MatrixFree<dim, Number>::distributed_copy_constrained_values(
+    const LinearAlgebra::CUDAWrappers::Vector<Number> &,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
   {
-    return padding_length;
+    Assert(false, ExcInternalError());
   }
 
 
 
   template <int dim, typename Number>
-  template <typename functor, typename VectorType>
+  template <typename VectorType>
   void
-  MatrixFree<dim, Number>::cell_loop(const functor &   func,
-                                     const VectorType &src,
-                                     VectorType &      dst) const
+  MatrixFree<dim, Number>::serial_set_constrained_values(const Number val,
+                                                         VectorType & dst) const
   {
-    for (unsigned int i = 0; i < n_colors; ++i)
-      internal::apply_kernel_shmem<dim, Number, functor>
-        <<<grid_dim[i], block_dim[i]>>>(func,
-                                        get_data(i),
-                                        src.get_values(),
-                                        dst.get_values());
+    internal::set_constrained_dofs<Number>
+      <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+                                                      n_constrained_dofs,
+                                                      dst.size(),
+                                                      val,
+                                                      dst.get_values());
   }
 
 
 
   template <int dim, typename Number>
-  std::size_t
-  MatrixFree<dim, Number>::memory_consumption() const
+  void
+  MatrixFree<dim, Number>::distributed_set_constrained_values(
+    const Number                                                   val,
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> &dst) const
   {
-    // First compute the size of n_cells, row_starts, kernel launch parameters,
-    // and constrained_dofs
-    std::size_t bytes = n_cells.size() * sizeof(unsigned int) * 2 +
-                        2 * n_colors * sizeof(dim3) +
-                        n_constrained_dofs * sizeof(unsigned int);
+    internal::set_constrained_dofs<Number>
+      <<<constraint_grid_dim, constraint_block_dim>>>(constrained_dofs,
+                                                      n_constrained_dofs,
+                                                      dst.local_size(),
+                                                      val,
+                                                      dst.get_values());
+  }
 
-    // For each color, add local_to_global, inv_jacobian, JxW, and q_points.
-    for (unsigned int i = 0; i < n_colors; ++i)
-      {
-        bytes += n_cells[i] * padding_length * sizeof(unsigned int) +
-                 n_cells[i] * padding_length * dim * dim * sizeof(Number) +
-                 n_cells[i] * padding_length * sizeof(Number) +
-                 n_cells[i] * padding_length * sizeof(point_type) +
-                 n_cells[i] * sizeof(unsigned int);
-      }
 
-    return bytes;
+
+  template <int dim, typename Number>
+  void
+  MatrixFree<dim, Number>::distributed_set_constrained_values(
+    const Number,
+    LinearAlgebra::CUDAWrappers::Vector<Number> &) const
+  {
+    Assert(false, ExcInternalError());
   }
 } // namespace CUDAWrappers
 
