https://gitweb.dealii.org/ - dealii.git/commitdiff
Add new tutorial step-64
author    Bruno Turcksin <bruno.turcksin@gmail.com>
          Fri, 30 Nov 2018 15:37:44 +0000 (15:37 +0000)
committer Daniel Arndt <arndtd@ornl.gov>
          Fri, 10 May 2019 14:51:07 +0000 (10:51 -0400)
Show how to use matrix-free methods on the GPU with MPI

Co-Authored-By: Daniel Arndt <daniel.arndt@iwr.uni-heidelberg.de>
examples/CMakeLists.txt
examples/step-64/CMakeLists.txt [new file with mode: 0644]
examples/step-64/doc/builds-on [new file with mode: 0644]
examples/step-64/doc/intro.dox [new file with mode: 0644]
examples/step-64/doc/kind [new file with mode: 0644]
examples/step-64/doc/results.dox [new file with mode: 0644]
examples/step-64/doc/tooltip [new file with mode: 0644]
examples/step-64/step-64.cu [new file with mode: 0644]

index 8c4173b41ffc29a50babff7a2ccc532eb4891069..f89bb06c7149fa68ba00e731b0f4aadc88e38638 100644 (file)
@@ -28,6 +28,7 @@ IF(DEAL_II_COMPONENT_EXAMPLES)
     #
     # Glob Includes:
     #
+    PATTERN "*.cu"
     PATTERN "*.cc"
     PATTERN "*.prm"
     PATTERN "*.inp"
@@ -51,7 +52,9 @@ IF(DEAL_II_COMPONENT_EXAMPLES)
     #
     # Set up all executables:
     #
-    FILE(GLOB _steps ${CMAKE_CURRENT_SOURCE_DIR}/step-*/*.cc)
+    FILE(GLOB _steps 
+      ${CMAKE_CURRENT_SOURCE_DIR}/step-*/*.cc
+      ${CMAKE_CURRENT_SOURCE_DIR}/step-*/*.cu)
     FOREACH(_step ${_steps})
       GET_FILENAME_COMPONENT(_name ${_step} NAME_WE)
       GET_FILENAME_COMPONENT(_directory ${_step} DIRECTORY)
diff --git a/examples/step-64/CMakeLists.txt b/examples/step-64/CMakeLists.txt
new file mode 100644 (file)
index 0000000..5c39199
--- /dev/null
@@ -0,0 +1,57 @@
+##
+#  CMake script for the step-64 tutorial program:
+##
+
+# Set the name of the project and target:
+SET(TARGET "step-64")
+
+# Declare all source files the target consists of. Here, this is only
+# the one step-X.cc file, but as you expand your project you may wish
+# to add other source files as well. If your project becomes much larger,
+# you may want to either replace the following statement by something like
+#  FILE(GLOB_RECURSE TARGET_SRC  "source/*.cc")
+#  FILE(GLOB_RECURSE TARGET_INC  "include/*.h")
+#  SET(TARGET_SRC ${TARGET_SRC}  ${TARGET_INC})
+# or switch altogether to the large project CMakeLists.txt file discussed
+# in the "CMake in user projects" page accessible from the "User info"
+# page of the documentation.
+SET(TARGET_SRC
+  ${TARGET}.cu
+  )
+
+# Usually, you will not need to modify anything beyond this point...
+
+CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
+
+FIND_PACKAGE(deal.II 9.0.0 QUIET
+  HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
+  )
+IF(NOT ${deal.II_FOUND})
+  MESSAGE(FATAL_ERROR "\n"
+    "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n"
+    "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
+    "or set an environment variable \"DEAL_II_DIR\" that contains this path."
+    )
+ENDIF()
+
+#
+# Are all dependencies fulfilled?
+#
+IF(NOT DEAL_II_WITH_MPI OR NOT DEAL_II_WITH_P4EST OR NOT DEAL_II_WITH_CUDA) # keep in one line
+  MESSAGE(FATAL_ERROR "
+Error! This tutorial requires a deal.II library that was configured with the following options:
+    DEAL_II_WITH_MPI = ON
+    DEAL_II_WITH_P4EST = ON
+    DEAL_II_WITH_CUDA = ON
+However, the deal.II library found at ${DEAL_II_PATH} was configured with these options
+    DEAL_II_WITH_MPI = ${DEAL_II_WITH_MPI}
+    DEAL_II_WITH_P4EST = ${DEAL_II_WITH_P4EST}
+    DEAL_II_WITH_CUDA = ${DEAL_II_WITH_CUDA}
+which conflict with the requirements."
+    )
+ENDIF()
+
+
+DEAL_II_INITIALIZE_CACHED_VARIABLES()
+PROJECT(${TARGET})
+DEAL_II_INVOKE_AUTOPILOT()
diff --git a/examples/step-64/doc/builds-on b/examples/step-64/doc/builds-on
new file mode 100644 (file)
index 0000000..8888d17
--- /dev/null
@@ -0,0 +1 @@
+step-37
diff --git a/examples/step-64/doc/intro.dox b/examples/step-64/doc/intro.dox
new file mode 100644 (file)
index 0000000..85e3121
--- /dev/null
@@ -0,0 +1,103 @@
+<br>
+
+<i>
+This program was contributed by Bruno Turcksin and Daniel Arndt, Oak Ridge National Laboratory.
+</i>
+
+
+<h1>Introduction</h1>
+
+This example shows how to implement a matrix-free method on the GPU using CUDA
+for the Helmholtz equation with a variable coefficient on a hypercube. The linear
+system will be solved using CG and MPI.
+
+In the last few years, heterogeneous computing in general and GPUs in particular
+have gained a lot of popularity. This is because GPUs offer better computing
+capabilities and memory bandwidth than CPUs for a given power budget. GPUs are also
+the most popular architecture for machine learning. Therefore, it might be
+interesting to be able to efficiently run a simulation alongside a machine
+learning code.
+
+While we have tried to keep the interfaces of the matrix-free classes for the
+CPU and the GPU as close as possible, there are a few differences. When using
+the matrix-free framework on a GPU, one must write some CUDA code. However, the
+amount is fairly small and the use of CUDA is limited to a few keywords.
+
+<h3>The test case</h3>
+
+In this example, we consider the Helmholtz problem @f{eqnarray*} - \nabla \cdot
+\nabla u + a(\mathbf x) u &=&1,\\ u &=& 0 \quad \text{on } \partial \Omega @f}
+where $a(\mathbf x)$ is a variable coefficient.
+
+We choose $\Omega=[0,1]^3$ as domain and $a(\mathbf x)=\frac{10}{0.05 +
+2\|\mathbf x\|^2}$. Since the coefficient is symmetric around the origin but
+the domain is not, we will end up with a non-symmetric solution.
+
+<h3>Moving data to and from the device</h3>
+
+GPUs (we will use the term "device" from now on to refer to the GPU) have their
+own memory that is separate from the memory accessible to the CPU (which we will
+call the "host" from now on). A typical calculation on the device can be divided
+into three separate steps:
+ 1) the data is moved from the host to the device,
+ 2) the computation is done on the device,
+ 3) the result is moved back from the device to the host.
+The data movement can either be done manually by the user or automatically
+using UVM (Unified Virtual Memory). In deal.II, only the first method is
+supported. While it means an extra burden for the user, it allows better
+control over data movement and, more importantly, it avoids mistakenly running
+important kernels on the host instead of the device.
+
+The data movement in deal.II is done using
+LinearAlgebra::ReadWriteVector<Number>. These vectors can be seen as buffers on
+the host that are used to either store data coming from the device or to send
+data to the device. There are two types of vectors that can be used on the device:
+LinearAlgebra::CUDAWrappers::Vector<Number>, which is similar to the more common
+Vector<Number>, and LinearAlgebra::distributed::Vector<Number,
+MemorySpace::CUDA>, which is a regular 
+LinearAlgebra::distributed::Vector<Number> where we have specified which memory
+space to use. The default value if the memory space is not specified is
+MemorySpace::Host.
+
+Next, we show how to move data to/from
+LinearAlgebra::CUDAWrappers::Vector<Number>:
+<code>
+unsigned int size = 10;
+LinearAlgebra::CUDAWrappers::Vector<double> vector_dev(size);
+LinearAlgebra::ReadWriteVector<double> rw_vector(size);
+// Fill rw_vector...
+// Move the data to the device.
+vector_dev.import(rw_vector, VectorOperation::insert);
+// Do some computations on the device.
+// Move the data back to the host.
+rw_vector.import(vector_dev, VectorOperation::insert);
+</code>
+Using LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> is similar
+but the import() stage may involve MPI communication:
+<code>
+IndexSet locally_owned_dofs, locally_relevant_dofs;
+// Fill the two IndexSet...
+LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>
+distributed_vector_dev(locally_owned_dofs, MPI_COMM_WORLD);
+// Create the ReadWriteVector using an IndexSet instead of the size
+LinearAlgebra::ReadWriteVector<double> owned_rw_vector(locally_owned_dofs);
+// Fill owned_rw_vector
+// Move the data to the device
+distributed_vector_dev.import(owned_rw_vector, VectorOperation::insert);
+// Do some computations on the device
+// Create a ReadWriteVector with a different IndexSet
+LinearAlgebra::ReadWriteVector<double> relevant_rw_vector(locally_relevant_dofs);
+// Move the data to the host and do an MPI communication
+relevant_rw_vector.import(distributed_vector_dev, VectorOperation::insert);
+</code>
+
+import() supports two kinds of VectorOperation: VectorOperation::insert and
+VectorOperation::add.
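+
+For example, reusing the vectors from the first snippet above, VectorOperation::add
+accumulates the values coming from the device into whatever the host buffer already
+contains instead of overwriting it:
+<code>
+// Add the device values to the values already stored in rw_vector.
+rw_vector.import(vector_dev, VectorOperation::add);
+</code>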
+
+
+<h3>Matrix-vector product implementation</h3>
+
+The code necessary to evaluate the matrix-free operator on the device is very
+similar to the one on the host. There are, however, a few differences, the main
+ones being that the equivalent of step-37's local_apply() function and the loop
+over quadrature points both need to be encapsulated in their own functors, as
+sketched below.
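+
+As a rough sketch (the complete definitions appear in the program below), the two
+functors have the following shape: one is applied at every quadrature point, the
+other performs the cell-wise evaluation:
+<code>
+template <int dim, int fe_degree>
+class HelmholtzOperatorQuad
+{
+public:
+  // Called once for each quadrature point q of a cell.
+  __device__ void
+  operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
+             const unsigned int q) const;
+};
+
+template <int dim, int fe_degree>
+class LocalHelmholtzOperator
+{
+public:
+  // Called once per cell; plays the role of local_apply() in step-37.
+  __device__ void operator()(
+    const unsigned int cell,
+    const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
+    CUDAWrappers::SharedData<dim, double> *shared_data,
+    const double *src,
+    double *dst) const;
+};
+</code>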
diff --git a/examples/step-64/doc/kind b/examples/step-64/doc/kind
new file mode 100644 (file)
index 0000000..c1d9154
--- /dev/null
@@ -0,0 +1 @@
+techniques
diff --git a/examples/step-64/doc/results.dox b/examples/step-64/doc/results.dox
new file mode 100644 (file)
index 0000000..efcbca3
--- /dev/null
@@ -0,0 +1,10 @@
+<h1>Results</h1>
+
+<a name="extensions"></a>
+<h3> Possible extensions </h3>
+
+Currently, there is no preconditioner used at all. This is mainly because
+constructing an efficient matrix-free preconditioner is non-trivial.
+However, simple choices that only require the diagonal of the corresponding matrix
+are good candidates. In particular, one could extend the tutorial to use multigrid
+with Chebyshev smoothers similarly to step-37.
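+
+As a minimal sketch of what such an extension would have to provide (the class name
+here is hypothetical), any preconditioner handed to SolverCG only needs to expose a
+vmult() operating on the same vector type as the operator:
+<code>
+// Hypothetical Jacobi-type preconditioner: vmult() would scale the entries of
+// src by the inverse diagonal of the Helmholtz operator and write them to dst.
+class DiagonalPreconditioner
+{
+public:
+  void vmult(
+    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &dst,
+    const LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &src) const;
+};
+</code>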
diff --git a/examples/step-64/doc/tooltip b/examples/step-64/doc/tooltip
new file mode 100644 (file)
index 0000000..0bb91e2
--- /dev/null
@@ -0,0 +1 @@
+Solving a Helmholtz problem with a matrix-free method on the GPU using MPI
diff --git a/examples/step-64/step-64.cu b/examples/step-64/step-64.cu
new file mode 100644 (file)
index 0000000..8c727e5
--- /dev/null
@@ -0,0 +1,576 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2019 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+
+ *
+ * Authors: Bruno Turcksin, Daniel Arndt, Oak Ridge National Laboratory, 2019
+ */
+
+// First include the necessary files from the deal.II library known from the
+// previous tutorials.
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/quadrature_lib.h>
+
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/solver_cg.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/vector_tools.h>
+
+// This includes the data structures for the implementation of matrix-free
+// methods on GPU
+#include <deal.II/base/cuda.h>
+
+#include <deal.II/matrix_free/cuda_fe_evaluation.h>
+#include <deal.II/matrix_free/cuda_matrix_free.h>
+#include <deal.II/matrix_free/operators.h>
+
+#include <fstream>
+
+namespace Step64
+{
+  using namespace dealii;
+
+
+  // Define a class that implements the varying coefficient we want to use in
+  // the Helmholtz operator.
+  // Later, we want to pass an object of this type to a
+  // CUDAWrappers::MatrixFree<dim, double> object that expects the class to have
+  // an operator() that fills, for a given cell, the coefficient array passed to
+  // the constructor.
+  template <int dim, int fe_degree>
+  class VaryingCoefficientFunctor
+  {
+  public:
+    VaryingCoefficientFunctor(double *coefficient)
+      : coef(coefficient)
+    {}
+
+    __device__ void operator()(
+      const unsigned int                                          cell,
+      const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data);
+
+    // Since CUDAWrappers::MatrixFree::Data doesn't know about the size of its
+    // arrays, we need to store the number of quadrature points and the number
+    // of degrees of freedom in this class to do necessary index conversions.
+    static const unsigned int n_dofs_1d = fe_degree + 1;
+    static const unsigned int n_local_dofs =
+      dealii::Utilities::pow(n_dofs_1d, dim);
+    static const unsigned int n_q_points =
+      dealii::Utilities::pow(n_dofs_1d, dim);
+
+  private:
+    double *coef;
+  };
+
+
+
+  template <int dim, int fe_degree>
+  __device__ void VaryingCoefficientFunctor<dim, fe_degree>::operator()(
+    const unsigned int                                          cell,
+    const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data)
+  {
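+    // Determine the index of this quadrature point in the flat coefficient
+    // array and its location in real space.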
+    const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
+      cell, gpu_data, n_dofs_1d, n_q_points);
+    const auto q_point =
+      CUDAWrappers::get_quadrature_point<dim, double>(cell,
+                                                      gpu_data,
+                                                      n_dofs_1d);
+
+    double p_square = 0.;
+    for (unsigned int i = 0; i < dim; ++i)
+      {
+        double coord = q_point[i];
+        p_square += coord * coord;
+      }
+    coef[pos] = 10. / (0.05 + 2. * p_square);
+  }
+
+
+  // The class HelmholtzOperatorQuad implements the evaluation of the Helmholtz
+  // operator at each quadrature point. It uses a mechanism similar to the
+  // MatrixFree framework introduced in step-37.
+  template <int dim, int fe_degree>
+  class HelmholtzOperatorQuad
+  {
+  public:
+    __device__ HelmholtzOperatorQuad(double coef)
+      : coef(coef)
+    {}
+
+    __device__ void
+    operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
+               const unsigned int                          q) const;
+
+  private:
+    double coef;
+  };
+
+
+  // The Helmholtz operator reads
+  // \begin{align*}
+  // -\Delta u + a(\mathbf x) u
+  // \end{align*}
+  // and consists of two parts that correspond to the two function calls
+  // here.
+  template <int dim, int fe_degree>
+  __device__ void HelmholtzOperatorQuad<dim, fe_degree>::
+                  operator()(CUDAWrappers::FEEvaluation<dim, fe_degree> *fe_eval,
+             const unsigned int                          q) const
+  {
+    fe_eval->submit_value(coef * fe_eval->get_value(q), q);
+    fe_eval->submit_gradient(fe_eval->get_gradient(q), q);
+  }
+
+
+  // Finally, we need to define a class that implements the whole operator
+  // evaluation, i.e., what corresponds to a matrix-vector product in
+  // matrix-based approaches.
+  template <int dim, int fe_degree>
+  class LocalHelmholtzOperator
+  {
+  public:
+    LocalHelmholtzOperator(double *coefficient)
+      : coef(coefficient)
+    {}
+
+    __device__ void operator()(
+      const unsigned int                                          cell,
+      const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
+      CUDAWrappers::SharedData<dim, double> *                     shared_data,
+      const double *                                              src,
+      double *                                                    dst) const;
+
+    // Again, the CUDAWrappers::MatrixFree object doesn't know about the number
+    // of degrees of freedom and the number of quadrature points so we need
+    // to store these for index calculations in the call operator.
+    static const unsigned int n_dofs_1d    = fe_degree + 1;
+    static const unsigned int n_local_dofs = Utilities::pow(fe_degree + 1, dim);
+    static const unsigned int n_q_points   = Utilities::pow(fe_degree + 1, dim);
+
+  private:
+    double *coef;
+  };
+
+
+  // This is the call operator that performs the Helmholtz operator evaluation
+  // on a given cell similar to the MatrixFree framework. In particular, we need
+  // access to both values and gradients of the source vector and we write value
+  // and gradient information to the destination vector.
+  template <int dim, int fe_degree>
+  __device__ void LocalHelmholtzOperator<dim, fe_degree>::operator()(
+    const unsigned int                                          cell,
+    const typename CUDAWrappers::MatrixFree<dim, double>::Data *gpu_data,
+    CUDAWrappers::SharedData<dim, double> *                     shared_data,
+    const double *                                              src,
+    double *                                                    dst) const
+  {
+    const unsigned int pos = CUDAWrappers::local_q_point_id<dim, double>(
+      cell, gpu_data, n_dofs_1d, n_q_points);
+
+    CUDAWrappers::FEEvaluation<dim, fe_degree, fe_degree + 1, 1, double>
+      fe_eval(cell, gpu_data, shared_data);
+    fe_eval.read_dof_values(src);
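+    // The two boolean arguments of evaluate() (and later integrate()) select
+    // function values and gradients, respectively; we need both here.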
+    fe_eval.evaluate(true, true);
+    fe_eval.apply_quad_point_operations(
+      HelmholtzOperatorQuad<dim, fe_degree>(coef[pos]));
+    fe_eval.integrate(true, true);
+    fe_eval.distribute_local_to_global(dst);
+  }
+
+
+
+  // The HelmholtzOperator class acts as a wrapper for LocalHelmholtzOperator
+  // defining an interface that can be used with linear solvers like SolverCG.
+  template <int dim, int fe_degree>
+  class HelmholtzOperator
+  {
+  public:
+    HelmholtzOperator(const DoFHandler<dim> &          dof_handler,
+                      const AffineConstraints<double> &constraints);
+
+    void
+    vmult(LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &dst,
+          const LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>
+            &src) const;
+
+  private:
+    CUDAWrappers::MatrixFree<dim, double>       mf_data;
+    LinearAlgebra::CUDAWrappers::Vector<double> coef;
+  };
+
+
+
+  template <int dim, int fe_degree>
+  HelmholtzOperator<dim, fe_degree>::HelmholtzOperator(
+    const DoFHandler<dim> &          dof_handler,
+    const AffineConstraints<double> &constraints)
+  {
+    MappingQGeneric<dim> mapping(fe_degree);
+    typename CUDAWrappers::MatrixFree<dim, double>::AdditionalData
+      additional_data;
+    additional_data.mapping_update_flags = update_values | update_gradients |
+                                           update_JxW_values |
+                                           update_quadrature_points;
+    const QGauss<1> quad(fe_degree + 1);
+    mf_data.reinit(mapping, dof_handler, constraints, quad, additional_data);
+
+    // We need to store the value of the coefficient for each quadrature point
+    // in every locally owned cell.
+    const unsigned int n_owned_cells =
+      std::distance<decltype(dof_handler.begin_active())>(
+        dof_handler.begin_active(), dof_handler.end());
+    coef.reinit(Utilities::pow(fe_degree + 1, dim) * n_owned_cells);
+    VaryingCoefficientFunctor<dim, fe_degree> functor(coef.get_values());
+    mf_data.evaluate_coefficients(functor);
+  }
+
+
+  // When applying the Helmholtz operator, we have to be careful to handle
+  // boundary conditions correctly. Since the local operator doesn't know about
+  // constraints, we have to copy the correct values from the source to the
+  // destination vector afterwards.
+  template <int dim, int fe_degree>
+  void HelmholtzOperator<dim, fe_degree>::vmult(
+    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &      dst,
+    const LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> &src)
+    const
+  {
+    dst = 0.;
+    LocalHelmholtzOperator<dim, fe_degree> helmholtz_operator(
+      coef.get_values());
+    mf_data.cell_loop(helmholtz_operator, src, dst);
+    mf_data.copy_constrained_values(src, dst);
+  }
+
+
+  // This class defines the usual framework we use for tutorial programs. The
+  // only points worth commenting on are the solve() function and the choice of
+  // vector types.
+  template <int dim, int fe_degree>
+  class HelmholtzProblem
+  {
+  public:
+    HelmholtzProblem();
+    ~HelmholtzProblem();
+
+    void run();
+
+  private:
+    void setup_system();
+
+    void assemble_rhs();
+
+    void solve();
+
+    void output_results(const unsigned int cycle) const;
+
+    MPI_Comm mpi_communicator;
+
+    parallel::distributed::Triangulation<dim> triangulation;
+
+    DoFHandler<dim> dof_handler;
+    FE_Q<dim>       fe;
+
+    IndexSet locally_owned_dofs;
+    IndexSet locally_relevant_dofs;
+
+    AffineConstraints<double>                          constraints;
+    std::unique_ptr<HelmholtzOperator<dim, fe_degree>> system_matrix_dev;
+
+    // Since all the operations in the solve() function are executed on the
+    // graphics card, it is necessary that the vectors used store their values
+    // on the GPU as well. LinearAlgebra::distributed::Vector can be told which
+    // memory space to use. There is also LinearAlgebra::CUDAWrappers::Vector
+    // that always uses GPU memory storage but doesn't work with MPI. It might
+    // be worth noting that the communication between different MPI processes
+    // can be improved if the MPI implementation is CUDA-aware and the configure
+    // flag DEAL_II_WITH_CUDA_AWARE_MPI is enabled.
+    //
+    // Here, we also have a finite element vector with CPU storage such that we
+    // can view and display the solution as usual.
+    LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
+                                                                  ghost_solution_host;
+    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA> solution_dev;
+    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>
+      system_rhs_dev;
+
+    ConditionalOStream pcout;
+  };
+
+
+  // The implementation of all the remaining functions of this class, apart from
+  // HelmholtzProblem::solve(), doesn't contain anything new and we won't comment
+  // further on it.
+  template <int dim, int fe_degree>
+  HelmholtzProblem<dim, fe_degree>::HelmholtzProblem()
+    : mpi_communicator(MPI_COMM_WORLD)
+    , triangulation(mpi_communicator)
+    , dof_handler(triangulation)
+    , fe(fe_degree)
+    , pcout(std::cout, Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+  {}
+
+
+
+  template <int dim, int fe_degree>
+  HelmholtzProblem<dim, fe_degree>::~HelmholtzProblem()
+  {
+    dof_handler.clear();
+  }
+
+
+
+  template <int dim, int fe_degree>
+  void HelmholtzProblem<dim, fe_degree>::setup_system()
+  {
+    dof_handler.distribute_dofs(fe);
+
+    locally_owned_dofs = dof_handler.locally_owned_dofs();
+    DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+    system_rhs_dev.reinit(locally_owned_dofs, mpi_communicator);
+
+    constraints.clear();
+    constraints.reinit(locally_relevant_dofs);
+    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
+    VectorTools::interpolate_boundary_values(dof_handler,
+                                             0,
+                                             Functions::ZeroFunction<dim>(),
+                                             constraints);
+    constraints.close();
+    system_matrix_dev.reset(
+      new HelmholtzOperator<dim, fe_degree>(dof_handler, constraints));
+
+    ghost_solution_host.reinit(locally_owned_dofs,
+                               locally_relevant_dofs,
+                               mpi_communicator);
+    solution_dev.reinit(locally_owned_dofs, mpi_communicator);
+    system_rhs_dev.reinit(locally_owned_dofs, mpi_communicator);
+  }
+
+
+
+  template <int dim, int fe_degree>
+  void HelmholtzProblem<dim, fe_degree>::assemble_rhs()
+  {
+    LinearAlgebra::distributed::Vector<double, MemorySpace::Host>
+                      system_rhs_host(locally_owned_dofs,
+                      locally_relevant_dofs,
+                      mpi_communicator);
+    const QGauss<dim> quadrature_formula(fe_degree + 1);
+
+    FEValues<dim> fe_values(fe,
+                            quadrature_formula,
+                            update_values | update_quadrature_points |
+                              update_JxW_values);
+
+    const unsigned int dofs_per_cell = fe.dofs_per_cell;
+    const unsigned int n_q_points    = quadrature_formula.size();
+
+    Vector<double> cell_rhs(dofs_per_cell);
+
+    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
+
+    for (const auto &cell : dof_handler.active_cell_iterators())
+      if (cell->is_locally_owned())
+        {
+          cell_rhs = 0;
+
+          fe_values.reinit(cell);
+
+          for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
+            {
+              for (unsigned int i = 0; i < dofs_per_cell; ++i)
+                cell_rhs(i) += (fe_values.shape_value(i, q_index) * 1.0 *
+                                fe_values.JxW(q_index));
+            }
+
+          // Finally, transfer the contributions from @p cell_rhs into the global
+          // right-hand side vector, setting constrained entries to zero. This is
+          // necessary for CG to converge since the ansatz and solution spaces
+          // have these degrees of freedom constrained as well.
+          // An alternative would be to modify vmult() so that the constrained
+          // dofs of the source vector are set to zero there.
+          cell->get_dof_indices(local_dof_indices);
+          constraints.distribute_local_to_global(cell_rhs,
+                                                 local_dof_indices,
+                                                 system_rhs_host);
+        }
+    system_rhs_host.compress(VectorOperation::add);
+
+    // We can't directly copy the values from the host to the device but need to
+    // use an intermediate object of type LinearAlgebra::ReadWriteVector to
+    // construct the correct communication pattern.
+    LinearAlgebra::ReadWriteVector<double> rw_vector(locally_owned_dofs);
+    rw_vector.import(system_rhs_host, VectorOperation::insert);
+    system_rhs_dev.import(rw_vector, VectorOperation::insert);
+  }
+
+
+
+  // This solve() function finally contains the calls to the new classes
+  // previously discussed. Here we don't use any preconditioner, i.e., the
+  // identity, to focus just on the peculiarities of the CUDA MatrixFree
+  // framework. Of course, in a real application the choice of a suitable
+  // preconditioner is crucial, but we face at least the same restrictions as in
+  // step-37 since matrix entries are computed on the fly and not stored.
+  template <int dim, int fe_degree>
+  void HelmholtzProblem<dim, fe_degree>::solve()
+  {
+    PreconditionIdentity preconditioner;
+
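+    // Allow at most as many iterations as there are unknowns and require the
+    // residual to drop below 1e-12 times the norm of the right-hand side.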
+    SolverControl solver_control(system_rhs_dev.size(),
+                                 1e-12 * system_rhs_dev.l2_norm());
+    SolverCG<LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>> cg(
+      solver_control);
+    cg.solve(*system_matrix_dev, solution_dev, system_rhs_dev, preconditioner);
+
+    // Copy the solution from the device to the host to be able to view its
+    // values and display it in output_results().
+    LinearAlgebra::ReadWriteVector<double> rw_vector(locally_owned_dofs);
+    rw_vector.import(solution_dev, VectorOperation::insert);
+    ghost_solution_host.import(rw_vector, VectorOperation::insert);
+
+    constraints.distribute(ghost_solution_host);
+
+    std::cout << "solution norm: " << ghost_solution_host.l2_norm()
+              << std::endl;
+
+    ghost_solution_host.update_ghost_values();
+  }
+
+  // The output_results() function is as usual since we have already copied the
+  // values back from the GPU to the CPU.
+  template <int dim, int fe_degree>
+  void HelmholtzProblem<dim, fe_degree>::output_results(
+    const unsigned int cycle) const
+  {
+    DataOut<dim> data_out;
+
+    data_out.attach_dof_handler(dof_handler);
+    data_out.add_data_vector(ghost_solution_host, "solution");
+    data_out.build_patches();
+
+    std::ofstream output(
+      "solution-" + std::to_string(cycle) + "." +
+      std::to_string(Utilities::MPI::this_mpi_process(mpi_communicator)) +
+      ".vtu");
+    DataOutBase::VtkFlags flags;
+    flags.compression_level = DataOutBase::VtkFlags::best_speed;
+    data_out.set_flags(flags);
+    data_out.write_vtu(output);
+
+    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+      {
+        std::vector<std::string> filenames;
+        for (unsigned int i = 0;
+             i < Utilities::MPI::n_mpi_processes(mpi_communicator);
+             ++i)
+          filenames.emplace_back("solution-" + std::to_string(cycle) + "." +
+                                 std::to_string(i) + ".vtu");
+
+        std::string master_name =
+          "solution-" + Utilities::to_string(cycle) + ".pvtu";
+        std::ofstream master_output(master_name);
+        data_out.write_pvtu_record(master_output, filenames);
+      }
+  }
+
+
+  // There is nothing surprising in the run() function either. We simply compute
+  // the solution on a series of (globally) refined meshes.
+  template <int dim, int fe_degree>
+  void HelmholtzProblem<dim, fe_degree>::run()
+  {
+    for (unsigned int cycle = 0; cycle < 5 - dim; ++cycle)
+      {
+        pcout << "Cycle " << cycle << std::endl;
+
+        if (cycle == 0)
+          {
+            GridGenerator::hyper_cube(triangulation, 0., 1.);
+            triangulation.refine_global(4 - dim);
+          }
+        triangulation.refine_global(1);
+        setup_system();
+        assemble_rhs();
+        solve();
+        output_results(cycle);
+        pcout << std::endl;
+      }
+  }
+} // namespace Step64
+
+int main(int argc, char *argv[])
+{
+  try
+    {
+      using namespace Step64;
+
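+      // The last argument limits deal.II to a single thread per MPI process.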
+      Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
+
+      // By default, all the ranks will try to access device 0.
+      // If we are running with MPI support it is better to address different
+      // graphics cards for different processes even if only one node is used.
+      // The choice below is based on the MPI process id. MPI needs to be
+      // initialized before using this function.
+      int         n_devices       = 0;
+      cudaError_t cuda_error_code = cudaGetDeviceCount(&n_devices);
+      AssertCuda(cuda_error_code);
+      const unsigned int my_id =
+        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+      const int device_id = my_id % n_devices;
+      cuda_error_code     = cudaSetDevice(device_id);
+      AssertCuda(cuda_error_code);
+
+      HelmholtzProblem<3, 3> helmholtz_problem;
+      helmholtz_problem.run();
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl
+                << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    }
+
+  return 0;
+}
