https://gitweb.dealii.org/ - dealii.git/commitdiff
Allow using PreconditionChebyshev for parallel::distributed::Vector with MemorySpace...
author     Daniel Arndt <arndtd@ornl.gov>
           Tue, 27 Aug 2019 21:54:05 +0000 (17:54 -0400)
committer  Daniel Arndt <arndtd@ornl.gov>
           Wed, 4 Sep 2019 12:48:34 +0000 (08:48 -0400)
include/deal.II/lac/affine_constraints.h
include/deal.II/lac/affine_constraints.templates.h
include/deal.II/lac/precondition.h
tests/cuda/matrix_vector_mf.h
tests/cuda/precondition_03.cu [new file with mode: 0644]
tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=1.output [new file with mode: 0644]
tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=2.output [new file with mode: 0644]

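The user-visible addition is a new AffineConstraints<double> member named
`constraints` in PreconditionChebyshev::AdditionalData, used to zero the
constrained entries of the internally generated initial guess, together with
MemorySpace::Host and MemorySpace::CUDA overloads of the internal helpers.
A minimal usage sketch, assuming a matrix-free operator `op` of some type
`OperatorType`, a diagonal vector `diag`, an AffineConstraints object
`constraints`, and vectors `dst`/`src` (all placeholder names, not part of
this commit):

  // Sketch: PreconditionChebyshev with a CUDA-memory-space vector.
  using VectorType =
    LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>;
  PreconditionChebyshev<OperatorType, VectorType> prec;
  typename PreconditionChebyshev<OperatorType, VectorType>::AdditionalData data;
  // diagonal (Jacobi-type) preconditioner, as in the new test below
  data.preconditioner = std::make_shared<DiagonalMatrix<VectorType>>(diag);
  // new in this commit: constraints used to zero constrained entries of the
  // initial guess for the eigenvalue estimate
  data.constraints.copy_from(constraints);
  prec.initialize(op, data);
  prec.vmult(dst, src);
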
index 7a710868cf645fdb0785b96f396f9f69f1b7b5fa..993f59b07205faef7e105dd23a5513bf348117ef 100644 (file)
@@ -54,6 +54,27 @@ namespace internals
   class GlobalRowsFromLocal;
 }
 
+namespace internal
+{
+  namespace AffineConstraintsImplementation
+  {
+    template <class VectorType>
+    void
+    set_zero_all(const std::vector<types::global_dof_index> &cm,
+                 VectorType &                                vec);
+
+    template <class T>
+    void
+    set_zero_all(const std::vector<types::global_dof_index> &cm,
+                 dealii::Vector<T> &                         vec);
+
+    template <class T>
+    void
+    set_zero_all(const std::vector<types::global_dof_index> &cm,
+                 dealii::BlockVector<T> &                    vec);
+  } // namespace AffineConstraintsImplementation
+} // namespace internal
+
 
 template <typename number>
 class AffineConstraints;
@@ -1675,6 +1696,20 @@ AffineConstraints<number>::set_inhomogeneity(const size_type line_n,
   line_ptr->inhomogeneity  = value;
 }
 
+template <typename number>
+template <class VectorType>
+inline void
+AffineConstraints<number>::set_zero(VectorType &vec) const
+{
+  // since lines is a private member, we cannot pass it to the functions
+  // above. therefore, copy the content which is cheap
+  std::vector<size_type> constrained_lines(lines.size());
+  for (unsigned int i = 0; i < lines.size(); ++i)
+    constrained_lines[i] = lines[i].index;
+  internal::AffineConstraintsImplementation::set_zero_all(constrained_lines,
+                                                          vec);
+}
+
 template <typename number>
 inline types::global_dof_index
 AffineConstraints<number>::n_constraints() const
index 4670b160ae451e9eb9c7437beb0f98595fa175cb..ebe4835f64016486c796af8e9a8c6c8a66b87b47 100644 (file)
@@ -1904,6 +1904,7 @@ namespace internal
       size_type                                                      shift = 0)
     {
       Assert(shift == 0, ExcNotImplemented());
+      (void)shift;
       std::vector<size_type> constrained_local_dofs_host;
       constrained_local_dofs_host.reserve(cm.size());
 
@@ -1993,20 +1994,6 @@ namespace internal
   } // namespace AffineConstraintsImplementation
 } // namespace internal
 
-template <typename number>
-template <class VectorType>
-void
-AffineConstraints<number>::set_zero(VectorType &vec) const
-{
-  // since lines is a private member, we cannot pass it to the functions
-  // above. therefore, copy the content which is cheap
-  std::vector<size_type> constrained_lines(lines.size());
-  for (unsigned int i = 0; i < lines.size(); ++i)
-    constrained_lines[i] = lines[i].index;
-  internal::AffineConstraintsImplementation::set_zero_all(constrained_lines,
-                                                          vec);
-}
-
 template <typename number>
 template <typename VectorType>
 void
index 2f887a3826e5bd086fb923431f98688f6f2fc18d..01cbd8001b782893e59752f2f59c7bce1f48a162 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <deal.II/base/config.h>
 
+#include <deal.II/base/cuda_size.h>
 #include <deal.II/base/memory_space.h>
 #include <deal.II/base/parallel.h>
 #include <deal.II/base/smartpointer.h>
@@ -27,6 +28,7 @@
 #include <deal.II/base/thread_management.h>
 #include <deal.II/base/utilities.h>
 
+#include <deal.II/lac/affine_constraints.h>
 #include <deal.II/lac/diagonal_matrix.h>
 #include <deal.II/lac/solver_cg.h>
 #include <deal.II/lac/vector_memory.h>
@@ -1028,6 +1030,12 @@ public:
                    const double       eig_cg_residual     = 1e-2,
                    const double       max_eigenvalue      = 1);
 
+    /**
+     *  Copy assignment operator.
+     */
+    AdditionalData &
+    operator=(const AdditionalData &other_data);
+
     /**
      * This determines the degree of the Chebyshev polynomial. The degree of
      * the polynomial gives the number of matrix-vector products to be
@@ -1075,6 +1083,12 @@ public:
      */
     double max_eigenvalue;
 
+    /**
+     * Constraints to be used for the operator given. This variable is used to
+     * zero out the correct entries when creating an initial guess.
+     */
+    AffineConstraints<double> constraints;
+
     /**
      * Stores the preconditioner object that the Chebyshev is wrapped around.
      */
@@ -1947,19 +1961,21 @@ namespace internal
     }
 
     // selection for diagonal matrix around parallel deal.II vector
-    template <typename Number, typename MemorySpace>
+    template <typename Number>
     inline void
     vector_updates(
-      const LinearAlgebra::distributed::Vector<Number, MemorySpace> &rhs,
+      const LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &rhs,
       const DiagonalMatrix<
-        LinearAlgebra::distributed::Vector<Number, MemorySpace>> &jacobi,
-      const unsigned int                                       iteration_index,
-      const double                                             factor1,
-      const double                                             factor2,
-      LinearAlgebra::distributed::Vector<Number, MemorySpace> &solution_old,
-      LinearAlgebra::distributed::Vector<Number, MemorySpace> &temp_vector1,
-      LinearAlgebra::distributed::Vector<Number, MemorySpace> &,
-      LinearAlgebra::distributed::Vector<Number, MemorySpace> &solution)
+        LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>> &jacobi,
+      const unsigned int iteration_index,
+      const double       factor1,
+      const double       factor2,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>
+        &solution_old,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>
+        &temp_vector1,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &solution)
     {
       VectorUpdater<Number> upd(rhs.begin(),
                                 jacobi.get_vector().begin(),
@@ -2047,10 +2063,11 @@ namespace internal
       vector.add(-mean_value);
     }
 
-    template <typename Number, typename MemorySpace>
+    template <typename Number>
     void
     set_initial_guess(
-      ::dealii::LinearAlgebra::distributed::Vector<Number, MemorySpace> &vector)
+      ::dealii::LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>
+        &vector)
     {
       // Choose a high-frequency mode consisting of numbers between 0 and 1
       // that is cheap to compute (cheaper than random numbers) but avoids
@@ -2068,6 +2085,54 @@ namespace internal
       vector.add(-mean_value);
     }
 
+
+#  ifdef DEAL_II_COMPILER_CUDA_AWARE
+    template <typename Number>
+    __global__ void
+    set_initial_guess_kernel(const types::global_dof_index offset,
+                             const unsigned int            local_size,
+                             Number *                      values)
+
+    {
+      const unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
+      if (index < local_size)
+        values[index] = (index + offset) % 11;
+    }
+
+    template <typename Number>
+    void
+    set_initial_guess(
+      ::dealii::LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>
+        &vector)
+    {
+      // Choose a high-frequency mode consisting of numbers between 0 and 1
+      // that is cheap to compute (cheaper than random numbers) but avoids
+      // obviously re-occurring numbers in multi-component systems by choosing
+      // a period of 11.
+      // Make initial guess robust with respect to number of processors
+      // by operating on the global index.
+      types::global_dof_index first_local_range = 0;
+      if (!vector.locally_owned_elements().is_empty())
+        first_local_range = vector.locally_owned_elements().nth_index_in_set(0);
+
+      const auto n_local_elements = vector.local_size();
+      const int  n_blocks =
+        1 + (n_local_elements - 1) / CUDAWrappers::block_size;
+      set_initial_guess_kernel<<<n_blocks, CUDAWrappers::block_size>>>(
+        first_local_range, n_local_elements, vector.get_values());
+
+#    ifdef DEBUG
+      // Check that the kernel was launched correctly
+      AssertCuda(cudaGetLastError());
+      // Check that there was no problem during the execution of the kernel
+      AssertCuda(cudaDeviceSynchronize());
+#    endif
+
+      const Number mean_value = vector.mean_value();
+      vector.add(-mean_value);
+    }
+#  endif // DEAL_II_COMPILER_CUDA_AWARE
+
     struct EigenvalueTracker
     {
     public:
@@ -2100,6 +2165,26 @@ inline PreconditionChebyshev<MatrixType, VectorType, PreconditionerType>::
 
 
 
+template <typename MatrixType, class VectorType, typename PreconditionerType>
+inline typename PreconditionChebyshev<MatrixType,
+                                      VectorType,
+                                      PreconditionerType>::AdditionalData &
+                  PreconditionChebyshev<MatrixType, VectorType, PreconditionerType>::
+  AdditionalData::operator=(const AdditionalData &other_data)
+{
+  degree              = other_data.degree;
+  smoothing_range     = other_data.smoothing_range;
+  eig_cg_n_iterations = other_data.eig_cg_n_iterations;
+  eig_cg_residual     = other_data.eig_cg_residual;
+  max_eigenvalue      = other_data.max_eigenvalue;
+  preconditioner      = other_data.preconditioner;
+  constraints.copy_from(other_data.constraints);
+
+  return *this;
+}
+
+
+
 template <typename MatrixType, typename VectorType, typename PreconditionerType>
 inline PreconditionChebyshev<MatrixType, VectorType, PreconditionerType>::
   PreconditionChebyshev()
@@ -2191,6 +2276,7 @@ PreconditionChebyshev<MatrixType, VectorType, PreconditionerType>::
       // one entry is different to trigger high frequencies
       internal::PreconditionChebyshevImplementation::set_initial_guess(
         temp_vector1);
+      data.constraints.set_zero(temp_vector1);
 
       try
         {
index 0fddfdcdd613f5ef1dff291d4fbaf32e1e8e7d1b..ef4a0c3d4f1ab53f6a08cb841bd738eb5774ca3b 100644 (file)
@@ -161,7 +161,7 @@ template <int dim,
           typename Number,
           typename VectorType = LinearAlgebra::CUDAWrappers::Vector<Number>,
           int n_q_points_1d   = fe_degree + 1>
-class MatrixFreeTest
+class MatrixFreeTest : public Subscriptor
 {
 public:
   MatrixFreeTest(const CUDAWrappers::MatrixFree<dim, Number> &data_in,
@@ -169,7 +169,18 @@ public:
                  const bool constant_coeff = true);
 
   void
-  vmult(VectorType &dst, const VectorType &src);
+  vmult(VectorType &dst, const VectorType &src) const;
+
+  Number
+  el(const unsigned int row, const unsigned int col) const;
+
+  types::global_dof_index
+  m() const
+  {
+    return internal_m;
+  }
+
+  types::global_dof_index internal_m;
 
 private:
   const CUDAWrappers::MatrixFree<dim, Number> &data;
@@ -200,7 +211,20 @@ MatrixFreeTest<dim, fe_degree, Number, VectorType, n_q_points_1d>::
     }
 }
 
-
+template <int dim,
+          int fe_degree,
+          typename Number,
+          typename VectorType,
+          int n_q_points_1d>
+Number
+MatrixFreeTest<dim, fe_degree, Number, VectorType, n_q_points_1d>::el(
+  const unsigned int row,
+  const unsigned int col) const
+{
+  (void)col;
+  Assert(false, ExcNotImplemented());
+  return 0.;
+}
 
 template <int dim,
           int fe_degree,
@@ -210,7 +234,7 @@ template <int dim,
 void
 MatrixFreeTest<dim, fe_degree, Number, VectorType, n_q_points_1d>::vmult(
   VectorType &      dst,
-  const VectorType &src)
+  const VectorType &src) const
 {
   dst = static_cast<Number>(0.);
   HelmholtzOperator<dim, fe_degree, Number, n_q_points_1d> helmholtz_operator(
diff --git a/tests/cuda/precondition_03.cu b/tests/cuda/precondition_03.cu
new file mode 100644 (file)
index 0000000..eba6785
--- /dev/null
@@ -0,0 +1,286 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// this tests the correctness of MPI-parallel matrix free matrix-vector
+// products by comparing the result with a Trilinos sparse matrix assembled in
+// the usual way. The mesh is distributed among processors (hypercube) and has
+// both hanging nodes (by randomly refining some cells, so the mesh is going
+// to be different when run with different numbers of processors) and
+// Dirichlet boundary conditions
+
+#include <deal.II/base/function.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/manifold_lib.h>
+
+#include <deal.II/lac/affine_constraints.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#include <iostream>
+
+#include "../tests.h"
+#include "matrix_vector_mf.h"
+
+
+
+template <int dim, int fe_degree>
+void
+test()
+{
+  typedef double Number;
+
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator cell = tria.begin_active(),
+                                                    endc = tria.end();
+  for (; cell != endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm() < 0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels() - 1)->is_locally_owned())
+    tria.begin(tria.n_levels() - 1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active();
+  for (unsigned int i = 0; i < 10 - 3 * dim; ++i)
+    {
+      cell                 = tria.begin_active();
+      unsigned int counter = 0;
+      for (; cell != endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7 - i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim>       fe(fe_degree);
+  DoFHandler<dim> dof(tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs(dof, relevant_set);
+
+  AffineConstraints<double> constraints(relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values(dof,
+                                           0,
+                                           Functions::ZeroFunction<dim>(),
+                                           constraints);
+  constraints.close();
+
+  deallog << "Testing " << dof.get_fe().get_name() << std::endl;
+
+  MappingQGeneric<dim>                  mapping(fe_degree);
+  CUDAWrappers::MatrixFree<dim, Number> mf_data;
+  const QGauss<1>                       quad(fe_degree + 1);
+  typename CUDAWrappers::MatrixFree<dim, Number>::AdditionalData
+    additional_data;
+  additional_data.mapping_update_flags = update_values | update_gradients |
+                                         update_JxW_values |
+                                         update_quadrature_points;
+  mf_data.reinit(mapping, dof, constraints, quad, additional_data);
+
+  const unsigned int coef_size =
+    tria.n_locally_owned_active_cells() * std::pow(fe_degree + 1, dim);
+  MatrixFreeTest<dim,
+                 fe_degree,
+                 Number,
+                 LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>>
+    mf(mf_data, coef_size);
+  mf.internal_m = owned_set.size();
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> in_dev(
+    owned_set, MPI_COMM_WORLD);
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA> out_dev(
+    owned_set, MPI_COMM_WORLD);
+
+  LinearAlgebra::ReadWriteVector<Number> rw_in(owned_set);
+  for (unsigned int i = 0; i < in_dev.local_size(); ++i)
+    {
+      const unsigned int glob_index = owned_set.nth_index_in_set(i);
+      if (constraints.is_constrained(glob_index))
+        continue;
+      rw_in.local_element(i) = random_value<double>();
+    }
+  in_dev.import(rw_in, VectorOperation::insert);
+
+  // assemble trilinos sparse matrix with
+  // (\nabla v, \nabla u) + (v, 10 * u) for
+  // reference
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> in_host(
+    owned_set, MPI_COMM_WORLD);
+  in_host.import(rw_in, VectorOperation::insert);
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> ref(
+    owned_set, MPI_COMM_WORLD);
+  TrilinosWrappers::SparseMatrix sparse_matrix;
+  {
+    TrilinosWrappers::SparsityPattern csp(owned_set, MPI_COMM_WORLD);
+    DoFTools::make_sparsity_pattern(dof,
+                                    csp,
+                                    constraints,
+                                    true,
+                                    Utilities::MPI::this_mpi_process(
+                                      MPI_COMM_WORLD));
+    csp.compress();
+    sparse_matrix.reinit(csp);
+  }
+  {
+    QGauss<dim> quadrature_formula(fe_degree + 1);
+
+    FEValues<dim> fe_values(dof.get_fe(),
+                            quadrature_formula,
+                            update_values | update_gradients |
+                              update_JxW_values);
+
+    const unsigned int dofs_per_cell = dof.get_fe().dofs_per_cell;
+    const unsigned int n_q_points    = quadrature_formula.size();
+
+    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
+    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
+
+    typename DoFHandler<dim>::active_cell_iterator cell = dof.begin_active(),
+                                                   endc = dof.end();
+    for (; cell != endc; ++cell)
+      if (cell->is_locally_owned())
+        {
+          cell_matrix = 0;
+          fe_values.reinit(cell);
+
+          for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
+            for (unsigned int i = 0; i < dofs_per_cell; ++i)
+              {
+                for (unsigned int j = 0; j < dofs_per_cell; ++j)
+                  cell_matrix(i, j) +=
+                    ((fe_values.shape_grad(i, q_point) *
+                        fe_values.shape_grad(j, q_point) +
+                      10. * fe_values.shape_value(i, q_point) *
+                        fe_values.shape_value(j, q_point)) *
+                     fe_values.JxW(q_point));
+              }
+
+          cell->get_dof_indices(local_dof_indices);
+          constraints.distribute_local_to_global(cell_matrix,
+                                                 local_dof_indices,
+                                                 sparse_matrix);
+        }
+  }
+  sparse_matrix.compress(VectorOperation::add);
+
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> matrix_diagonal(
+    ref.get_partitioner());
+  for (const auto index : matrix_diagonal.locally_owned_elements())
+    matrix_diagonal[index] = sparse_matrix(index, index);
+
+  using HostPreconditionerType = PreconditionChebyshev<
+    TrilinosWrappers::SparseMatrix,
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>;
+  HostPreconditionerType                          precondition_chebyshev_host;
+  typename HostPreconditionerType::AdditionalData host_preconditioner_data;
+  host_preconditioner_data.preconditioner = std::make_shared<DiagonalMatrix<
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>>(
+    matrix_diagonal);
+  host_preconditioner_data.constraints.copy_from(constraints);
+  precondition_chebyshev_host.initialize(sparse_matrix,
+                                         host_preconditioner_data);
+
+  using DevicePreconditionerType = PreconditionChebyshev<
+    MatrixFreeTest<
+      dim,
+      fe_degree,
+      Number,
+      LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>>,
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>>;
+  DevicePreconditionerType precondition_chebyshev_device;
+  typename DevicePreconditionerType::AdditionalData device_preconditioner_data;
+  device_preconditioner_data.preconditioner = std::make_shared<DiagonalMatrix<
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>>>(
+    LinearAlgebra::distributed::Vector<Number, MemorySpace::CUDA>(
+      ref.get_partitioner()));
+  device_preconditioner_data.preconditioner->get_vector().import(
+    matrix_diagonal, VectorOperation::insert);
+  device_preconditioner_data.constraints.copy_from(constraints);
+  precondition_chebyshev_device.initialize(mf, device_preconditioner_data);
+
+  precondition_chebyshev_device.vmult(out_dev, in_dev);
+
+  LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> out_host(
+    owned_set, MPI_COMM_WORLD);
+  LinearAlgebra::ReadWriteVector<Number> rw_out(owned_set);
+  rw_out.import(out_dev, VectorOperation::insert);
+  out_host.import(rw_out, VectorOperation::insert);
+
+  precondition_chebyshev_host.vmult(ref, in_host);
+  out_host -= ref;
+
+  const double diff_norm = out_host.linfty_norm();
+
+  deallog << "Norm of difference: " << diff_norm << std::endl << std::endl;
+}
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  init_cuda(true);
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      deallog.push("2d");
+      test<2, 1>();
+      deallog.pop();
+
+      deallog.push("3d");
+      test<3, 1>();
+      test<3, 2>();
+      deallog.pop();
+    }
+  else
+    {
+      test<2, 1>();
+      test<3, 1>();
+      test<3, 2>();
+    }
+}
diff --git a/tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=1.output b/tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=1.output
new file mode 100644 (file)
index 0000000..3b90478
--- /dev/null
@@ -0,0 +1,10 @@
+
+DEAL:0:2d::Testing FE_Q<2>(1)
+DEAL:0:2d::Norm of difference: 0
+DEAL:0:2d::
+DEAL:0:3d::Testing FE_Q<3>(1)
+DEAL:0:3d::Norm of difference: 0
+DEAL:0:3d::
+DEAL:0:3d::Testing FE_Q<3>(2)
+DEAL:0:3d::Norm of difference: 0
+DEAL:0:3d::
diff --git a/tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=2.output b/tests/cuda/precondition_03.with_trilinos=true.with_mpi=true.with_p4est=true.mpirun=2.output
new file mode 100644 (file)
index 0000000..3b90478
--- /dev/null
@@ -0,0 +1,10 @@
+
+DEAL:0:2d::Testing FE_Q<2>(1)
+DEAL:0:2d::Norm of difference: 0
+DEAL:0:2d::
+DEAL:0:3d::Testing FE_Q<3>(1)
+DEAL:0:3d::Norm of difference: 0
+DEAL:0:3d::
+DEAL:0:3d::Testing FE_Q<3>(2)
+DEAL:0:3d::Norm of difference: 0
+DEAL:0:3d::
