https://gitweb.dealii.org/ - dealii.git/commitdiff
TensorProductMatrixSymmetricSumCollection: fix vectorization 14569/head
author Peter Munch <peterrmuench@gmail.com>
Mon, 12 Dec 2022 07:10:35 +0000 (08:10 +0100)
committer Peter Munch <peterrmuench@gmail.com>
Sat, 27 May 2023 09:17:02 +0000 (11:17 +0200)
include/deal.II/lac/tensor_product_matrix.h
tests/lac/tensor_product_matrix_08.cc
tests/lac/tensor_product_matrix_08.with_lapack=true.output
tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx [new file with mode: 0644]
tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx512 [new file with mode: 0644]
tests/lac/tensor_product_matrix_08.with_lapack=true.output.sse [new file with mode: 0644]
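What the change does: TensorProductMatrixSymmetricSumCollection compresses identical 1D mass/derivative matrix pairs through a std::map keyed by the matrices themselves. For vectorized Number types a cell batch may leave trailing SIMD lanes unfilled (all zero), and the old comparator folded those zero lanes into the comparison, so pairs that agree on all populated lanes could fail to compress. The fix attaches a per-lane activity mask (a std::bitset) to every cached matrix pair, compares only lanes that are active in both operands, and merges newly activated lanes into an existing cache entry during insert(). A minimal sketch of the lane-activity mask, mirroring the loop added to insert() below (the helper name compute_lane_mask is hypothetical and the header locations are assumed; the VectorizedArrayTrait interface is the one used throughout the diff):

  #include <deal.II/base/table.h>
  #include <deal.II/base/vectorization.h>

  #include <bitset>
  #include <cmath>

  using namespace dealii;

  // A lane v is "active" if it carries any non-zero entry of the mass
  // matrix M or the derivative matrix K.
  template <typename Number>
  std::bitset<internal::VectorizedArrayTrait<Number>::width()>
  compute_lane_mask(const Table<2, Number> &M, const Table<2, Number> &K)
  {
    using Trait = internal::VectorizedArrayTrait<Number>;

    std::bitset<Trait::width()> mask;

    for (unsigned int v = 0; v < Trait::width(); ++v)
      {
        typename Trait::value_type sum = 0.0;

        for (unsigned int i = 0; i < M.size(0); ++i)
          for (unsigned int j = 0; j < M.size(1); ++j)
            {
              sum += std::abs(Trait::get(M[i][j], v));
              sum += std::abs(Trait::get(K[i][j], v));
            }

        mask[v] = (sum != 0.0);
      }

    return mask;
  }

Two cached entries then compare equal when FloatingPointComparator finds them equal on every lane that is active in both masks; inactive lanes no longer spoil the match.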

index dcb542fffbee4707dac0fd87952fc58f8a2f1f8c..20550c3f00bb69c6725420bedcfaa3e0ce1fe141 100644
@@ -281,12 +281,15 @@ namespace internal
     template <typename Number>
     struct MatrixPairComparator
     {
-      using MatrixPairType = std::pair<Table<2, Number>, Table<2, Number>>;
       using VectorizedArrayTrait =
         dealii::internal::VectorizedArrayTrait<Number>;
       using ScalarNumber = typename VectorizedArrayTrait::value_type;
       static constexpr std::size_t width = VectorizedArrayTrait::width();
 
+      using MatrixPairType =
+        std::pair<std::bitset<width>,
+                  std::pair<Table<2, Number>, Table<2, Number>>>;
+
       MatrixPairComparator()
         : eps(std::sqrt(std::numeric_limits<ScalarNumber>::epsilon()))
       {}
@@ -294,29 +297,15 @@ namespace internal
       bool
       operator()(const MatrixPairType &left, const MatrixPairType &right) const
       {
-        const auto &M_0 = left.first;
-        const auto &K_0 = left.second;
-        const auto &M_1 = right.first;
-        const auto &K_1 = right.second;
+        const auto &M_0 = left.second.first;
+        const auto &K_0 = left.second.second;
+        const auto &M_1 = right.second.first;
+        const auto &K_1 = right.second.second;
 
         std::bitset<width> mask;
 
         for (unsigned int v = 0; v < width; ++v)
-          {
-            ScalarNumber a = 0.0;
-            ScalarNumber b = 0.0;
-
-            for (unsigned int i = 0; i < M_0.size(0); ++i)
-              for (unsigned int j = 0; j < M_0.size(1); ++j)
-                {
-                  a += std::abs(VectorizedArrayTrait::get(M_0[i][j], v));
-                  a += std::abs(VectorizedArrayTrait::get(K_0[i][j], v));
-                  b += std::abs(VectorizedArrayTrait::get(M_1[i][j], v));
-                  b += std::abs(VectorizedArrayTrait::get(K_1[i][j], v));
-                }
-
-            mask[v] = (a != 0.0) && (b != 0.0);
-          }
+          mask[v] = left.first[v] && right.first[v];
 
         const FloatingPointComparator<Number> comparator(
           eps, false /*use relative tolerance*/, mask);
@@ -367,6 +356,10 @@ class TensorProductMatrixSymmetricSumCollection
 {
   using MatrixPairType = std::pair<Table<2, Number>, Table<2, Number>>;
 
+  using MatrixPairTypeWithMask = std::pair<
+    std::bitset<dealii::internal::VectorizedArrayTrait<Number>::width()>,
+    MatrixPairType>;
+
 public:
   /**
    * Struct to configure TensorProductMatrixSymmetricSumCollection.
@@ -467,7 +460,7 @@ private:
    * matrices. The memory is freed during finalize().
    */
   std::map<
-    MatrixPairType,
+    MatrixPairTypeWithMask,
     unsigned int,
     internal::TensorProductMatrixSymmetricSum::MatrixPairComparator<Number>>
     cache;
@@ -1157,18 +1150,85 @@ TensorProductMatrixSymmetricSumCollection<dim, Number, n_rows_1d>::insert(
 
   for (unsigned int d = 0; d < dim; ++d)
     {
-      const MatrixPairType matrix(Ms[d], Ks[d]);
-
       if (compress_matrices == false)
         {
+          const MatrixPairType matrix(Ms[d], Ks[d]);
           mass_and_derivative_matrices[index * dim + d] = matrix;
         }
       else
         {
+          using VectorizedArrayTrait =
+            dealii::internal::VectorizedArrayTrait<Number>;
+
+          std::bitset<VectorizedArrayTrait::width()> mask;
+
+          for (unsigned int v = 0; v < VectorizedArrayTrait::width(); ++v)
+            {
+              typename VectorizedArrayTrait::value_type a = 0.0;
+
+              for (unsigned int i = 0; i < Ms[d].size(0); ++i)
+                for (unsigned int j = 0; j < Ms[d].size(1); ++j)
+                  {
+                    a += std::abs(VectorizedArrayTrait::get(Ms[d][i][j], v));
+                    a += std::abs(VectorizedArrayTrait::get(Ks[d][i][j], v));
+                  }
+
+              mask[v] = (a != 0.0);
+            }
+
+          const MatrixPairTypeWithMask matrix{mask, {Ms[d], Ks[d]}};
+
           const auto ptr = cache.find(matrix);
 
           if (ptr != cache.end())
-            indices[index * dim + d] = ptr->second;
+            {
+              const auto ptr_index     = ptr->second;
+              indices[index * dim + d] = ptr_index;
+
+              if ([&]() {
+                    for (unsigned int v = 0; v < VectorizedArrayTrait::width();
+                         ++v)
+                      if ((mask[v] == true) && (ptr->first.first[v] == false))
+                        return false;
+
+                    return true;
+                  }())
+                {
+                  // nothing to do
+                }
+              else
+                {
+                  auto mask_new = ptr->first.first;
+                  auto Ms_new   = ptr->first.second.first;
+                  auto Ks_new   = ptr->first.second.second;
+
+                  for (unsigned int v = 0; v < VectorizedArrayTrait::width();
+                       ++v)
+                    if (mask_new[v] == false && mask[v] == true)
+                      {
+                        mask_new[v] = true;
+
+                        for (unsigned int i = 0; i < Ms_new.size(0); ++i)
+                          for (unsigned int j = 0; j < Ms_new.size(1); ++j)
+                            {
+                              VectorizedArrayTrait::get(Ms_new[i][j], v) =
+                                VectorizedArrayTrait::get(Ms[d][i][j], v);
+                              VectorizedArrayTrait::get(Ks_new[i][j], v) =
+                                VectorizedArrayTrait::get(Ks[d][i][j], v);
+                            }
+                      }
+
+                  cache.erase(ptr);
+
+                  const MatrixPairTypeWithMask entry_new{mask_new,
+                                                         {Ms_new, Ks_new}};
+
+                  const auto ptr_ = cache.find(entry_new);
+                  AssertThrow(ptr_ == cache.end(), ExcNotImplemented());
+
+                  cache[entry_new] = ptr_index;
+                }
+            }
           else
             {
               const auto size          = cache.size();
@@ -1260,7 +1320,7 @@ TensorProductMatrixSymmetricSumCollection<dim, Number, n_rows_1d>::finalize()
       std::map<unsigned int, MatrixPairType> inverted_cache;
 
       for (const auto &i : cache)
-        inverted_cache[i.second] = i.first;
+        inverted_cache[i.second] = i.first.second;
 
       for (unsigned int i = 0; i < indices.size(); ++i)
         {
@@ -1296,7 +1356,7 @@ TensorProductMatrixSymmetricSumCollection<dim, Number, n_rows_1d>::finalize()
 
       for (const auto &i : cache)
         {
-          const auto &M = i.first.first;
+          const auto &M = i.first.second.first;
 
           this->vector_ptr[i.second + 1] = M.n_rows();
           this->matrix_ptr[i.second + 1] = M.n_rows() * M.n_cols();
@@ -1314,7 +1374,7 @@ TensorProductMatrixSymmetricSumCollection<dim, Number, n_rows_1d>::finalize()
       this->eigenvalues.resize_fast(vector_ptr.back());
 
       for (const auto &i : cache)
-        store(i.second, i.first);
+        store(i.second, i.first.second);
 
       cache.clear();
     }
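The cache-update path above is the subtle part: when an incoming matrix pair matches an existing entry but populates lanes the cached entry does not, the cached tables are widened lane by lane and the entry is re-keyed under the enlarged mask (the old key is erased first, since the mask is part of the map key). In isolation, the lane merge reads like this (a sketch with the same headers and assumptions as the first snippet; merge_lanes is a hypothetical name):

  // Copy lanes that are active in the incoming pair but not yet in the
  // cached pair; lanes already active in the cache are left untouched.
  template <typename Number>
  void
  merge_lanes(
    Table<2, Number> &M_cached,
    Table<2, Number> &K_cached,
    std::bitset<internal::VectorizedArrayTrait<Number>::width()> &mask_cached,
    const Table<2, Number> &M_new,
    const Table<2, Number> &K_new,
    const std::bitset<internal::VectorizedArrayTrait<Number>::width()>
      &mask_new)
  {
    using Trait = internal::VectorizedArrayTrait<Number>;

    for (unsigned int v = 0; v < Trait::width(); ++v)
      if (mask_cached[v] == false && mask_new[v] == true)
        {
          mask_cached[v] = true;

          for (unsigned int i = 0; i < M_cached.size(0); ++i)
            for (unsigned int j = 0; j < M_cached.size(1); ++j)
              {
                Trait::get(M_cached[i][j], v) = Trait::get(M_new[i][j], v);
                Trait::get(K_cached[i][j], v) = Trait::get(K_new[i][j], v);
              }
        }
  }

The AssertThrow after the merge covers an edge case the patch does not handle yet: if the re-keyed entry collided with another existing key, the code would bail out with ExcNotImplemented() instead of merging the two entries.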
index bbe162923af4254bd7ebc05b01e7877dcee039cc..9ee0b7b415b7551297672a8874ffd19c77599608 100644
@@ -40,6 +40,10 @@ template <int dim, typename Number>
 void
 do_test_mesh(const Mapping<dim> &mapping, const Triangulation<dim> &tria)
 {
+  using VectorizedArrayTrait = dealii::internal::VectorizedArrayTrait<Number>;
+  using ScalarNumber         = typename VectorizedArrayTrait::value_type;
+  static constexpr std::size_t width = VectorizedArrayTrait::width();
+
   using FDM = TensorProductMatrixSymmetricSumCollection<dim, Number>;
 
   const unsigned int fe_degree = 3;
@@ -58,38 +62,62 @@ do_test_mesh(const Mapping<dim> &mapping, const Triangulation<dim> &tria)
   FDM collection_2(typename FDM::AdditionalData(true, true));
   FDM collection_3(typename FDM::AdditionalData(false, true));
 
-  collection_0.reserve(tria.n_active_cells());
-  collection_1.reserve(tria.n_active_cells());
-  collection_2.reserve(tria.n_active_cells());
-  collection_3.reserve(tria.n_active_cells());
+  const auto n_cell_batches = (tria.n_active_cells() + width - 1) / width;
+
+  collection_0.reserve(n_cell_batches);
+  collection_1.reserve(n_cell_batches);
+  collection_2.reserve(n_cell_batches);
+  collection_3.reserve(n_cell_batches);
+
+  auto cell = tria.begin_active();
 
-  for (const auto &cell : tria.active_cell_iterators())
+  for (unsigned int counter = 0; counter < n_cell_batches; ++counter)
     {
-      const auto &patch_extent =
-        harmonic_patch_extent[cell->active_cell_index()];
-
-      const auto M_and_K = TensorProductMatrixCreator::
-        create_laplace_tensor_product_matrix<dim, Number>(
-          cell,
-          std::set<unsigned int>{0},
-          std::set<unsigned int>{},
-          fe_1D,
-          quadrature_1D,
-          patch_extent,
-          n_overlap);
-
-      collection_0.insert(cell->active_cell_index(),
-                          M_and_K.first,
-                          M_and_K.second);
-      collection_1.insert(cell->active_cell_index(),
-                          M_and_K.first,
-                          M_and_K.second);
-      collection_2.insert(cell->active_cell_index(),
-                          M_and_K.first,
-                          M_and_K.second);
-      collection_3.insert(cell->active_cell_index(),
-                          M_and_K.first,
-                          M_and_K.second);
+      std::array<Table<2, Number>, dim> Ms;
+      std::array<Table<2, Number>, dim> Ks;
+
+      for (unsigned int v = 0; (v < width) && (cell != tria.end()); ++v, ++cell)
+        {
+          const auto &patch_extent =
+            harmonic_patch_extent[cell->active_cell_index()];
+
+          const auto M_and_K_scalar = TensorProductMatrixCreator::
+            create_laplace_tensor_product_matrix<dim, ScalarNumber>(
+              cell,
+              std::set<unsigned int>{0},
+              std::set<unsigned int>{},
+              fe_1D,
+              quadrature_1D,
+              patch_extent,
+              n_overlap);
+
+          const auto Ms_scalar = M_and_K_scalar.first;
+          const auto Ks_scalar = M_and_K_scalar.second;
+
+          for (unsigned int d = 0; d < dim; ++d)
+            {
+              if (Ms[d].size(0) == 0 || Ms[d].size(1) == 0)
+                {
+                  Ms[d].reinit(Ms_scalar[d].size(0), Ms_scalar[d].size(1));
+                  Ks[d].reinit(Ks_scalar[d].size(0), Ks_scalar[d].size(1));
+                }
+
+              for (unsigned int i = 0; i < Ms_scalar[d].size(0); ++i)
+                for (unsigned int j = 0; j < Ms_scalar[d].size(0); ++j)
+                  VectorizedArrayTrait::get(Ms[d][i][j], v) =
+                    Ms_scalar[d][i][j];
+
+              for (unsigned int i = 0; i < Ks_scalar[d].size(0); ++i)
+                for (unsigned int j = 0; j < Ks_scalar[d].size(0); ++j)
+                  VectorizedArrayTrait::get(Ks[d][i][j], v) =
+                    Ks_scalar[d][i][j];
+            }
+        }
+
+      collection_0.insert(counter, Ms, Ks);
+      collection_1.insert(counter, Ms, Ks);
+      collection_2.insert(counter, Ms, Ks);
+      collection_3.insert(counter, Ms, Ks);
     }
 
   collection_0.finalize();
@@ -100,15 +128,15 @@ do_test_mesh(const Mapping<dim> &mapping, const Triangulation<dim> &tria)
   deallog << "Storage sizes: " << collection_0.storage_size() << " "
           << collection_1.storage_size() << std::endl;
 
-  Vector<Number>        src(fe.n_dofs_per_cell());
-  Vector<Number>        dst(fe.n_dofs_per_cell());
+  AlignedVector<Number> src(fe.n_dofs_per_cell());
+  AlignedVector<Number> dst(fe.n_dofs_per_cell());
   AlignedVector<Number> tmp;
-  FullMatrix<Number>    matrix_0(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
-  FullMatrix<Number>    matrix_1(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
-  FullMatrix<Number>    matrix_2(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
-  FullMatrix<Number>    matrix_3(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
+  Table<2, Number>      matrix_0(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
+  Table<2, Number>      matrix_1(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
+  Table<2, Number>      matrix_2(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
+  Table<2, Number>      matrix_3(fe.n_dofs_per_cell(), fe.n_dofs_per_cell());
 
-  for (unsigned int cell = 0; cell < tria.n_active_cells(); ++cell)
+  for (unsigned int cell = 0; cell < n_cell_batches; ++cell)
     {
       for (unsigned int i = 0; i < fe.n_dofs_per_cell(); ++i)
         {
@@ -116,29 +144,29 @@ do_test_mesh(const Mapping<dim> &mapping, const Triangulation<dim> &tria)
             src[j] = i == j;
 
           collection_0.apply_inverse(cell,
-                                     make_array_view(dst),
-                                     make_array_view(src),
+                                     make_array_view(dst.begin(), dst.end()),
+                                     make_array_view(src.begin(), src.end()),
                                      tmp);
           for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
             matrix_0[j][i] = dst[j];
 
           collection_1.apply_inverse(cell,
-                                     make_array_view(dst),
-                                     make_array_view(src),
+                                     make_array_view(dst.begin(), dst.end()),
+                                     make_array_view(src.begin(), src.end()),
                                      tmp);
           for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
             matrix_1[j][i] = dst[j];
 
           collection_2.apply_inverse(cell,
-                                     make_array_view(dst),
-                                     make_array_view(src),
+                                     make_array_view(dst.begin(), dst.end()),
+                                     make_array_view(src.begin(), src.end()),
                                      tmp);
           for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
             matrix_2[j][i] = dst[j];
 
           collection_3.apply_inverse(cell,
-                                     make_array_view(dst),
-                                     make_array_view(src),
+                                     make_array_view(dst.begin(), dst.end()),
+                                     make_array_view(src.begin(), src.end()),
                                      tmp);
           for (unsigned int j = 0; j < fe.n_dofs_per_cell(); ++j)
             matrix_3[j][i] = dst[j];
@@ -244,8 +272,56 @@ main()
 {
   initlog();
 
-  do_test<double>();
-  do_test<float>();
+  {
+    deallog.push("v=0");
+    do_test<double>();
+    deallog << std::endl;
+    do_test<float>();
+    deallog.pop();
+    deallog << std::endl;
+  }
+
+  {
+    deallog.push("v=64");
+    do_test<VectorizedArray<double, 1>>();
+    deallog << std::endl;
+    do_test<VectorizedArray<float, 1>>();
+    deallog.pop();
+    deallog << std::endl;
+  }
+
+#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 128
+  {
+    deallog.push("v=128");
+    do_test<VectorizedArray<double, 2>>();
+    deallog << std::endl;
+    do_test<VectorizedArray<float, 4>>();
+    deallog.pop();
+    deallog << std::endl;
+  }
+#endif
+
+#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 256
+  {
+    deallog.push("v=256");
+    do_test<VectorizedArray<double, 4>>();
+    deallog << std::endl;
+    do_test<VectorizedArray<float, 8>>();
+    deallog.pop();
+    deallog << std::endl;
+  }
+#endif
+
+#if DEAL_II_VECTORIZATION_WIDTH_IN_BITS >= 512
+  {
+    deallog.push("v=512");
+    do_test<VectorizedArray<double, 8>>();
+    deallog << std::endl;
+    do_test<VectorizedArray<float, 16>>();
+    deallog.pop();
+    deallog << std::endl;
+  }
+#endif
 
   return 0;
 }
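On the test side, the loop over active cells becomes a loop over cell batches of SIMD width: the scalar matrices returned by create_laplace_tensor_product_matrix() are scattered into the lanes of vectorized Tables, and a single insert() is issued per batch. Condensed, the scatter step looks as follows (a sketch with the same headers as the first snippet; scatter_into_lane is a hypothetical helper, not part of the patch):

  // Write the scalar matrix src into lane v of the vectorized matrix dst,
  // sizing dst on first use.
  template <typename Number>
  void
  scatter_into_lane(
    Table<2, Number> &dst,
    const Table<2,
                typename internal::VectorizedArrayTrait<Number>::value_type>
      &src,
    const unsigned int v)
  {
    using Trait = internal::VectorizedArrayTrait<Number>;

    if (dst.size(0) == 0 || dst.size(1) == 0)
      dst.reinit(src.size(0), src.size(1));

    for (unsigned int i = 0; i < src.size(0); ++i)
      for (unsigned int j = 0; j < src.size(1); ++j)
        Trait::get(dst[i][j], v) = src[i][j];
  }

A batch that does not divide the number of active cells evenly simply leaves its trailing lanes untouched, which is exactly the situation the new lane mask handles; the additional runs for explicit VectorizedArray widths, guarded by DEAL_II_VECTORIZATION_WIDTH_IN_BITS, produce the extra architecture-specific output variants added below.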
index d7f63ad173e3e87ec5634ce0a77c235c3993ceaf..e99642232b1ffb2daf786f2da4ba01ab727b6bc8 100644
@@ -1,31 +1,65 @@
 
-DEAL::Storage sizes: 3 128
-DEAL::OK!
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
 DEAL::
-DEAL::Storage sizes: 1 128
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 6 256
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 2 256
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 128 128
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 3 128
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 1 128
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 6 256
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 2 256
-DEAL::OK!
-DEAL::
-DEAL::Storage sizes: 128 128
-DEAL::OK!
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
 DEAL::
diff --git a/tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx b/tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx
new file mode 100644
index 0000000..89f5ac9
--- /dev/null
@@ -0,0 +1,129 @@
+
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL::
+DEAL:v=128::Storage sizes: 5 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 64 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 5 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 32 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL::
+DEAL:v=256::Storage sizes: 5 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 1 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 6 64
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 2 64
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 32 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 5 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 1 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 5 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 2 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 16 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL::
diff --git a/tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx512 b/tests/lac/tensor_product_matrix_08.with_lapack=true.output.avx512
new file mode 100644
index 0000000..37508fd
--- /dev/null
@@ -0,0 +1,161 @@
+
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL::
+DEAL:v=128::Storage sizes: 5 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 64 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 5 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 32 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL::
+DEAL:v=256::Storage sizes: 5 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 1 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 6 64
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 2 64
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 32 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 5 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 1 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 5 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 2 32
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL:v=256::Storage sizes: 16 16
+DEAL:v=256::OK!
+DEAL:v=256::
+DEAL::
+DEAL:v=512::Storage sizes: 5 16
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 1 16
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 5 32
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 2 32
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 16 16
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 4 8
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 1 8
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 5 16
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 2 16
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL:v=512::Storage sizes: 8 8
+DEAL:v=512::OK!
+DEAL:v=512::
+DEAL::
diff --git a/tests/lac/tensor_product_matrix_08.with_lapack=true.output.sse b/tests/lac/tensor_product_matrix_08.with_lapack=true.output.sse
new file mode 100644
index 0000000..64ecbdf
--- /dev/null
@@ -0,0 +1,97 @@
+
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 3 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 1 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 6 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 2 256
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL:v=0::Storage sizes: 128 128
+DEAL:v=0::OK!
+DEAL:v=0::
+DEAL::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 3 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 1 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 6 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 2 256
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL:v=64::Storage sizes: 128 128
+DEAL:v=64::OK!
+DEAL:v=64::
+DEAL::
+DEAL:v=128::Storage sizes: 5 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 128
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 64 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 5 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 1 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 6 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 2 64
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL:v=128::Storage sizes: 32 32
+DEAL:v=128::OK!
+DEAL:v=128::
+DEAL::
