https://gitweb.dealii.org/ - dealii.git/commitdiff
ScaLAPACKMatrix::copy_to(LAPACKFullMatrix) and ScaLAPACKMatrix::copy_from(LAPACKFullM... 6241/head
author Benjamin Brands <benjamin.brands@fau.de>
Fri, 13 Apr 2018 14:50:33 +0000 (16:50 +0200)
committer BenBrands <benjamin.brands@fau.de>
Tue, 31 Jul 2018 20:14:55 +0000 (22:14 +0200)
include/deal.II/lac/scalapack.h
include/deal.II/lac/scalapack.templates.h
source/lac/scalapack.cc
tests/scalapack/scalapack_17_a.cc [new file with mode: 0644]
tests/scalapack/scalapack_17_a.mpirun=1.output [new file with mode: 0644]
tests/scalapack/scalapack_17_a.mpirun=11.output [new file with mode: 0644]
tests/scalapack/scalapack_17_a.mpirun=9.output [new file with mode: 0644]
tests/scalapack/scalapack_17_b.cc [new file with mode: 0644]
tests/scalapack/scalapack_17_b.mpirun=1.output [new file with mode: 0644]
tests/scalapack/scalapack_17_b.mpirun=11.output [new file with mode: 0644]
tests/scalapack/scalapack_17_b.mpirun=9.output [new file with mode: 0644]

index 721dad3f80cc5b95e6a79d993c3a9e8a72b24d56..3198d1a34ffcccc868c18490ba101efd4e93c41b 100644 (file)
@@ -26,6 +26,7 @@
 #  include <deal.II/base/thread_management.h>
 
 #  include <deal.II/lac/full_matrix.h>
+#  include <deal.II/lac/lapack_full_matrix.h>
 #  include <deal.II/lac/lapack_support.h>
 
 #  include <mpi.h>
@@ -190,6 +191,19 @@ public:
   ScaLAPACKMatrix<NumberType> &
   operator=(const FullMatrix<NumberType> &);
 
+  /**
+   * Copies the content of the locally owned @p matrix to the distributed matrix.
+   * The distributed matrix and @p matrix on process @p rank must have matching dimensions.
+   *
+   * For all processes except the process with rank @p rank, the serial @p matrix is not referenced.
+   * The user has to ensure that all processes call this with identical @p rank.
+   * The @p rank refers to a process of the MPI communicator used to create the process grid
+   * of the distributed matrix.
+   */
+  void
+  copy_from(const LAPACKFullMatrix<NumberType> &matrix,
+            const unsigned int                  rank);
+
   /**
    * Copy the contents of the distributed matrix into @p matrix.
    *
@@ -199,6 +213,18 @@ public:
   void
   copy_to(FullMatrix<NumberType> &matrix) const;
 
+  /**
+   * Copies the content of the distributed matrix into the locally replicated @p matrix
+   * on the process with rank @p rank. For all processes except @p rank, @p matrix is not referenced.
+   * The distributed matrix and @p matrix on the process @p rank must have matching dimensions.
+   *
+   * The user has to ensure that all processes call this with identical @p rank.
+   * The @p rank refers to a process of the MPI communicator used to create the process grid
+   * of the distributed matrix.
+   */
+  void
+  copy_to(LAPACKFullMatrix<NumberType> &matrix, const unsigned int rank) const;
+
   /**
    * Copy the contents of the distributed matrix into a differently distributed matrix @p dest.
    * The function also works for matrices with different process grids
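The two member functions added above are symmetric: copy_from() scatters a serial matrix that lives on a single process into the block-cyclic distribution, and copy_to() gathers the distributed matrix back onto a single process. A minimal usage sketch follows; the matrix size, the block size, and the choice rank = 0 are illustrative assumptions, not part of this commit.

#include <deal.II/base/process_grid.h>
#include <deal.II/lac/lapack_full_matrix.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

void example(const MPI_Comm mpi_communicator)
{
  using namespace dealii;

  const unsigned int size = 100, block_size = 32;

  // 2d process grid and a block-cyclically distributed matrix on it
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    mpi_communicator, size, size, block_size, block_size);
  ScaLAPACKMatrix<double> distributed(size, size, grid, block_size, block_size);

  // all processes must pass the same rank; the serial matrix is only
  // referenced on that process
  const unsigned int rank = 0;

  LAPACKFullMatrix<double> serial;
  if (Utilities::MPI::this_mpi_process(mpi_communicator) == rank)
    serial.reinit(size);

  distributed.copy_from(serial, rank); // scatter: serial -> distributed
  distributed.copy_to(serial, rank);   // gather:  distributed -> serial
}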
index 86db46b762690a1362f6bfdbfc0c39216f627cf6..01a9645a3b49c40c927f8cec42e31ac459334bc1 100644 (file)
@@ -580,29 +580,29 @@ extern "C"
    * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1)
    */
   void
-  pdlacpy_(const char *uplo,
-           const int * m,
-           const int * n,
-           double *    A,
-           const int * ia,
-           const int * ja,
-           int *       desca,
-           double *    B,
-           const int * ib,
-           const int * jb,
-           int *       descb);
+  pdlacpy_(const char *  uplo,
+           const int *   m,
+           const int *   n,
+           const double *A,
+           const int *   ia,
+           const int *   ja,
+           const int *   desca,
+           double *      B,
+           const int *   ib,
+           const int *   jb,
+           const int *   descb);
   void
-  pslacpy_(const char *uplo,
-           const int * m,
-           const int * n,
-           float *     A,
-           const int * ia,
-           const int * ja,
-           int *       desca,
-           float *     B,
-           const int * ib,
-           const int * jb,
-           int *       descb);
+  pslacpy_(const char * uplo,
+           const int *  m,
+           const int *  n,
+           const float *A,
+           const int *  ia,
+           const int *  ja,
+           const int *  desca,
+           float *      B,
+           const int *  ib,
+           const int *  jb,
+           const int *  descb);
 
   /**
    * Copies the content of a general rectangular distributed matrix @p A to another distributed matrix @p B
@@ -1558,46 +1558,46 @@ inline void
 placpy(const char * /*uplo*/,
        const int * /*m*/,
        const int * /*n*/,
-       number * /*A*/,
+       const number * /*A*/,
        const int * /*ia*/,
        const int * /*ja*/,
-       int * /*desca*/,
+       const int * /*desca*/,
        number * /*B*/,
        const int * /*ib*/,
        const int * /*jb*/,
-       int * /*descb*/)
+       const int * /*descb*/)
 {
   Assert(false, dealii::ExcNotImplemented());
 }
 
 inline void
-placpy(const char *uplo,
-       const int * m,
-       const int * n,
-       double *    A,
-       const int * ia,
-       const int * ja,
-       int *       desca,
-       double *    B,
-       const int * ib,
-       const int * jb,
-       int *       descb)
+placpy(const char *  uplo,
+       const int *   m,
+       const int *   n,
+       const double *A,
+       const int *   ia,
+       const int *   ja,
+       const int *   desca,
+       double *      B,
+       const int *   ib,
+       const int *   jb,
+       const int *   descb)
 {
   pdlacpy_(uplo, m, n, A, ia, ja, desca, B, ib, jb, descb);
 }
 
 inline void
-placpy(const char *uplo,
-       const int * m,
-       const int * n,
-       float *     A,
-       const int * ia,
-       const int * ja,
-       int *       desca,
-       float *     B,
-       const int * ib,
-       const int * jb,
-       int *       descb)
+placpy(const char * uplo,
+       const int *  m,
+       const int *  n,
+       const float *A,
+       const int *  ia,
+       const int *  ja,
+       const int *  desca,
+       float *      B,
+       const int *  ib,
+       const int *  jb,
+       const int *  descb)
 {
   pslacpy_(uplo, m, n, A, ia, ja, desca, B, ib, jb, descb);
 }
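The only change to these wrappers is the const-qualification of the source operand and of the descriptors. This matters for the new code paths, which hand the routines pointers obtained from const objects; a tiny illustration with hypothetical function names (not deal.II API):

// Hypothetical prototypes, for illustration only.
void copy_old(double *src);       // pre-change: source operand is mutable
void copy_new(const double *src); // post-change: source operand is read-only

void forward(const double *data)
{
  // copy_old(data); // would not compile: cannot convert const double* to double*
  copy_new(data);    // fine: const-correct prototype accepts read-only data
}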
index 9baf808aa7e891df642b3eefa04f503e527457a8..6a41d47a2226022c9a5645a02f8004a48253808c 100644 (file)
@@ -263,6 +263,138 @@ ScaLAPACKMatrix<NumberType>::operator=(const FullMatrix<NumberType> &matrix)
 
 
 
+template <typename NumberType>
+void
+ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
+                                       const unsigned int                  rank)
+{
+  if (n_rows * n_columns == 0)
+    return;
+
+  const unsigned int this_mpi_process(
+    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
+
+#  ifdef DEBUG
+  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
+         ExcMessage("All processes have to call routine with identical rank"));
+  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
+         ExcMessage("All processes have to call routine with identical rank"));
+#  endif
+
+  // root process has to be active in the grid of A
+  if (this_mpi_process == rank)
+    {
+      Assert(grid->mpi_process_is_active, ExcInternalError());
+      Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
+      Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
+    }
+  // Create 1x1 grid for matrix B.
+  // The underlying grid for matrix B only contains the process #rank.
+  // This grid will be used to copy the serial matrix B to the distributed
+  // matrix using the ScaLAPACK routine pgemr2d.
+  MPI_Group group_A;
+  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
+  const int              n = 1;
+  const std::vector<int> ranks(n, rank);
+  MPI_Group              group_B;
+  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
+  MPI_Comm communicator_B;
+  MPI_Comm_create_group(this->grid->mpi_communicator,
+                        group_B,
+                        0,
+                        &communicator_B);
+  int n_proc_rows_B = 1, n_proc_cols_B = 1;
+  int this_process_row_B = -1, this_process_column_B = -1;
+  int blacs_context_B = -1;
+  if (MPI_COMM_NULL != communicator_B)
+    {
+      // Initialize BLACS context from the provided communicator
+      blacs_context_B   = Csys2blacs_handle(communicator_B);
+      const char *order = "Col";
+      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
+      Cblacs_gridinfo(blacs_context_B,
+                      &n_proc_rows_B,
+                      &n_proc_cols_B,
+                      &this_process_row_B,
+                      &this_process_column_B);
+      Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
+      // the active process of grid B has to be process #rank of the
+      // communicator attached to A
+      Assert(this_mpi_process == rank, ExcInternalError());
+    }
+  const bool mpi_process_is_active_B =
+    (this_process_row_B >= 0 && this_process_column_B >= 0);
+
+  // create descriptor for matrix B
+  std::vector<int> descriptor_B(9, -1);
+  const int        first_process_row_B = 0, first_process_col_B = 0;
+
+  if (mpi_process_is_active_B)
+    {
+      // Get local sizes:
+      int n_local_rows_B = numroc_(&n_rows,
+                                   &n_rows,
+                                   &this_process_row_B,
+                                   &first_process_row_B,
+                                   &n_proc_rows_B);
+      int n_local_cols_B = numroc_(&n_columns,
+                                   &n_columns,
+                                   &this_process_column_B,
+                                   &first_process_col_B,
+                                   &n_proc_cols_B);
+      Assert(n_local_rows_B == n_rows, ExcInternalError());
+      Assert(n_local_cols_B == n_columns, ExcInternalError());
+      (void)n_local_cols_B;
+
+      int lda  = std::max(1, n_local_rows_B);
+      int info = 0;
+      descinit_(descriptor_B.data(),
+                &n_rows,
+                &n_columns,
+                &n_rows,
+                &n_columns,
+                &first_process_row_B,
+                &first_process_col_B,
+                &blacs_context_B,
+                &lda,
+                &info);
+      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
+    }
+  if (this->grid->mpi_process_is_active)
+    {
+      const int   ii = 1;
+      NumberType *loc_vals_A =
+        this->values.size() > 0 ? &(this->values[0]) : nullptr;
+      const NumberType *loc_vals_B =
+        mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
+
+      // pgemr2d has to be called only for processes active on grid attached to
+      // matrix A
+      pgemr2d(&n_rows,
+              &n_columns,
+              loc_vals_B,
+              &ii,
+              &ii,
+              descriptor_B.data(),
+              loc_vals_A,
+              &ii,
+              &ii,
+              this->descriptor,
+              &(this->grid->blacs_context));
+    }
+  if (mpi_process_is_active_B)
+    Cblacs_gridexit(blacs_context_B);
+
+  MPI_Group_free(&group_A);
+  MPI_Group_free(&group_B);
+  if (MPI_COMM_NULL != communicator_B)
+    MPI_Comm_free(&communicator_B);
+
+  state = LAPACKSupport::matrix;
+}
+
+
+
 template <typename NumberType>
 unsigned int
 ScaLAPACKMatrix<NumberType>::global_row(const unsigned int loc_row) const
@@ -298,6 +430,137 @@ ScaLAPACKMatrix<NumberType>::global_column(const unsigned int loc_column) const
 
 
 
+template <typename NumberType>
+void
+ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
+                                     const unsigned int            rank) const
+{
+  if (n_rows * n_columns == 0)
+    return;
+
+  const unsigned int this_mpi_process(
+    Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
+
+#  ifdef DEBUG
+  Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
+         ExcMessage("All processes have to call routine with identical rank"));
+  Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
+         ExcMessage("All processes have to call routine with identical rank"));
+#  endif
+
+  if (this_mpi_process == rank)
+    {
+      // the process which gets the serial copy has to be in the process grid
+      Assert(this->grid->is_process_active(), ExcInternalError());
+      Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m()));
+      Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n()));
+    }
+
+  // Create 1x1 grid for matrix B.
+  // The underlying grid for matrix B only contains the process #rank.
+  // This grid will be used to copy the distributed matrix to the serial
+  // matrix B using the ScaLAPACK routine pgemr2d.
+  MPI_Group group_A;
+  MPI_Comm_group(this->grid->mpi_communicator, &group_A);
+  const int              n = 1;
+  const std::vector<int> ranks(n, rank);
+  MPI_Group              group_B;
+  MPI_Group_incl(group_A, n, ranks.data(), &group_B);
+  MPI_Comm communicator_B;
+  MPI_Comm_create_group(this->grid->mpi_communicator,
+                        group_B,
+                        0,
+                        &communicator_B);
+  int n_proc_rows_B = 1, n_proc_cols_B = 1;
+  int this_process_row_B = -1, this_process_column_B = -1;
+  int blacs_context_B = -1;
+  if (MPI_COMM_NULL != communicator_B)
+    {
+      // Initialize BLACS context from the provided communicator
+      blacs_context_B   = Csys2blacs_handle(communicator_B);
+      const char *order = "Col";
+      Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B);
+      Cblacs_gridinfo(blacs_context_B,
+                      &n_proc_rows_B,
+                      &n_proc_cols_B,
+                      &this_process_row_B,
+                      &this_process_column_B);
+      Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError());
+      // the active process of grid B has to be process #rank of the
+      // communicator attached to A
+      Assert(this_mpi_process == rank, ExcInternalError());
+    }
+  const bool mpi_process_is_active_B =
+    (this_process_row_B >= 0 && this_process_column_B >= 0);
+
+  // create descriptor for matrix B
+  std::vector<int> descriptor_B(9, -1);
+  const int        first_process_row_B = 0, first_process_col_B = 0;
+
+  if (mpi_process_is_active_B)
+    {
+      // Get local sizes:
+      int n_local_rows_B = numroc_(&n_rows,
+                                   &n_rows,
+                                   &this_process_row_B,
+                                   &first_process_row_B,
+                                   &n_proc_rows_B);
+      int n_local_cols_B = numroc_(&n_columns,
+                                   &n_columns,
+                                   &this_process_column_B,
+                                   &first_process_col_B,
+                                   &n_proc_cols_B);
+      Assert(n_local_rows_B == n_rows, ExcInternalError());
+      Assert(n_local_cols_B == n_columns, ExcInternalError());
+      (void)n_local_cols_B;
+
+      int lda  = std::max(1, n_local_rows_B);
+      int info = 0;
+      // fill descriptor for matrix B
+      descinit_(descriptor_B.data(),
+                &n_rows,
+                &n_columns,
+                &n_rows,
+                &n_columns,
+                &first_process_row_B,
+                &first_process_col_B,
+                &blacs_context_B,
+                &lda,
+                &info);
+      AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info));
+    }
+  // pgemr2d has to be called only for processes active on grid attached to
+  // matrix A
+  if (this->grid->mpi_process_is_active)
+    {
+      const int         ii = 1;
+      const NumberType *loc_vals_A =
+        this->values.size() > 0 ? &(this->values[0]) : nullptr;
+      NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr;
+
+      pgemr2d(&n_rows,
+              &n_columns,
+              loc_vals_A,
+              &ii,
+              &ii,
+              this->descriptor,
+              loc_vals_B,
+              &ii,
+              &ii,
+              descriptor_B.data(),
+              &(this->grid->blacs_context));
+    }
+  if (mpi_process_is_active_B)
+    Cblacs_gridexit(blacs_context_B);
+
+  MPI_Group_free(&group_A);
+  MPI_Group_free(&group_B);
+  if (MPI_COMM_NULL != communicator_B)
+    MPI_Comm_free(&communicator_B);
+}
+
+
+
 template <typename NumberType>
 void
 ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
@@ -419,7 +682,6 @@ ScaLAPACKMatrix<NumberType>::copy_to(
     (my_row_A >= 0 && my_row_A < n_grid_rows_A) &&
     (my_column_A >= 0 && my_column_A < n_grid_columns_A);
 
-
   int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B;
   Cblacs_gridinfo(B.grid->blacs_context,
                   &n_grid_rows_B,
diff --git a/tests/scalapack/scalapack_17_a.cc b/tests/scalapack/scalapack_17_a.cc
new file mode 100644 (file)
index 0000000..1a82e15
--- /dev/null
@@ -0,0 +1,130 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../lapack/create_matrix.h"
+#include "../tests.h"
+
+// test ScaLAPACKMatrix::copy_from(const LAPACKFullMatrix<NumberType> &, const unsigned int rank)
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <random>
+
+
+template <typename NumberType>
+void
+test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+  MPI_Comm           mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes(
+    Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(
+    Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout(std::cout, (this_mpi_process == 0));
+
+  const unsigned int size = 500;
+
+  // create 2d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
+    std::make_shared<Utilities::MPI::ProcessGrid>(
+      mpi_communicator, size, size, block_size_i, block_size_i);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix(
+    size, size, grid, block_size_i, block_size_i);
+
+  pcout << "2D grid matrix: dim=" << scalapack_matrix.m() << "x"
+        << scalapack_matrix.n() << ";  blocks=" << block_size_i << "x"
+        << block_size_i << ";  grid=" << grid->get_process_grid_rows() << "x"
+        << grid->get_process_grid_columns() << std::endl
+        << std::endl;
+
+  const bool       process_is_in_grid = grid->is_process_active();
+  const int        entry = process_is_in_grid ? this_mpi_process : -1;
+  std::vector<int> valid_process_ranks =
+    Utilities::MPI::gather(mpi_communicator, entry, 0);
+  int random_rank = 0;
+
+  if (this_mpi_process == 0)
+    {
+      std::vector<int> valid_ranks;
+
+      for (unsigned int i = 0; i < valid_process_ranks.size(); ++i)
+        if (valid_process_ranks[i] >= 0)
+          valid_ranks.push_back(valid_process_ranks[i]);
+
+      const int entry = random_value<unsigned int>(0, valid_ranks.size() - 1);
+      random_rank     = valid_ranks[entry];
+    }
+  MPI_Bcast(&random_rank, 1, MPI_INT, 0, mpi_communicator);
+  const unsigned int rank = random_rank;
+
+  // create LAPACKFullMatrix and fill it
+  LAPACKFullMatrix<NumberType> full;
+
+  if (this_mpi_process == rank)
+    {
+      full.reinit(size);
+
+      unsigned int count = 0;
+      for (unsigned int i = 0; i < size; ++i)
+        for (unsigned int j = 0; j < size; ++j, ++count)
+          full(i, j) = count;
+    }
+
+  scalapack_matrix.copy_from(full, rank);
+  FullMatrix<NumberType> comparison(size);
+  scalapack_matrix.copy_to(comparison);
+
+  if (this_mpi_process == rank)
+    {
+      NumberType diff = 0;
+      for (unsigned int i = 0; i < size; ++i)
+        for (unsigned int j = 0; j < size; ++j)
+          diff +=
+            (full(i, j) - comparison(i, j)) * (full(i, j) - comparison(i, j));
+      diff = sqrt(diff);
+
+      AssertThrow(diff < 1e-12, ExcInternalError());
+    }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, numbers::invalid_unsigned_int);
+
+  const std::vector<unsigned int> blocks_i = {{16, 32, 64}};
+  const std::vector<unsigned int> blocks_j = {{16, 32, 64}};
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<float>(s, b);
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<double>(s, b);
+}
diff --git a/tests/scalapack/scalapack_17_a.mpirun=1.output b/tests/scalapack/scalapack_17_a.mpirun=1.output
new file mode 100644 (file)
index 0000000..b483cc9
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
diff --git a/tests/scalapack/scalapack_17_a.mpirun=11.output b/tests/scalapack/scalapack_17_a.mpirun=11.output
new file mode 100644 (file)
index 0000000..73f104f
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
diff --git a/tests/scalapack/scalapack_17_a.mpirun=9.output b/tests/scalapack/scalapack_17_a.mpirun=9.output
new file mode 100644 (file)
index 0000000..73f104f
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
diff --git a/tests/scalapack/scalapack_17_b.cc b/tests/scalapack/scalapack_17_b.cc
new file mode 100644 (file)
index 0000000..28aa246
--- /dev/null
@@ -0,0 +1,126 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../lapack/create_matrix.h"
+#include "../tests.h"
+
+// test ScaLAPACKMatrix::copy_to(LAPACKFullMatrix<NumberType> &matrix, const unsigned int rank)
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+
+
+template <typename NumberType>
+void
+test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+  MPI_Comm           mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes(
+    Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(
+    Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout(std::cout, (this_mpi_process == 0));
+
+  const unsigned int size = 500;
+  // create FullMatrix and fill it
+  FullMatrix<NumberType> full(size);
+
+  unsigned int count = 0;
+  for (unsigned int i = 0; i < size; ++i)
+    for (unsigned int j = 0; j < size; ++j, ++count)
+      full(i, j) = count;
+
+  // create 2d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
+    std::make_shared<Utilities::MPI::ProcessGrid>(
+      mpi_communicator, size, size, block_size_i, block_size_i);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix(
+    size, size, grid, block_size_i, block_size_i);
+
+  pcout << "2D grid matrix: dim=" << scalapack_matrix.m() << "x"
+        << scalapack_matrix.n() << ";  blocks=" << block_size_i << "x"
+        << block_size_i << ";  grid=" << grid->get_process_grid_rows() << "x"
+        << grid->get_process_grid_columns() << std::endl
+        << std::endl;
+
+  scalapack_matrix = full;
+  LAPACKFullMatrix<NumberType> comparison;
+
+  const bool       process_is_in_grid = grid->is_process_active();
+  const int        entry = process_is_in_grid ? this_mpi_process : -1;
+  std::vector<int> valid_process_ranks =
+    Utilities::MPI::gather(mpi_communicator, entry, 0);
+  int random_rank = 0;
+
+  if (this_mpi_process == 0)
+    {
+      std::vector<int> valid_ranks;
+
+      for (unsigned int i = 0; i < valid_process_ranks.size(); ++i)
+        if (valid_process_ranks[i] >= 0)
+          valid_ranks.push_back(valid_process_ranks[i]);
+
+      const int entry = random_value<unsigned int>(0, valid_ranks.size() - 1);
+      random_rank     = valid_ranks[entry];
+    }
+  MPI_Bcast(&random_rank, 1, MPI_INT, 0, mpi_communicator);
+  const unsigned int rank = random_rank;
+
+  if (this_mpi_process == rank)
+    comparison.reinit(size);
+
+  scalapack_matrix.copy_to(comparison, rank);
+
+  if (this_mpi_process == rank)
+    {
+      NumberType diff = 0;
+      for (unsigned int i = 0; i < size; ++i)
+        for (unsigned int j = 0; j < size; ++j)
+          diff +=
+            (full(i, j) - comparison(i, j)) * (full(i, j) - comparison(i, j));
+      diff = sqrt(diff);
+
+      AssertThrow(diff < 1e-12, ExcInternalError());
+    }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, numbers::invalid_unsigned_int);
+
+  const std::vector<unsigned int> blocks_i = {{16, 32, 64}};
+  const std::vector<unsigned int> blocks_j = {{16, 32, 64}};
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<float>(s, b);
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<double>(s, b);
+}
diff --git a/tests/scalapack/scalapack_17_b.mpirun=1.output b/tests/scalapack/scalapack_17_b.mpirun=1.output
new file mode 100644 (file)
index 0000000..b483cc9
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+
diff --git a/tests/scalapack/scalapack_17_b.mpirun=11.output b/tests/scalapack/scalapack_17_b.mpirun=11.output
new file mode 100644 (file)
index 0000000..73f104f
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
diff --git a/tests/scalapack/scalapack_17_b.mpirun=9.output b/tests/scalapack/scalapack_17_b.mpirun=9.output
new file mode 100644 (file)
index 0000000..73f104f
--- /dev/null
@@ -0,0 +1,36 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+
