From: Denis Davydov Date: Mon, 20 Nov 2017 10:58:28 +0000 (+0100) Subject: various cleanups and fixes to ScaLAPACK wrappers X-Git-Tag: v9.0.0-rc1~701^2~4 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=afe471248cd58ad7720de7007eb734a13ded2527;p=dealii.git various cleanups and fixes to ScaLAPACK wrappers --- diff --git a/doc/news/changes/major/20171120DenisDavydov-BenjaminBrands b/doc/news/changes/major/20171120DenisDavydov-BenjaminBrands index e424c1da28..3ab2bbe30c 100644 --- a/doc/news/changes/major/20171120DenisDavydov-BenjaminBrands +++ b/doc/news/changes/major/20171120DenisDavydov-BenjaminBrands @@ -1,4 +1,4 @@ -New: add ScaLAPACKMatrix and ProcessGrid wrappers for high-performance dense linear +New: Add ScaLAPACKMatrix and ProcessGrid wrappers for high-performance dense linear algebra routines for parallel distributed memory machines.
(Denis Davydov, Benjamin Brands, 2017/11/20) diff --git a/doc/readme.html b/doc/readme.html index b66ae341e0..d5d208adbe 100644 --- a/doc/readme.html +++ b/doc/readme.html @@ -717,17 +717,18 @@
- scalapack + ScaLAPACK

scalapack is a library of high-performance linear algebra routines for parallel - distributed memory machines. ScaLAPACK solves dense and banded linear - systems, least squares problems, eigenvalue problems, and singular value - problems. In order to enable this feature, add - -DSCALAPACK_DIR=/path/to/scalapack to the argument list for - cmake. + distributed memory machines. ScaLAPACK solves dense and banded linear + systems, least squares problems, eigenvalue problems, and singular value + problems. In order to enable this feature, add + -DSCALAPACK_DIR=/path/to/scalapack to the argument list for + cmake. If ScaLAPACK does not have embedded BLACS, you might + need to pass -DBLACS_DIR=/path/to/blacs as well.

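To give a feel for the interfaces introduced in this patch, here is a minimal usage sketch of the new wrappers (illustrative only: the names mpi_communicator and full_matrix, a 64x64 symmetric positive definite FullMatrix<double>, are assumptions of the example; the classes and member functions are those declared below):

@code
// Set up a 2D process grid suited to a 64x64 matrix with 32x32 blocks.
std::shared_ptr<ProcessGrid> grid =
  std::make_shared<ProcessGrid>(mpi_communicator,
                                std::make_pair(64u, 64u),
                                std::make_pair(32u, 32u));

// A square symmetric matrix distributed block-cyclically over the grid.
ScaLAPACKMatrix<double> A(64, grid, 32, LAPACKSupport::Property::symmetric);

A = full_matrix;                     // distribute the serial matrix
A.compute_cholesky_factorization(); // A now stores the Cholesky factor
@endcode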
diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h index d216e74932..4c3b0ebbab 100644 --- a/include/deal.II/lac/scalapack.h +++ b/include/deal.II/lac/scalapack.h @@ -31,15 +31,28 @@ DEAL_II_NAMESPACE_OPEN -/** - * Forward declaration of class ScaLAPACKMatrix - */ +// Forward declaration of class ScaLAPACKMatrix for ProcessGrid template class ScaLAPACKMatrix; /** - * A class taking care of setting up the process grid for BLACS - * ScaLapack matrices will have shared pointer of this class to perform block-cyclic distribution + * A class taking care of setting up a two-dimensional processor grid. + * For example, an MPI communicator with 5 processes can be arranged into a + * 2x2 grid, with the 5th process being inactive: + * @code + * | 0 | 1 + * -----| ------- |----- + * 0 | P0 | P1 + * | | + * -----| ------- |----- + * 1 | P2 | P3 + * @endcode + * + * A shared pointer to this class is provided to ScaLAPACKMatrix matrices to + * perform block-cyclic distribution. + * + * Note that this class allows one to set up a process grid which has fewer + * MPI cores than the total number of cores in the communicator. * * @author Benjamin Brands, 2017 */ @@ -52,99 +65,120 @@ public: */ template friend class ScaLAPACKMatrix; - /* - * Constructor for a process grid for a given @p mpi_communicator - * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns + /** + * Constructor for a process grid for a given @p mpi_communicator. + * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns. + * Their product should be less than or equal to the total number of cores + * in the @p mpi_communicator. */ - ProcessGrid(MPI_Comm mpi_communicator, const std::pair &grid_dimensions); + ProcessGrid(MPI_Comm mpi_communicator, + const std::pair &grid_dimensions); - /* - * Constructor for a process grid for a given @p mpi_communicator - * The process grid is built based on the dimension and blockcyclic distribution of a target matrix described by @p matrix_dimensions and @p block_sizes + /** + * Constructor for a process grid for a given @p mpi_communicator. + * In this case the process grid is heuristically chosen based on the + * dimensions and block-cyclic distribution of a target matrix provided + * in @p matrix_dimensions and @p block_sizes. + * + * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\cdot\frac{N}{NB}, Np\}$, where $M,N$ + * are the matrix dimensions, $MB,NB$ are the block sizes, and $Np$ is the number of + * processes in the @p mpi_communicator. This function then creates a 2D processor grid + * assuming that the ratio between the number of process rows $p$ and columns $q$ + * equals the ratio between the matrix dimensions $M$ and $N$. */ ProcessGrid(MPI_Comm mpi_communicator, - const std::pair &matrix_dimensions, const std::pair &block_sizes); + const std::pair &matrix_dimensions, + const std::pair &block_sizes); /** - * Destructor + * Destructor. */ virtual ~ProcessGrid(); /** - * Return the number of rows in processes grid. + * Return the number of rows in the process grid. */ - int get_process_grid_rows() const; + unsigned int get_process_grid_rows() const; /** - * Return the number of columns in processes grid. + * Return the number of columns in the process grid.
*/ - int get_process_grid_columns() const; + unsigned int get_process_grid_columns() const; private: /** - * MPI communicator with all processes + * Send @p count values stored contiguously starting at @p value from + * the process with rank zero to processes which + * are not in the process grid. + */ + template + void send_to_inactive(NumberType *value, const int count=1) const; + + /** + * An MPI communicator with all processes. */ MPI_Comm mpi_communicator; /** - * MPI communicator with inactive processes and the root + * An MPI communicator with inactive processes and the process with rank zero. */ MPI_Comm mpi_communicator_inactive_with_root; /** - * BLACS context + * BLACS context. This is equivalent to an MPI communicator and is + * used by ScaLAPACK. */ int blacs_context; /** - * ID of this MPI process + * Rank of this MPI process. */ - int this_mpi_process; + const unsigned int this_mpi_process; /** - * Total number of MPI processes + * Total number of MPI processes. */ - int n_mpi_processes; + const unsigned int n_mpi_processes; /** - * Number of rows in processes grid + * Number of rows in the process grid. */ int n_process_rows; /** - * Number of columns in processes grid + * Number of columns in the process grid. */ int n_process_columns; /** - * Row of this process in the grid + * Row of this process in the grid. */ int this_process_row; /** - * Column of this process in the grid + * Column of this process in the grid. */ int this_process_column; /** - * A flag which is true for processes within the 2D process grid + * A flag which is true for processes within the 2D process grid. */ bool active; -protected: }; /** * A wrapper class around ScaLAPACK parallel dense linear algebra. * - * ScaLAPACK assumes that matrices to be distributed according to the - * block-cyclic decomposition scheme. A $M$ by $N$ matrix is first decomposed - * into $MB$ by $NB$ blocks which are then uniformly distributed across the 2D process grid - * $p*q <= Np$. + * ScaLAPACK assumes that matrices are distributed according to the + * block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed + * into $MB$ by $NB$ blocks which are then uniformly distributed across + * the 2D process grid with $p \times q \le Np$ processes. * - * For example, global real symmetric matrix of order 9 is stored in upper storage mode with block sizes 4 × 4: + * For example, a global real symmetric matrix of order 9 is stored in + * upper storage mode with block sizes 4 × 4: * @code * 0 1 2 * ┌ ┐ @@ -186,10 +220,13 @@ protected: * | . . . . -4.0 | . . . -4.0 * @endcode * + * The choice of the block sizes is a compromise between sizes that are + * sufficiently large for efficient local/serial BLAS and sizes that are + * small enough to achieve good parallel load balance. * - * Here is a strong scaling example of ScaLAPACKMatrix::invert() + * Below we show a strong scaling example of ScaLAPACKMatrix::invert() * on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets - * 2.20GHz, 10 cores/socket. Calculate are performed on square processor + * 2.20GHz, 10 cores/socket. Calculations are performed on square processor * grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10. * * @image html scalapack_invert.png @@ -203,115 +240,104 @@ class ScaLAPACKMatrix : protected TransposeTable public: /** - * Declare type for container size. - */ + * Declare the type for container size.
+ */ typedef unsigned int size_type; /** - * Constructor for a rectangular matrix with @p rows and @p columns, distributed - * in a given @p mpi_communicator . - * - * The choice of the block size @p block_size is a compromise between a large - * enough sizes for efficient local BLAS and small enough to get - * good parallel load balance. - */ - ScaLAPACKMatrix(const size_type rows, const size_type columns, - std::shared_ptr process_grid, - const unsigned int block_size_row = 32, const unsigned int block_size_column = 32, - const LAPACKSupport::Property property = LAPACKSupport::Property::general); - - /* - * Alternative constructor having the matrix dimensions and block sizes in the std::pairs @p sizes and @p block_sizes + * Constructor for a rectangular matrix with rows and columns provided in + * @p sizes, distributed using the grid @p process_grid. */ ScaLAPACKMatrix(const std::pair &sizes, - std::shared_ptr process_grid, + const std::shared_ptr process_grid, const std::pair &block_sizes = std::make_pair(32,32), const LAPACKSupport::Property property = LAPACKSupport::Property::general); /** - * Constructor for a square matrix of size @p size, distributed + * Constructor for a square matrix of size @p size, distributed * using the process grid in @p process_grid. - * - * The choice of the block size @p block_size is a compromize between a large - * enough sizes for efficient local BLAS and small enough get - * good parallel load balance. */ ScaLAPACKMatrix(const size_type size, - std::shared_ptr process_grid, - const unsigned int block_size = 32); + const std::shared_ptr process_grid, + const size_type block_size = 32, + const LAPACKSupport::Property property = LAPACKSupport::Property::symmetric); /** * Destructor */ virtual ~ScaLAPACKMatrix(); + /** + * Assign @p property to this matrix. + */ + void set_property(const LAPACKSupport::Property property); + /** * Assignment operator from a regular FullMatrix. + * + * @note This function should only be used for relatively small matrix + * dimensions. It is primarily intended for debugging purposes. */ ScaLAPACKMatrix & operator = (const FullMatrix &); /** - * Copy the content of the distributed matrix into @p matrix. + * Copy the contents of the distributed matrix into @p matrix. + * + * @note This function should only be used for relatively small matrix + * dimensions. It is primarily intended for debugging purposes. */ void copy_to (FullMatrix &matrix) const; /** - * Compute the Cholesky factorization of the matrix using ScaLAPACK function pXpotrf. + * Compute the Cholesky factorization of the matrix using the ScaLAPACK + * function pXpotrf. The result of the factorization is stored in this object. */ void compute_cholesky_factorization (); /** * Invert the matrix by first computing Cholesky factorization and then - * building the actual inverse using pXpotri. + * building the actual inverse using pXpotri. The inverse is stored + * in this object. */ void invert(); /** - * Compute all eigenvalues of real symmetric matrix using pdsyev - * After successful computation the eigenvalues are stored in @p eigenvalues in ascending order + * Compute all eigenvalues of a real symmetric matrix using pXsyev. + * If successful, the computed @p eigenvalues are arranged in ascending order.
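+ *
+ * A minimal calling sketch (illustrative; a symmetric matrix A in the
+ * matrix state is assumed):
+ * @code
+ * std::vector<NumberType> eigenvalues;
+ * A.eigenvalues_symmetric(eigenvalues);
+ * // eigenvalues now contains all eigenvalues in ascending order;
+ * // the contents of A are destroyed by the computation.
+ * @endcode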
*/ void eigenvalues_symmetric (std::vector &eigenvalues); /** - * Compute all eigenpairs of real symmetric matrix using pdsyev - * After successful computation the eigenvalues are stored in @p eigenvalues in ascending order - * The eigenvectors are stored in the columns of the matrix, therefore overwriting the original content of the matrix + * Compute all eigenpairs of a real symmetric matrix using pXsyev. + * If successful, the computed @p eigenvalues are arranged in ascending order. + * The eigenvectors are stored in the columns of the matrix, thereby + * overwriting the original content of the matrix. */ void eigenpairs_symmetric (std::vector &eigenvalues); /** - * Return the number of rows in processes grid. - */ - int get_process_grid_rows() const; - - /** - * Return the number of columns in processes grid. - */ - int get_process_grid_columns() const; - - /** - * Estimate the the condition number of a SPD matrix in $l_1$-norm. - * The matrix has to be in Cholesky state. The reciprocal of the + * Estimate the condition number of a SPD matrix in the $l_1$-norm. + * The matrix has to be in the Cholesky state (see compute_cholesky_factorization()). + * The reciprocal of the * condition number is returned in order to avoid the possibility of * overflow when the condition number is very large. * - * @p a_norm shall contain $l_1$-norm of the matrix prior to calling + * @p a_norm must contain the $l_1$-norm of the matrix prior to calling * Cholesky factorization. * - * @note an alternative is to compute the inverse of the matrix + * @note An alternative is to compute the inverse of the matrix * explicitly and manually construct $k_1 = ||A||_1 ||A^{-1}||_1$. */ NumberType reciprocal_condition_number(const NumberType a_norm) const; /** - * Compute $l_1$-norm of the matrix. - * @return + * Compute the $l_1$-norm of the matrix. */ NumberType l1_norm() const; /** - * Compute $l_{\infty}$ norm of the matrix. + * Compute the $l_{\infty}$-norm of the matrix. */ NumberType linfty_norm() const; @@ -321,14 +347,16 @@ public: NumberType frobenius_norm() const; /** - * Number of rows of the matrix $mxn$. + * Number of rows of the $M \times N$ matrix. */ - int m() const; + size_type m() const; /** - * Number of columns of the matrix $m x n$. + * Number of columns of the $M \times N$ matrix. */ - int n() const; + size_type n() const; + +private: /** * Number of local rows on this MPI process. @@ -341,12 +369,12 @@ public: int local_n() const; /** - * Return global row number for the given local row @p loc_row . + * Return the global row number for the given local row @p loc_row. */ int global_row(const int loc_row) const; /** - * Return global column number for the given local column @p loc_column. + * Return the global column number for the given local column @p loc_column. */ int global_column(const int loc_column) const; @@ -360,20 +388,12 @@ public: */ NumberType &local_el(const int loc_row, const int loc_column); -private: - /** - * Calculate norm of a distributed dense matrix using ScaLAPACK's + * Calculate the norm of a distributed dense matrix using ScaLAPACK's * internal function. */ NumberType norm(const char type) const; - /** - * Send @p value from process with rank zero to processes which - * are not in the process grid. - */ - void send_to_inactive(NumberType &value) const; - /** * Since ScaLAPACK operations notoriously change the meaning of the matrix * entries, we record the current state after the last operation here.
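 *
 * For example, reciprocal_condition_number() requires the Cholesky state, so
 * a typical call sequence looks as follows (an illustrative sketch; A is a
 * symmetric positive definite matrix):
 * @code
 * const NumberType a_norm = A.l1_norm(); // while A is in the matrix state
 * A.compute_cholesky_factorization();    // state is now LAPACKSupport::cholesky
 * const NumberType rcond = A.reciprocal_condition_number(a_norm);
 * @endcode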
@@ -381,66 +401,65 @@ private: LAPACKSupport::State state; /** - * Additional properties of the matrix which may help to select more + * Additional property of the matrix which may help to select more * efficient ScaLAPACK functions. */ - LAPACKSupport::Property properties; + LAPACKSupport::Property property; /** - * Shared pointer to ProcessGrid object which contains Blacs context and MPI communicator as well as other necessary data structures + * A shared pointer to a ProcessGrid object which contains a BLACS context + * and an MPI communicator, as well as other necessary data structures. */ - std::shared_ptr grid; + std::shared_ptr grid; /** - * Number of rows in the matrix + * Number of rows in the matrix. */ int n_rows; /** - * Number of columns in the matrix + * Number of columns in the matrix. */ int n_columns; /** - * Row block size + * Row block size. */ int row_block_size; /** - * Column block size + * Column block size. */ int column_block_size; /** - * Number of rows in the matrix owned by the current process + * Number of rows in the matrix owned by the current process. */ int n_local_rows; /** - * Number of columns in the matrix owned by the current process + * Number of columns in the matrix owned by the current process. */ int n_local_columns; /** - * Scalapack description vector. + * ScaLAPACK description vector. */ int descriptor[9]; /** - * Workspace array + * Workspace array. */ mutable std::vector work; /** - * Integer workspace array + * Integer workspace array. */ mutable std::vector iwork; /** * A character to define where elements are stored in case - * ScaLAPACK operation support this. - * - * FIXME: switch to enum UpperOrLower: LOWER UPPER + * a ScaLAPACK operation supports this. */ const char uplo; @@ -452,19 +471,19 @@ private: /** * The process column of the process grid over which the first column - * of the global matrix is distributed + * of the global matrix is distributed. */ const int first_process_column; /** - * Global indices where to start a submatrix. Equals to unity as we - * don't use submatrices. + * Global row index that determines where to start a submatrix. + * Currently this equals unity, as we don't use submatrices. */ const int submatrix_row; /** - * Global indices where to start a submatrix. Equals to unity as we - * don't use submatrices. + * Global column index that determines where to start a submatrix. + * Currently this equals unity, as we don't use submatrices.
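+ *
+ * @note ScaLAPACK routines expect one-based global indices (their IA/JA
+ * arguments) for the submatrix start, which is why these members are set
+ * to one rather than zero.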
*/ const int submatrix_column; diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc index e4d26217e7..3f5dba3d18 100644 --- a/source/lac/scalapack.cc +++ b/source/lac/scalapack.cc @@ -35,7 +35,6 @@ // http://www.netlib.org/scalapack/slug/index.html // User guide // http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors -// FIXME: similar to lapack_templates.h move those to scalapack_template.h extern "C" { /* Basic Linear Algebra Communication Subprograms (BLACS) declarations */ @@ -295,7 +294,7 @@ namespace // therefore // Np = Pc * Pc / ratio // for square matrices the ratio equals 1 - const double ratio = n/m; + const double ratio = double(n)/m; int Pc = std::sqrt(ratio * Np); // one could round up Pc to the number which has zero remainder from the division of Np @@ -329,9 +328,12 @@ namespace -ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair &grid_dimensions) +ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, + const std::pair &grid_dimensions) : mpi_communicator(mpi_comm), + this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)), + n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)), n_process_rows(grid_dimensions.first), n_process_columns(grid_dimensions.second) { @@ -340,14 +342,11 @@ ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair &grid_dimen Assert (grid_dimensions.second > 0, ExcMessage("Number of process grid columns has to be positive.")); - MPI_Comm_size(mpi_communicator, &n_mpi_processes); - MPI_Comm_rank(mpi_communicator, &this_mpi_process); - Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes, ExcMessage("Size of process grid is larger than number of available MPI processes.")); - // processor grid order - // FIXME: default to column major + // processor grid order. + // FIXME: default to column major?
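+ // With row-major ordering, consecutive MPI ranks are placed along a grid
+ // row first; with column-major ordering they run down a grid column first.
+ // BLACS supports both conventions.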
const bool column_major = false; // Initialize Cblas context from the provided communicator @@ -422,7 +421,8 @@ ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair &grid_dimen ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, - const std::pair &matrix_dimensions, const std::pair &block_sizes) + const std::pair &matrix_dimensions, + const std::pair &block_sizes) : ProcessGrid(mpi_comm, choose_the_processor_grid(mpi_comm, matrix_dimensions.first, matrix_dimensions.second, @@ -441,36 +441,67 @@ ProcessGrid::~ProcessGrid() +unsigned int ProcessGrid::get_process_grid_rows() const +{ + return n_process_rows; +} + + + +unsigned int ProcessGrid::get_process_grid_columns() const +{ + return n_process_columns; +} + + + +template +void ProcessGrid::send_to_inactive(NumberType *value, const int count) const +{ + Assert (count>0, ExcInternalError()); + if (mpi_communicator_inactive_with_root != MPI_COMM_NULL) + { + const int ierr = + MPI_Bcast(value,count,MPI_DOUBLE, + 0/*from root*/, + mpi_communicator_inactive_with_root); + AssertThrowMPI(ierr); + } +} + + + + /** *Constructor for a rectangular distributed Matrix */ template -ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type rows, const size_type columns, - std::shared_ptr process_grid, - const unsigned int block_size_row, const unsigned int block_size_column, +ScaLAPACKMatrix::ScaLAPACKMatrix(const std::pair &sizes, + const std::shared_ptr process_grid, + const std::pair &block_sizes, const LAPACKSupport::Property property) : TransposeTable (), state (LAPACKSupport::unusable), - properties(property), + property(property), grid (process_grid), - n_rows(rows), - n_columns(columns), - row_block_size(block_size_row), - column_block_size(block_size_column), + n_rows(sizes.first), + n_columns(sizes.second), + row_block_size(block_sizes.first), + column_block_size(block_sizes.second), uplo('L'), // for non-symmetric matrices this is not needed first_process_row(0), first_process_column(0), submatrix_row(1), submatrix_column(1) { - Assert (block_size_row > 0, + Assert (row_block_size > 0, ExcMessage("Row block size has to be positive.")); - Assert (block_size_column > 0, + Assert (column_block_size > 0, ExcMessage("Column block size has to be positive.")); - Assert (block_size_row <= rows, + Assert (row_block_size <= n_rows, ExcMessage("Row block size can not be greater than the number of rows of the matrix")); - Assert (block_size_column <= columns, + Assert (column_block_size <= n_columns, ExcMessage("Column block size can not be greater than the number of columns of the matrix")); if (grid->active) @@ -501,23 +532,25 @@ ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type rows, const size_ty template -ScaLAPACKMatrix::ScaLAPACKMatrix(const std::pair &sizes, - std::shared_ptr process_grid, - const std::pair &block_sizes, +ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type size, + const std::shared_ptr process_grid, + const size_type block_size, const LAPACKSupport::Property property) : - ScaLAPACKMatrix(sizes.first,sizes.second,process_grid,block_sizes.first,block_sizes.first,property) + ScaLAPACKMatrix(std::make_pair(size,size), + process_grid, + std::make_pair(block_size,block_size), + property) {} template -ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type size, - std::shared_ptr process_grid, - const unsigned int block_size) - : - ScaLAPACKMatrix(size,size,process_grid,block_size,block_size,LAPACKSupport::Property::symmetric) -{} +void +ScaLAPACKMatrix::set_property(const LAPACKSupport::Property property_) +{ + property = property_; +} @@ -533,7 +566,6 @@ 
ScaLAPACKMatrix::operator = (const FullMatrix &matrix) Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m())); Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n())); - //if (active) if (grid->active) { for (int i=0; i < n_local_rows; ++i) @@ -546,8 +578,6 @@ ScaLAPACKMatrix::operator = (const FullMatrix &matrix) } } } - // FIXME: move it from operator = to copy_from() with a const bool symmetric flag. - properties = LAPACKSupport::symmetric; state = LAPACKSupport::matrix; return *this; } @@ -555,7 +585,7 @@ ScaLAPACKMatrix::operator = (const FullMatrix &matrix) template -int +unsigned int ScaLAPACKMatrix::m() const { return n_rows; @@ -564,7 +594,7 @@ ScaLAPACKMatrix::m() const template -int +unsigned int ScaLAPACKMatrix::n() const { return n_columns; @@ -644,11 +674,11 @@ ScaLAPACKMatrix::copy_to (FullMatrix &matrix) const // we could move the following lines under the main loop above, // but they would be dependent on glob_i and glob_j, which // won't make it much prettier - if (properties == LAPACKSupport::lower_triangular) + if (property == LAPACKSupport::lower_triangular) for (unsigned int i = 0; i < matrix.n(); ++i) for (unsigned int j = i+1; j < matrix.m(); ++j) matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.); - else if (properties == LAPACKSupport::upper_triangular) + else if (property == LAPACKSupport::upper_triangular) for (unsigned int i = 0; i < matrix.n(); ++i) for (unsigned int j = 0; j < i; ++j) matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.); @@ -676,7 +706,7 @@ void ScaLAPACKMatrix::compute_cholesky_factorization() pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info); AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpotrf", info)); } - properties = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular); + property = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular); state = LAPACKSupport::cholesky; } @@ -692,7 +722,8 @@ void ScaLAPACKMatrix::invert() { int info = 0; /* - * matrix Z is not distributed as it will not be referenced by the ScaLapack function call + * matrix Z is not distributed as it will not be referenced by the + * ScaLAPACK function call. */ std::vector Z_loc; NumberType *A_loc = &this->values[0]; @@ -709,7 +740,7 @@ void ScaLAPACKMatrix::eigenvalues_symmetric(std::vector { Assert (state == LAPACKSupport::matrix, ExcMessage("Matrix has to be in Matrix state before calling this function.")); - Assert (properties == LAPACKSupport::symmetric, + Assert (property == LAPACKSupport::symmetric, ExcMessage("Matrix has to be symmetric for this operation.")); ScaLAPACKMatrix Z (grid->n_mpi_processes, grid, 1); @@ -743,11 +774,12 @@ void ScaLAPACKMatrix::eigenvalues_symmetric(std::vector /* * send the eigenvalues to processes that are not part of the process grid */ - MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root); + grid->send_to_inactive(ev.data(), ev.size()); - /* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A, - * including the diagonal, is destroyed. Therefore, the matrix is unusable - */ + /* + * On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A, + * including the diagonal, is destroyed. Therefore, the matrix is unusable. + */ state = LAPACKSupport::unusable; } @@ -758,17 +790,13 @@ void ScaLAPACKMatrix::eigenpairs_symmetric(std::vector & { Assert (state == LAPACKSupport::matrix, ExcMessage("Matrix has to be in Matrix state before calling this function.")); - Assert (properties == LAPACKSupport::symmetric, + Assert (property == LAPACKSupport::symmetric, ExcMessage("Matrix has to be symmetric for this operation.")); ScaLAPACKMatrix eigenvectors (n_rows, grid, row_block_size); - eigenvectors.properties = properties; + eigenvectors.property = property; ev.resize (n_rows); - const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(grid->mpi_communicator)); - - ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - if (grid->active) { int info = 0; @@ -781,7 +809,7 @@ void ScaLAPACKMatrix::eigenpairs_symmetric(std::vector & /* * by setting lwork to -1, a workspace query for the optimal length of work is performed - */ + */ int lwork=-1; NumberType *eigenvectors_loc = &eigenvectors.values[0]; work.resize(1); @@ -797,19 +825,20 @@ void ScaLAPACKMatrix::eigenpairs_symmetric(std::vector & AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdsyev", info)); - //copy eigenvectors to original matrix - //as the temporary matrix eigenvectors has identical dimensions and block-cyclic distribution we simply swap the local array + // copy eigenvectors to original matrix + // as the temporary matrix eigenvectors has identical dimensions and + // block-cyclic distribution, we simply swap the local arrays this->values.swap(eigenvectors.values); } /* * send the eigenvalues to processes that are not part of the process grid */ - MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root); + grid->send_to_inactive(ev.data(), ev.size()); /* - * On exit matrix A stores the eigenvectors in the columns - */ - properties = LAPACKSupport::Property::general; + * On exit, matrix A stores the eigenvectors in its columns. + */ + property = LAPACKSupport::Property::general; state = LAPACKSupport::eigenvalues; } @@ -834,7 +863,7 @@ NumberType ScaLAPACKMatrix::reciprocal_condition_number(const Number &a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info); AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpocon", info)); } - send_to_inactive(rcond); + grid->send_to_inactive(&rcond); return rcond; } @@ -901,43 +930,14 @@ NumberType ScaLAPACKMatrix::norm(const char type) const const NumberType *A_loc = &this->values[0]; res = pdlansy_(&type, &uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, &work[0]); } - send_to_inactive(res); + grid->send_to_inactive(&res); return res; } - - -template -void ScaLAPACKMatrix::send_to_inactive(NumberType &value) const -{ - if (grid->mpi_communicator_inactive_with_root != MPI_COMM_NULL) - { - MPI_Bcast(&value,1,MPI_DOUBLE, - 0/*from root*/, - grid->mpi_communicator_inactive_with_root); - } -} - - - -template -int ScaLAPACKMatrix::get_process_grid_rows() const -{ - return grid->n_process_rows; -} - - - -template -int ScaLAPACKMatrix::get_process_grid_columns() const -{ - return grid->n_process_columns; -} - - - // instantiations template class ScaLAPACKMatrix; +template void ProcessGrid::send_to_inactive(double *, const int) const; + DEAL_II_NAMESPACE_CLOSE diff --git a/tests/scalapack/scalapack_01.cc b/tests/scalapack/scalapack_01.cc index d6ca93fa61..6340eb10ac 100644 --- a/tests/scalapack/scalapack_01.cc +++ b/tests/scalapack/scalapack_01.cc @@ -23,9 +23,6 @@ #include #include -#include
-#include - #include #include @@ -42,10 +39,6 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_in(size), full_out(size), diff(size); @@ -53,11 +46,8 @@ void test(const unsigned int size, const unsigned int block_size) std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); ScaLAPACKMatrix scalapack_matrix (sizes.first, grid, block_sizes.first); - /*ScaLAPACKMatrix scalapack_matrix(size, - mpi_communicator, - block_size);*/ - pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl; + pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; { unsigned int index = 0; @@ -92,41 +82,12 @@ void test(const unsigned int size, const unsigned int block_size) int main (int argc,char **argv) { + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - try - { - Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - - const std::vector sizes = {{64,120,320,640}}; - const std::vector blocks = {{32,64}}; - - for (const auto &s : sizes) - for (const auto &b : blocks) - test(s,b); + const std::vector sizes = {{64,120,320,640}}; + const std::vector blocks = {{32,64}}; - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; + for (const auto &s : sizes) + for (const auto &b : blocks) + test(s,b); } diff --git a/tests/scalapack/scalapack_02.cc b/tests/scalapack/scalapack_02.cc index 2f5d2f2c7f..7286c98aa5 100644 --- a/tests/scalapack/scalapack_02.cc +++ b/tests/scalapack/scalapack_02.cc @@ -14,6 +14,7 @@ // --------------------------------------------------------------------- #include "../tests.h" +#include "../lapack/create_matrix.h" // test Cholesky decomposition in ScaLAPACK vs FullMatrix @@ -23,9 +24,6 @@ #include #include -#include -#include - #include #include @@ -43,45 +41,18 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_in(size), inverse(size), full_out(size), diff(size); std::pair sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size); std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); - ScaLAPACKMatrix scalapack_matrix (sizes.first, grid, block_sizes.first); - /*ScaLAPACKMatrix scalapack_matrix(size, - mpi_communicator, - block_size);*/ - - pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl; - - { - full_in = 0.; - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - - for (unsigned int i = 0; i < size; ++i) - for (unsigned int j = i; j < size; ++j) - { - const double val = dist(gen); - Assert (val >= 0. && val <= 1., - ExcInternalError()); - if (i==j) - // since A(i,j) < 1 and - // a symmetric diagonally dominant matrix is SPD - full_in(i,j) = val + size; - else - { - full_in(i,j) = val; - full_in(j,i) = val; - } - } - } + ScaLAPACKMatrix scalapack_matrix (sizes.first, grid, block_sizes.first, + LAPACKSupport::Property::symmetric); + + pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; + + create_spd (full_in); // invert via Lapack inverse.cholesky(full_in); @@ -116,42 +87,13 @@ void test(const unsigned int size, const unsigned int block_size) int main (int argc,char **argv) { + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - try - { - Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - - const std::vector sizes = {{32,64,120,320,640}}; - const std::vector blocks = {{32,64}}; + const std::vector sizes = {{32,64,120,320,640}}; + const std::vector blocks = {{32,64}}; - for (const auto &s : sizes) - for (const auto &b : blocks) - if (b <= s) - test(s,b); - - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; + for (const auto &s : sizes) + for (const auto &b : blocks) + if (b <= s) + test(s,b); } diff --git a/tests/scalapack/scalapack_03.cc b/tests/scalapack/scalapack_03.cc index 0899dedd7e..695748cee8 100644 --- a/tests/scalapack/scalapack_03.cc +++ b/tests/scalapack/scalapack_03.cc @@ -14,6 +14,7 @@ // --------------------------------------------------------------------- #include "../tests.h" +#include "../lapack/create_matrix.h" /* * test inverse via Cholesky decomposition in ScaLAPACK vs FullMatrix @@ -32,9 +33,6 @@ #include #include -#include -#include - #include #include @@ -51,50 +49,22 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_in(size), inverse(size), full_out(size), diff(size), prod1(size), prod2(size), one(size); std::pair sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size); std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); - ScaLAPACKMatrix scalapack_matrix (sizes.first, grid, block_sizes.first); - /*ScaLAPACKMatrix scalapack_matrix(size, - mpi_communicator, - block_size);*/ - - pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl; - - { - full_in = 0.; - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - - one = 0.; - for (unsigned int i=0; i= 0. && val <= 1., - ExcInternalError()); - if (i==j) - // since A(i,j) < 1 and - // a symmetric diagonally dominant matrix is SPD - full_in(i,j) = val + size; - else - { - full_in(i,j) = val; - full_in(j,i) = val; - } - } - } + ScaLAPACKMatrix scalapack_matrix (sizes.first, grid, block_sizes.first, + LAPACKSupport::Property::symmetric); + + pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; + create_spd (full_in); + + one = 0.; + for (unsigned int i=0; i sizes = {{32,64,120,320,640}}; - const std::vector blocks = {{32,64}}; + const std::vector sizes = {{32,64,120,320,640}}; + const std::vector blocks = {{32,64}}; - for (const auto &s : sizes) - for (const auto &b : blocks) - if (b <= s) - test(s,b); - - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; + for (const auto &s : sizes) + for (const auto &b : blocks) + if (b <= s) + test(s,b); } diff --git a/tests/scalapack/scalapack_04.cc b/tests/scalapack/scalapack_04.cc index 6921ddcc41..989736927f 100644 --- a/tests/scalapack/scalapack_04.cc +++ b/tests/scalapack/scalapack_04.cc @@ -14,6 +14,7 @@ // --------------------------------------------------------------------- #include "../tests.h" +#include "../lapack/create_matrix.h" // test norms of ScaLAPACK vs FullMatrix @@ -23,9 +24,6 @@ #include #include -#include -#include - #include #include @@ -42,46 +40,18 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_A(size); std::pair sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size); std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); - ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first); - /*ScaLAPACKMatrix scalapack_A (size, - mpi_communicator, - block_size);*/ - - pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl; - - { - full_A = 0.; - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - - for (unsigned int i = 0; i < size; ++i) - for (unsigned int j = i; j < size; ++j) - { - const double val = dist(gen); - Assert (val >= 0. && val <= 1., - ExcInternalError()); - if (i==j) - // since A(i,j) < 1 and - // a symmetric diagonally dominant matrix is SPD - full_A(i,j) = val + size; - else - { - full_A(i,j) = val; - full_A(j,i) = val; - } - } - } + ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first, + LAPACKSupport::Property::symmetric); + + pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; + create_spd (full_A); const double l1 = full_A.l1_norm(); const double linfty = full_A.linfty_norm(); @@ -112,41 +82,14 @@ void test(const unsigned int size, const unsigned int block_size) int main (int argc,char **argv) { - try - { - Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - - const std::vector sizes = {{32,64,120,320,640}}; - const std::vector blocks = {{32,64}}; - - for (const auto &s : sizes) - for (const auto &b : blocks) - if (b <= s) - test(s,b); - - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); + + const std::vector sizes = {{32,64,120,320,640}}; + const std::vector blocks = {{32,64}}; + + for (const auto &s : sizes) + for (const auto &b : blocks) + if (b <= s) + test(s,b); + } diff --git a/tests/scalapack/scalapack_04.mpirun=1.output b/tests/scalapack/scalapack_04.mpirun=1.output index df22a69b32..6762220a22 100644 --- a/tests/scalapack/scalapack_04.mpirun=1.output +++ b/tests/scalapack/scalapack_04.mpirun=1.output @@ -1,36 +1,36 @@ 32 32 1 1 -51.7156 51.7156 51.7156 -51.7156 51.7156 51.7156 -184.51 184.51 184.51 +53.3549 53.3549 53.3549 +53.3549 53.3549 53.3549 +185.54 185.54 185.54 64 32 1 1 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.897 101.897 101.897 +101.897 101.897 101.897 +517.089 517.089 517.089 64 64 1 1 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.369 101.369 101.369 +101.369 101.369 101.369 +517.457 517.457 517.457 120 32 1 1 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +186.726 186.726 186.726 +186.726 186.726 186.726 +1321.87 1321.87 1321.87 120 64 1 1 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +189.512 189.512 189.512 +189.512 189.512 189.512 +1321.68 1321.68 1321.68 320 32 1 1 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +494.329 494.329 494.329 +494.329 494.329 494.329 +5736.37 5736.37 5736.37 320 64 1 1 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +498.342 498.342 498.342 +498.342 498.342 498.342 +5736.49 5736.49 5736.49 640 32 1 1 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 16207.1 +980.609 980.609 980.609 +980.609 980.609 980.609 +16207.5 16207.5 16207.5 640 64 1 1 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 16207.1 +982.483 982.483 982.483 +982.483 982.483 982.483 +16208.2 16208.2 16208.2 diff --git a/tests/scalapack/scalapack_04.mpirun=11.output b/tests/scalapack/scalapack_04.mpirun=11.output index 4c1b742dc8..b756d5263b 100644 --- a/tests/scalapack/scalapack_04.mpirun=11.output +++ b/tests/scalapack/scalapack_04.mpirun=11.output @@ -1,36 +1,36 @@ 32 32 1 1 -51.7156 51.7156 51.7156 -51.7156 51.7156 51.7156 -184.51 184.51 184.51 +53.3549 53.3549 53.3549 +53.3549 53.3549 53.3549 +185.54 185.54 185.54 64 32 2 2 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.897 101.897 101.897 +101.897 101.897 101.897 +517.089 517.089 517.089 64 64 1 1 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.369 101.369 101.369 +101.369 101.369 101.369 +517.457 517.457 517.457 120 32 3 3 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +186.726 186.726 186.726 +186.726 186.726 186.726 +1321.87 1321.87 1321.87 120 64 2 2 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +189.512 189.512 189.512 +189.512 189.512 189.512 +1321.68 1321.68 1321.68 320 32 3 3 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +494.329 494.329 494.329 +494.329 494.329 494.329 +5736.37 5736.37 5736.37 320 64 3 3 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +498.342 498.342 498.342 +498.342 498.342 498.342 +5736.49 5736.49 5736.49 640 32 3 3 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 
16207.1 +980.609 980.609 980.609 +980.609 980.609 980.609 +16207.5 16207.5 16207.5 640 64 3 3 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 16207.1 +982.483 982.483 982.483 +982.483 982.483 982.483 +16208.2 16208.2 16208.2 diff --git a/tests/scalapack/scalapack_04.mpirun=4.output b/tests/scalapack/scalapack_04.mpirun=4.output index 265f66a96c..51c20dc584 100644 --- a/tests/scalapack/scalapack_04.mpirun=4.output +++ b/tests/scalapack/scalapack_04.mpirun=4.output @@ -1,36 +1,36 @@ 32 32 1 1 -51.7156 51.7156 51.7156 -51.7156 51.7156 51.7156 -184.51 184.51 184.51 +53.3549 53.3549 53.3549 +53.3549 53.3549 53.3549 +185.54 185.54 185.54 64 32 2 2 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.897 101.897 101.897 +101.897 101.897 101.897 +517.089 517.089 517.089 64 64 1 1 -100.342 100.342 100.342 -100.342 100.342 100.342 -516.965 516.965 516.965 +101.369 101.369 101.369 +101.369 101.369 101.369 +517.457 517.457 517.457 120 32 2 2 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +186.726 186.726 186.726 +186.726 186.726 186.726 +1321.87 1321.87 1321.87 120 64 2 2 -187.403 187.403 187.403 -187.403 187.403 187.403 -1321.51 1321.51 1321.51 +189.512 189.512 189.512 +189.512 189.512 189.512 +1321.68 1321.68 1321.68 320 32 2 2 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +494.329 494.329 494.329 +494.329 494.329 494.329 +5736.37 5736.37 5736.37 320 64 2 2 -494.259 494.259 494.259 -494.259 494.259 494.259 -5736.38 5736.38 5736.38 +498.342 498.342 498.342 +498.342 498.342 498.342 +5736.49 5736.49 5736.49 640 32 2 2 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 16207.1 +980.609 980.609 980.609 +980.609 980.609 980.609 +16207.5 16207.5 16207.5 640 64 2 2 -981.366 981.366 981.366 -981.366 981.366 981.366 -16207.1 16207.1 16207.1 +982.483 982.483 982.483 +982.483 982.483 982.483 +16208.2 16208.2 16208.2 diff --git a/tests/scalapack/scalapack_05.cc b/tests/scalapack/scalapack_05.cc index a2c78105d8..b2eedad493 100644 --- a/tests/scalapack/scalapack_05.cc +++ b/tests/scalapack/scalapack_05.cc @@ -14,6 +14,7 @@ // --------------------------------------------------------------------- #include "../tests.h" +#include "../lapack/create_matrix.h" // test reciprocal_condition_number() @@ -23,9 +24,6 @@ #include #include -#include -#include - #include #include @@ -42,47 +40,18 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_A(size), inv_A(size); std::pair sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size); std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); - ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first); - /*ScaLAPACKMatrix scalapack_A (size, - mpi_communicator, - block_size);*/ - - pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl; - - { - full_A = 0.; - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - - for (unsigned int i = 0; i < size; ++i) - for (unsigned int j = i; j < size; ++j) - { - const double val = dist(gen); - Assert (val >= 0. 
&& val <= 1., - ExcInternalError()); - if (i==j) - // since A(i,j) < 1 and - // a symmetric diagonally dominant matrix is SPD - full_A(i,j) = val + size; - else - { - full_A(i,j) = val; - full_A(j,i) = val; - } - } - } + ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first, + LAPACKSupport::Property::symmetric); + pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; + create_spd (full_A); inv_A.invert(full_A); const double l1 = full_A.l1_norm(); @@ -102,42 +71,14 @@ void test(const unsigned int size, const unsigned int block_size) int main (int argc,char **argv) { + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); + + const std::vector sizes = {{32,64,120,320,640}}; + const std::vector blocks = {{32,64}}; + + for (const auto &s : sizes) + for (const auto &b : blocks) + if (b <= s) + test(s,b); - try - { - Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - - const std::vector sizes = {{32,64,120,320,640}}; - const std::vector blocks = {{32,64}}; - - for (const auto &s : sizes) - for (const auto &b : blocks) - if (b <= s) - test(s,b); - - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; } diff --git a/tests/scalapack/scalapack_05.mpirun=1.output b/tests/scalapack/scalapack_05.mpirun=1.output index 38126ce456..cb7466c9e3 100644 --- a/tests/scalapack/scalapack_05.mpirun=1.output +++ b/tests/scalapack/scalapack_05.mpirun=1.output @@ -1,18 +1,18 @@ 32 32 1 1 -0.432045 0.476465 +0.410521 0.471758 64 32 1 1 -0.455047 0.48965 +0.44694 0.478752 64 64 1 1 -0.455047 0.48965 +0.444662 0.485917 120 32 1 1 -0.457288 0.491868 +0.459052 0.486953 120 64 1 1 -0.457288 0.491868 +0.449937 0.478099 320 32 1 1 -0.465325 0.489097 +0.466151 0.486539 320 64 1 1 -0.465325 0.489097 +0.460383 0.483094 640 32 1 1 -0.471717 0.48821 +0.472941 0.487731 640 64 1 1 -0.471717 0.48821 +0.471001 0.48693 diff --git a/tests/scalapack/scalapack_05.mpirun=11.output b/tests/scalapack/scalapack_05.mpirun=11.output index e70c3eec58..f9e7a0569f 100644 --- a/tests/scalapack/scalapack_05.mpirun=11.output +++ b/tests/scalapack/scalapack_05.mpirun=11.output @@ -1,18 +1,18 @@ 32 32 1 1 -0.432045 0.476465 +0.410521 0.471758 64 32 2 2 -0.455047 0.48965 +0.44694 0.478752 64 64 1 1 -0.455047 0.48965 +0.444662 0.485917 120 32 3 3 -0.457288 0.491868 +0.459052 0.486953 120 64 2 2 -0.457288 0.491868 +0.449937 0.478099 320 32 3 3 -0.465325 0.489097 +0.466151 0.486539 320 64 3 3 -0.465325 0.489097 +0.460383 0.483094 640 32 3 3 -0.471717 0.48821 +0.472941 0.487731 640 64 3 3 -0.471717 0.48821 +0.471001 0.48693 diff --git a/tests/scalapack/scalapack_05.mpirun=4.output b/tests/scalapack/scalapack_05.mpirun=4.output index 74c5a5145a..673831fda6 100644 --- a/tests/scalapack/scalapack_05.mpirun=4.output +++ b/tests/scalapack/scalapack_05.mpirun=4.output @@ -1,18 +1,18 @@ 32 32 1 1 -0.432045 0.476465 +0.410521 0.471758 64 32 2 2 -0.455047 0.48965 +0.44694 0.478752 64 64 1 1 -0.455047 0.48965 +0.444662 0.485917 120 32 2 2 -0.457288 0.491868 +0.459052 0.486953 120 64 2 2 -0.457288 0.491868 +0.449937 0.478099 320 32 2 2 -0.465325 0.489097 +0.466151 0.486539 320 64 2 2 -0.465325 0.489097 +0.460383 0.483094 640 32 2 2 -0.471717 0.48821 +0.472941 0.487731 640 64 2 2 -0.471717 0.48821 +0.471001 0.48693 diff --git a/tests/scalapack/scalapack_06.cc b/tests/scalapack/scalapack_06.cc index 352cfa80d4..14aac59319 100644 --- a/tests/scalapack/scalapack_06.cc +++ b/tests/scalapack/scalapack_06.cc @@ -14,8 +14,9 @@ // --------------------------------------------------------------------- #include "../tests.h" +#include "../lapack/create_matrix.h" -// test eigenvalues() +// test eigenvalues_symmetric() #include #include @@ -23,9 +24,6 @@ #include #include -#include -#include - #include #include @@ -50,10 +48,6 @@ void test(const unsigned int size, const unsigned int block_size) ConditionalOStream pcout (std::cout, (this_mpi_process ==0)); - // test multiplication with random vectors - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - // Create SPD matrices of requested size: FullMatrix full_A(size); std::vector lapack_A(size*size); @@ -61,36 +55,16 @@ void test(const unsigned int size, const unsigned int block_size) std::pair sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size); std::shared_ptr grid = std::make_shared(mpi_communicator,sizes,block_sizes); - ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first); + ScaLAPACKMatrix scalapack_A (sizes.first, grid, block_sizes.first, + LAPACKSupport::Property::symmetric); + + pcout << size << " " << block_size << " " << 
grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; + create_spd (full_A); + + for (unsigned int i = 0; i < size; ++i) + for (unsigned int j = 0; j < size; ++j) + lapack_A[i*size+j] = full_A(i,j); - pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl; - { - full_A = 0.; - boost::random::mt19937 gen; - boost::random::uniform_01<> dist; - - for (unsigned int i = 0; i < size; ++i) - for (unsigned int j = i; j < size; ++j) - { - const double val = dist(gen); - Assert (val >= 0. && val <= 1., - ExcInternalError()); - if (i==j) - { - // since A(i,j) < 1 and - // a symmetric diagonally dominant matrix is SPD - full_A(i,j) = val + size; - lapack_A[i*size+j] = val+size; - } - else - { - full_A(i,j) = val; - full_A(j,i) = val; - lapack_A[i*size+j] = val; - lapack_A[j*size+i] = val; - } - } - } std::vector eigenvalues_ScaLapack, eigenvalues_Lapack(size); //Lapack as reference { @@ -137,41 +111,14 @@ void test(const unsigned int size, const unsigned int block_size) int main (int argc,char **argv) { - try - { - Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); - - const std::vector sizes = {{32,64,120,320,640}}; - const std::vector blocks = {{32,64}}; - - for (const auto &s : sizes) - for (const auto &b : blocks) - if (b <= s) - test(s,b); - - } - catch (std::exception &exc) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Exception on processing: " << std::endl - << exc.what() << std::endl - << "Aborting!" << std::endl - << "----------------------------------------------------" - << std::endl; - - return 1; - } - catch (...) - { - std::cerr << std::endl << std::endl - << "----------------------------------------------------" - << std::endl; - std::cerr << "Unknown exception!" << std::endl - << "Aborting!" 
<< std::endl - << "----------------------------------------------------" - << std::endl; - return 1; - }; + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int); + + const std::vector sizes = {{32,64,120,320,640}}; + const std::vector blocks = {{32,64}}; + + for (const auto &s : sizes) + for (const auto &b : blocks) + if (b <= s) + test(s,b); + } diff --git a/tests/scalapack/scalapack_06.mpirun=1.output b/tests/scalapack/scalapack_06.mpirun=1.output index 3269f0428f..b6faabf3eb 100644 --- a/tests/scalapack/scalapack_06.mpirun=1.output +++ b/tests/scalapack/scalapack_06.mpirun=1.output @@ -1,54 +1,54 @@ 32 32 1 1 First 5 ScaLapack eigenvalues -29.2445 29.432 29.5416 29.7204 29.8717 +29.4327 29.6255 29.7881 30.092 30.3062 First 5 Lapack eigenvalues -29.2445 29.432 29.5416 29.7204 29.8717 +29.4327 29.6255 29.7881 30.092 30.3062 64 32 1 1 First 5 ScaLapack eigenvalues -59.9263 60.0671 60.1596 60.3945 60.5197 +59.5716 59.9842 60.1172 60.3363 60.4051 First 5 Lapack eigenvalues -59.9263 60.0671 60.1596 60.3945 60.5197 +59.5716 59.9842 60.1172 60.3363 60.4051 64 64 1 1 First 5 ScaLapack eigenvalues -59.9263 60.0671 60.1596 60.3945 60.5197 +59.7161 59.8977 60.3065 60.5131 60.6934 First 5 Lapack eigenvalues -59.9263 60.0671 60.1596 60.3945 60.5197 +59.7161 59.8977 60.3065 60.5131 60.6934 120 32 1 1 First 5 ScaLapack eigenvalues -113.975 114.221 114.571 114.636 114.811 +114.098 114.163 114.52 114.662 114.877 First 5 Lapack eigenvalues -113.975 114.221 114.571 114.636 114.811 +114.098 114.163 114.52 114.662 114.877 120 64 1 1 First 5 ScaLapack eigenvalues -113.975 114.221 114.571 114.636 114.811 +113.953 114.207 114.496 114.525 114.8 First 5 Lapack eigenvalues -113.975 114.221 114.571 114.636 114.811 +113.953 114.207 114.496 114.525 114.8 320 32 1 1 First 5 ScaLapack eigenvalues -309.946 310.139 310.275 310.403 310.451 +309.886 310.093 310.385 310.648 310.719 First 5 Lapack eigenvalues -309.946 310.139 310.275 310.403 310.451 +309.886 310.093 310.385 310.648 310.719 320 64 1 1 First 5 ScaLapack eigenvalues -309.946 310.139 310.275 310.403 310.451 +309.771 310.096 310.171 310.37 310.471 First 5 Lapack eigenvalues -309.946 310.139 310.275 310.403 310.451 +309.771 310.096 310.171 310.37 310.471 640 32 1 1 First 5 ScaLapack eigenvalues -625.685 625.75 625.877 626.063 626.217 +625.62 625.745 625.974 625.996 626.13 First 5 Lapack eigenvalues -625.685 625.75 625.877 626.063 626.217 +625.62 625.745 625.974 625.996 626.13 640 64 1 1 First 5 ScaLapack eigenvalues -625.685 625.75 625.877 626.063 626.217 +625.381 625.696 625.855 625.983 626.074 First 5 Lapack eigenvalues -625.685 625.75 625.877 626.063 626.217 +625.381 625.696 625.855 625.983 626.074 diff --git a/tests/scalapack/scalapack_06.mpirun=11.output b/tests/scalapack/scalapack_06.mpirun=11.output new file mode 100644 index 0000000000..5addc81fe7 --- /dev/null +++ b/tests/scalapack/scalapack_06.mpirun=11.output @@ -0,0 +1,54 @@ +32 32 1 1 +First 5 ScaLapack eigenvalues +29.4327 29.6255 29.7881 30.092 30.3062 +First 5 Lapack eigenvalues +29.4327 29.6255 29.7881 30.092 30.3062 + +64 32 2 2 +First 5 ScaLapack eigenvalues +59.5716 59.9842 60.1172 60.3363 60.4051 +First 5 Lapack eigenvalues +59.5716 59.9842 60.1172 60.3363 60.4051 + +64 64 1 1 +First 5 ScaLapack eigenvalues +59.7161 59.8977 60.3065 60.5131 60.6934 +First 5 Lapack eigenvalues +59.7161 59.8977 60.3065 60.5131 60.6934 + +120 32 3 3 +First 5 ScaLapack eigenvalues +114.098 114.163 114.52 114.662 114.877 +First 5 Lapack eigenvalues +114.098 114.163 114.52 
+
+120 64 2 2
+First 5 ScaLapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+First 5 Lapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+
+320 32 3 3
+First 5 ScaLapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+First 5 Lapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+
+320 64 3 3
+First 5 ScaLapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+First 5 Lapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+
+640 32 3 3
+First 5 ScaLapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+First 5 Lapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+
+640 64 3 3
+First 5 ScaLapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+First 5 Lapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+
diff --git a/tests/scalapack/scalapack_06.mpirun=4.output b/tests/scalapack/scalapack_06.mpirun=4.output
index ff89f7ffdf..fba5ffd4ad 100644
--- a/tests/scalapack/scalapack_06.mpirun=4.output
+++ b/tests/scalapack/scalapack_06.mpirun=4.output
@@ -1,54 +1,54 @@
 32 32 1 1
 First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 
 64 32 2 2
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 
 64 64 1 1
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 
 120 32 2 2
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 
 120 64 2 2
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 
 320 32 2 2
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 
 320 64 2 2
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 
 640 32 2 2
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 
 640 64 2 2
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
diff --git a/tests/scalapack/scalapack_07.cc b/tests/scalapack/scalapack_07.cc
index a40fd9d41e..3a17a1a487 100644
--- a/tests/scalapack/scalapack_07.cc
+++ b/tests/scalapack/scalapack_07.cc
@@ -14,8 +14,9 @@
 // ---------------------------------------------------------------------
 
 #include "../tests.h"
+#include "../lapack/create_matrix.h"
 
-// test eigenvalues()
+// test eigenpairs_symmetric() and set_property()
 
 #include
 #include
 
@@ -23,9 +24,6 @@
 #include
 #include
 
-#include
-#include
-
 #include
 #include
 
@@ -51,10 +49,6 @@ void test(const unsigned int size, const unsigned int block_size)
 
   ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
 
-  // test multiplication with random vectors
-  boost::random::mt19937 gen;
-  boost::random::uniform_01<> dist;
-
   // Create SPD matrices of requested size:
   FullMatrix<double> full_A(size);
   std::vector<double> lapack_A(size*size);
 
@@ -63,35 +57,15 @@ void test(const unsigned int size, const unsigned int block_size)
   std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
   ScaLAPACKMatrix<double> scalapack_A (sizes.first, grid, block_sizes.first);
+  scalapack_A.set_property(LAPACKSupport::Property::symmetric);
 
-  pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
-  {
-    full_A = 0.;
-    boost::random::mt19937 gen;
-    boost::random::uniform_01<> dist;
+  pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+
+  create_spd (full_A);
+  for (unsigned int i = 0; i < size; ++i)
+    for (unsigned int j = 0; j < size; ++j)
+      lapack_A[i*size+j] = full_A(i,j);
 
-    for (unsigned int i = 0; i < size; ++i)
-      for (unsigned int j = i; j < size; ++j)
-        {
-          const double val = dist(gen);
-          Assert (val >= 0. && val <= 1.,
-                  ExcInternalError());
-          if (i==j)
-            {
-              // since A(i,j) < 1 and
-              // a symmetric diagonally dominant matrix is SPD
-              full_A(i,j) = val + size;
-              lapack_A[i*size+j] = val+size;
-            }
-          else
-            {
-              full_A(i,j) = val;
-              full_A(j,i) = val;
-              lapack_A[i*size+j] = val;
-              lapack_A[j*size+i] = val;
-            }
-        }
-  }
   std::vector<double> eigenvalues_ScaLapack, eigenvalues_Lapack(size);
   //Lapack as reference
   {
@@ -134,7 +108,6 @@ void test(const unsigned int size, const unsigned int block_size)
   for (unsigned int i=0; i
   FullMatrix<double> s_eigenvectors (size,size);
   for (int i=0; i
 
@@ ... @@
 int main (int argc,char **argv)
 {
-  try
-    {
-      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
-      const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
-      const std::vector<unsigned int> blocks = {{32,64}};
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
 
-      for (const auto &s : sizes)
-        for (const auto &b : blocks)
-          if (b <= s)
-            test(s,b);
+  const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+  const std::vector<unsigned int> blocks = {{32,64}};
 
-    }
-  catch (std::exception &exc)
-    {
-      std::cerr << std::endl << std::endl
-                << "----------------------------------------------------"
-                << std::endl;
-      std::cerr << "Exception on processing: " << std::endl
-                << exc.what() << std::endl
-                << "Aborting!" << std::endl
-                << "----------------------------------------------------"
-                << std::endl;
-
-      return 1;
-    }
-  catch (...)
-    {
-      std::cerr << std::endl << std::endl
-                << "----------------------------------------------------"
-                << std::endl;
-      std::cerr << "Unknown exception!" << std::endl
-                << "Aborting!" << std::endl
-                << "----------------------------------------------------"
-                << std::endl;
-      return 1;
-    };
+  for (const auto &s : sizes)
+    for (const auto &b : blocks)
+      if (b <= s)
+        test(s,b);
 }
diff --git a/tests/scalapack/scalapack_07.mpirun=1.output b/tests/scalapack/scalapack_07.mpirun=1.output
index 3269f0428f..b6faabf3eb 100644
--- a/tests/scalapack/scalapack_07.mpirun=1.output
+++ b/tests/scalapack/scalapack_07.mpirun=1.output
@@ -1,54 +1,54 @@
 32 32 1 1
 First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 
 64 32 1 1
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 
 64 64 1 1
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 
 120 32 1 1
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 
 120 64 1 1
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 
 320 32 1 1
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 
 320 64 1 1
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 
 640 32 1 1
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 
 640 64 1 1
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
diff --git a/tests/scalapack/scalapack_07.mpirun=11.output b/tests/scalapack/scalapack_07.mpirun=11.output
new file mode 100644
index 0000000000..5addc81fe7
--- /dev/null
+++ b/tests/scalapack/scalapack_07.mpirun=11.output
@@ -0,0 +1,54 @@
+32 32 1 1
+First 5 ScaLapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+First 5 Lapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+
+64 32 2 2
+First 5 ScaLapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+First 5 Lapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+
+64 64 1 1
+First 5 ScaLapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+First 5 Lapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+
+120 32 3 3
+First 5 ScaLapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+First 5 Lapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+
+120 64 2 2
+First 5 ScaLapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+First 5 Lapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+
+320 32 3 3
+First 5 ScaLapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+First 5 Lapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+
+320 64 3 3
+First 5 ScaLapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+First 5 Lapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+
+640 32 3 3
+First 5 ScaLapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+First 5 Lapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+
+640 64 3 3
+First 5 ScaLapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+First 5 Lapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+
diff --git a/tests/scalapack/scalapack_07.mpirun=4.output b/tests/scalapack/scalapack_07.mpirun=4.output
index ff89f7ffdf..fba5ffd4ad 100644
--- a/tests/scalapack/scalapack_07.mpirun=4.output
+++ b/tests/scalapack/scalapack_07.mpirun=4.output
@@ -1,54 +1,54 @@
 32 32 1 1
 First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
 
 64 32 2 2
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
 
 64 64 1 1
 First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
 
 120 32 2 2
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
 
 120 64 2 2
 First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
 
 320 32 2 2
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
 
 320 64 2 2
 First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
 
 640 32 2 2
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
 
 640 64 2 2
 First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
 First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
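
Both tests above now obtain their input matrix from the create_spd() helper in tests/lapack/create_matrix.h instead of repeating the inline setup. That header is not part of this diff; the following is a minimal sketch of what such a helper can look like, reconstructed from the inline code the diff removes (the template signature and the dimension check are assumptions, and the actual header may differ):

@code
#include <deal.II/base/exceptions.h>
#include <deal.II/lac/full_matrix.h>

#include <boost/random.hpp>

using namespace dealii;

// Fill a square matrix with random entries in [0,1) and shift the
// diagonal by the matrix size. Every diagonal entry then exceeds the
// sum of the other entries in its row, and a symmetric strictly
// diagonally dominant matrix with positive diagonal is SPD.
template <typename NumberType>
void create_spd (FullMatrix<NumberType> &A)
{
  const unsigned int size = A.m();
  Assert (size == A.n(), ExcDimensionMismatch(size, A.n()));

  boost::random::mt19937 gen;
  boost::random::uniform_01<> dist;

  for (unsigned int i = 0; i < size; ++i)
    for (unsigned int j = i; j < size; ++j)
      {
        const NumberType val = dist(gen);
        if (i == j)
          A(i,i) = val + size;
        else
          {
            A(i,j) = val;
            A(j,i) = val;
          }
      }
}
@endcode

Sharing one deterministic generator between the ScaLAPACK and LAPACK tests means both code paths see bitwise-identical input matrices, which is also why the expected eigenvalues in all *.output files above change in lockstep with this commit.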