From d095e9054215db8a84df6604529859e32685a780 Mon Sep 17 00:00:00 2001
From: Benjamin Brands
Date: Wed, 15 Nov 2017 09:52:47 +0100
Subject: [PATCH] added routine to compute all eigenvalues for symmetric
 matrices

---
 include/deal.II/lac/scalapack.h              |  25 +-
 source/lac/scalapack.cc                      | 277 +++++++++++++++++--
 tests/scalapack/scalapack_06.cc              | 177 ++++++++++++
 tests/scalapack/scalapack_06.mpirun=1.output |  54 ++++
 tests/scalapack/scalapack_06.mpirun=4.output |  54 ++++
 5 files changed, 566 insertions(+), 21 deletions(-)
 create mode 100644 tests/scalapack/scalapack_06.cc
 create mode 100644 tests/scalapack/scalapack_06.mpirun=1.output
 create mode 100644 tests/scalapack/scalapack_06.mpirun=4.output
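Usage sketch (for orientation only, not part of the applied diff): the new interface is
meant to be used as below. The sketch relies only on the constructor,
operator=(const FullMatrix<NumberType> &) and eigenvalues_symmetric() that appear in this
patch; the function name "example" and the block size 32 are placeholders.

    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/scalapack.h>

    #include <vector>

    void example (const FullMatrix<double> &A, MPI_Comm mpi_communicator)
    {
      // square constructor: the matrix is marked as symmetric and distributed
      // in 32x32 blocks over the internally chosen 2D process grid
      ScaLAPACKMatrix<double> scalapack_A (A.m(), mpi_communicator, 32);

      // copy the replicated serial matrix into the distributed matrix
      scalapack_A = A;

      // compute all eigenvalues with pdsyev; they are returned in descending order
      std::vector<double> eigenvalues;
      scalapack_A.eigenvalues_symmetric (eigenvalues);

      // note: pdsyev destroys the content of the matrix, so scalapack_A is
      // unusable afterwards
    }
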
+ */ void Cblacs_get(int icontxt, int what, int *val); + + /* + * You call the BLACS_GRIDINIT routine when you want to map the processes sequentially in row-major order + * or column-major order into the process grid. + * You must specify the same input argument values in the calls to BLACS_GRIDINIT on every process. + */ void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width); + + /* + * You call the BLACS_GRIDINFO routine to obtain the process row and column index. + */ void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col); + + /* + * Given the system process number, returns the row and column coordinates in the BLACS' process grid. + */ void Cblacs_pcoord(int, int, int *, int *); + + /* + * You call the BLACS_GRIDEXIT routine to release a BLACS context. + */ void Cblacs_gridexit(int context); + + /* + * This routines holds up execution of all processes within the indicated scope until they have all called the routine. + */ void Cblacs_barrier(int, const char *); + + /* + * Frees all BLACS contexts and releases all allocated memory. + */ void Cblacs_exit(int error_code); + + /* + * This routine takes the indicated general rectangular matrix and sends it to the destination process in the process grid. + * Return from the routine indicates that the buffer may be reused. The routine is locally-blocking, that is, + * it will return even if the corresponding receive is not posted. + */ void Cdgerv2d(int, int, int, double *, int, int, int); + + /* + * This routine receives a message from a process into a general rectangular matrix. + * This routine is globally-blocking, that is, return from the routine indicates that the message has been received into the matrix. + */ void Cdgesd2d(int, int, int, double *, int, int, int); + /* + * + */ int Csys2blacs_handle(MPI_Comm comm); /** @@ -126,6 +177,9 @@ extern "C" */ int iceil_(const int *i1, const int *i2); + /* + * DESCINIT initializes the descriptor vector with the 8 input arguments + */ void descinit_ (int *desc, const int *m, const int *n, const int *mb, const int *nb, const int *irsrc, const int *icsrc, const int *ictxt, const int *lld, int *info); /** @@ -155,13 +209,34 @@ extern "C" const double *beta, double *C, const int *IC, const int *JC, const int *DESCC); + /* + * PDLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm, + * or the element of largest absolute value of a distributed matrix + */ double pdlange_(char const *norm, int const &m, int const &n, double *A, int const &ia, int const &ja, int *desca, double *work); + /* + * INDXG2P computes the process coordinate which posseses the entry of a + * distributed matrix specified by a global index + */ int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs); + /* + * The pdsyev routine computes all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A + * The by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system + * and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes. + * Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages. 
+   *
+   * http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html
+   * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev
+   */
+  void pdsyev_(const char *jobz, const char *uplo, const int *m, double *A, const int *ia, const int *ja, int *desca, double *w,
+               double *z, const int *iz, const int *jz, int *descz, double *work, const int *lwork, int *info);
+
+
 }
 
 DEAL_II_NAMESPACE_OPEN
@@ -179,7 +254,8 @@ namespace
    * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
    */
   inline
-  void choose_the_processor_grid(int &n_process_rows, int &n_process_columns, const unsigned int size, const unsigned int block_size, const int n_processes)
+  void choose_the_processor_grid(int &n_process_rows, int &n_process_columns, const unsigned int m, const unsigned int n,
+                                 const unsigned int block_size_m, const unsigned int block_size_n, const int n_processes)
   {
     // Few notes from the ScaLAPACK user guide:
     // It is possible to predict the best grid shape given the number of processes available:
@@ -192,17 +268,18 @@ namespace
 
     // Below we always try to create 2D processor grids:
 
-    // Get the total number of cores we can occupy in a square dense matrix
-    // with square blocks when every core owns only a single block:
-    const int n_processes_heuristic = int(std::ceil((1.*size)/block_size))*
-                                      int(std::ceil((1.*size)/block_size));
+    // Get the total number of cores we can occupy in a rectangular dense matrix
+    // with rectangular blocks when every core owns only a single block:
+    const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
+                                      int(std::ceil((1.*n)/block_size_n));
     const int Np = std::min(n_processes_heuristic, n_processes);
 
     // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
     // Pc =: ratio * Pr
     // therefore
     // Np = Pc * Pc / ratio
-    const double ratio = 1.0;
+    // for square matrices the ratio equals 1
+    const double ratio = double(n)/m;
     int Pc = std::sqrt(ratio * Np);
 
     // one could rounds up Pc to the number which has zero remainder from the division of Np
@@ -234,35 +311,42 @@ namespace
 
 
 
+/**
+ * Constructor for a rectangular distributed matrix.
+ */
 template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
                                              MPI_Comm mpi_communicator,
-                                             const unsigned int block_size)
+                                             const unsigned int block_size_row, const unsigned int block_size_column,
+                                             const LAPACKSupport::Property property)
   :
   TransposeTable<NumberType> (),
   state (LAPACKSupport::unusable),
-  properties(LAPACKSupport::general),
+  properties(property),
   mpi_communicator(mpi_communicator),
-  n_rows(size),
-  n_columns(size),
-  row_block_size(block_size),
-  column_block_size(block_size),
-  uplo('L'),
+  n_rows(rows),
+  n_columns(columns),
+  row_block_size(block_size_row),
+  column_block_size(block_size_column),
+  uplo('L'), // for non-symmetric matrices this is not needed
   first_process_row(0),
   first_process_column(0),
   submatrix_row(1),
   submatrix_column(1)
 {
-  // NOTE: routines that exploit symmetry (Cholesky pdpotrf), require row_block_size = column_block_size
-  Assert (block_size > 0,
-          ExcMessage("Block size has to be positive."));
-  Assert (block_size <= size,
-          ExcMessage("BLock size can not be greater than the size of the matrix"));
+  Assert (block_size_row > 0,
+          ExcMessage("Row block size has to be positive."));
+  Assert (block_size_column > 0,
+          ExcMessage("Column block size has to be positive."));
+  Assert (block_size_row <= rows,
+          ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
+  Assert (block_size_column <= columns,
+          ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
 
   MPI_Comm_size(mpi_communicator, &n_mpi_processes);
   MPI_Comm_rank(mpi_communicator, &this_mpi_process);
 
-  choose_the_processor_grid(n_process_rows, n_process_columns, size, block_size, n_mpi_processes);
+  choose_the_processor_grid(n_process_rows, n_process_columns, n_rows, n_columns, row_block_size, column_block_size, n_mpi_processes);
 
   // processor grid order
   // FIXME: default to column major
@@ -366,6 +450,17 @@ ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
 
 
 
+template <typename NumberType>
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
+                                             MPI_Comm mpi_communicator,
+                                             const unsigned int block_size)
+  :
+  // delegating constructors only work for C++11 and later
+  ScaLAPACKMatrix<NumberType> (size, size, mpi_communicator, block_size, block_size, LAPACKSupport::Property::symmetric)
+{}
+
+
+
 template <typename NumberType>
 ScaLAPACKMatrix<NumberType> &
 ScaLAPACKMatrix<NumberType>::operator = (const FullMatrix<NumberType> &matrix)
@@ -538,6 +633,10 @@ void ScaLAPACKMatrix<NumberType>::invert()
   if (active)
     {
       int info = 0;
+      /*
+       * matrix Z is not distributed as it will not be referenced by the ScaLAPACK function call
+       */
+      std::vector<NumberType> Z_loc;
       NumberType *A_loc = &this->values[0];
       pdpotri_ (&uplo,&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,&info);
       AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpotri", info));
     }
@@ -547,6 +646,144 @@ void ScaLAPACKMatrix<NumberType>::invert()
 
 
 
+template <typename NumberType>
+void ScaLAPACKMatrix<NumberType>::eigenvalues_symmetric(std::vector<NumberType> &ev)
+{
+  Assert (state == LAPACKSupport::matrix,
+          ExcMessage("Matrix has to be in Matrix state before calling this function."));
+  Assert (properties == LAPACKSupport::symmetric,
+          ExcMessage("Matrix has to be symmetric for this operation."));
+
+  ScaLAPACKMatrix<NumberType> Z (n_mpi_processes, mpi_communicator, 1);
+  ev.resize (n_rows);
+
+  if (active)
+    {
+      int info = 0;
+
+      char jobz = 'N';
+      NumberType *A_loc = &this->values[0];
+
+      /*
+       * by setting lwork to -1 a workspace query for the optimal length of work is performed
+       */
+      int lwork = -1;
+      NumberType *Z_loc = &Z.values[0];
+      work.resize(1);
+
+      pdsyev_(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
+              Z_loc, &Z.submatrix_row, &Z.submatrix_column, Z.descriptor, &work[0], &lwork, &info);
+
+      lwork = work[0];
+      work.resize (lwork);
+
+      pdsyev_(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
+              Z_loc, &Z.submatrix_row, &Z.submatrix_column, Z.descriptor, &work[0], &lwork, &info);
+
+      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdsyev", info));
+
+      // ScaLAPACK returns the eigenvalues in ascending order --> reverse to obtain descending order
+      std::reverse (ev.begin(), ev.end());
+    }
+  /*
+   * send the eigenvalues to processors not being part of the process grid
+   */
+  MPI_Bcast(&ev.front(), ev.size(), MPI_DOUBLE, 0 /*from root*/, mpi_communicator_inactive_with_root);
+
+  /*
+   * On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
+   * including the diagonal, is destroyed. Therefore, the matrix is unusable.
+   */
+  state = LAPACKSupport::unusable;
+}
+
+
+
+template <typename NumberType>
+void ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(std::vector<NumberType> &ev)
+{
+  Assert (state == LAPACKSupport::matrix,
+          ExcMessage("Matrix has to be in Matrix state before calling this function."));
+  Assert (properties == LAPACKSupport::symmetric,
+          ExcMessage("Matrix has to be symmetric for this operation."));
+
+  /*
+   * matrix of eigenvectors, created with the rectangular constructor:
+   * (const size_type rows, const size_type columns,
+   *  MPI_Comm mpi_communicator,
+   *  const unsigned int block_size_row, const unsigned int block_size_column,
+   *  const LAPACKSupport::Property property)
+   */
+  ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, n_columns, mpi_communicator, row_block_size, column_block_size, LAPACKSupport::Property::general);
+  ev.resize (n_rows);
+
+  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout (std::cout, (this_mpi_process == 0));
+
+  if (active)
+    {
+      int info = 0;
+
+      /*
+       * for jobz = 'V' all eigenpairs of the matrix are computed
+       */
+      char jobz = 'V';
+      NumberType *A_loc = &this->values[0];
+
+      /*
+       * by setting lwork to -1 a workspace query for the optimal length of work is performed
+       */
+      int lwork = -1;
+      NumberType *eigenvectors_loc = &eigenvectors.values[0];
+      work.resize(1);
+
+      pcout << "Starting workspace query" << std::endl;
+
+      pcout << "Descriptor A: ";
+      for (unsigned int i=0; i<9; ++i)
+        pcout << " " << descriptor[i];
+      pcout << std::endl;
+      pcout << "Descriptor Z: ";
+      for (unsigned int i=0; i<9; ++i)
+        pcout << " " << eigenvectors.descriptor[i];
+      pcout << std::endl;
+
+      pdsyev_(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
+              eigenvectors_loc, &eigenvectors.submatrix_row, &eigenvectors.submatrix_column, eigenvectors.descriptor, &work[0], &lwork, &info);
+
+      pcout << "info = " << info << std::endl << std::endl;
+
+      lwork = work[0];
+      work.resize (lwork);
+
+      pcout << "Starting computation" << std::endl;
+
+      pdsyev_(&jobz, &uplo, &n_rows, A_loc, &submatrix_row, &submatrix_column, descriptor, &ev[0],
+              eigenvectors_loc, &eigenvectors.submatrix_row, &eigenvectors.submatrix_column, eigenvectors.descriptor, &work[0], &lwork, &info);
+
+      pcout << "info = " << info << std::endl << std::endl;
+
+      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdsyev", info));
+
+      // ScaLAPACK returns the eigenvalues in ascending order --> reverse to obtain descending order
+      std::reverse (ev.begin(), ev.end());
+    }
+  /*
+   * send the eigenvalues to processors not being part of the process grid
+   */
+  MPI_Bcast(&ev.front(), ev.size(), MPI_DOUBLE, 0 /*from root*/, mpi_communicator_inactive_with_root);
+
+  /*
+   * On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
+   * including the diagonal, is destroyed. Therefore, the matrix content is no longer usable.
+   */
+  state = LAPACKSupport::eigenvalues;
+}
+
+
+
 template <typename NumberType>
 NumberType ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(const NumberType a_norm) const
 {
diff --git a/tests/scalapack/scalapack_06.cc b/tests/scalapack/scalapack_06.cc
new file mode 100644
index 0000000000..0e7671fa5a
--- /dev/null
+++ b/tests/scalapack/scalapack_06.cc
@@ -0,0 +1,177 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+
+// test eigenvalues_symmetric()
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/lapack_support.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <boost/random/mersenne_twister.hpp>
+#include <boost/random/uniform_01.hpp>
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+extern "C" // Some LAPACK routines
+{
+  void dsyev_(char *jobz, char *uplo, int *n, double *A, int *lda, double *w, double *work, int *lwork, int *info);
+  void ssyev_(char *jobz, char *uplo, int *n, float *A, int *lda, float *w, float *work, int *lwork, int *info);
+}
+
+template <typename NumberType>
+void test(const unsigned int size, const unsigned int block_size)
+{
+  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout (std::cout, (this_mpi_process == 0));
+
+  // random number generator for filling the matrices
+  boost::random::mt19937 gen;
+  boost::random::uniform_01<> dist;
+
+  // Create SPD matrices of requested size:
+  FullMatrix<NumberType> full_A(size);
+  std::vector<NumberType> lapack_A(size*size);
+
+  ScaLAPACKMatrix<NumberType> scalapack_A(size, mpi_communicator, block_size);
+
+  pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
+  {
+    full_A = 0.;
+    boost::random::mt19937 gen;
+    boost::random::uniform_01<> dist;
+
+    for (unsigned int i = 0; i < size; ++i)
+      for (unsigned int j = i; j < size; ++j)
+        {
+          const double val = dist(gen);
+          Assert (val >= 0. && val <= 1.,
+                  ExcInternalError());
+          if (i==j)
+            {
+              // since A(i,j) < 1 and
+              // a symmetric diagonally dominant matrix is SPD
+              full_A(i,j) = val + size;
+              lapack_A[i*size+j] = val + size;
+            }
+          else
+            {
+              full_A(i,j) = val;
+              full_A(j,i) = val;
+              lapack_A[i*size+j] = val;
+              lapack_A[j*size+i] = val;
+            }
+        }
+  }
+  std::vector<NumberType> eigenvalues_ScaLapack, eigenvalues_Lapack(size);
+  // Lapack as reference
+  {
+    int info;        // variable containing information about the successful exit of the LAPACK routine
+    char jobz = 'N'; // 'N': only the eigenvalues of A are computed
+    char uplo = 'U'; // storage format of the matrix A; not so important as the matrix is symmetric
+    int LDA = size;  // leading dimension of the matrix A
+    int lwork;       // length of vector/array work
+    std::vector<double> work (1);
+
+    // by setting lwork to -1 a workspace query for work is done
+    // as the matrix is square: LDA == size of matrix
+    lwork = -1;
+    dsyev_(&jobz, &uplo, &LDA, & *lapack_A.begin(), &LDA, & *eigenvalues_Lapack.begin(), & *work.begin(), &lwork, &info);
+    lwork = work[0];
+    work.resize (lwork);
+    dsyev_(&jobz, &uplo, &LDA, & *lapack_A.begin(), &LDA, & *eigenvalues_Lapack.begin(), & *work.begin(), &lwork, &info);
+
+    AssertThrow (info==0, LAPACKSupport::ExcErrorCode("syev", info));
+
+    // save eigenvalues_Lapack in descending order instead of ascending order
+    std::reverse (eigenvalues_Lapack.begin(), eigenvalues_Lapack.end());
+  }
+  // ScaLAPACK:
+  scalapack_A = full_A;
+  scalapack_A.eigenvalues_symmetric(eigenvalues_ScaLapack);
+  unsigned int n_eigenvalues = eigenvalues_ScaLapack.size(), max_n_eigenvalues = 5;
+
+  pcout << "First " << max_n_eigenvalues << " ScaLapack eigenvalues" << std::endl;
+
+  for (unsigned int i=0; i<max_n_eigenvalues; ++i)
+    pcout << eigenvalues_ScaLapack[i] << " ";
+  pcout << std::endl;
+
+  pcout << "First " << max_n_eigenvalues << " Lapack eigenvalues" << std::endl;
+
+  for (unsigned int i=0; i<max_n_eigenvalues; ++i)
+    pcout << eigenvalues_Lapack[i] << " ";
+  pcout << std::endl << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+
+  try
+    {
+      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+      const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+      const std::vector<unsigned int> blocks = {{32,64}};
+
+      for (const auto &s : sizes)
+        for (const auto &b : blocks)
+          if (b <= s)
+            test<double>(s,b);
+
+    }
+  catch (std::exception &exc)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Exception on processing: " << std::endl
+                << exc.what() << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+
+      return 1;
+    }
+  catch (...)
+    {
+      std::cerr << std::endl << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      std::cerr << "Unknown exception!" << std::endl
+                << "Aborting!" << std::endl
+                << "----------------------------------------------------"
+                << std::endl;
+      return 1;
+    };
+}
diff --git a/tests/scalapack/scalapack_06.mpirun=1.output b/tests/scalapack/scalapack_06.mpirun=1.output
new file mode 100644
index 0000000000..34374f1a96
--- /dev/null
+++ b/tests/scalapack/scalapack_06.mpirun=1.output
@@ -0,0 +1,54 @@
+32 32 1 1
+First 5 ScaLapack eigenvalues
+48.3154 34.7121 34.4886 34.4474 34.1038
+First 5 Lapack eigenvalues
+48.3154 34.7121 34.4886 34.4474 34.1038
+
+64 32 1 1
+First 5 ScaLapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+First 5 Lapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+
+64 64 1 1
+First 5 ScaLapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+First 5 Lapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+
+120 32 1 1
+First 5 ScaLapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+First 5 Lapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+
+120 64 1 1
+First 5 ScaLapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+First 5 Lapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+
+320 32 1 1
+First 5 ScaLapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+First 5 Lapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+
+320 64 1 1
+First 5 ScaLapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+First 5 Lapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+
+640 32 1 1
+First 5 ScaLapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+First 5 Lapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+
+640 64 1 1
+First 5 ScaLapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+First 5 Lapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+
diff --git a/tests/scalapack/scalapack_06.mpirun=4.output b/tests/scalapack/scalapack_06.mpirun=4.output
new file mode 100644
index 0000000000..034152e1f2
--- /dev/null
+++ b/tests/scalapack/scalapack_06.mpirun=4.output
@@ -0,0 +1,54 @@
+32 32 1 1
+First 5 ScaLapack eigenvalues
+48.3154 34.7121 34.4886 34.4474 34.1038
+First 5 Lapack eigenvalues
+48.3154 34.7121 34.4886 34.4474 34.1038
+
+64 32 2 2
+First 5 ScaLapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+First 5 Lapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+
+64 64 1 1
+First 5 ScaLapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+First 5 Lapack eigenvalues
+95.9029 68.4041 68.061 67.7781 67.7537
+
+120 32 2 2
+First 5 ScaLapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+First 5 Lapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+
+120 64 2 2
+First 5 ScaLapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+First 5 Lapack eigenvalues
+180.662 126.026 125.642 125.511 125.285
+
+320 32 2 2
+First 5 ScaLapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+First 5 Lapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+
+320 64 2 2
+First 5 ScaLapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+First 5 Lapack eigenvalues
+479.554 330.118 329.968 329.781 329.646
+
+640 32 2 2
+First 5 ScaLapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+First 5 Lapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+
+640 64 2 2
+First 5 ScaLapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+First 5 Lapack eigenvalues
+959.748 654.219 654.077 654.023 654.015
+
-- 
2.39.5
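
For reference, both eigenvalues_symmetric()/eigenpairs_symmetric() and the dsyev_ reference
path in the test call the (Sca)LAPACK driver twice because of the standard workspace-query
idiom: a first call with lwork = -1 only reports the optimal workspace size in work[0], and a
second call with a work array of that size does the actual computation. A minimal,
self-contained serial sketch of the same idiom with LAPACK's dsyev (assuming the program is
linked against a LAPACK library; the 3x3 matrix is just an example):

    #include <iostream>
    #include <vector>

    extern "C"
    {
      void dsyev_(char *jobz, char *uplo, int *n, double *A, int *lda,
                  double *w, double *work, int *lwork, int *info);
    }

    int main()
    {
      int n = 3, lda = 3, info = 0;
      // column-major storage; only the upper triangle is referenced for uplo='U'
      std::vector<double> A = {2., 1., 0.,   1., 2., 1.,   0., 1., 2.};
      std::vector<double> w(n);
      std::vector<double> work(1);
      char jobz = 'N', uplo = 'U';

      // first call: workspace query, the optimal size is returned in work[0]
      int lwork = -1;
      dsyev_(&jobz, &uplo, &n, A.data(), &lda, w.data(), work.data(), &lwork, &info);

      // second call: the actual computation with the recommended workspace
      lwork = static_cast<int>(work[0]);
      work.resize(lwork);
      dsyev_(&jobz, &uplo, &n, A.data(), &lda, w.data(), work.data(), &lwork, &info);

      if (info == 0)
        for (double ev : w)
          std::cout << ev << std::endl; // eigenvalues in ascending order
      return info;
    }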