-New: add ScaLAPACKMatrix and ProcessGrid wrappers for high-performance dense linear
+New: Add ScaLAPACKMatrix and ProcessGrid wrappers for high-performance dense linear
algebra routines for parallel distributed memory machines.
<br>
(Denis Davydov, Benjamin Brands, 2017/11/20)
<dt>
<a name="scalapack"></a>
- <a href="http://www.netlib.org/scalapack/">scalapack</a>
+ <a href="http://www.netlib.org/scalapack/">ScaLAPACK</a>
</dt>
<dd>
<p>
<a href="http://www.netlib.org/scalapack/">scalapack</a>
is a library of high-performance linear algebra routines for parallel
- distributed memory machines. ScaLAPACK solves dense and banded linear
- systems, least squares problems, eigenvalue problems, and singular value
- problems. In order to enable this feature, add
- <code>-DSCALAPACK_DIR=/path/to/scalapack</code> to the argument list for
- <code>cmake</code>.
+ distributed memory machines. ScaLAPACK solves dense and banded linear
+ systems, least squares problems, eigenvalue problems, and singular value
+ problems. In order to enable this feature, add
+ <code>-DSCALAPACK_DIR=/path/to/scalapack</code> to the argument list for
+ <code>cmake</code>. If ScaLAPACK does not have embedded BLACS, you might
+ need to pass <code>-DBLACS_DIR=/path/to/blacs</code> as well.
</p>
</dd>
DEAL_II_NAMESPACE_OPEN
-/**
- * Forward declaration of class ScaLAPACKMatrix
- */
+// Forward declaration of class ScaLAPACKMatrix for ProcessGrid
template <typename NumberType> class ScaLAPACKMatrix;
/**
- * A class taking care of setting up the process grid for BLACS
- * ScaLapack matrices will have shared pointer of this class to perform block-cyclic distribution
+ * A class taking care of setting up a two-dimensional processor grid.
+ * For example, an MPI communicator with 5 processes can be arranged into a
+ * 2x2 grid with the 5th processor being inactive:
+ * @code
+ *        |   0  |   1
+ * -------|------|------
+ *    0   |  P0  |  P1
+ * -------|------|------
+ *    1   |  P2  |  P3
+ * @endcode
+ *
+ * A shared pointer to this class is provided to ScaLAPACKMatrix matrices to
+ * perform block-cyclic distribution.
+ *
+ * Note that this class allows one to set up a process grid which has fewer
+ * MPI cores than the total number of cores in the communicator.
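+ *
+ * A minimal construction sketch (assuming at least four MPI processes; the
+ * 2x2 grid dimensions and the name <code>mpi_communicator</code> are
+ * illustrative):
+ * @code
+ * ProcessGrid grid(mpi_communicator, std::make_pair(2u,2u));
+ * const unsigned int p = grid.get_process_grid_rows();     // == 2
+ * const unsigned int q = grid.get_process_grid_columns();  // == 2
+ * @endcode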
*
* @author Benjamin Brands, 2017
*/
*/
template <typename NumberType> friend class ScaLAPACKMatrix;
- /*
- * Constructor for a process grid for a given @p mpi_communicator
- * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns
+ /**
+ * Constructor for a process grid for a given @p mpi_communicator .
+ * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns.
+ * Their product should be less than or equal to the total number of cores
+ * in the @p mpi_communicator.
*/
- ProcessGrid(MPI_Comm mpi_communicator, const std::pair<int,int> &grid_dimensions);
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const std::pair<unsigned int,unsigned int> &grid_dimensions);
- /*
- * Constructor for a process grid for a given @p mpi_communicator
- * The process grid is built based on the dimension and blockcyclic distribution of a target matrix described by @p matrix_dimensions and @p block_sizes
+ /**
+ * Constructor for a process grid for a given @p mpi_communicator.
+ * In this case the process grid is heuristically chosen based on the
+ * dimensions and block-cyclic distribution of a target matrix provided
+ * in @p matrix_dimensions and @p block_sizes.
+ *
+ * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$
+ * are the matrix dimensions, $MB,NB$ are the block sizes and $Np$ is the number of
+ * processes in the @p mpi_communicator. This function then creates a 2D processor grid,
+ * assuming the ratio between the number of process rows $p$ and columns $q$ to be
+ * equal to the ratio between the matrix dimensions $M$ and $N$.
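+ *
+ * As an illustrative calculation (numbers chosen for this example only):
+ * for a $640 \times 640$ matrix with $32 \times 32$ blocks and $Np=100$
+ * processes the bound is $\min\{20 \cdot 20, 100\} = 100$, and since $M=N$
+ * a square $10 \times 10$ process grid would be used.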
*/
ProcessGrid(MPI_Comm mpi_communicator,
- const std::pair<int,int> &matrix_dimensions, const std::pair<int,int> &block_sizes);
+ const std::pair<unsigned int,unsigned int> &matrix_dimensions,
+ const std::pair<unsigned int,unsigned int> &block_sizes);
/**
- * Destructor
+ * Destructor.
*/
virtual ~ProcessGrid();
/**
- * Return the number of rows in processes grid.
+ * Return the number of rows in the processes grid.
*/
- int get_process_grid_rows() const;
+ unsigned int get_process_grid_rows() const;
/**
- * Return the number of columns in processes grid.
+ * Return the number of columns in the processes grid.
*/
- int get_process_grid_columns() const;
+ unsigned int get_process_grid_columns() const;
private:
/**
- * MPI communicator with all processes
+ * Send @p count values stored consecutively starting at @p value from
+ * the process with rank zero to processes which
+ * are not in the process grid.
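+ *
+ * A usage sketch (assuming <code>ev</code> is a std::vector<double> that has
+ * already been filled on the processes of the grid):
+ * @code
+ * grid.send_to_inactive(ev.data(), ev.size());
+ * @endcode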
+ */
+ template <typename NumberType>
+ void send_to_inactive(NumberType *value, const int count=1) const;
+
+ /**
+ * An MPI communicator with all processes.
*/
MPI_Comm mpi_communicator;
/**
- * MPI communicator with inactive processes and the root
+ * An MPI communicator with inactive processes and the process with rank zero.
*/
MPI_Comm mpi_communicator_inactive_with_root;
/**
- * BLACS context
+ * BLACS context. This is the BLACS analogue of an MPI communicator and is
+ * used by ScaLAPACK.
*/
int blacs_context;
/**
- * ID of this MPI process
+ * Rank of this MPI process.
*/
- int this_mpi_process;
+ const unsigned int this_mpi_process;
/**
- * Total number of MPI processes
+ * Total number of MPI processes.
*/
- int n_mpi_processes;
+ const unsigned int n_mpi_processes;
/**
- * Number of rows in processes grid
+ * Number of rows in the processes grid.
*/
int n_process_rows;
/**
- * Number of columns in processes grid
+ * Number of columns in the processes grid.
*/
int n_process_columns;
/**
- * Row of this process in the grid
+ * Row of this process in the grid.
*/
int this_process_row;
/**
- * Column of this process in the grid
+ * Column of this process in the grid.
*/
int this_process_column;
/**
- * A flag which is true for processes within the 2D process grid
+ * A flag which is true for processes within the 2D process grid.
*/
bool active;
-protected:
};
/**
* A wrapper class around ScaLAPACK parallel dense linear algebra.
*
- * ScaLAPACK assumes that matrices to be distributed according to the
- * block-cyclic decomposition scheme. A $M$ by $N$ matrix is first decomposed
- * into $MB$ by $NB$ blocks which are then uniformly distributed across the 2D process grid
- * $p*q <= Np$.
+ * ScaLAPACK assumes that matrices are distributed according to the
+ * block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed
+ * into $MB$ by $NB$ blocks which are then uniformly distributed across
+ * the 2D process grid of $p \times q$ processes, where $p \cdot q \le Np$.
*
- * For example, global real symmetric matrix of order 9 is stored in upper storage mode with block sizes 4 × 4:
+ * For example, a global real symmetric matrix of order 9 is stored in
+ * upper storage mode with block sizes 4 × 4:
* @code
 *   [block-cyclic layout of the 9x9 matrix in 4x4 blocks]
* @endcode
*
+ * The choice of the block size is a compromise between a size that is large
+ * enough for efficient local/serial BLAS and one that is small enough to
+ * achieve a good parallel load balance.
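+ *
+ * A minimal usage sketch (the size, block size and the names
+ * <code>mpi_communicator</code>, <code>full_in</code> and <code>full_out</code>
+ * are illustrative):
+ * @code
+ * const unsigned int size = 64, block_size = 32;
+ * std::shared_ptr<ProcessGrid> grid =
+ *   std::make_shared<ProcessGrid>(mpi_communicator,
+ *                                 std::make_pair(size,size),
+ *                                 std::make_pair(block_size,block_size));
+ * ScaLAPACKMatrix<double> A(size, grid, block_size,
+ *                           LAPACKSupport::Property::symmetric);
+ * A = full_in;           // copy in a symmetric positive definite FullMatrix<double>
+ * A.invert();            // Cholesky factorization followed by inversion
+ * A.copy_to(full_out);   // copy the result back into a FullMatrix<double>
+ * @endcode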
*
- * Here is a strong scaling example of ScaLAPACKMatrix::invert()
+ * Below we show a strong scaling example of ScaLAPACKMatrix::invert()
* on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets
- * 2.20GHz, 10 cores/socket. Calculate are performed on square processor
+ * 2.20GHz, 10 cores/socket. Calculations are performed on square processor
* grids 1x1, 2x2, 3x3, 4x4, 5x5, 6x6, 7x7, 8x8, 9x9, 10x10.
*
* @image html scalapack_invert.png
public:
/**
- * Declare type for container size.
- */
+ * Declare the type for container size.
+ */
typedef unsigned int size_type;
/**
- * Constructor for a rectangular matrix with @p rows and @p columns, distributed
- * in a given @p mpi_communicator .
- *
- * The choice of the block size @p block_size is a compromise between a large
- * enough sizes for efficient local BLAS and small enough to get
- * good parallel load balance.
- */
- ScaLAPACKMatrix(const size_type rows, const size_type columns,
- std::shared_ptr<ProcessGrid> process_grid,
- const unsigned int block_size_row = 32, const unsigned int block_size_column = 32,
- const LAPACKSupport::Property property = LAPACKSupport::Property::general);
-
- /*
- * Alternative constructor having the matrix dimensions and block sizes in the std::pairs @p sizes and @p block_sizes
+ * Constructor for a rectangular matrix with rows and columns provided in
+ * @p sizes, and distributed using the grid @p process_grid.
*/
ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
- std::shared_ptr<ProcessGrid> process_grid,
+ const std::shared_ptr<const ProcessGrid> process_grid,
const std::pair<size_type,size_type> &block_sizes = std::make_pair(32,32),
const LAPACKSupport::Property property = LAPACKSupport::Property::general);
/**
- * Constructor for a square matrix of size @p size, distributed
+ * Constructor for a square matrix of size @p size, and distributed
* using the process grid in @p process_grid.
- *
- * The choice of the block size @p block_size is a compromize between a large
- * enough sizes for efficient local BLAS and small enough get
- * good parallel load balance.
*/
ScaLAPACKMatrix(const size_type size,
- std::shared_ptr<ProcessGrid> process_grid,
- const unsigned int block_size = 32);
+ const std::shared_ptr<const ProcessGrid> process_grid,
+ const size_type block_size = 32,
+ const LAPACKSupport::Property property = LAPACKSupport::Property::symmetric);
/**
* Destructor
*/
virtual ~ScaLAPACKMatrix();
+ /**
+ * Assign @p property to this matrix.
+ */
+ void set_property(const LAPACKSupport::Property property);
+
/**
* Assignment operator from a regular FullMatrix.
+ *
+ * @note This function should only be used for relatively small matrix
+ * dimensions. It is primarily intended for debugging purposes.
*/
ScaLAPACKMatrix<NumberType> &
operator = (const FullMatrix<NumberType> &);
/**
- * Copy the content of the distributed matrix into @p matrix.
+ * Copy the contents of the distributed matrix into @p matrix.
+ *
+ * @note This function should only be used for relatively small matrix
+ * dimensions. It is primarily intended for debugging purposes.
*/
void copy_to (FullMatrix<NumberType> &matrix) const;
/**
- * Compute the Cholesky factorization of the matrix using ScaLAPACK function pXpotrf.
+ * Compute the Cholesky factorization of the matrix using ScaLAPACK
+ * function <code>pXpotrf</code>. The result of the factorization is stored in this object.
*/
void compute_cholesky_factorization ();
/**
* Invert the matrix by first computing Cholesky factorization and then
- * building the actual inverse using pXpotri.
+ * building the actual inverse using <code>pXpotri</code>. The inverse is stored
+ * in this object.
*/
void invert();
/**
- * Compute all eigenvalues of real symmetric matrix using pdsyev
- * After successful computation the eigenvalues are stored in @p eigenvalues in ascending order
+ * Compute all eigenvalues of a real symmetric matrix using <code>pXsyev</code>.
+ * If successful, the computed @p eigenvalues are arranged in ascending order.
*/
void eigenvalues_symmetric (std::vector<NumberType> &eigenvalues);
/**
- * Compute all eigenpairs of real symmetric matrix using pdsyev
- * After successful computation the eigenvalues are stored in @p eigenvalues in ascending order
- * The eigenvectors are stored in the columns of the matrix, therefore overwriting the original content of the matrix
+ * Compute all eigenpairs of a real symmetric matrix using <code>pXsyev</code>.
+ * If successful, the computed @p eigenvalues are arranged in ascending order.
+ * The eigenvectors are stored in the columns of the matrix, thereby
+ * overwriting the original content of the matrix.
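+ *
+ * A usage sketch (assuming <code>A</code> is a symmetric ScaLAPACKMatrix<double>):
+ * @code
+ * std::vector<double> eigenvalues;
+ * A.eigenpairs_symmetric(eigenvalues);
+ * // the columns of A now contain the eigenvectors
+ * @endcode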
*/
void eigenpairs_symmetric (std::vector<NumberType> &eigenvalues);
/**
- * Return the number of rows in processes grid.
- */
- int get_process_grid_rows() const;
-
- /**
- * Return the number of columns in processes grid.
- */
- int get_process_grid_columns() const;
-
- /**
- * Estimate the the condition number of a SPD matrix in $l_1$-norm.
- * The matrix has to be in Cholesky state. The reciprocal of the
+ * Estimate the condition number of an SPD matrix in the $l_1$-norm.
+ * The matrix has to be in the Cholesky state (see compute_cholesky_factorization()).
+ * The reciprocal of the
* condition number is returned in order to avoid the possibility of
* overflow when the condition number is very large.
*
- * @p a_norm shall contain $l_1$-norm of the matrix prior to calling
+ * @p a_norm must contain the $l_1$-norm of the matrix prior to calling
* Cholesky factorization.
*
- * @note an alternative is to compute the inverse of the matrix
+ * @note An alternative is to compute the inverse of the matrix
 * explicitly and manually construct $k_1 = ||A||_1 ||A^{-1}||_1$.
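+ *
+ * A usage sketch (assuming <code>A</code> holds an SPD matrix):
+ * @code
+ * const NumberType a_norm = A.l1_norm();
+ * A.compute_cholesky_factorization();
+ * const NumberType rcond = A.reciprocal_condition_number(a_norm);
+ * @endcode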
*/
NumberType reciprocal_condition_number(const NumberType a_norm) const;
/**
- * Compute $l_1$-norm of the matrix.
- * @return
+ * Compute the $l_1$-norm of the matrix.
*/
NumberType l1_norm() const;
/**
- * Compute $l_{\infty}$ norm of the matrix.
+ * Compute the $l_{\infty}$ norm of the matrix.
*/
NumberType linfty_norm() const;
NumberType frobenius_norm() const;
/**
- * Number of rows of the matrix $mxn$.
+ * Number of rows of the $M \times N$ matrix.
*/
- int m() const;
+ size_type m() const;
/**
- * Number of columns of the matrix $m x n$.
+ * Number of columns of the $M \times N$ matrix.
*/
- int n() const;
+ size_type n() const;
+
+private:
/**
 * Number of local rows on this MPI process.
int local_n() const;
/**
- * Return global row number for the given local row @p loc_row .
+ * Return the global row number for the given local row @p loc_row .
*/
int global_row(const int loc_row) const;
/**
- * Return global column number for the given local column @p loc_column.
+ * Return the global column number for the given local column @p loc_column.
*/
int global_column(const int loc_column) const;
*/
NumberType &local_el(const int loc_row, const int loc_column);
-private:
-
/**
- * Calculate norm of a distributed dense matrix using ScaLAPACK's
+ * Calculate the norm of a distributed dense matrix using ScaLAPACK's
* internal function.
*/
NumberType norm(const char type) const;
- /**
- * Send @p value from process with rank zero to processes which
- * are not in the process grid.
- */
- void send_to_inactive(NumberType &value) const;
-
/**
* Since ScaLAPACK operations notoriously change the meaning of the matrix
* entries, we record the current state after the last operation here.
LAPACKSupport::State state;
/**
- * Additional properties of the matrix which may help to select more
+ * Additional property of the matrix which may help to select more
* efficient ScaLAPACK functions.
*/
- LAPACKSupport::Property properties;
+ LAPACKSupport::Property property;
/**
- * Shared pointer to ProcessGrid object which contains Blacs context and MPI communicator as well as other necessary data structures
+ * A shared pointer to a ProcessGrid object which contains a BLACS context
+ * and an MPI communicator, as well as other necessary data structures.
*/
- std::shared_ptr<ProcessGrid> grid;
+ std::shared_ptr<const ProcessGrid> grid;
/**
- * Number of rows in the matrix
+ * Number of rows in the matrix.
*/
int n_rows;
/**
- * Number of columns in the matrix
+ * Number of columns in the matrix.
*/
int n_columns;
/**
- * Row block size
+ * Row block size.
*/
int row_block_size;
/**
- * Column block size
+ * Column block size.
*/
int column_block_size;
/**
- * Number of rows in the matrix owned by the current process
+ * Number of rows in the matrix owned by the current process.
*/
int n_local_rows;
/**
- * Number of columns in the matrix owned by the current process
+ * Number of columns in the matrix owned by the current process.
*/
int n_local_columns;
/**
- * Scalapack description vector.
+ * ScaLAPACK description vector.
*/
int descriptor[9];
/**
- * Workspace array
+ * Workspace array.
*/
mutable std::vector<NumberType> work;
/**
- * Integer workspace array
+ * Integer workspace array.
*/
mutable std::vector<int> iwork;
/**
* A character to define where elements are stored in case
- * ScaLAPACK operation support this.
- *
- * FIXME: switch to enum UpperOrLower: LOWER UPPER
+ * ScaLAPACK operations support this.
*/
const char uplo;
/**
* The process column of the process grid over which the first column
- * of the global matrix is distributed
+ * of the global matrix is distributed.
*/
const int first_process_column;
/**
- * Global indices where to start a submatrix. Equals to unity as we
- * don't use submatrices.
+ * Global row index that determines where to start a submatrix.
+ * Currently this equals unity, as we don't use submatrices.
*/
const int submatrix_row;
/**
- * Global indices where to start a submatrix. Equals to unity as we
- * don't use submatrices.
+ * Global column index that determines where to start a submatrix.
+ * Currently this equals unity, as we don't use submatrices.
*/
const int submatrix_column;
// http://www.netlib.org/scalapack/slug/index.html // User guide
// http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors
-// FIXME: similar to lapack_templates.h move those to scalapack_template.h
extern "C"
{
/* Basic Linear Algebra Communication Subprograms (BLACS) declarations */
// therefore
// Np = Pc * Pc / ratio
// for square matrices the ratio equals 1
- const double ratio = n/m;
+ const double ratio = double(n)/m;
int Pc = std::sqrt(ratio * Np);
// one could round Pc up to a number that divides Np without remainder
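+// (illustrative numbers: for a 1200x600 matrix and Np=32 one gets
+//  ratio=0.5 and Pc=4, so by the relation above the grid would have
+//  Np/Pc=8 process rows before any such rounding)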
-ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair<int,int> &grid_dimensions)
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const std::pair<unsigned int,unsigned int> &grid_dimensions)
:
mpi_communicator(mpi_comm),
+ this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)),
+ n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)),
n_process_rows(grid_dimensions.first),
n_process_columns(grid_dimensions.second)
{
Assert (grid_dimensions.second > 0,
ExcMessage("Number of process grid columns has to be positive."));
- MPI_Comm_size(mpi_communicator, &n_mpi_processes);
- MPI_Comm_rank(mpi_communicator, &this_mpi_process);
-
Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
ExcMessage("Size of process grid is larger than number of available MPI processes."));
- // processor grid order
- // FIXME: default to column major
+ // processor grid order.
+ // FIXME: default to column major?
const bool column_major = false;
// Initialize Cblas context from the provided communicator
ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
- const std::pair<int,int> &matrix_dimensions, const std::pair<int,int> &block_sizes)
+ const std::pair<unsigned int,unsigned int> &matrix_dimensions,
+ const std::pair<unsigned int,unsigned int> &block_sizes)
:
ProcessGrid(mpi_comm,
choose_the_processor_grid(mpi_comm, matrix_dimensions.first, matrix_dimensions.second,
+unsigned int ProcessGrid::get_process_grid_rows() const
+{
+ return n_process_rows;
+}
+
+
+
+unsigned int ProcessGrid::get_process_grid_columns() const
+{
+ return n_process_columns;
+}
+
+
+
+template <typename NumberType>
+void ProcessGrid::send_to_inactive(NumberType *value, const int count) const
+{
+ Assert (count>0, ExcInternalError());
+ if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
+ {
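+      // note: the broadcast datatype is hard-wired to MPI_DOUBLE, which is
+      // consistent with the explicit instantiation of this template for
+      // double at the bottom of this file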
+ const int ierr =
+ MPI_Bcast(value,count,MPI_DOUBLE,
+ 0/*from root*/,
+ mpi_communicator_inactive_with_root);
+ AssertThrowMPI(ierr);
+ }
+}
+
+
+
+
/**
 * Constructor for a rectangular distributed matrix.
*/
template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
- std::shared_ptr<ProcessGrid> process_grid,
- const unsigned int block_size_row, const unsigned int block_size_column,
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
+ const std::shared_ptr<const ProcessGrid> process_grid,
+ const std::pair<size_type,size_type> &block_sizes,
const LAPACKSupport::Property property)
:
TransposeTable<NumberType> (),
state (LAPACKSupport::unusable),
- properties(property),
+ property(property),
grid (process_grid),
- n_rows(rows),
- n_columns(columns),
- row_block_size(block_size_row),
- column_block_size(block_size_column),
+ n_rows(sizes.first),
+ n_columns(sizes.second),
+ row_block_size(block_sizes.first),
+ column_block_size(block_sizes.second),
uplo('L'), // for non-symmetric matrices this is not needed
first_process_row(0),
first_process_column(0),
submatrix_row(1),
submatrix_column(1)
{
- Assert (block_size_row > 0,
+ Assert (row_block_size > 0,
ExcMessage("Row block size has to be positive."));
- Assert (block_size_column > 0,
+ Assert (column_block_size > 0,
ExcMessage("Column block size has to be positive."));
- Assert (block_size_row <= rows,
+ Assert (row_block_size <= n_rows,
ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
- Assert (block_size_column <= columns,
+ Assert (column_block_size <= n_columns,
ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
if (grid->active)
template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
- std::shared_ptr<ProcessGrid> process_grid,
- const std::pair<size_type,size_type> &block_sizes,
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
+ const std::shared_ptr<const ProcessGrid> process_grid,
+ const size_type block_size,
const LAPACKSupport::Property property)
:
- ScaLAPACKMatrix<NumberType>(sizes.first,sizes.second,process_grid,block_sizes.first,block_sizes.first,property)
+ ScaLAPACKMatrix<NumberType>(std::make_pair(size,size),
+ process_grid,
+ std::make_pair(block_size,block_size),
+ property)
{}
template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
- std::shared_ptr<ProcessGrid> process_grid,
- const unsigned int block_size)
- :
- ScaLAPACKMatrix<NumberType>(size,size,process_grid,block_size,block_size,LAPACKSupport::Property::symmetric)
-{}
+void
+ScaLAPACKMatrix<NumberType>::set_property(const LAPACKSupport::Property property_)
+{
+ property = property_;
+}
Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
- //if (active)
if (grid->active)
{
for (int i=0; i < n_local_rows; ++i)
}
}
}
- // FIXME: move it from operator = to copy_from() with a const bool symmetric flag.
- properties = LAPACKSupport::symmetric;
state = LAPACKSupport::matrix;
return *this;
}
template <typename NumberType>
-int
+unsigned int
ScaLAPACKMatrix<NumberType>::m() const
{
return n_rows;
template <typename NumberType>
-int
+unsigned int
ScaLAPACKMatrix<NumberType>::n() const
{
return n_columns;
// we could move the following lines under the main loop above,
// but they would be dependent on glob_i and glob_j, which
// won't make it much prettier
- if (properties == LAPACKSupport::lower_triangular)
+ if (property == LAPACKSupport::lower_triangular)
for (unsigned int i = 0; i < matrix.n(); ++i)
for (unsigned int j = i+1; j < matrix.m(); ++j)
matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.);
- else if (properties == LAPACKSupport::upper_triangular)
+ else if (property == LAPACKSupport::upper_triangular)
for (unsigned int i = 0; i < matrix.n(); ++i)
for (unsigned int j = 0; j < i; ++j)
matrix(i,j) = (state == LAPACKSupport::inverse_matrix ? matrix(j,i) : 0.);
pdpotrf_(&uplo,&n_columns,A_loc,&submatrix_row,&submatrix_column,descriptor,&info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpotrf", info));
}
- properties = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular);
+ property = (uplo=='L' ? LAPACKSupport::lower_triangular : LAPACKSupport::upper_triangular);
state = LAPACKSupport::cholesky;
}
{
int info = 0;
/*
- * matrix Z is not distributed as it will not be referenced by the ScaLapack function call
+ * matrix Z is not distributed as it will not be referenced by the
+ * ScaLAPACK function call.
*/
std::vector<double> Z_loc;
NumberType *A_loc = &this->values[0];
{
Assert (state == LAPACKSupport::matrix,
ExcMessage("Matrix has to be in Matrix state before calling this function."));
- Assert (properties == LAPACKSupport::symmetric,
+ Assert (property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
ScaLAPACKMatrix<NumberType> Z (grid->n_mpi_processes, grid, 1);
/*
 * send the eigenvalues to the processes that are not part of the process grid
*/
- MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
+ grid->send_to_inactive(ev.data(), ev.size());
- /* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
- * including the diagonal, is destroyed. Therefore, the matrix is unusable
- */
+ /*
+ * On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
+ * including the diagonal, is destroyed. Therefore, the matrix is unusable
+ */
state = LAPACKSupport::unusable;
}
{
Assert (state == LAPACKSupport::matrix,
ExcMessage("Matrix has to be in Matrix state before calling this function."));
- Assert (properties == LAPACKSupport::symmetric,
+ Assert (property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, grid, row_block_size);
- eigenvectors.properties = properties;
+ eigenvectors.property = property;
ev.resize (n_rows);
- const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(grid->mpi_communicator));
-
- ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
-
if (grid->active)
{
int info = 0;
/*
* by setting lwork to -1 a workspace query for optimal length of work is performed
- */
+ */
int lwork=-1;
NumberType *eigenvectors_loc = &eigenvectors.values[0];
work.resize(1);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdsyev", info));
- //copy eigenvectors to original matrix
- //as the temporary matrix eigenvectors has identical dimensions and block-cyclic distribution we simply swap the local array
+ // copy eigenvectors to original matrix
+ // as the temporary matrix eigenvectors has identical dimensions and
+ // block-cyclic distribution we simply swap the local array
this->values.swap(eigenvectors.values);
}
/*
 * send the eigenvalues to the processes that are not part of the process grid
*/
- MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
+ grid->send_to_inactive(ev.data(), ev.size());
/*
- * On exit matrix A stores the eigenvectors in the columns
- */
- properties = LAPACKSupport::Property::general;
+ * On exit matrix A stores the eigenvectors in the columns
+ */
+ property = LAPACKSupport::Property::general;
state = LAPACKSupport::eigenvalues;
}
&a_norm, &rcond, &work[0], &lwork, &iwork[0], &liwork, &info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpocon", info));
}
- send_to_inactive(rcond);
+ grid->send_to_inactive(&rcond);
return rcond;
}
const NumberType *A_loc = &this->values[0];
res = pdlansy_(&type, &uplo, &n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor, &work[0]);
}
- send_to_inactive(res);
+ grid->send_to_inactive(&res);
return res;
}
-
-
-template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::send_to_inactive(NumberType &value) const
-{
- if (grid->mpi_communicator_inactive_with_root != MPI_COMM_NULL)
- {
- MPI_Bcast(&value,1,MPI_DOUBLE,
- 0/*from root*/,
- grid->mpi_communicator_inactive_with_root);
- }
-}
-
-
-
-template <typename NumberType>
-int ScaLAPACKMatrix<NumberType>::get_process_grid_rows() const
-{
- return grid->n_process_rows;
-}
-
-
-
-template <typename NumberType>
-int ScaLAPACKMatrix<NumberType>::get_process_grid_columns() const
-{
- return grid->n_process_columns;
-}
-
-
-
// instantiations
template class ScaLAPACKMatrix<double>;
+template void ProcessGrid::send_to_inactive<double>(double *, const int) const;
+
DEAL_II_NAMESPACE_CLOSE
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/scalapack.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), full_out(size), diff(size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
ScaLAPACKMatrix<NumberType> scalapack_matrix (sizes.first, grid, block_sizes.first);
- /*ScaLAPACKMatrix<NumberType> scalapack_matrix(size,
- mpi_communicator,
- block_size);*/
- pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
{
unsigned int index = 0;
int main (int argc,char **argv)
{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
-
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- test<double>(s,b);
+ const std::vector<unsigned int> sizes = {{64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ test<double>(s,b);
}
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
// test Cholesky decomposition in ScaLAPACK vs FullMatrix
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/scalapack.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), inverse(size), full_out(size), diff(size);
std::pair<int,int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
- ScaLAPACKMatrix<NumberType> scalapack_matrix (sizes.first, grid, block_sizes.first);
- /*ScaLAPACKMatrix<NumberType> scalapack_matrix(size,
- mpi_communicator,
- block_size);*/
-
- pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;
-
- {
- full_in = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_in(i,j) = val + size;
- else
- {
- full_in(i,j) = val;
- full_in(j,i) = val;
- }
- }
- }
+ ScaLAPACKMatrix<NumberType> scalapack_matrix (sizes.first, grid, block_sizes.first,
+ LAPACKSupport::Property::symmetric);
+
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+
+ create_spd (full_in);
// invert via Lapack
inverse.cholesky(full_in);
int main (int argc,char **argv)
{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
-
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
}
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
/*
* test inverse via Cholesky decomposition in ScaLAPACK vs FullMatrix
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/scalapack.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), inverse(size), full_out(size), diff(size), prod1(size), prod2(size), one(size);
std::pair<int,int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
- ScaLAPACKMatrix<NumberType> scalapack_matrix (sizes.first, grid, block_sizes.first);
- /*ScaLAPACKMatrix<NumberType> scalapack_matrix(size,
- mpi_communicator,
- block_size);*/
-
- pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;
-
- {
- full_in = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
- one = 0.;
- for (unsigned int i=0; i<size; ++i)
- one(i,i) = 1.;
-
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_in(i,j) = val + size;
- else
- {
- full_in(i,j) = val;
- full_in(j,i) = val;
- }
- }
- }
+ ScaLAPACKMatrix<NumberType> scalapack_matrix (sizes.first, grid, block_sizes.first,
+ LAPACKSupport::Property::symmetric);
+
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+ create_spd (full_in);
+
+ one = 0.;
+ for (unsigned int i=0; i<size; ++i)
+ one(i,i) = 1.;
// invert via Lapack
inverse.invert(full_in);
inverse.mmult(prod1, full_in);
int main (int argc,char **argv)
{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- // 8x8 works, 32x32 does not.
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
-
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
}
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
// test norms of ScaLAPACK vs FullMatrix
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/scalapack.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size);
std::pair<int,int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
- ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first);
- /*ScaLAPACKMatrix<NumberType> scalapack_A (size,
- mpi_communicator,
- block_size);*/
-
- pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
-
- {
- full_A = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_A(i,j) = val + size;
- else
- {
- full_A(i,j) = val;
- full_A(j,i) = val;
- }
- }
- }
+ ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first,
+ LAPACKSupport::Property::symmetric);
+
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+ create_spd (full_A);
const double l1 = full_A.l1_norm();
const double linfty = full_A.linfty_norm();
int main (int argc,char **argv)
{
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
-
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
-
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
+
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
+
}
32 32 1 1
-51.7156 51.7156 51.7156
-51.7156 51.7156 51.7156
-184.51 184.51 184.51
+53.3549 53.3549 53.3549
+53.3549 53.3549 53.3549
+185.54 185.54 185.54
64 32 1 1
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.897 101.897 101.897
+101.897 101.897 101.897
+517.089 517.089 517.089
64 64 1 1
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.369 101.369 101.369
+101.369 101.369 101.369
+517.457 517.457 517.457
120 32 1 1
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+186.726 186.726 186.726
+186.726 186.726 186.726
+1321.87 1321.87 1321.87
120 64 1 1
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+189.512 189.512 189.512
+189.512 189.512 189.512
+1321.68 1321.68 1321.68
320 32 1 1
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+494.329 494.329 494.329
+494.329 494.329 494.329
+5736.37 5736.37 5736.37
320 64 1 1
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+498.342 498.342 498.342
+498.342 498.342 498.342
+5736.49 5736.49 5736.49
640 32 1 1
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+980.609 980.609 980.609
+980.609 980.609 980.609
+16207.5 16207.5 16207.5
640 64 1 1
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+982.483 982.483 982.483
+982.483 982.483 982.483
+16208.2 16208.2 16208.2
32 32 1 1
-51.7156 51.7156 51.7156
-51.7156 51.7156 51.7156
-184.51 184.51 184.51
+53.3549 53.3549 53.3549
+53.3549 53.3549 53.3549
+185.54 185.54 185.54
64 32 2 2
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.897 101.897 101.897
+101.897 101.897 101.897
+517.089 517.089 517.089
64 64 1 1
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.369 101.369 101.369
+101.369 101.369 101.369
+517.457 517.457 517.457
120 32 3 3
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+186.726 186.726 186.726
+186.726 186.726 186.726
+1321.87 1321.87 1321.87
120 64 2 2
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+189.512 189.512 189.512
+189.512 189.512 189.512
+1321.68 1321.68 1321.68
320 32 3 3
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+494.329 494.329 494.329
+494.329 494.329 494.329
+5736.37 5736.37 5736.37
320 64 3 3
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+498.342 498.342 498.342
+498.342 498.342 498.342
+5736.49 5736.49 5736.49
640 32 3 3
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+980.609 980.609 980.609
+980.609 980.609 980.609
+16207.5 16207.5 16207.5
640 64 3 3
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+982.483 982.483 982.483
+982.483 982.483 982.483
+16208.2 16208.2 16208.2
32 32 1 1
-51.7156 51.7156 51.7156
-51.7156 51.7156 51.7156
-184.51 184.51 184.51
+53.3549 53.3549 53.3549
+53.3549 53.3549 53.3549
+185.54 185.54 185.54
64 32 2 2
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.897 101.897 101.897
+101.897 101.897 101.897
+517.089 517.089 517.089
64 64 1 1
-100.342 100.342 100.342
-100.342 100.342 100.342
-516.965 516.965 516.965
+101.369 101.369 101.369
+101.369 101.369 101.369
+517.457 517.457 517.457
120 32 2 2
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+186.726 186.726 186.726
+186.726 186.726 186.726
+1321.87 1321.87 1321.87
120 64 2 2
-187.403 187.403 187.403
-187.403 187.403 187.403
-1321.51 1321.51 1321.51
+189.512 189.512 189.512
+189.512 189.512 189.512
+1321.68 1321.68 1321.68
320 32 2 2
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+494.329 494.329 494.329
+494.329 494.329 494.329
+5736.37 5736.37 5736.37
320 64 2 2
-494.259 494.259 494.259
-494.259 494.259 494.259
-5736.38 5736.38 5736.38
+498.342 498.342 498.342
+498.342 498.342 498.342
+5736.49 5736.49 5736.49
640 32 2 2
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+980.609 980.609 980.609
+980.609 980.609 980.609
+16207.5 16207.5 16207.5
640 64 2 2
-981.366 981.366 981.366
-981.366 981.366 981.366
-16207.1 16207.1 16207.1
+982.483 982.483 982.483
+982.483 982.483 982.483
+16208.2 16208.2 16208.2
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
// test reciprocal_condition_number()
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/scalapack.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size), inv_A(size);
std::pair<int,int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
- ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first);
- /*ScaLAPACKMatrix<NumberType> scalapack_A (size,
- mpi_communicator,
- block_size);*/
-
- pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
-
- {
- full_A = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_A(i,j) = val + size;
- else
- {
- full_A(i,j) = val;
- full_A(j,i) = val;
- }
- }
- }
+ ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first,
+ LAPACKSupport::Property::symmetric);
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+ create_spd (full_A);
inv_A.invert(full_A);
const double l1 = full_A.l1_norm();
int main (int argc,char **argv)
{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
+
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
-
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
-
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
}
32 32 1 1
-0.432045 0.476465
+0.410521 0.471758
64 32 1 1
-0.455047 0.48965
+0.44694 0.478752
64 64 1 1
-0.455047 0.48965
+0.444662 0.485917
120 32 1 1
-0.457288 0.491868
+0.459052 0.486953
120 64 1 1
-0.457288 0.491868
+0.449937 0.478099
320 32 1 1
-0.465325 0.489097
+0.466151 0.486539
320 64 1 1
-0.465325 0.489097
+0.460383 0.483094
640 32 1 1
-0.471717 0.48821
+0.472941 0.487731
640 64 1 1
-0.471717 0.48821
+0.471001 0.48693
32 32 1 1
-0.432045 0.476465
+0.410521 0.471758
64 32 2 2
-0.455047 0.48965
+0.44694 0.478752
64 64 1 1
-0.455047 0.48965
+0.444662 0.485917
120 32 3 3
-0.457288 0.491868
+0.459052 0.486953
120 64 2 2
-0.457288 0.491868
+0.449937 0.478099
320 32 3 3
-0.465325 0.489097
+0.466151 0.486539
320 64 3 3
-0.465325 0.489097
+0.460383 0.483094
640 32 3 3
-0.471717 0.48821
+0.472941 0.487731
640 64 3 3
-0.471717 0.48821
+0.471001 0.48693
32 32 1 1
-0.432045 0.476465
+0.410521 0.471758
64 32 2 2
-0.455047 0.48965
+0.44694 0.478752
64 64 1 1
-0.455047 0.48965
+0.444662 0.485917
120 32 2 2
-0.457288 0.491868
+0.459052 0.486953
120 64 2 2
-0.457288 0.491868
+0.449937 0.478099
320 32 2 2
-0.465325 0.489097
+0.466151 0.486539
320 64 2 2
-0.465325 0.489097
+0.460383 0.483094
640 32 2 2
-0.471717 0.48821
+0.472941 0.487731
640 64 2 2
-0.471717 0.48821
+0.471001 0.48693
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
-// test eigenvalues()
+// test eigenvalues_symmetric()
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/lapack_full_matrix.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size);
std::vector<NumberType> lapack_A(size*size);
std::pair<int,int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
- ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first);
+ ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first,
+ LAPACKSupport::Property::symmetric);
+
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+ create_spd (full_A);
+
+ for (unsigned int i = 0; i < size; ++i)
+ for (unsigned int j = 0; j < size; ++j)
+ lapack_A[i*size+j] = full_A(i,j);
- pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
- {
- full_A = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- {
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_A(i,j) = val + size;
- lapack_A[i*size+j] = val+size;
- }
- else
- {
- full_A(i,j) = val;
- full_A(j,i) = val;
- lapack_A[i*size+j] = val;
- lapack_A[j*size+i] = val;
- }
- }
- }
std::vector<NumberType> eigenvalues_ScaLapack, eigenvalues_Lapack(size);
//Lapack as reference
{
int main (int argc,char **argv)
{
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
-
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
-
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
+
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
+
}
32 32 1 1
First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
64 32 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
64 64 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
120 32 1 1
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
120 64 1 1
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
320 32 1 1
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
320 64 1 1
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
640 32 1 1
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
640 64 1 1
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
--- /dev/null
+32 32 1 1
+First 5 ScaLapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+First 5 Lapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+
+64 32 2 2
+First 5 ScaLapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+First 5 Lapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+
+64 64 1 1
+First 5 ScaLapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+First 5 Lapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+
+120 32 3 3
+First 5 ScaLapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+First 5 Lapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+
+120 64 2 2
+First 5 ScaLapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+First 5 Lapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+
+320 32 3 3
+First 5 ScaLapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+First 5 Lapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+
+320 64 3 3
+First 5 ScaLapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+First 5 Lapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+
+640 32 3 3
+First 5 ScaLapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+First 5 Lapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+
+640 64 3 3
+First 5 ScaLapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+First 5 Lapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+
32 32 1 1
First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
64 32 2 2
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
64 64 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
120 32 2 2
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
120 64 2 2
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
320 32 2 2
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
320 64 2 2
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
640 32 2 2
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
640 64 2 2
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
// ---------------------------------------------------------------------
#include "../tests.h"
+#include "../lapack/create_matrix.h"
-// test eigenvalues()
+// test eigenpairs_symmetric(); also calls set_property()
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/multithread_info.h>
-#include <boost/random/mersenne_twister.hpp>
-#include <boost/random/uniform_01.hpp>
-
#include <deal.II/lac/vector.h>
#include <deal.II/lac/lapack_full_matrix.h>
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
- // test multiplication with random vectors
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
-
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size);
std::vector<NumberType> lapack_A(size*size);
std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
ScaLAPACKMatrix<NumberType> scalapack_A (sizes.first, grid, block_sizes.first);
+ scalapack_A.set_property(LAPACKSupport::Property::symmetric);
- pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
- {
- full_A = 0.;
- boost::random::mt19937 gen;
- boost::random::uniform_01<> dist;
+ pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
+
+ create_spd (full_A);
+ for (unsigned int i = 0; i < size; ++i)
+ for (unsigned int j = 0; j < size; ++j)
+ lapack_A[i*size+j] = full_A(i,j);
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = i; j < size; ++j)
- {
- const double val = dist(gen);
- Assert (val >= 0. && val <= 1.,
- ExcInternalError());
- if (i==j)
- {
- // since A(i,j) < 1 and
- // a symmetric diagonally dominant matrix is SPD
- full_A(i,j) = val + size;
- lapack_A[i*size+j] = val+size;
- }
- else
- {
- full_A(i,j) = val;
- full_A(j,i) = val;
- lapack_A[i*size+j] = val;
- lapack_A[j*size+i] = val;
- }
- }
- }
std::vector<NumberType> eigenvalues_ScaLapack, eigenvalues_Lapack(size);
  // Lapack as reference
{
for (unsigned int i=0; i<max_n_eigenvalues; ++i)
AssertThrow ( std::fabs(eigenvalues_ScaLapack[i]-eigenvalues_Lapack[i]) < std::fabs(eigenvalues_Lapack[i])*1e-10, dealii::ExcInternalError());
-
FullMatrix<NumberType> s_eigenvectors (size,size);
for (int i=0; i<size; ++i)
for (int j=0; j<size; ++j)
int main (int argc,char **argv)
{
- try
- {
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
- const std::vector<unsigned int> blocks = {{32,64}};
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
- for (const auto &s : sizes)
- for (const auto &b : blocks)
- if (b <= s)
- test<double>(s,b);
+ const std::vector<unsigned int> sizes = {{32,64,120,320,640}};
+ const std::vector<unsigned int> blocks = {{32,64}};
- }
- catch (std::exception &exc)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Exception on processing: " << std::endl
- << exc.what() << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
-
- return 1;
- }
- catch (...)
- {
- std::cerr << std::endl << std::endl
- << "----------------------------------------------------"
- << std::endl;
- std::cerr << "Unknown exception!" << std::endl
- << "Aborting!" << std::endl
- << "----------------------------------------------------"
- << std::endl;
- return 1;
- };
+ for (const auto &s : sizes)
+ for (const auto &b : blocks)
+ if (b <= s)
+ test<double>(s,b);
}
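The test above replaces the inline boost-based setup of the symmetric positive definite (SPD) input matrix with the create_spd() helper pulled in from ../lapack/create_matrix.h. For readers without the test suite at hand, here is a hypothetical sketch of such a helper in the spirit of the removed inline code: random off-diagonal entries in [0,1), mirrored for symmetry, and a diagonal shift by the matrix size, which makes the matrix diagonally dominant and therefore SPD. The actual helper's signature, random number generator, and seed may differ.
@code
#include <deal.II/lac/full_matrix.h>

#include <random>

// Hypothetical stand-in for the create_spd() helper used by the test:
// fill A with a symmetric, diagonally dominant (hence SPD) random matrix.
template <typename NumberType>
void create_spd_sketch(dealii::FullMatrix<NumberType> &A)
{
  const unsigned int n = A.m();
  std::mt19937 gen(1); // fixed seed => reproducible expected output
  std::uniform_real_distribution<NumberType> dist(0., 1.);

  for (unsigned int i = 0; i < n; ++i)
    for (unsigned int j = i; j < n; ++j)
      {
        const NumberType val = dist(gen);
        if (i == j)
          A(i, j) = val + n; // each off-diagonal entry is < 1, so the
                             // diagonal dominates the row sum
        else
          A(i, j) = A(j, i) = val;
      }
}
@endcode
Because both the ScaLAPACK and the LAPACK paths of the test start from the same matrix, the leading eigenvalues of the two solvers are compared entry by entry with a relative tolerance of 1e-10, as the AssertThrow in the hunk above shows.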
32 32 1 1
First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
64 32 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
64 64 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
120 32 1 1
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
120 64 1 1
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
320 32 1 1
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
320 64 1 1
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
640 32 1 1
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
640 64 1 1
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
--- /dev/null
+32 32 1 1
+First 5 ScaLapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+First 5 Lapack eigenvalues
+29.4327 29.6255 29.7881 30.092 30.3062
+
+64 32 2 2
+First 5 ScaLapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+First 5 Lapack eigenvalues
+59.5716 59.9842 60.1172 60.3363 60.4051
+
+64 64 1 1
+First 5 ScaLapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+First 5 Lapack eigenvalues
+59.7161 59.8977 60.3065 60.5131 60.6934
+
+120 32 3 3
+First 5 ScaLapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+First 5 Lapack eigenvalues
+114.098 114.163 114.52 114.662 114.877
+
+120 64 2 2
+First 5 ScaLapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+First 5 Lapack eigenvalues
+113.953 114.207 114.496 114.525 114.8
+
+320 32 3 3
+First 5 ScaLapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+First 5 Lapack eigenvalues
+309.886 310.093 310.385 310.648 310.719
+
+320 64 3 3
+First 5 ScaLapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+First 5 Lapack eigenvalues
+309.771 310.096 310.171 310.37 310.471
+
+640 32 3 3
+First 5 ScaLapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+First 5 Lapack eigenvalues
+625.62 625.745 625.974 625.996 626.13
+
+640 64 3 3
+First 5 ScaLapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+First 5 Lapack eigenvalues
+625.381 625.696 625.855 625.983 626.074
+
32 32 1 1
First 5 ScaLapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
First 5 Lapack eigenvalues
-29.2445 29.432 29.5416 29.7204 29.8717
+29.4327 29.6255 29.7881 30.092 30.3062
64 32 2 2
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.5716 59.9842 60.1172 60.3363 60.4051
64 64 1 1
First 5 ScaLapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
First 5 Lapack eigenvalues
-59.9263 60.0671 60.1596 60.3945 60.5197
+59.7161 59.8977 60.3065 60.5131 60.6934
120 32 2 2
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+114.098 114.163 114.52 114.662 114.877
120 64 2 2
First 5 ScaLapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
First 5 Lapack eigenvalues
-113.975 114.221 114.571 114.636 114.811
+113.953 114.207 114.496 114.525 114.8
320 32 2 2
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.886 310.093 310.385 310.648 310.719
320 64 2 2
First 5 ScaLapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
First 5 Lapack eigenvalues
-309.946 310.139 310.275 310.403 310.451
+309.771 310.096 310.171 310.37 310.471
640 32 2 2
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.62 625.745 625.974 625.996 626.13
640 64 2 2
First 5 ScaLapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074
First 5 Lapack eigenvalues
-625.685 625.75 625.877 626.063 626.217
+625.381 625.696 625.855 625.983 626.074