From: Benjamin Brands
Date: Thu, 16 Nov 2017 12:26:00 +0000 (+0100)
Subject: separated BLACS context into separate class ProcessGrid
X-Git-Tag: v9.0.0-rc1~701^2~8
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c14ecbabc982f16dd6e2c996f73b16710115a704;p=dealii.git

separated BLACS context into separate class ProcessGrid
---

diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h
index ba91d78b8c..d9bce6af67 100644
--- a/include/deal.II/lac/scalapack.h
+++ b/include/deal.II/lac/scalapack.h
@@ -23,10 +23,117 @@
 #include
 #include
 #include
+#include
 #include
+#include
+
 DEAL_II_NAMESPACE_OPEN
+
+/**
+ * Forward declaration of class ScaLAPACKMatrix.
+ */
+template <typename NumberType> class ScaLAPACKMatrix;
+
+
+/**
+ * A class taking care of setting up the two-dimensional process grid for BLACS.
+ * ScaLAPACK matrices hold a shared pointer to an object of this class to
+ * perform the block-cyclic distribution.
+ */
+class ProcessGrid
+{
+public:
+
+  /**
+   * Declare class ScaLAPACKMatrix as friend to provide access to private
+   * members, e.g. the MPI communicator.
+   */
+  template <typename NumberType> friend class ScaLAPACKMatrix;
+
+  /**
+   * Constructor for a process grid for a given @p mpi_communicator.
+   * The pair @p grid_dimensions contains the user-defined numbers of process
+   * rows and columns.
+   */
+  ProcessGrid(MPI_Comm mpi_communicator,
+              const std::pair<unsigned int,unsigned int> &grid_dimensions);
+
+  /**
+   * Constructor for a process grid for a given @p mpi_communicator.
+   * The process grid is built based on the dimensions and the block-cyclic
+   * distribution of a target matrix described by @p matrix_dimensions and
+   * @p block_sizes.
+   */
+  ProcessGrid(MPI_Comm mpi_communicator,
+              const std::pair<unsigned int,unsigned int> &matrix_dimensions,
+              const std::pair<unsigned int,unsigned int> &block_sizes);
+
+  /**
+   * Destructor.
+   */
+  virtual ~ProcessGrid();
+
+  /**
+   * Return the number of rows in the process grid.
+   */
+  int get_process_grid_rows() const;
+
+  /**
+   * Return the number of columns in the process grid.
+   */
+  int get_process_grid_columns() const;
+
+private:
+
+  /**
+   * MPI communicator with all processes.
+   */
+  MPI_Comm mpi_communicator;
+
+  /**
+   * MPI communicator with the inactive processes and the root.
+   */
+  MPI_Comm mpi_communicator_inactive_with_root;
+
+  /**
+   * BLACS context.
+   */
+  int blacs_context;
+
+  /**
+   * ID of this MPI process.
+   */
+  int this_mpi_process;
+
+  /**
+   * Total number of MPI processes.
+   */
+  int n_mpi_processes;
+
+  /**
+   * Number of rows in the process grid.
+   */
+  int n_process_rows;
+
+  /**
+   * Number of columns in the process grid.
+   */
+  int n_process_columns;
+
+  /**
+   * Row of this process in the grid.
+   */
+  int this_process_row;
+
+  /**
+   * Column of this process in the grid.
+   */
+  int this_process_column;
+
+  /**
+   * A flag which is true for processes within the 2D process grid.
+   */
+  bool active;
+
+protected:
+};
+
+
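A minimal sketch of how the new interface fits together, mirroring the updated tests further down in this patch (the <double> instantiation and the helper function name are illustrative, not part of the patch itself):

    #include <deal.II/lac/scalapack.h>
    #include <memory>
    #include <utility>

    void example(MPI_Comm mpi_communicator)
    {
      const unsigned int size = 64, block_size = 16;

      // Build a 2D BLACS process grid suited to a size x size matrix
      // distributed in block_size x block_size blocks:
      std::shared_ptr<ProcessGrid> grid =
        std::make_shared<ProcessGrid>(mpi_communicator,
                                      std::make_pair(size, size),
                                      std::make_pair(block_size, block_size));

      // Several matrices can share the same grid:
      ScaLAPACKMatrix<double> A(size, grid, block_size);
    }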
 /**
  * A wrapper class around ScaLAPACK parallel dense linear algebra.
  *
@@ -77,7 +184,6 @@ DEAL_II_NAMESPACE_OPEN
  * | . . . . -4.0 | . . . -4.0
  * @endcode
  *
- * Currently, only symmetric real matrices are supported.
  *
  * Here is a strong scaling example of ScaLAPACKMatrix::invert()
  * on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets
@@ -103,25 +209,33 @@ public:
    * Constructor for a rectangular matrix with @p rows and @p columns, distributed
    * in a given @p mpi_communicator.
    *
-   * The choice of the block size @p block_size is a compromize between a large
-   * enough sizes for efficient local BLAS and small enough get
+   * The choice of the block size @p block_size is a compromise between sizes
+   * large enough for efficient local BLAS and small enough to get
    * good parallel load balance.
    */
   ScaLAPACKMatrix(const size_type rows, const size_type columns,
-                  MPI_Comm mpi_communicator,
+                  std::shared_ptr<ProcessGrid> process_grid,
                   const unsigned int block_size_row = 32,
                   const unsigned int block_size_column = 32,
                   const LAPACKSupport::Property property = LAPACKSupport::Property::general);
 
+  /**
+   * Alternative constructor taking the matrix dimensions and block sizes as
+   * the std::pairs @p sizes and @p block_sizes.
+   */
+  ScaLAPACKMatrix(const std::pair<unsigned int,unsigned int> &sizes,
+                  std::shared_ptr<ProcessGrid> process_grid,
+                  const std::pair<unsigned int,unsigned int> &block_sizes = std::make_pair(32,32),
+                  const LAPACKSupport::Property property = LAPACKSupport::Property::general);
+
   /**
    * Constructor for a square matrix of size @p size, distributed
-   * in a given @p mpi_communicator.
+   * using the process grid in @p process_grid.
    *
    * The choice of the block size @p block_size is a compromise between sizes
    * large enough for efficient local BLAS and small enough to get
    * good parallel load balance.
    */
   ScaLAPACKMatrix(const size_type size,
-                  MPI_Comm mpi_communicator,
+                  std::shared_ptr<ProcessGrid> process_grid,
                   const unsigned int block_size = 32);
 
   /**
@@ -268,14 +382,9 @@ private:
   LAPACKSupport::Property properties;
 
   /**
-   * MPI communicator with all processes
+   * Shared pointer to the ProcessGrid object, which contains the BLACS
+   * context and the MPI communicator as well as other necessary data
+   * structures.
    */
-  MPI_Comm mpi_communicator;
-
-  /**
-   * MPI communicator with inactive processes and the root
-   */
-  MPI_Comm mpi_communicator_inactive_with_root;
+  std::shared_ptr<ProcessGrid> grid;
 
   /**
    * Number of rows in the matrix
@@ -297,41 +406,6 @@
    */
   int column_block_size;
 
-  /**
-   * BLACS context
-   */
-  int blacs_context;
-
-  /**
-   * ID of this MPI process
-   */
-  int this_mpi_process;
-
-  /**
-   * Total number of MPI processes
-   */
-  int n_mpi_processes;
-
-  /**
-   * Number of rows in processes grid
-   */
-  int n_process_rows;
-
-  /**
-   * Number of columns in processes grid
-   */
-  int n_process_columns;
-
-  /**
-   * Row of this process in the grid
-   */
-  int this_process_row;
-
-  /**
-   * Column of this process in the grid
-   */
-  int this_process_column;
-
   /**
    * Number of rows in the matrix owned by the current process
    */
@@ -357,11 +431,6 @@
    */
   mutable std::vector<int> iwork;
 
-  /**
-   * A flag which is true for processes within the 2D process grid
-   */
-  bool active;
-
   /**
    * A character to define where elements are stored in case
    * ScaLAPACK operations support this.
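As background for the block-cyclic distribution the two classes implement: block (i,j) of the global matrix belongs to the grid process whose coordinates are the block indices modulo the grid dimensions (with the first block on process (0,0), as in this patch). A tiny illustration, not part of the patch:

    #include <utility>

    // Which grid process owns global block (block_row, block_col)?
    std::pair<int,int> owning_process(const int block_row, const int block_col,
                                      const int n_process_rows,
                                      const int n_process_columns)
    {
      return std::make_pair(block_row % n_process_rows,
                            block_col % n_process_columns);
    }

    // Example: on a 2x3 grid, block (4,5) lives on process (0,2).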
diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index 9208dd7c54..f9ccbc0526 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -235,17 +235,17 @@ extern "C"
    */
   void pdsyev_(const char *jobz, const char *uplo, const int *m, double *A, const int *ia, const int *ja, int *desca, double *w, double *z, const int *iz, const int *jz, int *descz, double *work, const int *lwork, int *info);
+}
 
-
-}
 
 DEAL_II_NAMESPACE_OPEN
 
 namespace
 {
   /**
-   * Internal function to create processor grid based on the total
-   * number of cores and MPI size. See
+   * Internal function to determine the dimensions of the process grid based
+   * on the total number of cores, the matrix dimensions and the matrix block
+   * sizes.
    *
    * Amesos heuristics:
    * https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166
@@ -254,8 +254,8 @@ namespace
    * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
    */
   inline
-  void choose_the_processor_grid(int &n_process_rows, int &n_process_columns, const unsigned int m, const unsigned int n,
-                                 const unsigned int block_size_m, const unsigned int block_size_n, const int n_processes)
+  std::pair<int,int> choose_the_processor_grid(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
+                                               const unsigned int block_size_m, const unsigned int block_size_n)
   {
     // Few notes from the ScaLAPACK user guide:
     // It is possible to predict the best grid shape given the number of processes available:
@@ -268,6 +268,9 @@
     // Below we always try to create 2D processor grids:
 
+    int n_processes;
+    MPI_Comm_size(mpi_comm, &n_processes);
+
     // Get the total number of cores we can occupy in a rectangular dense matrix
     // with rectangular blocks when every core owns only a single block:
     const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
                                       int(std::ceil((1.*n)/block_size_n));
@@ -288,9 +291,9 @@
     // but this affects the grid shape dramatically, i.e. 10 cores 3x3 becomes 2x5.
 
     // limit our estimate to be in [2, Np]
-    n_process_columns = std::min (Np, std::max(2, Pc));
+    int n_process_columns = std::min (Np, std::max(2, Pc));
     // finally, get the rows:
-    n_process_rows = Np / n_process_columns ;
+    int n_process_rows = Np / n_process_columns ;
 
     Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns,
             ExcMessage("error in process grid: "+
@@ -301,6 +304,8 @@
                        " out of " + std::to_string(n_processes)));
 
+    return std::make_pair(n_process_rows,n_process_columns);
+
     // For example,
     // 320x320 with 32x32 blocks and 16 cores:
     // Pc = 1.0 * Pr => 4x4 grid
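Working that example through by hand (a condensed restatement of the function above; the Pc estimate sits in context lines not shown by these hunks, so its exact form, Pc = sqrt((n/m)*Np), is an assumption based on the Amesos/Elemental heuristics cited above):

    #include <algorithm>
    #include <cmath>
    #include <utility>

    std::pair<int,int> grid_for_320x320_on_16_cores()
    {
      const unsigned int m = 320, n = 320, mb = 32, nb = 32;
      const int n_processes = 16;
      // At most one core per block:
      const int heuristic = int(std::ceil((1.*m)/mb))*int(std::ceil((1.*n)/nb)); // 100
      const int Np = std::min(heuristic, n_processes);                           // 16
      // Assumed column estimate, following the cited heuristics:
      const int Pc = static_cast<int>(std::sqrt((1.*n/m)*Np));                   // 4
      const int n_process_columns = std::min(Np, std::max(2, Pc));               // 4
      const int n_process_rows    = Np / n_process_columns;                      // 4
      return std::make_pair(n_process_rows, n_process_columns);                  // 4x4 grid
    }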
@@ -311,42 +316,22 @@
 
-/**
- *Constructor for a rectangular distributes Matrix
- */
-template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
-                                             MPI_Comm mpi_communicator,
-                                             const unsigned int block_size_row, const unsigned int block_size_column,
-                                             const LAPACKSupport::Property property)
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair<unsigned int,unsigned int> &grid_dimensions)
   :
-  TransposeTable<NumberType> (),
-  state (LAPACKSupport::unusable),
-  properties(property),
-  mpi_communicator(mpi_communicator),
-  n_rows(rows),
-  n_columns(columns),
-  row_block_size(block_size_row),
-  column_block_size(block_size_column),
-  uplo('L'), // for non-symmetric matrices this is not needed
-  first_process_row(0),
-  first_process_column(0),
-  submatrix_row(1),
-  submatrix_column(1)
+  mpi_communicator(mpi_comm),
+  n_process_rows(grid_dimensions.first),
+  n_process_columns(grid_dimensions.second)
 {
-  Assert (block_size_row > 0,
-          ExcMessage("Row block size has to be positive."));
-  Assert (block_size_column > 0,
-          ExcMessage("Column block size has to be positive."));
-  Assert (block_size_row <= rows,
-          ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
-  Assert (block_size_column <= columns,
-          ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
+  Assert (grid_dimensions.first > 0,
+          ExcMessage("Number of process grid rows has to be positive."));
+  Assert (grid_dimensions.second > 0,
+          ExcMessage("Number of process grid columns has to be positive."));
 
   MPI_Comm_size(mpi_communicator, &n_mpi_processes);
   MPI_Comm_rank(mpi_communicator, &this_mpi_process);
 
-  choose_the_processor_grid(n_process_rows, n_process_columns, n_rows, n_columns, row_block_size, column_block_size, n_mpi_processes);
+  Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
+          ExcMessage("Size of process grid is larger than number of available MPI processes."));
 
   // processor grid order
   // FIXME: default to column major
@@ -374,33 +359,6 @@ ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_ty
   else
     active = true;
 
-  if (active)
-    {
-      Assert (n_process_rows == proccols_ && proccols_ == n_process_columns,
-              ExcInternalError());
-
-      // Get local sizes:
-      n_local_rows = numroc_(&n_rows, &row_block_size, &this_process_row, &first_process_row, &n_process_rows);
-      n_local_columns = numroc_(&n_columns, &column_block_size, &this_process_column, &first_process_column, &n_process_columns);
-
-      // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different between processes
-      int lda = std::max(1,n_local_rows);
-
-      int info=0;
-      descinit_(descriptor, &n_rows, &n_columns, &row_block_size, &column_block_size,&first_process_row,&first_process_column,&blacs_context, &lda, &info);
-      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));
-
-      this->reinit(n_local_rows, n_local_columns);
-    }
-  else
-    {
-      // set process-local variables to something telling:
-      n_local_rows = -1;
-      n_local_columns = -1;
-      for (unsigned int i = 0; i < 9; ++i)
-        descriptor[i] = -1;
-    }
-
   // Create an auxiliary communicator which has root and all inactive cores.
   // Assume that inactive cores start with id=n_process_rows*n_process_columns
   Assert (active || this_mpi_process >= n_process_rows*n_process_columns,
@@ -450,13 +408,102 @@
 
 
 
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+                         const std::pair<unsigned int,unsigned int> &matrix_dimensions,
+                         const std::pair<unsigned int,unsigned int> &block_sizes)
+  :
+  ProcessGrid(mpi_comm,
+              choose_the_processor_grid(mpi_comm, matrix_dimensions.first, matrix_dimensions.second,
+                                        block_sizes.first, block_sizes.second) )
+{}
+
+
+
+ProcessGrid::~ProcessGrid()
+{
+  if (active)
+    Cblacs_gridexit(blacs_context);
+
+  MPI_Comm_free(&mpi_communicator_inactive_with_root);
+}
+
+
+
+/**
+ * Constructor for a rectangular distributed matrix
+ */
+template <typename NumberType>
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
+                                             std::shared_ptr<ProcessGrid> process_grid,
+                                             const unsigned int block_size_row, const unsigned int block_size_column,
+                                             const LAPACKSupport::Property property)
+  :
+  TransposeTable<NumberType> (),
+  state (LAPACKSupport::unusable),
+  properties(property),
+  grid (process_grid),
+  n_rows(rows),
+  n_columns(columns),
+  row_block_size(block_size_row),
+  column_block_size(block_size_column),
+  uplo('L'), // for non-symmetric matrices this is not needed
+  first_process_row(0),
+  first_process_column(0),
+  submatrix_row(1),
+  submatrix_column(1)
+{
+  Assert (block_size_row > 0,
+          ExcMessage("Row block size has to be positive."));
+  Assert (block_size_column > 0,
+          ExcMessage("Column block size has to be positive."));
+  Assert (block_size_row <= rows,
+          ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
+  Assert (block_size_column <= columns,
+          ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
+
+  if (grid->active)
+    {
+      // Get local sizes:
+      n_local_rows = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
+      n_local_columns = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
+
+      // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different between processes
+      int lda = std::max(1,n_local_rows);
+
+      int info=0;
+      descinit_(descriptor, &n_rows, &n_columns, &row_block_size, &column_block_size,&first_process_row,&first_process_column,&(grid->blacs_context), &lda, &info);
+      AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));
+
+      this->reinit(n_local_rows, n_local_columns);
+    }
+  else
+    {
+      // set process-local variables to something telling:
+      n_local_rows = -1;
+      n_local_columns = -1;
+      for (unsigned int i = 0; i < 9; ++i)
+        descriptor[i] = -1;
+    }
+}
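For reference when reading the descinit_ call above, the nine entries of a ScaLAPACK array descriptor are laid out as follows (this is stock ScaLAPACK convention, not something the patch defines):

    // Layout of the ScaLAPACK array descriptor filled by descinit_:
    //   descriptor[0]  DTYPE_A  descriptor type (1 for a dense matrix)
    //   descriptor[1]  CTXT_A   BLACS context handle (grid->blacs_context here)
    //   descriptor[2]  M_A      global number of rows (n_rows)
    //   descriptor[3]  N_A      global number of columns (n_columns)
    //   descriptor[4]  MB_A     row block size (row_block_size)
    //   descriptor[5]  NB_A     column block size (column_block_size)
    //   descriptor[6]  RSRC_A   process row owning the first block (first_process_row)
    //   descriptor[7]  CSRC_A   process column owning the first block (first_process_column)
    //   descriptor[8]  LLD_A    local leading dimension (lda, >= n_local_rows)
    int descriptor[9];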
+
+
+
+template <typename NumberType>
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const std::pair<unsigned int,unsigned int> &sizes,
+                                             std::shared_ptr<ProcessGrid> process_grid,
+                                             const std::pair<unsigned int,unsigned int> &block_sizes,
+                                             const LAPACKSupport::Property property)
+  :
+  ScaLAPACKMatrix(sizes.first,sizes.second,process_grid,block_sizes.first,block_sizes.second,property)
+{}
+
+
+
 template <typename NumberType>
 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
-                                             MPI_Comm mpi_communicator,
+                                             std::shared_ptr<ProcessGrid> process_grid,
                                              const unsigned int block_size)
   :
-  //delegating constructors only work for C++11
-  ScaLAPACKMatrix (size,size,mpi_communicator,block_size,block_size,LAPACKSupport::Property::symmetric)
+  ScaLAPACKMatrix(size,size,process_grid,block_size,block_size,LAPACKSupport::Property::symmetric)
 {}
 
 
@@ -473,7 +520,8 @@ ScaLAPACKMatrix<NumberType>::operator = (const FullMatrix<NumberType> &matrix)
   Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
   Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
 
-  if (active)
+  if (grid->active)
     {
       for (int i=0; i < n_local_rows; ++i)
         {
@@ -536,7 +584,7 @@ ScaLAPACKMatrix<NumberType>::global_row(const int loc_row) const
   Assert (loc_row >= 0 && loc_row < n_local_rows,
           ExcIndexRange(loc_row,0,n_local_rows));
   const int i = loc_row+1;
-  return indxl2g_ (&i, &row_block_size, &this_process_row, &first_process_row, &n_process_rows) - 1;
+  return indxl2g_ (&i, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows)) - 1;
 }
 
 
@@ -548,7 +596,7 @@ ScaLAPACKMatrix<NumberType>::global_column(const int loc_column) const
   Assert (loc_column >= 0 && loc_column < n_local_columns,
           ExcIndexRange(loc_column,0,n_local_columns));
   const int j = loc_column+1;
-  return indxl2g_ (&j, &column_block_size, &this_process_column, &first_process_column, &n_process_columns) - 1;
+  return indxl2g_ (&j, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns)) - 1;
 }
 
 
@@ -564,7 +612,8 @@ ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
   Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
   Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
 
-  if (active)
+  if (grid->active)
     {
       matrix = 0.;
       for (int i=0; i < n_local_rows; ++i)
@@ -577,8 +626,7 @@ ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
         }
     }
-
-  Utilities::MPI::sum(matrix, mpi_communicator, matrix);
+  Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
 
   // we could move the following lines under the main loop above,
   // but they would be dependent on glob_i and glob_j, which
@@ -598,10 +646,6 @@ ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
 template <typename NumberType>
 ScaLAPACKMatrix<NumberType>::~ScaLAPACKMatrix()
 {
-  if (active)
-    Cblacs_gridexit(blacs_context);
-
-  MPI_Comm_free(&mpi_communicator_inactive_with_root);
 }
 
 
@@ -611,7 +655,8 @@ void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
   Assert (n_columns == n_rows,
           ExcMessage("Cholesky factorization can be applied to SPD matrices only."));
 
-  if (active)
+  if (grid->active)
     {
       int info = 0;
       NumberType *A_loc = &this->values[0];
@@ -630,7 +675,7 @@ void ScaLAPACKMatrix<NumberType>::invert()
   if (state == LAPACKSupport::matrix)
     compute_cholesky_factorization();
 
-  if (active)
+  if (grid->active)
     {
       int info = 0;
       /*
@@ -654,10 +699,10 @@ void ScaLAPACKMatrix<NumberType>::eigenvalues_symmetric(std::vector<NumberType>
   Assert (properties == LAPACKSupport::symmetric,
           ExcMessage("Matrix has to be symmetric for this operation."));
 
-  ScaLAPACKMatrix<NumberType> Z (n_mpi_processes, mpi_communicator, 1);
+  ScaLAPACKMatrix<NumberType> Z (grid->n_mpi_processes, grid, 1);
   ev.resize (n_rows);
 
-  if (active)
+  if (grid->active)
     {
       int info = 0;
 
@@ -688,7 +733,7 @@ void ScaLAPACKMatrix<NumberType>::eigenvalues_symmetric(std::vector<NumberType>
   /*
    * send the eigenvalues to processors not being part of the process grid
    */
-  MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, mpi_communicator_inactive_with_root);
+  MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
 
   /* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
    * including the diagonal, is destroyed. Therefore, the matrix is unusable
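The communicator grid->mpi_communicator_inactive_with_root is what lets the broadcast above reach processes that hold no grid slot: it groups the grid's root with every inactive rank. The patch builds it inside ProcessGrid, in hunks not shown here; one way such a communicator can be constructed is sketched below (assumed variable names, not necessarily the patch's exact code):

    // Sketch: a communicator joining rank 0 with all ranks outside the grid.
    // Inactive ranks are those >= n_process_rows * n_process_columns.
    MPI_Comm inactive_with_root = MPI_COMM_NULL;
    const bool member = (this_mpi_process == 0) || !active;
    MPI_Comm_split(mpi_communicator,
                   member ? 0 : MPI_UNDEFINED, // color: only members join
                   this_mpi_process,           // key: preserve rank order
                   &inactive_with_root);       // MPI_COMM_NULL for non-members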
@@ -706,24 +751,15 @@ void ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(std::vector<NumberType> &
   Assert (properties == LAPACKSupport::symmetric,
           ExcMessage("Matrix has to be symmetric for this operation."));
 
-  /*const size_type rows, const size_type columns,
-    MPI_Comm mpi_communicator,
-    const unsigned int block_size_row, const unsigned int block_size_column,
-    const LAPACKSupport::Property property*/
-
-  ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, n_columns, mpi_communicator, row_block_size, column_block_size, LAPACKSupport::Property::general);
+  ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, n_columns, grid, row_block_size, column_block_size, LAPACKSupport::Property::general);
+  eigenvectors.descriptor[1]=0;
   ev.resize (n_rows);
 
-  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
-  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(grid->mpi_communicator));
 
   ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
 
-
-
-  if (active)
+  if (grid->active)
     {
       int info = 0;
 
@@ -774,7 +810,7 @@ void ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(std::vector<NumberType> &
   /*
    * send the eigenvalues to processors not being part of the process grid
    */
-  MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, mpi_communicator_inactive_with_root);
+  MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
 
   /* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
    * including the diagonal, is destroyed. Therefore, the matrix is unusable
@@ -790,7 +826,8 @@ NumberType ScaLAPACKMatrix<NumberType>::reciprocal_condition_number(const Number
   Assert (state == LAPACKSupport::cholesky,
           ExcMessage("Matrix has to be in Cholesky state before calling this function."));
   NumberType rcond = 0.;
-  if (active)
+
+  if (grid->active)
     {
       int lwork = 2 * n_local_rows + 3 * n_local_columns + column_block_size;
       int liwork = n_local_rows;
@@ -844,17 +881,18 @@ NumberType ScaLAPACKMatrix<NumberType>::norm(const char type) const
 
   NumberType res = 0.;
 
-  if (active)
+  if (grid->active)
    {
      //int IROFFA = MOD( IA-1, MB_A )
      //int ICOFFA = MOD( JA-1, NB_A )
-     const int lcm = ilcm_(&n_process_rows, &n_process_columns);
-     const int v2 = lcm/n_process_rows;
+     const int lcm = ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
+     const int v2 = lcm/(grid->n_process_rows);
 
-     const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &this_process_row, &first_process_row, &n_process_rows);
-     const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &this_process_column, &first_process_column, &n_process_columns);
-     const int Np0 = numroc_(&n_columns/*+IROFFA*/, &row_block_size, &this_process_row, &IAROW, &n_process_rows);
-     const int Nq0 = numroc_(&n_columns/*+ICOFFA*/, &column_block_size, &this_process_column, &IACOL, &n_process_columns);
+     const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
+     const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
+     const int Np0 = numroc_(&n_columns/*+IROFFA*/, &row_block_size, &(grid->this_process_row), &IAROW, &(grid->n_process_rows));
+     const int Nq0 = numroc_(&n_columns/*+ICOFFA*/, &column_block_size, &(grid->this_process_column), &IACOL, &(grid->n_process_columns));
 
      const int v1 = iceil_(&Np0, &row_block_size);
      const int ldw = (n_local_rows==n_local_columns) ?
@@ -877,11 +915,11 @@ NumberType ScaLAPACKMatrix<NumberType>::norm(const char type) const
 
 template <typename NumberType>
 void ScaLAPACKMatrix<NumberType>::send_to_inactive(NumberType &value) const
 {
-  if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
+  if (grid->mpi_communicator_inactive_with_root != MPI_COMM_NULL)
     {
       MPI_Bcast(&value,1,MPI_DOUBLE,
                 0/*from root*/,
-                mpi_communicator_inactive_with_root);
+                grid->mpi_communicator_inactive_with_root);
     }
 }
 
 
@@ -890,7 +928,7 @@ void ScaLAPACKMatrix<NumberType>::send_to_inactive(NumberType &value) const
 template <typename NumberType>
 int ScaLAPACKMatrix<NumberType>::get_process_grid_rows() const
 {
-  return n_process_rows;
+  return grid->n_process_rows;
 }
 
 
@@ -898,7 +936,7 @@ int ScaLAPACKMatrix<NumberType>::get_process_grid_rows() const
 template <typename NumberType>
 int ScaLAPACKMatrix<NumberType>::get_process_grid_columns() const
 {
-  return n_process_columns;
+  return grid->n_process_columns;
 }

diff --git a/tests/scalapack/scalapack_01.cc b/tests/scalapack/scalapack_01.cc
index a22d255767..d6ca93fa61 100644
--- a/tests/scalapack/scalapack_01.cc
+++ b/tests/scalapack/scalapack_01.cc
@@ -49,9 +49,13 @@ void test(const unsigned int size, const unsigned int block_size)
   // Create SPD matrices of requested size:
   FullMatrix<double> full_in(size), full_out(size), diff(size);
 
-  ScaLAPACKMatrix<double> scalapack_matrix(size,
-                                           mpi_communicator,
-                                           block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_matrix (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;

diff --git a/tests/scalapack/scalapack_02.cc b/tests/scalapack/scalapack_02.cc
index 08668f031c..2f5d2f2c7f 100644
--- a/tests/scalapack/scalapack_02.cc
+++ b/tests/scalapack/scalapack_02.cc
@@ -50,9 +50,13 @@ void test(const unsigned int size, const unsigned int block_size)
   // Create SPD matrices of requested size:
   FullMatrix<double> full_in(size), inverse(size), full_out(size), diff(size);
 
-  ScaLAPACKMatrix<double> scalapack_matrix(size,
-                                           mpi_communicator,
-                                           block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_matrix (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;
diff --git a/tests/scalapack/scalapack_03.cc b/tests/scalapack/scalapack_03.cc
index 45b3bb8cb4..0899dedd7e 100644
--- a/tests/scalapack/scalapack_03.cc
+++ b/tests/scalapack/scalapack_03.cc
@@ -58,9 +58,13 @@ void test(const unsigned int size, const unsigned int block_size)
   // Create SPD matrices of requested size:
   FullMatrix<double> full_in(size), inverse(size), full_out(size), diff(size), prod1(size), prod2(size), one(size);
 
-  ScaLAPACKMatrix<double> scalapack_matrix(size,
-                                           mpi_communicator,
-                                           block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_matrix (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_matrix.get_process_grid_rows() << " " << scalapack_matrix.get_process_grid_columns() << std::endl;

diff --git a/tests/scalapack/scalapack_04.cc b/tests/scalapack/scalapack_04.cc
index f8579b02e6..6921ddcc41 100644
--- a/tests/scalapack/scalapack_04.cc
+++ b/tests/scalapack/scalapack_04.cc
@@ -49,9 +49,13 @@ void test(const unsigned int size, const unsigned int block_size)
   // Create SPD matrices of requested size:
   FullMatrix<double> full_A(size);
 
-  ScaLAPACKMatrix<double> scalapack_A(size,
-                                      mpi_communicator,
-                                      block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_A (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;

diff --git a/tests/scalapack/scalapack_05.cc b/tests/scalapack/scalapack_05.cc
index 234eeb066c..a2c78105d8 100644
--- a/tests/scalapack/scalapack_05.cc
+++ b/tests/scalapack/scalapack_05.cc
@@ -49,9 +49,13 @@ void test(const unsigned int size, const unsigned int block_size)
   // Create SPD matrices of requested size:
   FullMatrix<double> full_A(size), inv_A(size);
 
-  ScaLAPACKMatrix<double> scalapack_A(size,
-                                      mpi_communicator,
-                                      block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_A (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;

diff --git a/tests/scalapack/scalapack_06.cc b/tests/scalapack/scalapack_06.cc
index 0e7671fa5a..cb9fca3838 100644
--- a/tests/scalapack/scalapack_06.cc
+++ b/tests/scalapack/scalapack_06.cc
@@ -58,7 +58,10 @@ void test(const unsigned int size, const unsigned int block_size)
   FullMatrix<double> full_A(size);
   std::vector<double> lapack_A(size*size);
 
-  ScaLAPACKMatrix<double> scalapack_A(size, mpi_communicator, block_size);
+  std::pair<unsigned int,unsigned int> sizes = std::make_pair(size,size), block_sizes = std::make_pair(block_size,block_size);
+  std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,sizes,block_sizes);
+
+  ScaLAPACKMatrix<double> scalapack_A (sizes.first, grid, block_sizes.first);
 
   pcout << size << " " << block_size << " " << scalapack_A.get_process_grid_rows() << " " << scalapack_A.get_process_grid_columns() << std::endl;
 
   {