#include <deal.II/base/exceptions.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/lapack_support.h>
+#include <deal.II/base/mpi.h>
#include <mpi.h>
+#include <memory>
+
DEAL_II_NAMESPACE_OPEN
+
+/**
+ * Forward declaration of class ScaLAPACKMatrix
+ */
+template <typename NumberType> class ScaLAPACKMatrix;
+
+
+/**
+ * A class that takes care of setting up a two-dimensional process grid for BLACS.
+ * ScaLAPACK matrices hold a shared pointer to an object of this class to perform
+ * their block-cyclic distribution.
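+ *
+ * A minimal usage sketch (the 2x2 grid dimensions are chosen here purely for
+ * illustration):
+ * @code
+ *   // build a 2x2 process grid on MPI_COMM_WORLD
+ *   std::shared_ptr<ProcessGrid> grid =
+ *     std::make_shared<ProcessGrid>(MPI_COMM_WORLD, std::make_pair(2,2));
+ * @endcode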
+ */
+class ProcessGrid
+{
+public:
+
+ /**
+ * Declare the class ScaLAPACKMatrix as a friend to provide access to private members, e.g. the MPI communicator.
+ */
+ template <typename NumberType> friend class ScaLAPACKMatrix;
+
+ /**
+ * Constructor for a process grid on a given @p mpi_communicator.
+ * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns.
+ */
+ ProcessGrid(MPI_Comm mpi_communicator, const std::pair<int,int> &grid_dimensions);
+
+ /**
+ * Constructor for a process grid on a given @p mpi_communicator.
+ * The process grid is built based on the dimensions and the block-cyclic distribution of a target matrix described by @p matrix_dimensions and @p block_sizes.
+ */
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const std::pair<int,int> &matrix_dimensions, const std::pair<int,int> &block_sizes);
+
+ /**
+ * Destructor
+ */
+ virtual ~ProcessGrid();
+
+ /**
+ * Return the number of rows in the process grid.
+ */
+ int get_process_grid_rows() const;
+
+ /**
+ * Return the number of columns in the process grid.
+ */
+ int get_process_grid_columns() const;
+
+private:
+
+ /**
+ * MPI communicator with all processes
+ */
+ MPI_Comm mpi_communicator;
+
+ /**
+ * MPI communicator with inactive processes and the root
+ */
+ MPI_Comm mpi_communicator_inactive_with_root;
+
+ /**
+ * BLACS context
+ */
+ int blacs_context;
+
+ /**
+ * ID of this MPI process
+ */
+ int this_mpi_process;
+
+ /**
+ * Total number of MPI processes
+ */
+ int n_mpi_processes;
+
+ /**
+ * Number of rows in the process grid.
+ */
+ int n_process_rows;
+
+ /**
+ * Number of columns in the process grid.
+ */
+ int n_process_columns;
+
+ /**
+ * Row of this process in the grid
+ */
+ int this_process_row;
+
+ /**
+ * Column of this process in the grid
+ */
+ int this_process_column;
+
+ /**
+ * A flag which is true for processes within the 2D process grid
+ */
+ bool active;
+
+};
+
+
/**
* A wrapper class around ScaLAPACK parallel dense linear algebra.
*
* | . . . . -4.0 | . . . -4.0
* @endcode
*
- * Currently, only symmetric real matrices are supported.
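+ * A ScaLAPACKMatrix is constructed on top of a ProcessGrid. A minimal sketch
+ * (the 320x320 size and 32x32 block size are illustrative only):
+ * @code
+ *   std::shared_ptr<ProcessGrid> grid =
+ *     std::make_shared<ProcessGrid>(MPI_COMM_WORLD,
+ *                                   std::make_pair(320,320),
+ *                                   std::make_pair(32,32));
+ *   ScaLAPACKMatrix<double> matrix(320, grid, 32);
+ * @endcode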
*
* Here is a strong scaling example of ScaLAPACKMatrix::invert()
* on up to 5 nodes each composed of two Intel Xeon 2660v2 IvyBridge sockets
* Constructor for a rectangular matrix with @p rows and @p columns, distributed
* in a given @p mpi_communicator .
*
- * The choice of the block size @p block_size is a compromize between a large
- * enough sizes for efficient local BLAS and small enough get
+ * The choice of the block size @p block_size is a compromise between a large
+ * enough size for efficient local BLAS and small enough to get
* good parallel load balance.
*/
ScaLAPACKMatrix(const size_type rows, const size_type columns,
- MPI_Comm mpi_communicator,
+ std::shared_ptr<ProcessGrid> process_grid,
const unsigned int block_size_row = 32, const unsigned int block_size_column = 32,
const LAPACKSupport::Property property = LAPACKSupport::Property::general);
+
+ /**
+ * Alternative constructor taking the matrix dimensions and block sizes as the std::pair arguments @p sizes and @p block_sizes.
+ */
+ ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
+ std::shared_ptr<ProcessGrid> process_grid,
+ const std::pair<size_type,size_type> &block_sizes = std::make_pair(32,32),
+ const LAPACKSupport::Property property = LAPACKSupport::Property::general);
+
/**
* Constructor for a square matrix of size @p size, distributed
- * in a given @p mpi_communicator .
+ * using the process grid in @p process_grid.
*
- * The choice of the block size @p block_size is a compromize between a large
- * enough sizes for efficient local BLAS and small enough get
+ * The choice of the block size @p block_size is a compromise between a large
+ * enough size for efficient local BLAS and small enough to get
* good parallel load balance.
*/
ScaLAPACKMatrix(const size_type size,
- MPI_Comm mpi_communicator,
+ std::shared_ptr<ProcessGrid> process_grid,
const unsigned int block_size = 32);
/**
LAPACKSupport::Property properties;
/**
- * MPI communicator with all processes
+ * Shared pointer to the ProcessGrid object, which contains the BLACS context and the MPI communicator, as well as other necessary data structures.
*/
- MPI_Comm mpi_communicator;
-
- /**
- * MPI communicator with inactive processes and the root
- */
- MPI_Comm mpi_communicator_inactive_with_root;
+ std::shared_ptr<ProcessGrid> grid;
/**
* Number of rows in the matrix
*/
int column_block_size;
- /**
- * BLACS context
- */
- int blacs_context;
-
- /**
- * ID of this MPI process
- */
- int this_mpi_process;
-
- /**
- * Total number of MPI processes
- */
- int n_mpi_processes;
-
- /**
- * Number of rows in processes grid
- */
- int n_process_rows;
-
- /**
- * Number of columns in processes grid
- */
- int n_process_columns;
-
- /**
- * Row of this process in the grid
- */
- int this_process_row;
-
- /**
- * Column of this process in the grid
- */
- int this_process_column;
-
/**
* Number of rows in the matrix owned by the current process
*/
*/
mutable std::vector<int> iwork;
- /**
- * A flag which is true for processes within the 2D process grid
- */
- bool active;
-
/**
* A character to define where elements are stored in case
* ScaLAPACK operation support this.
*/
void pdsyev_(const char *jobz, const char *uplo, const int *m, double *A, const int *ia, const int *ja, int *desca, double *w,
double *z, const int *iz, const int *jz, int *descz, double *work, const int *lwork, int *info);
+}
-}
DEAL_II_NAMESPACE_OPEN
namespace
{
/**
- * Internal function to create processor grid based on the total
- * number of cores and MPI size. See
+ * Internal function to determine the dimensions of the process grid based on
+ * the total number of cores, the matrix dimensions and the matrix block sizes.
*
* Amesos heuristics:
* https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166
* https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
*/
inline
- void choose_the_processor_grid(int &n_process_rows, int &n_process_columns, const unsigned int m, const unsigned int n,
- const unsigned int block_size_m, const unsigned int block_size_n, const int n_processes)
+ std::pair<int,int> choose_the_processor_grid(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
+ const unsigned int block_size_m, const unsigned int block_size_n)
{
// Few notes from the ScaLAPACK user guide:
// It is possible to predict the best grid shape given the number of processes available:
// Below we always try to create 2D processor grids:
+ int n_processes;
+ MPI_Comm_size(mpi_comm, &n_processes);
+
// Get the total number of cores we can occupy in a rectangular dense matrix
// with rectangular blocks when every core owns only a single block:
const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
// but this affects the grid shape dramatically, i.e. 10 cores 3x3 becomes 2x5.
// limit our estimate to be in [2, Np]
- n_process_columns = std::min (Np, std::max(2, Pc));
+ int n_process_columns = std::min (Np, std::max(2, Pc));
// finally, get the rows:
- n_process_rows = Np / n_process_columns ;
+ int n_process_rows = Np / n_process_columns ;
Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns,
ExcMessage("error in process grid: "+
" out of " +
std::to_string(n_processes)));
+ return std::make_pair(n_process_rows,n_process_columns);
+
// For example,
// 320x320 with 32x32 blocks and 16 cores:
// Pc = 1.0 * Pr => 4x4 grid
-/**
- *Constructor for a rectangular distributes Matrix
- */
-template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
- MPI_Comm mpi_communicator,
- const unsigned int block_size_row, const unsigned int block_size_column,
- const LAPACKSupport::Property property)
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, const std::pair<int,int> &grid_dimensions)
:
- TransposeTable<NumberType> (),
- state (LAPACKSupport::unusable),
- properties(property),
- mpi_communicator(mpi_communicator),
- n_rows(rows),
- n_columns(columns),
- row_block_size(block_size_row),
- column_block_size(block_size_column),
- uplo('L'), // for non-symmetric matrices this is not needed
- first_process_row(0),
- first_process_column(0),
- submatrix_row(1),
- submatrix_column(1)
+ mpi_communicator(mpi_comm),
+ n_process_rows(grid_dimensions.first),
+ n_process_columns(grid_dimensions.second)
{
- Assert (block_size_row > 0,
- ExcMessage("Row block size has to be positive."));
- Assert (block_size_column > 0,
- ExcMessage("Column block size has to be positive."));
- Assert (block_size_row <= rows,
- ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
- Assert (block_size_column <= columns,
- ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
+ Assert (grid_dimensions.first > 0,
+ ExcMessage("Number of process grid rows has to be positive."));
+ Assert (grid_dimensions.second > 0,
+ ExcMessage("Number of process grid columns has to be positive."));
MPI_Comm_size(mpi_communicator, &n_mpi_processes);
MPI_Comm_rank(mpi_communicator, &this_mpi_process);
- choose_the_processor_grid(n_process_rows, n_process_columns, n_rows, n_columns, row_block_size, column_block_size, n_mpi_processes);
+ Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
+ ExcMessage("Size of process grid is larger than number of available MPI processes."));
// processor grid order
// FIXME: default to column major
else
active = true;
- if (active)
- {
- Assert (n_process_rows == proccols_ && proccols_ == n_process_columns,
- ExcInternalError());
-
- // Get local sizes:
- n_local_rows = numroc_(&n_rows, &row_block_size, &this_process_row, &first_process_row, &n_process_rows);
- n_local_columns = numroc_(&n_columns, &column_block_size, &this_process_column, &first_process_column, &n_process_columns);
-
- // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different between processes
- int lda = std::max(1,n_local_rows);
-
- int info=0;
- descinit_(descriptor, &n_rows, &n_columns, &row_block_size, &column_block_size,&first_process_row,&first_process_column,&blacs_context, &lda, &info);
- AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));
-
- this->reinit(n_local_rows, n_local_columns);
- }
- else
- {
- // set process-local variables to something telling:
- n_local_rows = -1;
- n_local_columns = -1;
- for (unsigned int i = 0; i < 9; ++i)
- descriptor[i] = -1;
- }
-
// Create an auxiliary communicator which has root and all inactive cores
// Assume that inactive cores start with id=n_process_rows*n_process_columns
Assert (active || this_mpi_process >= n_process_rows*n_process_columns,
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const std::pair<int,int> &matrix_dimensions, const std::pair<int,int> &block_sizes)
+ :
+ ProcessGrid(mpi_comm,
+ choose_the_processor_grid(mpi_comm, matrix_dimensions.first, matrix_dimensions.second,
+ block_sizes.first, block_sizes.second) )
+{}
+
+
+
+ProcessGrid::~ProcessGrid()
+{
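+  // release the BLACS context only on processes that are part of the grid;
+  // the auxiliary communicator is freed on all processes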
+ if (active)
+ Cblacs_gridexit(blacs_context);
+
+ MPI_Comm_free(&mpi_communicator_inactive_with_root);
+}
+
+
+
+/**
+ * Constructor for a rectangular distributed matrix.
+ */
+template <typename NumberType>
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type rows, const size_type columns,
+ std::shared_ptr<ProcessGrid> process_grid,
+ const unsigned int block_size_row, const unsigned int block_size_column,
+ const LAPACKSupport::Property property)
+ :
+ TransposeTable<NumberType> (),
+ state (LAPACKSupport::unusable),
+ properties(property),
+ grid (process_grid),
+ n_rows(rows),
+ n_columns(columns),
+ row_block_size(block_size_row),
+ column_block_size(block_size_column),
+ uplo('L'), // for non-symmetric matrices this is not needed
+ first_process_row(0),
+ first_process_column(0),
+ submatrix_row(1),
+ submatrix_column(1)
+{
+ Assert (block_size_row > 0,
+ ExcMessage("Row block size has to be positive."));
+ Assert (block_size_column > 0,
+ ExcMessage("Column block size has to be positive."));
+ Assert (block_size_row <= rows,
+ ExcMessage("Row block size can not be greater than the number of rows of the matrix"));
+ Assert (block_size_column <= columns,
+ ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
+
+ if (grid->active)
+ {
+ // Get local sizes:
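+ // numroc_ computes how many rows/columns of the global matrix are owned
+ // by this process under the block-cyclic distribution defined by the
+ // block sizes and the coordinates of this process in the grid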
+ n_local_rows = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
+ n_local_columns = numroc_(&n_columns, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
+
+ // LLD_A = MAX(1,NUMROC(M_A, MB_A, MYROW, RSRC_A, NPROW)), different between processes
+ int lda = std::max(1,n_local_rows);
+
+ int info=0;
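+ // initialize the ScaLAPACK array descriptor: global sizes, block sizes,
+ // coordinates of the first process owning the matrix, BLACS context and
+ // the local leading dimension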
+ descinit_(descriptor, &n_rows, &n_columns, &row_block_size, &column_block_size,&first_process_row,&first_process_column,&(grid->blacs_context), &lda, &info);
+ AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));
+
+ this->reinit(n_local_rows, n_local_columns);
+ }
+ else
+ {
+ // set process-local variables to something telling:
+ n_local_rows = -1;
+ n_local_columns = -1;
+ for (unsigned int i = 0; i < 9; ++i)
+ descriptor[i] = -1;
+ }
+}
+
+
+
+template <typename NumberType>
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
+ std::shared_ptr<ProcessGrid> process_grid,
+ const std::pair<size_type,size_type> &block_sizes,
+ const LAPACKSupport::Property property)
+:
+ScaLAPACKMatrix<NumberType>(sizes.first,sizes.second,process_grid,block_sizes.first,block_sizes.second,property)
+{}
+
+
+
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
- MPI_Comm mpi_communicator,
+ std::shared_ptr<ProcessGrid> process_grid,
const unsigned int block_size)
:
- //delegating constructors only work for C++11
- ScaLAPACKMatrix<NumberType> (size,size,mpi_communicator,block_size,block_size,LAPACKSupport::Property::symmetric)
+ ScaLAPACKMatrix<NumberType>(size,size,process_grid,block_size,block_size,LAPACKSupport::Property::symmetric)
{}
Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
- if (active)
+ if (grid->active)
{
for (int i=0; i < n_local_rows; ++i)
{
Assert (loc_row >= 0 && loc_row < n_local_rows,
ExcIndexRange(loc_row,0,n_local_rows));
const int i = loc_row+1;
- return indxl2g_ (&i, &row_block_size, &this_process_row, &first_process_row, &n_process_rows) - 1;
+ return indxl2g_ (&i, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows)) - 1;
}
Assert (loc_column >= 0 && loc_column < n_local_columns,
ExcIndexRange(loc_column,0,n_local_columns));
const int j = loc_column+1;
- return indxl2g_ (&j, &column_block_size, &this_process_column, &first_process_column, &n_process_columns) - 1;
+ return indxl2g_ (&j, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns)) - 1;
}
Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
- if (active)
+ if (grid->active)
{
matrix = 0.;
for (int i=0; i < n_local_rows; ++i)
}
}
}
-
- Utilities::MPI::sum(matrix, mpi_communicator, matrix);
+ Utilities::MPI::sum(matrix, grid->mpi_communicator, matrix);
// we could move the following lines under the main loop above,
// but they would be dependent on glob_i and glob_j, which
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::~ScaLAPACKMatrix()
{
- if (active)
- Cblacs_gridexit(blacs_context);
-
- MPI_Comm_free(&mpi_communicator_inactive_with_root);
}
{
Assert (n_columns == n_rows,
ExcMessage("Cholesky factorization can be applied to SPD matrices only."));
- if (active)
+
+ if (grid->active)
{
int info = 0;
NumberType *A_loc = &this->values[0];
if (state == LAPACKSupport::matrix)
compute_cholesky_factorization();
- if (active)
+ if (grid->active)
{
int info = 0;
/*
Assert (properties == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
- ScaLAPACKMatrix<NumberType> Z (n_mpi_processes, mpi_communicator, 1);
+ ScaLAPACKMatrix<NumberType> Z (grid->n_mpi_processes, grid, 1);
ev.resize (n_rows);
- if (active)
+ if (grid->active)
{
int info = 0;
/*
* send the eigenvalues to processors not being part of the process grid
*/
- MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, mpi_communicator_inactive_with_root);
+ MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
/* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
* including the diagonal, is destroyed. Therefore, the matrix is unusable
Assert (properties == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
-
- /*const size_type rows, const size_type columns,
- MPI_Comm mpi_communicator,
- const unsigned int block_size_row, const unsigned int block_size_column,
- const LAPACKSupport::Property property*/
-
- ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, n_columns, mpi_communicator, row_block_size, column_block_size, LAPACKSupport::Property::general);
+ ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, n_columns, grid, row_block_size, column_block_size, LAPACKSupport::Property::general);
+ eigenvectors.descriptor[1]=0;
ev.resize (n_rows);
-
- const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
- const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(grid->mpi_communicator));
ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
-
-
- if (active)
+ if (grid->active)
{
int info = 0;
/*
* send the eigenvalues to processors not being part of the process grid
*/
- MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, mpi_communicator_inactive_with_root);
+ MPI_Bcast(&ev.front(),ev.size(),MPI_DOUBLE, 0/*from root*/, grid->mpi_communicator_inactive_with_root);
/* On exit, the lower triangle (if uplo='L') or the upper triangle (if uplo='U') of A,
* including the diagonal, is destroyed. Therefore, the matrix is unusable
Assert (state == LAPACKSupport::cholesky,
ExcMessage("Matrix has to be in Cholesky state before calling this function."));
NumberType rcond = 0.;
- if (active)
+
+ if (grid->active)
{
int lwork = 2 * n_local_rows + 3 * n_local_columns + column_block_size;
int liwork = n_local_rows;
NumberType res = 0.;
- if (active)
+ if (grid->active)
{
//int IROFFA = MOD( IA-1, MB_A )
//int ICOFFA = MOD( JA-1, NB_A )
- const int lcm = ilcm_(&n_process_rows, &n_process_columns);
- const int v2 = lcm/n_process_rows;
+ const int lcm = ilcm_(&(grid->n_process_rows), &(grid->n_process_columns));
+ const int v2 = lcm/(grid->n_process_rows);
- const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &this_process_row, &first_process_row, &n_process_rows);
- const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &this_process_column, &first_process_column, &n_process_columns);
- const int Np0 = numroc_(&n_columns/*+IROFFA*/, &row_block_size, &this_process_row, &IAROW, &n_process_rows);
- const int Nq0 = numroc_(&n_columns/*+ICOFFA*/, &column_block_size, &this_process_column, &IACOL, &n_process_columns);
+ const int IAROW = indxg2p_(&submatrix_row, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
+ const int IACOL = indxg2p_(&submatrix_column, &column_block_size, &(grid->this_process_column), &first_process_column, &(grid->n_process_columns));
+ const int Np0 = numroc_(&n_columns/*+IROFFA*/, &row_block_size, &(grid->this_process_row), &IAROW, &(grid->n_process_rows));
+ const int Nq0 = numroc_(&n_columns/*+ICOFFA*/, &column_block_size, &(grid->this_process_column), &IACOL, &(grid->n_process_columns));
const int v1 = iceil_(&Np0, &row_block_size);
const int ldw = (n_local_rows==n_local_columns) ?
template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::send_to_inactive(NumberType &value) const
{
- if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
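+ // the value is only available on the processes of the 2D grid; broadcast it
+ // from the root to the processes that are not part of the grid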
+ if (grid->mpi_communicator_inactive_with_root != MPI_COMM_NULL)
{
MPI_Bcast(&value,1,MPI_DOUBLE,
0/*from root*/,
- mpi_communicator_inactive_with_root);
+ grid->mpi_communicator_inactive_with_root);
}
}
template <typename NumberType>
int ScaLAPACKMatrix<NumberType>::get_process_grid_rows() const
{
- return n_process_rows;
+ return grid->n_process_rows;
}
template <typename NumberType>
int ScaLAPACKMatrix<NumberType>::get_process_grid_columns() const
{
- return n_process_columns;
+ return grid->n_process_columns;
}