#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/lapack_support.h>
#include <deal.II/base/mpi.h>
+#include <deal.II/base/thread_management.h>
#include <mpi.h>
#include <memory>
/**
* A class taking care of setting up a two-dimensional processor grid.
 * For example, an MPI communicator with 5 processes can be arranged into a
- * 2x2 grid with 5-th processor being inactive:
+ * 2x2 grid with the 5th processor being inactive:
* @code
* | 0 | 1
* -----| ------- |-----
 * Note that this class allows one to set up a process grid which has fewer
* MPI cores than the total number of cores in the communicator.
*
+ * Currently the only place where one would use a ProcessGrid object is
+ * in connection with a ScaLAPACKMatrix object.
+ *
* @author Benjamin Brands, 2017
*/
class ProcessGrid
public:
/**
- * Declare class ScaLAPACK as friend to provide access to private members, e.g. the MPI Communicator
+ * Declare class ScaLAPACK as friend to provide access to private members.
*/
template <typename NumberType> friend class ScaLAPACKMatrix;
/**
- * Constructor for a process grid for a given @p mpi_communicator .
- * The pair @p grid_dimensions contains the user-defined numbers of process rows and columns.
- * Their product should be less or equal to the total number of cores
+ * Constructor for a process grid with @p n_rows and @p n_columns for a given @p mpi_communicator.
+ * The product of rows and columns should be less than or equal to the total number of cores
* in the @p mpi_communicator.
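 *
 * A usage sketch (assuming MPI has been initialized and
 * @p mpi_communicator contains at least four processes; the names are
 * illustrative):
 * @code
 * // arrange (at most) four of the available processes in a 2x2 grid
 * std::shared_ptr<ProcessGrid> grid =
 *   std::make_shared<ProcessGrid>(mpi_communicator, 2, 2);
 * @endcode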
*/
ProcessGrid(MPI_Comm mpi_communicator,
- const std::pair<unsigned int,unsigned int> &grid_dimensions);
+ const unsigned int n_rows,
+ const unsigned int n_columns);
/**
* Constructor for a process grid for a given @p mpi_communicator.
* In this case the process grid is heuristically chosen based on the
* dimensions and block-cyclic distribution of a target matrix provided
- * in @p matrix_dimensions and @p block_sizes.
+ * in @p n_rows_matrix, @p n_columns_matrix, @p row_block_size and @p column_block_size.
*
 * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$
 * are the matrix dimensions and $MB,NB$ are the block sizes and $Np$ is the number of
 * processes in the @p mpi_communicator. This function then creates a 2D processor grid
 * assuming the ratio between the number of process rows $p$ and columns $q$ to be
 * equal to the ratio between the matrix dimensions $M$ and $N$.
+ *
+ * For example, a square matrix of size $640\times 640$ with a block size of $32$
+ * and an @p mpi_communicator with 11 cores will result in a $3\times 3$
+ * process grid.
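+ *
+ * A corresponding usage sketch (the communicator is illustrative):
+ * @code
+ * // let the grid dimensions be chosen from the matrix and block sizes
+ * std::shared_ptr<ProcessGrid> grid =
+ *   std::make_shared<ProcessGrid>(mpi_communicator, 640, 640, 32, 32);
+ * @endcode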
*/
ProcessGrid(MPI_Comm mpi_communicator,
- const std::pair<unsigned int,unsigned int> &matrix_dimensions,
- const std::pair<unsigned int,unsigned int> &block_sizes);
+ const unsigned int n_rows_matrix,
+ const unsigned int n_columns_matrix,
+ const unsigned int row_block_size,
+ const unsigned int column_block_size);
/**
* Destructor.
*/
- virtual ~ProcessGrid();
+ ~ProcessGrid();
/**
 * Return the number of columns in the process grid.
*/
unsigned int get_process_grid_columns() const;
-private:
-
/**
 * Send @p count values stored consecutively starting at @p value from
* the process with rank zero to processes which
void send_to_inactive(NumberType *value, const int count=1) const;
/**
- * An MPI communicator with all processes.
+ * Return <code>true</code> if the process is active within the grid.
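+ *
+ * A sketch of the intended use (assuming @p grid is a
+ * std::shared_ptr<ProcessGrid>): restrict local work to processes
+ * that are part of the grid:
+ * @code
+ * if (grid->is_process_active())
+ *   {
+ *     // operate on the locally owned part of a distributed matrix
+ *   }
+ * @endcode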
+ */
+ bool is_process_active() const;
+
+private:
+
+ /**
+ * A private constructor which takes grid dimensions as an <code>std::pair</code>.
+ */
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const std::pair<unsigned int,unsigned int> &grid_dimensions);
+
+ /**
+ * An MPI communicator with all processes (active and inactive).
*/
MPI_Comm mpi_communicator;
/**
* Row of this process in the grid.
+ *
+ * It is negative for inactive processes.
*/
int this_process_row;
/**
* Column of this process in the grid.
+ *
+ * It is negative for inactive processes.
*/
int this_process_column;
/**
* A flag which is true for processes within the 2D process grid.
*/
- bool active;
-
+ bool mpi_process_is_active;
};
* ScaLAPACK assumes that matrices are distributed according to the
* block-cyclic decomposition scheme. An $M$ by $N$ matrix is first decomposed
* into $MB$ by $NB$ blocks which are then uniformly distributed across
- * the 2D process grid $p*q \le Np$.
+ * the 2D process grid of size $p\times q$ with $p \, q \le Np$, where $p,q$ are the grid dimensions and
+ * $Np$ is the total number of processes.
*
- * For example, a global real symmetric matrix of order 9 is stored in
+ * For example, a global real symmetric matrix of size $9\times 9$ is stored in
 * upper storage mode with block sizes $4\times 4$:
* @code
* 0 1 2
* 1 | . . . . 0.0 | . . -4.0 0.0
* | . . . . -4.0 | . . . -4.0
* @endcode
+ * Note how processes $(0,0)$ and $(1,0)$ of the process grid store an
+ * extra column to represent the last column of the original matrix that
+ * did not fit the decomposition into $4\times 4$ sub-blocks.
*
* The choice of the block size is a compromise between a sufficiently large
- * sizes for efficient local/serial BLAS, but one that is also small enough to achieve
+ * size for efficient local/serial BLAS and one that is small enough to achieve
* good parallel load balance.
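 *
 * Putting the pieces together, a minimal setup could look as follows
 * (a sketch only; the matrix size, block size and communicator are
 * illustrative):
 * @code
 * // choose the process grid based on the matrix and block dimensions
 * auto grid = std::make_shared<ProcessGrid>(mpi_communicator,
 *                                           1000, 1000, 32, 32);
 * // a 1000x1000 matrix distributed in 32x32 blocks over the grid
 * ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
 * @endcode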
*
* Below we show a strong scaling example of ScaLAPACKMatrix::invert()
typedef unsigned int size_type;
/**
- * Constructor for a rectangular matrix with rows and columns provided in
- * @p sizes, and distributed using the grid @p process_grid.
+ * Constructor for a rectangular matrix with @p n_rows and @p n_cols
+ * and distributed using the grid @p process_grid.
*/
- ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
+ ScaLAPACKMatrix(const size_type n_rows,
+ const size_type n_columns,
const std::shared_ptr<const ProcessGrid> process_grid,
- const std::pair<size_type,size_type> &block_sizes = std::make_pair(32,32),
+ const size_type row_block_size = 32,
+ const size_type column_block_size = 32,
const LAPACKSupport::Property property = LAPACKSupport::Property::general);
/**
/**
 * Destructor.
*/
- virtual ~ScaLAPACKMatrix();
+ ~ScaLAPACKMatrix() = default;
/**
* Assign @p property to this matrix.
/**
* Compute the Cholesky factorization of the matrix using ScaLAPACK
- * function <code>pXpotrf</code>. The result of factorization is stored in this object.
+ * function <code>pXpotrf</code>. The result of the factorization is stored in this object.
*/
void compute_cholesky_factorization ();
/**
- * Invert the matrix by first computing Cholesky factorization and then
+ * Invert the matrix by first computing a Cholesky factorization and then
* building the actual inverse using <code>pXpotri</code>. The inverse is stored
* in this object.
*/
/**
* Compute all eigenvalues of a real symmetric matrix using <code>pXsyev</code>.
- * If successful, the computed @p eigenvalues are arranged in ascending order.
+ * If successful, the computed eigenvalues are arranged in ascending order.
+ * After this function is called, the content of the matrix is overwritten,
+ * making it unusable.
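+ *
+ * A usage sketch (assuming @p A is a symmetric
+ * ScaLAPACKMatrix<double> in the matrix state):
+ * @code
+ * const std::vector<double> eigenvalues = A.eigenvalues_symmetric();
+ * @endcode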
*/
- void eigenvalues_symmetric (std::vector<NumberType> &eigenvalues);
+ std::vector<NumberType> eigenvalues_symmetric();
/**
* Compute all eigenpairs of a real symmetric matrix using <code>pXsyev</code>.
- * If successful, the computed @p eigenvalues are arranged in ascending order.
+ * If successful, the computed eigenvalues are arranged in ascending order.
* The eigenvectors are stored in the columns of the matrix, thereby
* overwriting the original content of the matrix.
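 *
 * A usage sketch (again assuming a symmetric ScaLAPACKMatrix<double> @p A
 * in the matrix state):
 * @code
 * const std::vector<double> eigenvalues = A.eigenpairs_symmetric();
 * // the columns of A now hold the corresponding eigenvectors
 * @endcode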
*/
- void eigenpairs_symmetric (std::vector<NumberType> &eigenvalues);
+ std::vector<NumberType> eigenpairs_symmetric ();
/**
 * Estimate the condition number of an SPD matrix in the $l_1$-norm.
* overflow when the condition number is very large.
*
* @p a_norm must contain the $l_1$-norm of the matrix prior to calling
- * Cholesky factorization.
+ * Cholesky factorization (see l1_norm()).
*
* @note An alternative is to compute the inverse of the matrix
- * explicitly and manually constructor $k_1 = ||A||_1 ||A^{-1}||_1$.
+ * explicitly and manually construct $k_1 = ||A||_1 ||A^{-1}||_1$.
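+ *
+ * A sketch of the expected call sequence (assuming @p A is a symmetric
+ * positive definite ScaLAPACKMatrix<double>):
+ * @code
+ * const double a_norm = A.l1_norm();
+ * A.compute_cholesky_factorization();
+ * const double rcond = A.reciprocal_condition_number(a_norm);
+ * @endcode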
*/
NumberType reciprocal_condition_number(const NumberType a_norm) const;
*/
size_type n() const;
-private:
-
/**
 * Number of local rows on this MPI process.
*/
*/
NumberType &local_el(const int loc_row, const int loc_column);
+private:
+
/**
* Calculate the norm of a distributed dense matrix using ScaLAPACK's
* internal function.
*/
const int submatrix_column;
+ /**
+ * Thread mutex.
+ */
+ mutable Threads::Mutex mutex;
};
// ----------------------- inline functions ----------------------------
#ifdef DEAL_II_WITH_SCALAPACK
#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/conditional_ostream.h>
/* Basic Linear Algebra Communication Subprograms (BLACS) declarations */
// https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dinitb.htm#dinitb
- /* You call the BLACS_PINFO routine when you want to determine how many processes are available.
- * You can use this information as input into other BLACS routines that set up your process grid*/
- void Cblacs_pinfo(int *, int *);
+ /**
+ * Determine how many processes are available and the current process rank.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbpnf.htm
+ */
+ void Cblacs_pinfo(int *rank, int *nprocs);
- /*
- * You call the BLACS_GET routine when you want the values the BLACS are using for internal defaults.
- * The most common use is in retrieving a default system context for input into BLACS_GRIDINIT or BLACS_GRIDMAP.
+ /**
+ * Return internal BLACS value in @p val based on the input @p what and @p icontxt.
+ * The most common use is in retrieving a default system context (@p what = 0, @p icontxt is ignored)
+ * to be used in BLACS_GRIDINIT or BLACS_GRIDMAP.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbget.htm
*/
void Cblacs_get(int icontxt, int what, int *val);
- /*
- * You call the BLACS_GRIDINIT routine when you want to map the processes sequentially in row-major order
- * or column-major order into the process grid.
- * You must specify the same input argument values in the calls to BLACS_GRIDINIT on every process.
+ /**
+ * Map the processes sequentially in row-major or column-major order
+ * into the process grid. Input arguments must be the same on every process.
+ *
+ * On return, @p context is the integer handle to the BLACS context,
+ * whereas on entry it is a system context to be used in creating the
+ * BLACS context.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbint.htm
*/
void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width);
- /*
- * You call the BLACS_GRIDINFO routine to obtain the process row and column index.
+ /**
+ * Return the process row and column index.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbinfo.htm
*/
- void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col);
+ void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col);
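+  /*
+   * A sketch of the typical grid setup sequence built from the routines
+   * declared above (a 1 x n_procs row-major grid; variable names are
+   * illustrative):
+   *
+   *   int rank, n_procs;
+   *   Cblacs_pinfo(&rank, &n_procs);
+   *   int context;
+   *   Cblacs_get(0, 0, &context);   // default system context (icontxt is ignored when what == 0)
+   *   Cblacs_gridinit(&context, "Row", 1, n_procs);
+   *   int rows, cols, my_row, my_col;
+   *   Cblacs_gridinfo(context, &rows, &cols, &my_row, &my_col);
+   *   // ... use the grid ...
+   *   Cblacs_gridexit(context);
+   */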
- /*
- * Given the system process number, returns the row and column coordinates in the BLACS' process grid.
+ /**
+ * Given the system process number, return the row and column coordinates in the BLACS' process grid.
*/
- void Cblacs_pcoord(int, int, int *, int *);
+ void Cblacs_pcoord(int ictxt, int pnum, int *prow, int *pcol);
- /*
- * You call the BLACS_GRIDEXIT routine to release a BLACS context.
+ /**
+ * Release a BLACS context.
*/
void Cblacs_gridexit(int context);
- /*
- * This routines holds up execution of all processes within the indicated scope until they have all called the routine.
+ /**
+   * This routine holds up execution of all processes within the indicated
+ * scope until they have all called the routine.
*/
void Cblacs_barrier(int, const char *);
- /*
- * Frees all BLACS contexts and releases all allocated memory.
+ /**
+   * Free all BLACS contexts and release all allocated memory.
*/
void Cblacs_exit(int error_code);
- /*
- * This routine takes the indicated general rectangular matrix and sends it to the destination process in the process grid.
- * Return from the routine indicates that the buffer may be reused. The routine is locally-blocking, that is,
- * it will return even if the corresponding receive is not posted.
+ /**
+   * Receive a message from the process (@p rsrc, @p csrc) into a general rectangular matrix.
+ *
+ * https://software.intel.com/en-us/mkl-developer-reference-c-gerv2d
*/
- void Cdgerv2d(int, int, int, double *, int, int, int);
+ void Cdgerv2d(int context, int M, int N, double *A, int lda, int rsrc, int csrc);
- /*
- * This routine receives a message from a process into a general rectangular matrix.
- * This routine is globally-blocking, that is, return from the routine indicates that the message has been received into the matrix.
+ /**
+   * Send the general rectangular matrix @p A to the destination
+   * process (@p rdest, @p cdest) in the process grid.
+ *
+ * https://software.intel.com/en-us/mkl-developer-reference-c-2018-beta-gesd2d
*/
- void Cdgesd2d(int, int, int, double *, int, int, int);
+  void Cdgesd2d(int context, int M, int N, double *A, int lda, int rdest, int cdest);
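+  /*
+   * A point-to-point sketch using the two routines above (illustrative
+   * only): process (0,0) sends an m x n local block to process (1,0),
+   * which receives it:
+   *
+   *   if (my_row == 0 && my_col == 0)
+   *     Cdgesd2d(context, m, n, block, lda, 1, 0);
+   *   else if (my_row == 1 && my_col == 0)
+   *     Cdgerv2d(context, m, n, block, lda, 0, 0);
+   */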
- /*
- *
+ /**
+ * Get BLACS context from MPI @p comm.
*/
int Csys2blacs_handle(MPI_Comm comm);
/**
- * NUMber of Rows Or Columns) -- computes how many rows and columns each process owns.
+ * Compute how many rows and columns each process owns (NUMber of Rows Or Columns).
*
* https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dnumy.htm
*/
int numroc_ (const int *n, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
/**
- * Computes the Cholesky factorization of an N-by-N real
- * symmetric positive definite distributed matrix sub( A ) denoting
- * A(IA:IA+N-1, JA:JA+N-1).
- * see http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html
+ * Compute the Cholesky factorization of an N-by-N real
+ * symmetric positive definite distributed matrix sub( A ) denoting
+ * A(IA:IA+N-1, JA:JA+N-1).
+ *
+ * http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html
* https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotrf.htm
*/
void pdpotrf_(const char *UPLO,
int *INFO);
/**
- * Computes the inverse of a real symmetric positive definite
- * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the
- * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by
- * PDPOTRF.
+ * Compute the inverse of a real symmetric positive definite
+ * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the
+ * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by
+ * PDPOTRF.
*
- * see http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html
+ * http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html
* https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotri.htm
* https://software.intel.com/en-us/mkl-developer-reference-c-p-potri
*/
int *INFO);
/**
- * Estimates the reciprocal of the condition number (in the
- * 1-norm) of a real symmetric positive definite distributed matrix
- * using the Cholesky factorization.
+ * Estimate the reciprocal of the condition number (in the
+ * l1-norm) of a real symmetric positive definite distributed matrix
+ * using the Cholesky factorization.
*
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon
- * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html
- * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon
+ * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html
+ * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon
*/
void pdpocon_(const char *uplo,
const int *N,
double *work);
/**
- * Computes the Least Common Multiple (LCM) of two positive integers @p M and @p N.
- * In fact the routine computes the greatest common divisor (GCD) and
- * use the fact that M*N = GCD*LCM.
+ * Compute the Least Common Multiple (LCM) of two positive integers @p M and @p N.
+   * In fact the routine computes the greatest common divisor (GCD) and
+   * uses the fact that M*N = GCD*LCM.
*
- * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html
+ * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html
*/
int ilcm_(const int *M, const int *N);
/**
- * returns the ceiling of the division of two integers.
+ * Return the ceiling of the division of two integers.
*
* http://www.netlib.org/scalapack/explore-html/df/d07/iceil_8f_source.html
*/
int iceil_(const int *i1, const int *i2);
- /*
- * DESCINIT initializes the descriptor vector with the 8 input arguments
+ /**
+   * Initialize the descriptor vector with the 8 input arguments.
*/
- void descinit_ (int *desc, const int *m, const int *n, const int *mb, const int *nb, const int *irsrc, const int *icsrc, const int *ictxt, const int *lld, int *info);
+ void descinit_ (int *desc,
+ const int *m, const int *n, const int *mb, const int *nb,
+ const int *irsrc, const int *icsrc,
+ const int *ictxt, const int *lld, int *info);
/**
- * computes the global index of a distributed matrix entry
+ * Compute the global index of a distributed matrix entry
* pointed to by the local index @p indxloc of the process indicated by
* @p iproc.
*
*/
int indxl2g_ (const int *indxloc, const int *nb, const int *iproc, const int *isrcproc, const int *nprocs);
+ /**
+   * Compute the solution to a real system of linear equations $A X = B$.
+ */
void pdgesv_(const int *n, const int *nrhs,
double *A, const int *ia, const int *ja, const int *desca,
int *ipiv,
double *B, const int *ib, const int *jb, const int *descb,
int *info);
+ /**
+ * Perform one of the matrix-matrix operations:
+ * sub( C ) := alpha*op( sub( A ) )*op( sub( B ) ) + beta*sub( C ),
+ * where
+   * sub( C ) denotes C(IC:IC+M-1,JC:JC+N-1), and op( X ) is one of
+ * op( X ) = X or op( X ) = X'.
+ */
void pdgemm_(const char *transa, const char *transb,
const int *m, const int *n, const int *k,
const double *alpha,
const double *beta,
double *C, const int *IC, const int *JC, const int *DESCC);
- /*
- * PDLANGE returns the value of the one norm, or the Frobenius norm, or the infinity norm,
+ /**
+ * Return the value of the one norm, or the Frobenius norm, or the infinity norm,
 * or the element of largest absolute value of a distributed matrix.
*/
double pdlange_(char const *norm,
double *A, int const &ia, int const &ja, int *desca,
double *work);
- /*
- * INDXG2P computes the process coordinate which posseses the entry of a
+ /**
+ * Compute the process coordinate which possesses the entry of a
 * distributed matrix specified by a global index.
*/
int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
- /*
- * The pdsyev routine computes all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A
- * The by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system
+ /**
+ * Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A
+ * by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system
* and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes.
* Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages.
*
* http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html
* https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev
*/
- void pdsyev_(const char *jobz, const char *uplo, const int *m, double *A, const int *ia, const int *ja, int *desca, double *w,
- double *z, const int *iz, const int *jz, int *descz, double *work, const int *lwork, int *info);
+ void pdsyev_(const char *jobz, const char *uplo,
+ const int *m, double *A, const int *ia, const int *ja, int *desca,
+ double *w,
+ double *z, const int *iz, const int *jz, int *descz,
+ double *work, const int *lwork, int *info);
- /*
- * pdlacpy copies all or a part of a distributed matrix A to another
+ /**
+ * Copy all or a part of a distributed matrix A to another
 * distributed matrix B. No communication is performed; pdlacpy
 * performs a local copy sub(B) := sub(A), where sub(A) denotes
 * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1).
- *
*/
- void pdlacpy_(const char *uplo, const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca,
+ void pdlacpy_(const char *uplo,
+ const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca,
double *B, const int *ib, const int *jb, int *descb);
}
* https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
*/
inline
- std::pair<int,int> choose_the_processor_grid(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
- const unsigned int block_size_m, const unsigned int block_size_n)
+ std::pair<int,int> compute_processor_grid_sizes(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
+ const unsigned int block_size_m, const unsigned int block_size_n)
{
 // A few notes from the ScaLAPACK user guide:
// It is possible to predict the best grid shape given the number of processes available:
// Np = Pc * Pc / ratio
// for quadratic matrices the ratio equals 1
const double ratio = double(n)/m;
- int Pc = std::sqrt(ratio * Np);
+ int Pc = std::floor(std::sqrt(ratio * Np));
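+   // e.g. for a square 640x640 matrix with 32x32 blocks and Np = 11:
+   // ratio = 1, Pc = floor(sqrt(11)) = 3, giving a 3x3 process grid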
 // one could round up Pc to a number that divides Np with zero remainder
// while ( Np % Pc != 0 )
ExcMessage("Size of process grid is larger than number of available MPI processes."));
// processor grid order.
- // FIXME: default to column major?
const bool column_major = false;
 // Initialize the BLACS context from the provided communicator
int proccols_ = n_process_columns;
Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column );
- // If this MPI core is not on the grid, flag is as inactive and
+ // If this MPI core is not on the grid, flag it as inactive and
// skip all jobs
// FIXME: different condition is used here
// https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
if (this_process_row < 0 || this_process_column < 0)
- active = false;
+ mpi_process_is_active = false;
else
- active = true;
+ mpi_process_is_active = true;
// Create an auxiliary communicator which has root and all inactive cores
// Assume that inactive cores start with id=n_process_rows*n_process_columns
- Assert (active || this_mpi_process >= n_process_rows*n_process_columns,
+ Assert (mpi_process_is_active || this_mpi_process >= n_process_rows*n_process_columns,
ExcInternalError());
std::vector<int> inactive_with_root_ranks;
MPI_Group inactive_with_root_group;
const int n = inactive_with_root_ranks.size();
ierr = MPI_Group_incl(all_group,
- n, &inactive_with_root_ranks[0],
+ n, inactive_with_root_ranks.data(),
&inactive_with_root_group);
AssertThrowMPI(ierr);
// Double check that the process with rank 0 in subgroup is active:
#ifdef DEBUG
- if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
- {
- int subgroup_rank = -1, subgroup_size = -1;
- MPI_Comm_rank(mpi_communicator_inactive_with_root, &subgroup_rank);
- MPI_Comm_size(mpi_communicator_inactive_with_root, &subgroup_size);
- if (subgroup_rank == 0)
- Assert (active, ExcInternalError());
- }
+ if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
+ Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0)
+ Assert (mpi_process_is_active, ExcInternalError());
#endif
}
ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
- const std::pair<unsigned int,unsigned int> &matrix_dimensions,
- const std::pair<unsigned int,unsigned int> &block_sizes)
+ const unsigned int n_rows_matrix,
+ const unsigned int n_columns_matrix,
+ const unsigned int row_block_size,
+ const unsigned int column_block_size)
:
ProcessGrid(mpi_comm,
- choose_the_processor_grid(mpi_comm, matrix_dimensions.first, matrix_dimensions.second,
- block_sizes.first, block_sizes.second) )
+ compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix,
+ row_block_size, column_block_size) )
{}
+ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const unsigned int n_rows,
+ const unsigned int n_columns)
+ :
+ ProcessGrid(mpi_comm,
+ std::make_pair(n_rows,n_columns))
+{}
+
+
+
+
ProcessGrid::~ProcessGrid()
{
- if (active)
+ if (mpi_process_is_active)
Cblacs_gridexit(blacs_context);
MPI_Comm_free(&mpi_communicator_inactive_with_root);
+bool ProcessGrid::is_process_active() const
+{
+ return mpi_process_is_active;
+}
+
+
+
template <typename NumberType>
void ProcessGrid::send_to_inactive(NumberType *value, const int count) const
{
if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
{
const int ierr =
- MPI_Bcast(value,count,MPI_DOUBLE,
+ MPI_Bcast(value,count,
+ Utilities::MPI::internal::mpi_type_id (value),
0/*from root*/,
mpi_communicator_inactive_with_root);
AssertThrowMPI(ierr);
-
-/**
- *Constructor for a rectangular distributed Matrix
- */
template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const std::pair<size_type,size_type> &sizes,
+ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
+ const size_type n_columns_,
const std::shared_ptr<const ProcessGrid> process_grid,
- const std::pair<size_type,size_type> &block_sizes,
+ const size_type row_block_size_,
+ const size_type column_block_size_,
const LAPACKSupport::Property property)
:
TransposeTable<NumberType> (),
state (LAPACKSupport::unusable),
property(property),
grid (process_grid),
- n_rows(sizes.first),
- n_columns(sizes.second),
- row_block_size(block_sizes.first),
- column_block_size(block_sizes.second),
+ n_rows(n_rows_),
+ n_columns(n_columns_),
+ row_block_size(row_block_size_),
+ column_block_size(column_block_size_),
uplo('L'), // for non-symmetric matrices this is not needed
first_process_row(0),
first_process_column(0),
Assert (column_block_size <= n_columns,
ExcMessage("Column block size can not be greater than the number of columns of the matrix"));
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
// Get local sizes:
n_local_rows = numroc_(&n_rows, &row_block_size, &(grid->this_process_row), &first_process_row, &(grid->n_process_rows));
int lda = std::max(1,n_local_rows);
int info=0;
- descinit_(descriptor, &n_rows, &n_columns, &row_block_size, &column_block_size,&first_process_row,&first_process_column,&(grid->blacs_context), &lda, &info);
+ descinit_(descriptor, &n_rows, &n_columns,
+ &row_block_size, &column_block_size,
+ &first_process_row, &first_process_column,
+ &(grid->blacs_context), &lda, &info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("descinit_", info));
this->reinit(n_local_rows, n_local_columns);
const size_type block_size,
const LAPACKSupport::Property property)
:
- ScaLAPACKMatrix<NumberType>(std::make_pair(size,size),
+ ScaLAPACKMatrix<NumberType>(size,
+ size,
process_grid,
- std::make_pair(block_size,block_size),
+ block_size,
+ block_size,
property)
{}
ScaLAPACKMatrix<NumberType>::operator = (const FullMatrix<NumberType> &matrix)
{
// FIXME: another way to copy is to use pdgeadd_ PBLAS routine.
- // This routine computes sum of two matrices B:=a*A+b*B.
- // Matrices can have different distribution,in particular matrixA can
+ // This routine computes the sum of two matrices B:=a*A+b*B.
+  // Matrices can have different distributions; in particular, matrix A can
// be owned by only one process, so we can set a=1 and b=0 to copy
// non-distributed matrix A into distributed matrix B.
Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
for (int i=0; i < n_local_rows; ++i)
{
Assert (n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
Assert (n_columns == int(matrix.n()), ExcDimensionMismatch(n_columns, matrix.n()));
- //if (active)
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
matrix = 0.;
for (int i=0; i < n_local_rows; ++i)
-template <typename NumberType>
-ScaLAPACKMatrix<NumberType>::~ScaLAPACKMatrix()
-{
-}
-
-
-
template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
Assert (n_columns == n_rows,
ExcMessage("Cholesky factorization can be applied to SPD matrices only."));
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
int info = 0;
NumberType *A_loc = &this->values[0];
if (state == LAPACKSupport::matrix)
compute_cholesky_factorization();
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
int info = 0;
- /*
- * matrix Z is not distributed as it will not be referenced by the
- * ScaLapack function call
- */
- std::vector<double> Z_loc;
NumberType *A_loc = &this->values[0];
pdpotri_ (&uplo,&n_columns, A_loc, &submatrix_row, &submatrix_column, descriptor,&info);
AssertThrow (info==0, LAPACKSupport::ExcErrorCode("pdpotri", info));
template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::eigenvalues_symmetric(std::vector<NumberType> &ev)
+std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenvalues_symmetric()
{
Assert (state == LAPACKSupport::matrix,
ExcMessage("Matrix has to be in Matrix state before calling this function."));
Assert (property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
+ Threads::Mutex::ScopedLock lock (mutex);
ScaLAPACKMatrix<NumberType> Z (grid->n_mpi_processes, grid, 1);
- ev.resize (n_rows);
+ std::vector<NumberType> ev (n_rows);
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
int info = 0;
* including the diagonal, is destroyed. Therefore, the matrix is unusable
*/
state = LAPACKSupport::unusable;
+
+ return ev;
}
template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric(std::vector<NumberType> &ev)
+std::vector<NumberType> ScaLAPACKMatrix<NumberType>::eigenpairs_symmetric()
{
Assert (state == LAPACKSupport::matrix,
ExcMessage("Matrix has to be in Matrix state before calling this function."));
Assert (property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
+ Threads::Mutex::ScopedLock lock (mutex);
+
ScaLAPACKMatrix<NumberType> eigenvectors (n_rows, grid, row_block_size);
eigenvectors.property = property;
- ev.resize (n_rows);
+ std::vector<NumberType> ev(n_rows);
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
int info = 0;
*/
property = LAPACKSupport::Property::general;
state = LAPACKSupport::eigenvalues;
+
+ return ev;
}
{
Assert (state == LAPACKSupport::cholesky,
ExcMessage("Matrix has to be in Cholesky state before calling this function."));
+ Threads::Mutex::ScopedLock lock (mutex);
NumberType rcond = 0.;
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
int lwork = 2 * n_local_rows + 3 * n_local_columns + column_block_size;
int liwork = n_local_rows;
Assert (state == LAPACKSupport::matrix ||
state == LAPACKSupport::inverse_matrix,
ExcMessage("norms can be called in matrix state only."));
-
+ Threads::Mutex::ScopedLock lock (mutex);
NumberType res = 0.;
- //if (active)
- if (grid->active)
+ if (grid->mpi_process_is_active)
{
//int IROFFA = MOD( IA-1, MB_A )
//int ICOFFA = MOD( JA-1, NB_A )