From e2e4bdfac23d2d65e34efae29df3b31e827d7e56 Mon Sep 17 00:00:00 2001 From: Denis Davydov Date: Wed, 29 Nov 2017 12:03:05 +0100 Subject: [PATCH] move ProcessGrid into a stand-alone header under Utilities::MPI namespace --- include/deal.II/base/process_grid.h | 230 ++++++++++ include/deal.II/lac/scalapack.h | 175 +------- include/deal.II/lac/scalapack.templates.h | 289 +++++++++++++ source/base/CMakeLists.txt | 1 + source/base/process_grid.cc | 246 +++++++++++ source/lac/scalapack.cc | 503 +--------------------- tests/quick_tests/scalapack.cc | 2 +- tests/scalapack/scalapack_01.cc | 2 +- tests/scalapack/scalapack_02.cc | 2 +- tests/scalapack/scalapack_03.cc | 2 +- tests/scalapack/scalapack_04.cc | 2 +- tests/scalapack/scalapack_05.cc | 2 +- tests/scalapack/scalapack_06.cc | 2 +- tests/scalapack/scalapack_07.cc | 2 +- 14 files changed, 786 insertions(+), 674 deletions(-) create mode 100644 include/deal.II/base/process_grid.h create mode 100644 include/deal.II/lac/scalapack.templates.h create mode 100644 source/base/process_grid.cc diff --git a/include/deal.II/base/process_grid.h b/include/deal.II/base/process_grid.h new file mode 100644 index 0000000000..8ad9d8be9d --- /dev/null +++ b/include/deal.II/base/process_grid.h @@ -0,0 +1,230 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef dealii_process_grid_h +#define dealii_process_grid_h + +#include +#include +#include + +#ifdef DEAL_II_WITH_SCALAPACK + +DEAL_II_NAMESPACE_OPEN + + +// Forward declaration of class ScaLAPACKMatrix for ProcessGrid +template class ScaLAPACKMatrix; + + +namespace Utilities +{ + namespace MPI + { + /** + * A class taking care of setting up a two-dimensional processor grid. + * For example an MPI communicator with 5 processes can be arranged into a + * 2x2 grid with the 5-th processor being inactive: + * @code + * | 0 | 1 + * -----| ------- |----- + * 0 | P0 | P1 + * | | + * -----| ------- |----- + * 1 | P2 | P3 + * @endcode + * + * A shared pointer to this class is provided to ScaLAPACKMatrix matrices to + * perform block-cyclic distribution. + * + * Note that this class allows to setup a process grid which has fewer + * MPI cores than the total number of cores in the communicator. + * + * Currently the only place where one would use a ProcessGrid object is + * in connection with a ScaLAPACKMatrix object. + * + * @author Benjamin Brands, Denis Davydov, 2017 + */ + class ProcessGrid + { + public: + + /** + * Declare class ScaLAPACK as friend to provide access to private members. + */ + template friend class dealii::ScaLAPACKMatrix; + + /** + * Constructor for a process grid with @p n_rows and @p n_columns for a given @p mpi_communicator. + * The product of rows and columns should be less or equal to the total number of cores + * in the @p mpi_communicator. 
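+     *
+     * As a minimal usage sketch (an illustration only, assuming at least
+     * four processes in MPI_COMM_WORLD), a 2x2 grid is set up by
+     * @code
+     * std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
+     *   std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
+     * @endcode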
+ */ + ProcessGrid(MPI_Comm mpi_communicator, + const unsigned int n_rows, + const unsigned int n_columns); + + /** + * Constructor for a process grid for a given @p mpi_communicator. + * In this case the process grid is heuristically chosen based on the + * dimensions and block-cyclic distribution of a target matrix provided + * in @p n_rows_matrix, @p n_columns_matrix, @p row_block_size and @p column_block_size. + * + * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ + * are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of + * processes in the @p mpi_communicator. This function then creates a 2D processor grid + * assuming the ratio between number of process row $p$ and columns $q$ to be + * equal the ratio between matrix dimensions $M$ and $N$. + * + * For example, a square matrix $640x640$ with the block size $32$ + * and the @p mpi_communicator with 11 cores will result in the $3x3$ + * process grid. + */ + ProcessGrid(MPI_Comm mpi_communicator, + const unsigned int n_rows_matrix, + const unsigned int n_columns_matrix, + const unsigned int row_block_size, + const unsigned int column_block_size); + + /** + * Destructor. + */ + ~ProcessGrid(); + + /** + * Return the number of rows in the processes grid. + */ + unsigned int get_process_grid_rows() const; + + /** + * Return the number of columns in the processes grid. + */ + unsigned int get_process_grid_columns() const; + + /** + * Send @p count values stored consequently starting at @p value from + * the process with rank zero to processes which + * are not in the process grid. + */ + template + void send_to_inactive(NumberType *value, const int count=1) const; + + /** + * Return true if the process is active within the grid. + */ + bool is_process_active() const; + + private: + + /** + * A private constructor which takes grid dimensions as an std::pair. + */ + ProcessGrid(MPI_Comm mpi_communicator, + const std::pair &grid_dimensions); + + /** + * An MPI communicator with all processes (active and inactive). + */ + MPI_Comm mpi_communicator; + + /** + * An MPI communicator with inactive processes and the process with rank zero. + */ + MPI_Comm mpi_communicator_inactive_with_root; + + /** + * BLACS context. This is equivalent to MPI communicators and is + * used by ScaLAPACK. + */ + int blacs_context; + + /** + * Rank of this MPI process. + */ + const unsigned int this_mpi_process; + + /** + * Total number of MPI processes. + */ + const unsigned int n_mpi_processes; + + /** + * Number of rows in the processes grid. + */ + int n_process_rows; + + /** + * Number of columns in the processes grid. + */ + int n_process_columns; + + /** + * Row of this process in the grid. + * + * It's negative for in-active processes. + */ + int this_process_row; + + /** + * Column of this process in the grid. + * + * It's negative for in-active processes. + */ + int this_process_column; + + /** + * A flag which is true for processes within the 2D process grid. 
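+       *
+       * Outside of this class the flag is exposed through is_process_active();
+       * a hedged sketch of the intended usage pattern:
+       * @code
+       * if (grid->is_process_active())
+       *   {
+       *     // participate in ScaLAPACK computations; inactive processes
+       *     // can receive results via ProcessGrid::send_to_inactive().
+       *   }
+       * @endcode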
+ */ + bool mpi_process_is_active; + }; + + /*----------------------- Inline functions ----------------------------------*/ + +#ifndef DOXYGEN + + inline + unsigned int ProcessGrid::get_process_grid_rows() const + { + return n_process_rows; + } + + + + inline + unsigned int ProcessGrid::get_process_grid_columns() const + { + return n_process_columns; + } + + + + inline + bool ProcessGrid::is_process_active() const + { + return mpi_process_is_active; + } + + +#endif // ifndef DOXYGEN + + } // end of namespace MPI + +} // end of namespace Utilities + + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_WITH_SCALAPACK + +#endif diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h index ccb8c62957..9d93b05d80 100644 --- a/include/deal.II/lac/scalapack.h +++ b/include/deal.II/lac/scalapack.h @@ -21,6 +21,7 @@ #ifdef DEAL_II_WITH_SCALAPACK #include +#include #include #include #include @@ -31,168 +32,6 @@ DEAL_II_NAMESPACE_OPEN - -// Forward declaration of class ScaLAPACKMatrix for ProcessGrid -template class ScaLAPACKMatrix; - - -/** - * A class taking care of setting up a two-dimensional processor grid. - * For example an MPI communicator with 5 processes can be arranged into a - * 2x2 grid with the 5-th processor being inactive: - * @code - * | 0 | 1 - * -----| ------- |----- - * 0 | P0 | P1 - * | | - * -----| ------- |----- - * 1 | P2 | P3 - * @endcode - * - * A shared pointer to this class is provided to ScaLAPACKMatrix matrices to - * perform block-cyclic distribution. - * - * Note that this class allows to setup a process grid which has fewer - * MPI cores than the total number of cores in the communicator. - * - * Currently the only place where one would use a ProcessGrid object is - * in connection with a ScaLAPACKMatrix object. - * - * @author Benjamin Brands, 2017 - */ -class ProcessGrid -{ -public: - - /** - * Declare class ScaLAPACK as friend to provide access to private members. - */ - template friend class ScaLAPACKMatrix; - - /** - * Constructor for a process grid with @p n_rows and @p n_columns for a given @p mpi_communicator. - * The product of rows and columns should be less or equal to the total number of cores - * in the @p mpi_communicator. - */ - ProcessGrid(MPI_Comm mpi_communicator, - const unsigned int n_rows, - const unsigned int n_columns); - - /** - * Constructor for a process grid for a given @p mpi_communicator. - * In this case the process grid is heuristically chosen based on the - * dimensions and block-cyclic distribution of a target matrix provided - * in @p n_rows_matrix, @p n_columns_matrix, @p row_block_size and @p column_block_size. - * - * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$ - * are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of - * processes in the @p mpi_communicator. This function then creates a 2D processor grid - * assuming the ratio between number of process row $p$ and columns $q$ to be - * equal the ratio between matrix dimensions $M$ and $N$. - * - * For example, a square matrix $640x640$ with the block size $32$ - * and the @p mpi_communicator with 11 cores will result in the $3x3$ - * process grid. - */ - ProcessGrid(MPI_Comm mpi_communicator, - const unsigned int n_rows_matrix, - const unsigned int n_columns_matrix, - const unsigned int row_block_size, - const unsigned int column_block_size); - - /** - * Destructor. - */ - ~ProcessGrid(); - - /** - * Return the number of rows in the processes grid. 
- */ - unsigned int get_process_grid_rows() const; - - /** - * Return the number of columns in the processes grid. - */ - unsigned int get_process_grid_columns() const; - - /** - * Send @p count values stored consequently starting at @p value from - * the process with rank zero to processes which - * are not in the process grid. - */ - template - void send_to_inactive(NumberType *value, const int count=1) const; - - /** - * Return true if the process is active within the grid. - */ - bool is_process_active() const; - -private: - - /** - * A private constructor which takes grid dimensions as an std::pair. - */ - ProcessGrid(MPI_Comm mpi_communicator, - const std::pair &grid_dimensions); - - /** - * An MPI communicator with all processes (active and inactive). - */ - MPI_Comm mpi_communicator; - - /** - * An MPI communicator with inactive processes and the process with rank zero. - */ - MPI_Comm mpi_communicator_inactive_with_root; - - /** - * BLACS context. This is equivalent to MPI communicators and is - * used by ScaLAPACK. - */ - int blacs_context; - - /** - * Rank of this MPI process. - */ - const unsigned int this_mpi_process; - - /** - * Total number of MPI processes. - */ - const unsigned int n_mpi_processes; - - /** - * Number of rows in the processes grid. - */ - int n_process_rows; - - /** - * Number of columns in the processes grid. - */ - int n_process_columns; - - /** - * Row of this process in the grid. - * - * It's negative for in-active processes. - */ - int this_process_row; - - /** - * Column of this process in the grid. - * - * It's negative for in-active processes. - */ - int this_process_column; - - /** - * A flag which is true for processes within the 2D process grid. - */ - bool mpi_process_is_active; -}; - - /** * A wrapper class around ScaLAPACK parallel dense linear algebra. * @@ -278,7 +117,7 @@ public: */ ScaLAPACKMatrix(const size_type n_rows, const size_type n_columns, - const std::shared_ptr process_grid, + const std::shared_ptr process_grid, const size_type row_block_size = 32, const size_type column_block_size = 32, const LAPACKSupport::Property property = LAPACKSupport::Property::general); @@ -288,7 +127,7 @@ public: * using the process grid in @p process_grid. */ ScaLAPACKMatrix(const size_type size, - const std::shared_ptr process_grid, + const std::shared_ptr process_grid, const size_type block_size = 32, const LAPACKSupport::Property property = LAPACKSupport::Property::symmetric); @@ -439,10 +278,10 @@ private: LAPACKSupport::Property property; /** - * A shared pointer to a ProcessGrid object which contains a BLACS context + * A shared pointer to a Utilities::MPI::ProcessGrid object which contains a BLACS context * and a MPI communicator, as well as other necessary data structures. */ - std::shared_ptr grid; + std::shared_ptr grid; /** * Number of rows in the matrix. 
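For reference, after this patch a distributed matrix is created as in the following sketch (mpi_communicator, n_rows and n_columns are placeholders; both the grid constructor and the ScaLAPACKMatrix constructor used here are declared above):

@code
std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
  std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,
                                                n_rows, n_columns,
                                                32, 32);
ScaLAPACKMatrix<double> matrix(n_rows, n_columns, grid, 32, 32);
@endcode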
@@ -527,6 +366,8 @@ private: // ----------------------- inline functions ---------------------------- +#ifndef DOXYGEN + template inline NumberType @@ -545,6 +386,8 @@ ScaLAPACKMatrix::local_el(const int loc_row, const int loc_column) return (*this)(loc_row,loc_column); } +#endif // DOXYGEN + DEAL_II_NAMESPACE_CLOSE #endif // DEAL_II_WITH_SCALAPACK diff --git a/include/deal.II/lac/scalapack.templates.h b/include/deal.II/lac/scalapack.templates.h new file mode 100644 index 0000000000..7a7d2e4565 --- /dev/null +++ b/include/deal.II/lac/scalapack.templates.h @@ -0,0 +1,289 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#ifndef dealii_scalapack_templates_h +#define dealii_scalapack_templates_h + + +#include + +#ifdef DEAL_II_WITH_SCALAPACK + +#include +#include + +// useful examples: +// https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864 +// http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139 // second post by Julien Langou +// https://andyspiros.wordpress.com/2011/07/08/an-example-of-blacs-with-c/ +// http://qboxcode.org/trac/browser/qb/tags/rel1_63_4/src/Matrix.C +// https://gitlab.phys.ethz.ch/lwossnig/lecture/blob/a534f562dfb2ad5c564abe5c2356d5d956fb7218/examples/mpi/scalapack.cpp +// https://github.com/elemental/Elemental/blob/master/src/core/imports/scalapack.cpp +// https://scicomp.stackexchange.com/questions/7766/performance-optimization-or-tuning-possible-for-scalapack-gemm +// +// info: +// http://www.netlib.org/scalapack/slug/index.html // User guide +// http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors + +extern "C" +{ + /* Basic Linear Algebra Communication Subprograms (BLACS) declarations */ + // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dinitb.htm#dinitb + + /** + * Determine how many processes are available and the current process rank. + * + * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbpnf.htm + */ + void Cblacs_pinfo(int *rank, int *nprocs); + + /** + * Return internal BLACS value in @p val based on the input @p what and @p icontxt. + * The most common use is in retrieving a default system context (@p what = 0, @p icontxt is ignored) + * to be used in BLACS_GRIDINIT or BLACS_GRIDMAP. + * + * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbget.htm + */ + void Cblacs_get(int icontxt, int what, int *val); + + /** + * Map the processes sequentially in row-major or column-major order + * into the process grid. Input arguments must be the same on every process. + * + * On return, @p context is the integer handle to the BLACS context, + * whereas on entry it is a system context to be used in creating the + * BLACS context. 
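+   *
+   * As an illustrative sketch (this mirrors what the ProcessGrid
+   * constructor added in this patch does):
+   * @code
+   * int context = Csys2blacs_handle(mpi_communicator); // system context
+   * Cblacs_gridinit(&context, "Row", n_process_rows, n_process_columns);
+   * @endcode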
+ * + * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbint.htm + */ + void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width); + + /** + * Return the process row and column index. + * + * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbinfo.htm + */ + void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col); + + /** + * Given the system process number, return the row and column coordinates in the BLACS' process grid. + */ + void Cblacs_pcoord(int ictxt, int pnum, int *prow, int *pcol); + + /** + * Release a BLACS context. + */ + void Cblacs_gridexit(int context); + + /** + * This routines holds up execution of all processes within the indicated + * scope until they have all called the routine. + */ + void Cblacs_barrier(int, const char *); + + /** + * Free all BLACS contexts and releases all allocated memory. + */ + void Cblacs_exit(int error_code); + + /** + * Receives a message from a process @prsrc, @p csrc into a general rectangular matrix. + * + * https://software.intel.com/en-us/mkl-developer-reference-c-gerv2d + */ + void Cdgerv2d(int context, int M, int N, double *A, int lda, int rsrc, int csrc); + + /** + * Sends the general rectangular matrix A to the destination + * process @p rdest @p cdest in the process grid. + * + * https://software.intel.com/en-us/mkl-developer-reference-c-2018-beta-gesd2d + */ + void Cdgesd2d(int context , int M, int N, double *A, int lda, int rdest, int cdest); + + /** + * Get BLACS context from MPI @p comm. + */ + int Csys2blacs_handle(MPI_Comm comm); + + /** + * Compute how many rows and columns each process owns (NUMber of Rows Or Columns). + * + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dnumy.htm + */ + int numroc_ (const int *n, const int *nb, const int *iproc, const int *isproc, const int *nprocs); + + /** + * Compute the Cholesky factorization of an N-by-N real + * symmetric positive definite distributed matrix sub( A ) denoting + * A(IA:IA+N-1, JA:JA+N-1). + * + * http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotrf.htm + */ + void pdpotrf_(const char *UPLO, + const int *N, + double *A, const int *IA, const int *JA, const int *DESCA, + int *INFO); + + /** + * Compute the inverse of a real symmetric positive definite + * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the + * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by + * PDPOTRF. + * + * http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotri.htm + * https://software.intel.com/en-us/mkl-developer-reference-c-p-potri + */ + void pdpotri_(const char *UPLO, + const int *N, + double *A, const int *IA, const int *JA, const int *DESCA, + int *INFO); + + /** + * Estimate the reciprocal of the condition number (in the + * l1-norm) of a real symmetric positive definite distributed matrix + * using the Cholesky factorization. 
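+   *
+   * Intended call sequence (a sketch; the matrix A, its descriptor desc,
+   * the size N and the work arrays work/iwork with their sizes lwork/liwork
+   * are assumed to have been set up already): compute the l1-norm while A
+   * is still unfactored, factorize, then estimate the condition number:
+   * @code
+   * const char uplo = 'L';
+   * const int one = 1;
+   * int info;
+   * double rcond;
+   * const double anorm = pdlansy_("1", &uplo, &N, A, &one, &one, desc, work.data());
+   * pdpotrf_(&uplo, &N, A, &one, &one, desc, &info);
+   * pdpocon_(&uplo, &N, A, &one, &one, desc, &anorm, &rcond,
+   *          work.data(), &lwork, iwork.data(), &liwork, &info);
+   * @endcode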
+ * + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon + * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html + * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon + */ + void pdpocon_(const char *uplo, + const int *N, + const double *A, const int *IA, const int *JA, const int *DESCA, + const double *ANORM, double *RCOND, + double *WORK, const int *LWORK, + int *IWORK, const int *LIWORK, + int *INFO); + + /** + * Norm of a real symmetric matrix + * + * http://www.netlib.org/scalapack/explore-html/dd/d12/pdlansy_8f_source.html + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_pdlansy.htm#pdlansy + */ + double pdlansy_(const char *norm, + const char *uplo, + const int *N, + const double *A, const int *IA, const int *JA, const int *DESCA, + double *work); + + /** + * Compute the Least Common Multiple (LCM) of two positive integers @p M and @p N. + * In fact the routine Compute the greatest common divisor (GCD) and + * use the fact that M*N = GCD*LCM. + * + * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html + */ + int ilcm_(const int *M, const int *N); + + /** + * Return the ceiling of the division of two integers. + * + * http://www.netlib.org/scalapack/explore-html/df/d07/iceil_8f_source.html + */ + int iceil_(const int *i1, const int *i2); + + /** + * Initialize the descriptor vector with the 8 input arguments + */ + void descinit_ (int *desc, + const int *m, const int *n, const int *mb, const int *nb, + const int *irsrc, const int *icsrc, + const int *ictxt, const int *lld, int *info); + + /** + * Compute the global index of a distributed matrix entry + * pointed to by the local index @p indxloc of the process indicated by + * @p iproc. + * + * @param indxloc The local index of the distributed matrix entry. + * @param nb Block size, size of the blocks the distributed matrix is split into. + * @param iproc The coordinate of the process whose local array row or column is to be determined + * @param isrcproc The coordinate of the process that possesses the first row/column of the distributed matrix + * @param nprocs The total number processes over which the distributed matrix is distributed + */ + int indxl2g_ (const int *indxloc, const int *nb, const int *iproc, const int *isrcproc, const int *nprocs); + + /** + * Compute the solution to a real system of linear equations + */ + void pdgesv_(const int *n, const int *nrhs, + double *A, const int *ia, const int *ja, const int *desca, + int *ipiv, + double *B, const int *ib, const int *jb, const int *descb, + int *info); + + /** + * Perform one of the matrix-matrix operations: + * sub( C ) := alpha*op( sub( A ) )*op( sub( B ) ) + beta*sub( C ), + * where + * sub( C ) denotes C(IC:IC+M-1,JC:JC+N-1), and, op( X ) is one of + * op( X ) = X or op( X ) = X'. 
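+   *
+   * For example, C := A*B over whole matrices reads as follows (a sketch;
+   * the descriptors descA/descB/descC and the local arrays are assumed to
+   * have been set up with descinit_, and all index arguments are 1-based):
+   * @code
+   * const int one = 1;
+   * const double alpha = 1., beta = 0.;
+   * pdgemm_("N", "N", &m, &n, &k,
+   *         &alpha, A, &one, &one, descA,
+   *         B, &one, &one, descB,
+   *         &beta, C, &one, &one, descC);
+   * @endcode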
+ */ + void pdgemm_(const char *transa, const char *transb, + const int *m, const int *n, const int *k, + const double *alpha, + double *A, const int *IA, const int *JA, const int *DESCA, + double *B, const int *IB, const int *JB, const int *DESCB, + const double *beta, + double *C, const int *IC, const int *JC, const int *DESCC); + + /** + * Return the value of the one norm, or the Frobenius norm, or the infinity norm, + * or the element of largest absolute value of a distributed matrix + */ + double pdlange_(char const *norm, + int const &m, int const &n, + double *A, int const &ia, int const &ja, int *desca, + double *work); + + /** + * Compute the process coordinate which possesses the entry of a + * distributed matrix specified by a global index + */ + int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs); + + /** + * Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A + * by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system + * and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes. + * Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages. + * + * http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html + * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev + */ + void pdsyev_(const char *jobz, const char *uplo, + const int *m, double *A, const int *ia, const int *ja, int *desca, + double *w, + double *z, const int *iz, const int *jz, int *descz, + double *work, const int *lwork, int *info); + + /** + * Copy all or a part of a distributed matrix A to another + * distributed matrix B. No communication is performed, pdlacpy + * performs a local copy sub(A) := sub(B), where sub(A) denotes + * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1) + */ + void pdlacpy_(const char *uplo, + const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca, + double *B, const int *ib, const int *jb, int *descb); +} + +#endif // DEAL_II_WITH_SCALAPACK + +#endif // dealii_scalapack_templates_h diff --git a/source/base/CMakeLists.txt b/source/base/CMakeLists.txt index 39fef19053..c140d4c722 100644 --- a/source/base/CMakeLists.txt +++ b/source/base/CMakeLists.txt @@ -64,6 +64,7 @@ SET(_unity_include_src polynomials_piecewise.cc polynomials_rannacher_turek.cc polynomials_raviart_thomas.cc + process_grid.cc quadrature.cc quadrature_lib.cc quadrature_selector.cc diff --git a/source/base/process_grid.cc b/source/base/process_grid.cc new file mode 100644 index 0000000000..5b391b8786 --- /dev/null +++ b/source/base/process_grid.cc @@ -0,0 +1,246 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. 
+//
+// ---------------------------------------------------------------------
+
+#include 
+
+#ifdef DEAL_II_WITH_SCALAPACK
+
+#include 
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace
+{
+  /**
+   * Internal function to determine the dimensions of a process grid based on
+   * the total number of cores, the matrix dimensions and the matrix block sizes.
+   *
+   * Amesos heuristics:
+   * https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166
+   *
+   * Elemental default grid: El::Grid::Grid(mpi_comm,...)
+   * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
+   */
+  inline
+  std::pair<int,int> compute_processor_grid_sizes(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
+                                                  const unsigned int block_size_m, const unsigned int block_size_n)
+  {
+    // A few notes from the ScaLAPACK user guide:
+    // It is possible to predict the best grid shape given the number of processes available:
+    // Pr x Pc <= P
+    // This, however, depends on the task to be done.
+    // LU, QR and QL factorizations perform better for “flat” process grids (Pr < Pc).
+    // For large N, Pc = 2*Pr is a good choice, whereas for small N, one should choose a small Pr.
+    // Square or nearly square grids are best suited for Cholesky factorization.
+    // LQ and RQ factorizations take advantage of “tall” grids (Pr > Pc).
+
+    // Below we always try to create 2D processor grids:
+
+    int n_processes;
+    MPI_Comm_size(mpi_comm, &n_processes);
+
+    // Get the total number of cores we can occupy in a rectangular dense matrix
+    // with rectangular blocks when every core owns only a single block:
+    const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
+                                      int(std::ceil((1.*n)/block_size_n));
+    const int Np = std::min(n_processes_heuristic, n_processes);
+
+    // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
+    // Pc =: ratio * Pr
+    // therefore
+    // Np = Pc * Pc / ratio
+    // for square matrices the ratio equals 1
+    const double ratio = double(n)/m;
+    int Pc = std::floor(std::sqrt(ratio * Np));
+
+    // One could round Pc up to the nearest number that divides Np without remainder
+    // while ( Np % Pc != 0 )
+    //   ++Pc;
+    // but this affects the grid shape dramatically, e.g. with 10 cores a 3x3 grid becomes 2x5.
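+
+    // Worked example of the heuristic above, matching the ProcessGrid
+    // documentation (an illustration added here, not extra logic):
+    // m = n = 640, block sizes 32, 11 cores in the communicator:
+    // n_processes_heuristic = 20*20 = 400, Np = min(400, 11) = 11,
+    // ratio = 1, Pc = floor(sqrt(11)) = 3, and the code below yields
+    // n_process_columns = 3 and n_process_rows = 11/3 = 3, i.e. a 3x3
+    // grid with two cores left inactive.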
+ + // limit our estimate to be in [2, Np] + int n_process_columns = std::min (Np, std::max(2, Pc)); + // finally, get the rows: + int n_process_rows = Np / n_process_columns ; + + Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns, + ExcMessage("error in process grid: "+ + std::to_string(n_process_rows)+"x"+ + std::to_string(n_process_columns)+ + "="+ + std::to_string(n_process_rows*n_process_columns)+ + " out of " + + std::to_string(n_processes))); + + return std::make_pair(n_process_rows,n_process_columns); + + // For example, + // 320x320 with 32x32 blocks and 16 cores: + // Pc = 1.0 * Pr => 4x4 grid + // Pc = 0.5 * Pr => 8x2 grid + // Pc = 2.0 * Pr => 3x5 grid + } +} + +namespace Utilities +{ + namespace MPI + { + + ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, + const std::pair &grid_dimensions) + : + mpi_communicator(mpi_comm), + this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)), + n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)), + n_process_rows(grid_dimensions.first), + n_process_columns(grid_dimensions.second) + { + Assert (grid_dimensions.first > 0, + ExcMessage("Number of process grid rows has to be positive.")); + Assert (grid_dimensions.second > 0, + ExcMessage("Number of process grid columns has to be positive.")); + + Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes, + ExcMessage("Size of process grid is larger than number of available MPI processes.")); + + // processor grid order. + const bool column_major = false; + + // Initialize Cblas context from the provided communicator + blacs_context = Csys2blacs_handle(mpi_communicator); + const char *order = ( column_major ? "Col" : "Row" ); + // FIXME: blacs_context can be modified below. Thus Cblacs2sys_handle + // may not return the same MPI communicator + Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns); + + // Blacs may modify the grid size on processes which are not used + // in the grid. 
So provide copies below: + int procrows_ = n_process_rows; + int proccols_ = n_process_columns; + Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column ); + + // If this MPI core is not on the grid, flag it as inactive and + // skip all jobs + // FIXME: different condition is used here + // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used + if (this_process_row < 0 || this_process_column < 0) + mpi_process_is_active = false; + else + mpi_process_is_active = true; + + // Create an auxiliary communicator which has root and all inactive cores + // Assume that inactive cores start with id=n_process_rows*n_process_columns + Assert (mpi_process_is_active || this_mpi_process >= n_process_rows*n_process_columns, + ExcInternalError()); + + std::vector inactive_with_root_ranks; + inactive_with_root_ranks.push_back(0); + for (int i = n_process_rows*n_process_columns; i < n_mpi_processes; ++i) + inactive_with_root_ranks.push_back(i); + + // Get the group of processes in mpi_communicator + int ierr = 0; + MPI_Group all_group; + ierr = MPI_Comm_group(mpi_communicator, &all_group); + AssertThrowMPI(ierr); + + // Construct the group containing all ranks we need: + MPI_Group inactive_with_root_group; + const int n = inactive_with_root_ranks.size(); + ierr = MPI_Group_incl(all_group, + n, inactive_with_root_ranks.data(), + &inactive_with_root_group); + AssertThrowMPI(ierr); + + // Create the communicator based on the group + // Note that, on most cores the communicator will be MPI_COMM_NULL + // FIXME: switch to MPI_Comm_create_group for MPI-3 so that only processes within the to-be subgroup call this + ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group, + &mpi_communicator_inactive_with_root); + AssertThrowMPI(ierr); + + MPI_Group_free(&all_group); + MPI_Group_free(&inactive_with_root_group); + + // Double check that the process with rank 0 in subgroup is active: +#ifdef DEBUG + if (mpi_communicator_inactive_with_root != MPI_COMM_NULL && + Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0) + Assert (mpi_process_is_active, ExcInternalError()); +#endif + } + + + + ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, + const unsigned int n_rows_matrix, + const unsigned int n_columns_matrix, + const unsigned int row_block_size, + const unsigned int column_block_size) + : + ProcessGrid(mpi_comm, + compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix, + row_block_size, column_block_size) ) + {} + + + + ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, + const unsigned int n_rows, + const unsigned int n_columns) + : + ProcessGrid(mpi_comm, + std::make_pair(n_rows,n_columns)) + {} + + + + + ProcessGrid::~ProcessGrid() + { + if (mpi_process_is_active) + Cblacs_gridexit(blacs_context); + + MPI_Comm_free(&mpi_communicator_inactive_with_root); + } + + + + template + void ProcessGrid::send_to_inactive(NumberType *value, const int count) const + { + Assert (count>0, ExcInternalError()); + if (mpi_communicator_inactive_with_root != MPI_COMM_NULL) + { + const int ierr = + MPI_Bcast(value,count, + Utilities::MPI::internal::mpi_type_id (value), + 0/*from root*/, + mpi_communicator_inactive_with_root); + AssertThrowMPI(ierr); + } + } + + } +} + +// instantiations + +template void Utilities::MPI::ProcessGrid::send_to_inactive(double *, const int) const; + +DEAL_II_NAMESPACE_CLOSE + +#endif // DEAL_II_WITH_SCALAPACK diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc index 
400a242f7e..4d55ec7188 100644 --- a/source/lac/scalapack.cc +++ b/source/lac/scalapack.cc @@ -20,510 +20,14 @@ #include #include - -#include - -// useful examples: -// https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864 -// http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139 // second post by Julien Langou -// https://andyspiros.wordpress.com/2011/07/08/an-example-of-blacs-with-c/ -// http://qboxcode.org/trac/browser/qb/tags/rel1_63_4/src/Matrix.C -// https://gitlab.phys.ethz.ch/lwossnig/lecture/blob/a534f562dfb2ad5c564abe5c2356d5d956fb7218/examples/mpi/scalapack.cpp -// https://github.com/elemental/Elemental/blob/master/src/core/imports/scalapack.cpp -// https://scicomp.stackexchange.com/questions/7766/performance-optimization-or-tuning-possible-for-scalapack-gemm -// -// info: -// http://www.netlib.org/scalapack/slug/index.html // User guide -// http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors - -extern "C" -{ - /* Basic Linear Algebra Communication Subprograms (BLACS) declarations */ - // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dinitb.htm#dinitb - - /** - * Determine how many processes are available and the current process rank. - * - * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbpnf.htm - */ - void Cblacs_pinfo(int *rank, int *nprocs); - - /** - * Return internal BLACS value in @p val based on the input @p what and @p icontxt. - * The most common use is in retrieving a default system context (@p what = 0, @p icontxt is ignored) - * to be used in BLACS_GRIDINIT or BLACS_GRIDMAP. - * - * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbget.htm - */ - void Cblacs_get(int icontxt, int what, int *val); - - /** - * Map the processes sequentially in row-major or column-major order - * into the process grid. Input arguments must be the same on every process. - * - * On return, @p context is the integer handle to the BLACS context, - * whereas on entry it is a system context to be used in creating the - * BLACS context. - * - * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbint.htm - */ - void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width); - - /** - * Return the process row and column index. - * - * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbinfo.htm - */ - void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col); - - /** - * Given the system process number, return the row and column coordinates in the BLACS' process grid. - */ - void Cblacs_pcoord(int ictxt, int pnum, int *prow, int *pcol); - - /** - * Release a BLACS context. - */ - void Cblacs_gridexit(int context); - - /** - * This routines holds up execution of all processes within the indicated - * scope until they have all called the routine. - */ - void Cblacs_barrier(int, const char *); - - /** - * Free all BLACS contexts and releases all allocated memory. - */ - void Cblacs_exit(int error_code); - - /** - * Receives a message from a process @prsrc, @p csrc into a general rectangular matrix. 
- * - * https://software.intel.com/en-us/mkl-developer-reference-c-gerv2d - */ - void Cdgerv2d(int context, int M, int N, double *A, int lda, int rsrc, int csrc); - - /** - * Sends the general rectangular matrix A to the destination - * process @p rdest @p cdest in the process grid. - * - * https://software.intel.com/en-us/mkl-developer-reference-c-2018-beta-gesd2d - */ - void Cdgesd2d(int context , int M, int N, double *A, int lda, int rdest, int cdest); - - /** - * Get BLACS context from MPI @p comm. - */ - int Csys2blacs_handle(MPI_Comm comm); - - /** - * Compute how many rows and columns each process owns (NUMber of Rows Or Columns). - * - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dnumy.htm - */ - int numroc_ (const int *n, const int *nb, const int *iproc, const int *isproc, const int *nprocs); - - /** - * Compute the Cholesky factorization of an N-by-N real - * symmetric positive definite distributed matrix sub( A ) denoting - * A(IA:IA+N-1, JA:JA+N-1). - * - * http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotrf.htm - */ - void pdpotrf_(const char *UPLO, - const int *N, - double *A, const int *IA, const int *JA, const int *DESCA, - int *INFO); - - /** - * Compute the inverse of a real symmetric positive definite - * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the - * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by - * PDPOTRF. - * - * http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotri.htm - * https://software.intel.com/en-us/mkl-developer-reference-c-p-potri - */ - void pdpotri_(const char *UPLO, - const int *N, - double *A, const int *IA, const int *JA, const int *DESCA, - int *INFO); - - /** - * Estimate the reciprocal of the condition number (in the - * l1-norm) of a real symmetric positive definite distributed matrix - * using the Cholesky factorization. - * - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon - * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html - * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon - */ - void pdpocon_(const char *uplo, - const int *N, - const double *A, const int *IA, const int *JA, const int *DESCA, - const double *ANORM, double *RCOND, - double *WORK, const int *LWORK, - int *IWORK, const int *LIWORK, - int *INFO); - - /** - * Norm of a real symmetric matrix - * - * http://www.netlib.org/scalapack/explore-html/dd/d12/pdlansy_8f_source.html - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_pdlansy.htm#pdlansy - */ - double pdlansy_(const char *norm, - const char *uplo, - const int *N, - const double *A, const int *IA, const int *JA, const int *DESCA, - double *work); - - /** - * Compute the Least Common Multiple (LCM) of two positive integers @p M and @p N. - * In fact the routine Compute the greatest common divisor (GCD) and - * use the fact that M*N = GCD*LCM. - * - * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html - */ - int ilcm_(const int *M, const int *N); - - /** - * Return the ceiling of the division of two integers. 
- * - * http://www.netlib.org/scalapack/explore-html/df/d07/iceil_8f_source.html - */ - int iceil_(const int *i1, const int *i2); - - /** - * Initialize the descriptor vector with the 8 input arguments - */ - void descinit_ (int *desc, - const int *m, const int *n, const int *mb, const int *nb, - const int *irsrc, const int *icsrc, - const int *ictxt, const int *lld, int *info); - - /** - * Compute the global index of a distributed matrix entry - * pointed to by the local index @p indxloc of the process indicated by - * @p iproc. - * - * @param indxloc The local index of the distributed matrix entry. - * @param nb Block size, size of the blocks the distributed matrix is split into. - * @param iproc The coordinate of the process whose local array row or column is to be determined - * @param isrcproc The coordinate of the process that possesses the first row/column of the distributed matrix - * @param nprocs The total number processes over which the distributed matrix is distributed - */ - int indxl2g_ (const int *indxloc, const int *nb, const int *iproc, const int *isrcproc, const int *nprocs); - - /** - * Compute the solution to a real system of linear equations - */ - void pdgesv_(const int *n, const int *nrhs, - double *A, const int *ia, const int *ja, const int *desca, - int *ipiv, - double *B, const int *ib, const int *jb, const int *descb, - int *info); - - /** - * Perform one of the matrix-matrix operations: - * sub( C ) := alpha*op( sub( A ) )*op( sub( B ) ) + beta*sub( C ), - * where - * sub( C ) denotes C(IC:IC+M-1,JC:JC+N-1), and, op( X ) is one of - * op( X ) = X or op( X ) = X'. - */ - void pdgemm_(const char *transa, const char *transb, - const int *m, const int *n, const int *k, - const double *alpha, - double *A, const int *IA, const int *JA, const int *DESCA, - double *B, const int *IB, const int *JB, const int *DESCB, - const double *beta, - double *C, const int *IC, const int *JC, const int *DESCC); - - /** - * Return the value of the one norm, or the Frobenius norm, or the infinity norm, - * or the element of largest absolute value of a distributed matrix - */ - double pdlange_(char const *norm, - int const &m, int const &n, - double *A, int const &ia, int const &ja, int *desca, - double *work); - - /** - * Compute the process coordinate which possesses the entry of a - * distributed matrix specified by a global index - */ - int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs); - - /** - * Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A - * by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system - * and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes. - * Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages. - * - * http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html - * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev - */ - void pdsyev_(const char *jobz, const char *uplo, - const int *m, double *A, const int *ia, const int *ja, int *desca, - double *w, - double *z, const int *iz, const int *jz, int *descz, - double *work, const int *lwork, int *info); - - /** - * Copy all or a part of a distributed matrix A to another - * distributed matrix B. 
No communication is performed, pdlacpy - * performs a local copy sub(A) := sub(B), where sub(A) denotes - * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1) - */ - void pdlacpy_(const char *uplo, - const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca, - double *B, const int *ib, const int *jb, int *descb); -} - - +#include DEAL_II_NAMESPACE_OPEN -namespace -{ - /** - * Internal function to determine dimension of process grid based on the total - * number of cores, matrix dimensions and matrix block sizes - * - * Amesos heuristics: - * https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166 - * - * Elemental default grid: El::Grid::Grid(mpi_comm,...) - * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91 - */ - inline - std::pair compute_processor_grid_sizes(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n, - const unsigned int block_size_m, const unsigned int block_size_n) - { - // Few notes from the ScaLAPACK user guide: - // It is possible to predict the best grid shape given the number of processes available: - // Pr x Pc <= P - // This, however, depends on the task to be done. - // LU , QR and QL factorizations perform better for “flat” process grids (Pr < Pc ) - // For large N, Pc = 2*Pr is a good choice, whereas for small N, one should choose small Pr - // Square or near square grids are more optimal for Cholesky factorization. - // LQ and RQ factorizations take advantage of “tall” grids (Pr > Pc ) - - // Below we always try to create 2D processor grids: - - int n_processes; - MPI_Comm_size(mpi_comm, &n_processes); - - // Get the total number of cores we can occupy in a rectangular dense matrix - // with rectangular blocks when every core owns only a single block: - const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))* - int(std::ceil((1.*n)/block_size_n)); - const int Np = std::min(n_processes_heuristic, n_processes); - - // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio - // Pc =: ratio * Pr - // therefore - // Np = Pc * Pc / ratio - // for quadratic matrices the ratio equals 1 - const double ratio = double(n)/m; - int Pc = std::floor(std::sqrt(ratio * Np)); - - // one could rounds up Pc to the number which has zero remainder from the division of Np - // while ( Np % Pc != 0 ) - // ++Pc; - // but this affects the grid shape dramatically, i.e. 10 cores 3x3 becomes 2x5. 
- - // limit our estimate to be in [2, Np] - int n_process_columns = std::min (Np, std::max(2, Pc)); - // finally, get the rows: - int n_process_rows = Np / n_process_columns ; - - Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns, - ExcMessage("error in process grid: "+ - std::to_string(n_process_rows)+"x"+ - std::to_string(n_process_columns)+ - "="+ - std::to_string(n_process_rows*n_process_columns)+ - " out of " + - std::to_string(n_processes))); - - return std::make_pair(n_process_rows,n_process_columns); - - // For example, - // 320x320 with 32x32 blocks and 16 cores: - // Pc = 1.0 * Pr => 4x4 grid - // Pc = 0.5 * Pr => 8x2 grid - // Pc = 2.0 * Pr => 3x5 grid - } -} - - - -ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, - const std::pair &grid_dimensions) - : - mpi_communicator(mpi_comm), - this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)), - n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)), - n_process_rows(grid_dimensions.first), - n_process_columns(grid_dimensions.second) -{ - Assert (grid_dimensions.first > 0, - ExcMessage("Number of process grid rows has to be positive.")); - Assert (grid_dimensions.second > 0, - ExcMessage("Number of process grid columns has to be positive.")); - - Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes, - ExcMessage("Size of process grid is larger than number of available MPI processes.")); - - // processor grid order. - const bool column_major = false; - - // Initialize Cblas context from the provided communicator - blacs_context = Csys2blacs_handle(mpi_communicator); - const char *order = ( column_major ? "Col" : "Row" ); - // FIXME: blacs_context can be modified below. Thus Cblacs2sys_handle - // may not return the same MPI communicator - Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns); - - // Blacs may modify the grid size on processes which are not used - // in the grid. 
So provide copies below: - int procrows_ = n_process_rows; - int proccols_ = n_process_columns; - Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column ); - - // If this MPI core is not on the grid, flag it as inactive and - // skip all jobs - // FIXME: different condition is used here - // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used - if (this_process_row < 0 || this_process_column < 0) - mpi_process_is_active = false; - else - mpi_process_is_active = true; - - // Create an auxiliary communicator which has root and all inactive cores - // Assume that inactive cores start with id=n_process_rows*n_process_columns - Assert (mpi_process_is_active || this_mpi_process >= n_process_rows*n_process_columns, - ExcInternalError()); - - std::vector inactive_with_root_ranks; - inactive_with_root_ranks.push_back(0); - for (int i = n_process_rows*n_process_columns; i < n_mpi_processes; ++i) - inactive_with_root_ranks.push_back(i); - - // Get the group of processes in mpi_communicator - int ierr = 0; - MPI_Group all_group; - ierr = MPI_Comm_group(mpi_communicator, &all_group); - AssertThrowMPI(ierr); - - // Construct the group containing all ranks we need: - MPI_Group inactive_with_root_group; - const int n = inactive_with_root_ranks.size(); - ierr = MPI_Group_incl(all_group, - n, inactive_with_root_ranks.data(), - &inactive_with_root_group); - AssertThrowMPI(ierr); - - // Create the communicator based on the group - // Note that, on most cores the communicator will be MPI_COMM_NULL - // FIXME: switch to MPI_Comm_create_group for MPI-3 so that only processes within the to-be subgroup call this - ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group, - &mpi_communicator_inactive_with_root); - AssertThrowMPI(ierr); - - MPI_Group_free(&all_group); - MPI_Group_free(&inactive_with_root_group); - - // Double check that the process with rank 0 in subgroup is active: -#ifdef DEBUG - if (mpi_communicator_inactive_with_root != MPI_COMM_NULL && - Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0) - Assert (mpi_process_is_active, ExcInternalError()); -#endif -} - - - -ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, - const unsigned int n_rows_matrix, - const unsigned int n_columns_matrix, - const unsigned int row_block_size, - const unsigned int column_block_size) - : - ProcessGrid(mpi_comm, - compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix, - row_block_size, column_block_size) ) -{} - - - -ProcessGrid::ProcessGrid(MPI_Comm mpi_comm, - const unsigned int n_rows, - const unsigned int n_columns) - : - ProcessGrid(mpi_comm, - std::make_pair(n_rows,n_columns)) -{} - - - - -ProcessGrid::~ProcessGrid() -{ - if (mpi_process_is_active) - Cblacs_gridexit(blacs_context); - - MPI_Comm_free(&mpi_communicator_inactive_with_root); -} - - - -unsigned int ProcessGrid::get_process_grid_rows() const -{ - return n_process_rows; -} - - - -unsigned int ProcessGrid::get_process_grid_columns() const -{ - return n_process_columns; -} - - - -bool ProcessGrid::is_process_active() const -{ - return mpi_process_is_active; -} - - - -template -void ProcessGrid::send_to_inactive(NumberType *value, const int count) const -{ - Assert (count>0, ExcInternalError()); - if (mpi_communicator_inactive_with_root != MPI_COMM_NULL) - { - const int ierr = - MPI_Bcast(value,count, - Utilities::MPI::internal::mpi_type_id (value), - 0/*from root*/, - mpi_communicator_inactive_with_root); - AssertThrowMPI(ierr); - } -} - 
- - template ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type n_rows_, const size_type n_columns_, - const std::shared_ptr process_grid, + const std::shared_ptr process_grid, const size_type row_block_size_, const size_type column_block_size_, const LAPACKSupport::Property property) @@ -583,7 +87,7 @@ ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type n_rows_, template ScaLAPACKMatrix::ScaLAPACKMatrix(const size_type size, - const std::shared_ptr process_grid, + const std::shared_ptr process_grid, const size_type block_size, const LAPACKSupport::Property property) : @@ -982,7 +486,6 @@ NumberType ScaLAPACKMatrix::norm(const char type) const // instantiations template class ScaLAPACKMatrix; -template void ProcessGrid::send_to_inactive(double *, const int) const; DEAL_II_NAMESPACE_CLOSE diff --git a/tests/quick_tests/scalapack.cc b/tests/quick_tests/scalapack.cc index 1f99a5339e..2e3876b040 100644 --- a/tests/quick_tests/scalapack.cc +++ b/tests/quick_tests/scalapack.cc @@ -44,7 +44,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_in(size), inverse(size), full_out(size), diff(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_matrix (size, grid, block_size, LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_01.cc b/tests/scalapack/scalapack_01.cc index 0bdc181018..f5a99eeec5 100644 --- a/tests/scalapack/scalapack_01.cc +++ b/tests/scalapack/scalapack_01.cc @@ -42,7 +42,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_in(size), full_out(size), diff(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_matrix (size, grid, block_size); pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl; diff --git a/tests/scalapack/scalapack_02.cc b/tests/scalapack/scalapack_02.cc index 3132609225..311a412085 100644 --- a/tests/scalapack/scalapack_02.cc +++ b/tests/scalapack/scalapack_02.cc @@ -44,7 +44,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_in(size), inverse(size), full_out(size), diff(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_matrix (size, grid, block_size, LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_03.cc b/tests/scalapack/scalapack_03.cc index bedc4fd0d6..dd55f1f00d 100644 --- a/tests/scalapack/scalapack_03.cc +++ b/tests/scalapack/scalapack_03.cc @@ -52,7 +52,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_in(size), inverse(size), full_out(size), diff(size), prod1(size), prod2(size), one(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_matrix (size, grid, block_size, 
LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_04.cc b/tests/scalapack/scalapack_04.cc index aa6c545076..adcfe259f0 100644 --- a/tests/scalapack/scalapack_04.cc +++ b/tests/scalapack/scalapack_04.cc @@ -43,7 +43,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_A(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_A (size, grid, block_size, LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_05.cc b/tests/scalapack/scalapack_05.cc index c06fbaee29..97b059aef6 100644 --- a/tests/scalapack/scalapack_05.cc +++ b/tests/scalapack/scalapack_05.cc @@ -43,7 +43,7 @@ void test(const unsigned int size, const unsigned int block_size) // Create SPD matrices of requested size: FullMatrix full_A(size), inv_A(size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_A (size, grid, block_size, LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_06.cc b/tests/scalapack/scalapack_06.cc index bc77d19237..78e7e09128 100644 --- a/tests/scalapack/scalapack_06.cc +++ b/tests/scalapack/scalapack_06.cc @@ -52,7 +52,7 @@ void test(const unsigned int size, const unsigned int block_size) FullMatrix full_A(size); std::vector lapack_A(size*size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_A (size, grid, block_size, LAPACKSupport::Property::symmetric); diff --git a/tests/scalapack/scalapack_07.cc b/tests/scalapack/scalapack_07.cc index 25e980b6a1..d639eff029 100644 --- a/tests/scalapack/scalapack_07.cc +++ b/tests/scalapack/scalapack_07.cc @@ -53,7 +53,7 @@ void test(const unsigned int size, const unsigned int block_size) FullMatrix full_A(size); std::vector lapack_A(size*size); - std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); + std::shared_ptr grid = std::make_shared(mpi_communicator,size,size,block_size,block_size); ScaLAPACKMatrix scalapack_A (size, grid, block_size); scalapack_A.set_property(LAPACKSupport::Property::symmetric); -- 2.39.5
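Taken together, code written against this patch constructs the grid through the new header and namespace. A minimal end-to-end sketch (an illustration only; Utilities::MPI::MPI_InitFinalize is deal.II's usual RAII helper for MPI initialization, and the matrix is left unfilled here):

@code
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const unsigned int size = 64, block_size = 32;

  // Grid dimensions are chosen heuristically from the matrix and block sizes:
  std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
    std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD,
                                                  size, size,
                                                  block_size, block_size);

  ScaLAPACKMatrix<double> matrix(size, grid, block_size,
                                 LAPACKSupport::Property::symmetric);
}
@endcode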