--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_process_grid_h
+#define dealii_process_grid_h
+
+#include <deal.II/base/config.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/exceptions.h>
+
+#ifdef DEAL_II_WITH_SCALAPACK
+
+DEAL_II_NAMESPACE_OPEN
+
+
+// Forward declaration of class ScaLAPACKMatrix for ProcessGrid
+template <typename NumberType> class ScaLAPACKMatrix;
+
+
+namespace Utilities
+{
+ namespace MPI
+ {
+ /**
+ * A class taking care of setting up a two-dimensional processor grid.
+     * For example, an MPI communicator with 5 processes can be arranged into a
+     * 2x2 grid with the 5th process being inactive:
+ * @code
+     *      |   0   |   1
+     * -----+-------+------
+     *   0  |  P0   |  P1
+     * -----+-------+------
+     *   1  |  P2   |  P3
+ * @endcode
+ *
+     * A shared pointer to an object of this class is provided to ScaLAPACKMatrix
+     * objects to perform the block-cyclic distribution.
+     *
+     * Note that this class allows one to set up a process grid that uses fewer
+     * MPI processes than the total number of processes in the communicator.
+ *
+ * Currently the only place where one would use a ProcessGrid object is
+ * in connection with a ScaLAPACKMatrix object.
+ *
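+     * A minimal usage sketch together with ScaLAPACKMatrix (the variable
+     * names are illustrative only):
+     * @code
+     * const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
+     *   mpi_communicator, size, size, block_size, block_size);
+     * ScaLAPACKMatrix<double> matrix(size, grid, block_size);
+     * @endcode
+     *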
+ * @author Benjamin Brands, Denis Davydov, 2017
+ */
+ class ProcessGrid
+ {
+ public:
+
+ /**
+       * Declare the ScaLAPACKMatrix class as a friend to provide access to private members.
+ */
+ template <typename NumberType> friend class dealii::ScaLAPACKMatrix;
+
+ /**
+ * Constructor for a process grid with @p n_rows and @p n_columns for a given @p mpi_communicator.
+       * The product of rows and columns should be less than or equal to the total number
+       * of processes in the @p mpi_communicator.
+ */
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const unsigned int n_rows,
+ const unsigned int n_columns);
+
+ /**
+ * Constructor for a process grid for a given @p mpi_communicator.
+ * In this case the process grid is heuristically chosen based on the
+ * dimensions and block-cyclic distribution of a target matrix provided
+ * in @p n_rows_matrix, @p n_columns_matrix, @p row_block_size and @p column_block_size.
+ *
+       * The maximum number of MPI processes one can utilize is
+       * $\min\{\lceil M/MB \rceil \lceil N/NB \rceil, N_p\}$, where $M,N$
+       * are the matrix dimensions, $MB,NB$ are the block sizes and $N_p$ is the number of
+       * processes in the @p mpi_communicator. This function then creates a 2D processor grid
+       * assuming that the ratio between the number of process rows $p$ and columns $q$
+       * equals the ratio between the matrix dimensions $M$ and $N$.
+       *
+       * For example, a square matrix $640\times 640$ with the block size $32$
+       * and an @p mpi_communicator with 11 processes will result in a $3\times 3$
+       * process grid.
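+       *
+       * The numbers in this example work out as follows (a sketch of the
+       * heuristic implemented in process_grid.cc): at most
+       * $\lceil 640/32 \rceil^2 = 400$ processes could each own a block, so
+       * $N_p=\min\{400,11\}=11$; with the ratio $q/p = N/M = 1$ one obtains
+       * $q=\lfloor\sqrt{11}\rfloor=3$ columns and $p=\lfloor 11/3 \rfloor=3$ rows.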
+ */
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const unsigned int n_rows_matrix,
+ const unsigned int n_columns_matrix,
+ const unsigned int row_block_size,
+ const unsigned int column_block_size);
+
+ /**
+ * Destructor.
+ */
+ ~ProcessGrid();
+
+ /**
+       * Return the number of rows in the process grid.
+ */
+ unsigned int get_process_grid_rows() const;
+
+ /**
+       * Return the number of columns in the process grid.
+ */
+ unsigned int get_process_grid_columns() const;
+
+ /**
+       * Send @p count values stored consecutively starting at @p value from
+       * the process with rank zero to the processes which
+       * are not in the process grid.
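+       *
+       * A minimal usage sketch, where <code>grid</code> is an object of this
+       * class and compute_something() is purely illustrative:
+       * @code
+       * double result = 0.;
+       * if (grid.is_process_active())
+       *   result = compute_something();
+       * grid.send_to_inactive(&result);
+       * @endcode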
+ */
+ template <typename NumberType>
+ void send_to_inactive(NumberType *value, const int count=1) const;
+
+ /**
+ * Return <code>true</code> if the process is active within the grid.
+ */
+ bool is_process_active() const;
+
+ private:
+
+ /**
+ * A private constructor which takes grid dimensions as an <code>std::pair</code>.
+ */
+ ProcessGrid(MPI_Comm mpi_communicator,
+ const std::pair<unsigned int,unsigned int> &grid_dimensions);
+
+ /**
+ * An MPI communicator with all processes (active and inactive).
+ */
+ MPI_Comm mpi_communicator;
+
+ /**
+ * An MPI communicator with inactive processes and the process with rank zero.
+ */
+ MPI_Comm mpi_communicator_inactive_with_root;
+
+ /**
+       * BLACS context. It plays a role similar to that of an MPI communicator
+       * and is used by ScaLAPACK.
+ */
+ int blacs_context;
+
+ /**
+ * Rank of this MPI process.
+ */
+ const unsigned int this_mpi_process;
+
+ /**
+ * Total number of MPI processes.
+ */
+ const unsigned int n_mpi_processes;
+
+ /**
+       * Number of rows in the process grid.
+ */
+ int n_process_rows;
+
+ /**
+       * Number of columns in the process grid.
+ */
+ int n_process_columns;
+
+ /**
+ * Row of this process in the grid.
+ *
+       * It is negative for inactive processes.
+ */
+ int this_process_row;
+
+ /**
+ * Column of this process in the grid.
+ *
+       * It is negative for inactive processes.
+ */
+ int this_process_column;
+
+ /**
+ * A flag which is true for processes within the 2D process grid.
+ */
+ bool mpi_process_is_active;
+ };
+
+ /*----------------------- Inline functions ----------------------------------*/
+
+#ifndef DOXYGEN
+
+ inline
+ unsigned int ProcessGrid::get_process_grid_rows() const
+ {
+ return n_process_rows;
+ }
+
+
+
+ inline
+ unsigned int ProcessGrid::get_process_grid_columns() const
+ {
+ return n_process_columns;
+ }
+
+
+
+ inline
+ bool ProcessGrid::is_process_active() const
+ {
+ return mpi_process_is_active;
+ }
+
+
+#endif // ifndef DOXYGEN
+
+ } // end of namespace MPI
+
+} // end of namespace Utilities
+
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_SCALAPACK
+
+#endif
#ifdef DEAL_II_WITH_SCALAPACK
#include <deal.II/base/exceptions.h>
+#include <deal.II/base/process_grid.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/lapack_support.h>
#include <deal.II/base/mpi.h>
DEAL_II_NAMESPACE_OPEN
-
-// Forward declaration of class ScaLAPACKMatrix for ProcessGrid
-template <typename NumberType> class ScaLAPACKMatrix;
-
-
-/**
- * A class taking care of setting up a two-dimensional processor grid.
- * For example an MPI communicator with 5 processes can be arranged into a
- * 2x2 grid with the 5-th processor being inactive:
- * @code
- * | 0 | 1
- * -----| ------- |-----
- * 0 | P0 | P1
- * | |
- * -----| ------- |-----
- * 1 | P2 | P3
- * @endcode
- *
- * A shared pointer to this class is provided to ScaLAPACKMatrix matrices to
- * perform block-cyclic distribution.
- *
- * Note that this class allows to setup a process grid which has fewer
- * MPI cores than the total number of cores in the communicator.
- *
- * Currently the only place where one would use a ProcessGrid object is
- * in connection with a ScaLAPACKMatrix object.
- *
- * @author Benjamin Brands, 2017
- */
-class ProcessGrid
-{
-public:
-
- /**
- * Declare class ScaLAPACK as friend to provide access to private members.
- */
- template <typename NumberType> friend class ScaLAPACKMatrix;
-
- /**
- * Constructor for a process grid with @p n_rows and @p n_columns for a given @p mpi_communicator.
- * The product of rows and columns should be less or equal to the total number of cores
- * in the @p mpi_communicator.
- */
- ProcessGrid(MPI_Comm mpi_communicator,
- const unsigned int n_rows,
- const unsigned int n_columns);
-
- /**
- * Constructor for a process grid for a given @p mpi_communicator.
- * In this case the process grid is heuristically chosen based on the
- * dimensions and block-cyclic distribution of a target matrix provided
- * in @p n_rows_matrix, @p n_columns_matrix, @p row_block_size and @p column_block_size.
- *
- * The maximum number of MPI cores one can utilize is $\min\{\frac{M}{MB}\frac{N}{NB}, Np\}$, where $M,N$
- * are the matrix dimension and $MB,NB$ are the block sizes and $Np$ is the number of
- * processes in the @p mpi_communicator. This function then creates a 2D processor grid
- * assuming the ratio between number of process row $p$ and columns $q$ to be
- * equal the ratio between matrix dimensions $M$ and $N$.
- *
- * For example, a square matrix $640x640$ with the block size $32$
- * and the @p mpi_communicator with 11 cores will result in the $3x3$
- * process grid.
- */
- ProcessGrid(MPI_Comm mpi_communicator,
- const unsigned int n_rows_matrix,
- const unsigned int n_columns_matrix,
- const unsigned int row_block_size,
- const unsigned int column_block_size);
-
- /**
- * Destructor.
- */
- ~ProcessGrid();
-
- /**
- * Return the number of rows in the processes grid.
- */
- unsigned int get_process_grid_rows() const;
-
- /**
- * Return the number of columns in the processes grid.
- */
- unsigned int get_process_grid_columns() const;
-
- /**
- * Send @p count values stored consequently starting at @p value from
- * the process with rank zero to processes which
- * are not in the process grid.
- */
- template <typename NumberType>
- void send_to_inactive(NumberType *value, const int count=1) const;
-
- /**
- * Return <code>true</code> if the process is active within the grid.
- */
- bool is_process_active() const;
-
-private:
-
- /**
- * A private constructor which takes grid dimensions as an <code>std::pair</code>.
- */
- ProcessGrid(MPI_Comm mpi_communicator,
- const std::pair<unsigned int,unsigned int> &grid_dimensions);
-
- /**
- * An MPI communicator with all processes (active and inactive).
- */
- MPI_Comm mpi_communicator;
-
- /**
- * An MPI communicator with inactive processes and the process with rank zero.
- */
- MPI_Comm mpi_communicator_inactive_with_root;
-
- /**
- * BLACS context. This is equivalent to MPI communicators and is
- * used by ScaLAPACK.
- */
- int blacs_context;
-
- /**
- * Rank of this MPI process.
- */
- const unsigned int this_mpi_process;
-
- /**
- * Total number of MPI processes.
- */
- const unsigned int n_mpi_processes;
-
- /**
- * Number of rows in the processes grid.
- */
- int n_process_rows;
-
- /**
- * Number of columns in the processes grid.
- */
- int n_process_columns;
-
- /**
- * Row of this process in the grid.
- *
- * It's negative for in-active processes.
- */
- int this_process_row;
-
- /**
- * Column of this process in the grid.
- *
- * It's negative for in-active processes.
- */
- int this_process_column;
-
- /**
- * A flag which is true for processes within the 2D process grid.
- */
- bool mpi_process_is_active;
-};
-
-
/**
* A wrapper class around ScaLAPACK parallel dense linear algebra.
*
*/
ScaLAPACKMatrix(const size_type n_rows,
const size_type n_columns,
- const std::shared_ptr<const ProcessGrid> process_grid,
+ const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
const size_type row_block_size = 32,
const size_type column_block_size = 32,
const LAPACKSupport::Property property = LAPACKSupport::Property::general);
* using the process grid in @p process_grid.
*/
ScaLAPACKMatrix(const size_type size,
- const std::shared_ptr<const ProcessGrid> process_grid,
+ const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
const size_type block_size = 32,
const LAPACKSupport::Property property = LAPACKSupport::Property::symmetric);
LAPACKSupport::Property property;
/**
- * A shared pointer to a ProcessGrid object which contains a BLACS context
+ * A shared pointer to a Utilities::MPI::ProcessGrid object which contains a BLACS context
* and a MPI communicator, as well as other necessary data structures.
*/
- std::shared_ptr<const ProcessGrid> grid;
+ std::shared_ptr<const Utilities::MPI::ProcessGrid> grid;
/**
* Number of rows in the matrix.
// ----------------------- inline functions ----------------------------
+#ifndef DOXYGEN
+
template <typename NumberType>
inline
NumberType
return (*this)(loc_row,loc_column);
}
+#endif // DOXYGEN
+
DEAL_II_NAMESPACE_CLOSE
#endif // DEAL_II_WITH_SCALAPACK
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_scalapack_templates_h
+#define dealii_scalapack_templates_h
+
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_SCALAPACK
+
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/mpi.templates.h>
+
+// useful examples:
+// https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864
+// http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139 // second post by Julien Langou
+// https://andyspiros.wordpress.com/2011/07/08/an-example-of-blacs-with-c/
+// http://qboxcode.org/trac/browser/qb/tags/rel1_63_4/src/Matrix.C
+// https://gitlab.phys.ethz.ch/lwossnig/lecture/blob/a534f562dfb2ad5c564abe5c2356d5d956fb7218/examples/mpi/scalapack.cpp
+// https://github.com/elemental/Elemental/blob/master/src/core/imports/scalapack.cpp
+// https://scicomp.stackexchange.com/questions/7766/performance-optimization-or-tuning-possible-for-scalapack-gemm
+//
+// info:
+// http://www.netlib.org/scalapack/slug/index.html // User guide
+// http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors
+
+extern "C"
+{
+ /* Basic Linear Algebra Communication Subprograms (BLACS) declarations */
+ // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dinitb.htm#dinitb
+
+ /**
+ * Determine how many processes are available and the current process rank.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbpnf.htm
+ */
+ void Cblacs_pinfo(int *rank, int *nprocs);
+
+ /**
+ * Return internal BLACS value in @p val based on the input @p what and @p icontxt.
+ * The most common use is in retrieving a default system context (@p what = 0, @p icontxt is ignored)
+ * to be used in BLACS_GRIDINIT or BLACS_GRIDMAP.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbget.htm
+ */
+ void Cblacs_get(int icontxt, int what, int *val);
+
+ /**
+ * Map the processes sequentially in row-major or column-major order
+ * into the process grid. Input arguments must be the same on every process.
+ *
+ * On return, @p context is the integer handle to the BLACS context,
+ * whereas on entry it is a system context to be used in creating the
+ * BLACS context.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbint.htm
+ */
+ void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width);
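+
+  // A sketch of the usual grid set-up sequence using the BLACS routines
+  // declared in this block (all variable names are illustrative):
+  //
+  //   int sys_context;
+  //   Cblacs_get(0, 0, &sys_context);              // default system context
+  //   int grid_context = sys_context;
+  //   Cblacs_gridinit(&grid_context, "Row", 2, 2); // 2x2 row-major grid
+  //   int rows, cols, my_row, my_col;
+  //   Cblacs_gridinfo(grid_context, &rows, &cols, &my_row, &my_col);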
+
+ /**
+ * Return the process row and column index.
+ *
+ * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbinfo.htm
+ */
+ void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col);
+
+ /**
+ * Given the system process number, return the row and column coordinates in the BLACS' process grid.
+ */
+ void Cblacs_pcoord(int ictxt, int pnum, int *prow, int *pcol);
+
+ /**
+ * Release a BLACS context.
+ */
+ void Cblacs_gridexit(int context);
+
+ /**
+   * This routine holds up execution of all processes within the indicated
+ * scope until they have all called the routine.
+ */
+ void Cblacs_barrier(int, const char *);
+
+ /**
+   * Free all BLACS contexts and release all allocated memory.
+ */
+ void Cblacs_exit(int error_code);
+
+ /**
+   * Receive a message from the process (@p rsrc, @p csrc) into a general rectangular matrix.
+ *
+ * https://software.intel.com/en-us/mkl-developer-reference-c-gerv2d
+ */
+ void Cdgerv2d(int context, int M, int N, double *A, int lda, int rsrc, int csrc);
+
+ /**
+   * Send the general rectangular matrix @p A to the destination
+   * process (@p rdest, @p cdest) in the process grid.
+ *
+ * https://software.intel.com/en-us/mkl-developer-reference-c-2018-beta-gesd2d
+ */
+  void Cdgesd2d(int context, int M, int N, double *A, int lda, int rdest, int cdest);
+
+ /**
+ * Get BLACS context from MPI @p comm.
+ */
+ int Csys2blacs_handle(MPI_Comm comm);
+
+ /**
+ * Compute how many rows and columns each process owns (NUMber of Rows Or Columns).
+ *
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dnumy.htm
+ */
+ int numroc_ (const int *n, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
+
+ /**
+ * Compute the Cholesky factorization of an N-by-N real
+ * symmetric positive definite distributed matrix sub( A ) denoting
+ * A(IA:IA+N-1, JA:JA+N-1).
+ *
+ * http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotrf.htm
+ */
+ void pdpotrf_(const char *UPLO,
+ const int *N,
+ double *A, const int *IA, const int *JA, const int *DESCA,
+ int *INFO);
+
+ /**
+ * Compute the inverse of a real symmetric positive definite
+ * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the
+ * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by
+ * PDPOTRF.
+ *
+ * http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotri.htm
+ * https://software.intel.com/en-us/mkl-developer-reference-c-p-potri
+ */
+ void pdpotri_(const char *UPLO,
+ const int *N,
+ double *A, const int *IA, const int *JA, const int *DESCA,
+ int *INFO);
+
+ /**
+ * Estimate the reciprocal of the condition number (in the
+ * l1-norm) of a real symmetric positive definite distributed matrix
+ * using the Cholesky factorization.
+ *
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon
+ * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html
+ * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon
+ */
+ void pdpocon_(const char *uplo,
+ const int *N,
+ const double *A, const int *IA, const int *JA, const int *DESCA,
+ const double *ANORM, double *RCOND,
+ double *WORK, const int *LWORK,
+ int *IWORK, const int *LIWORK,
+ int *INFO);
+
+ /**
+   * Compute the norm of a real symmetric distributed matrix.
+ *
+ * http://www.netlib.org/scalapack/explore-html/dd/d12/pdlansy_8f_source.html
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_pdlansy.htm#pdlansy
+ */
+ double pdlansy_(const char *norm,
+ const char *uplo,
+ const int *N,
+ const double *A, const int *IA, const int *JA, const int *DESCA,
+ double *work);
+
+ /**
+ * Compute the Least Common Multiple (LCM) of two positive integers @p M and @p N.
+   * In fact the routine computes the greatest common divisor (GCD) and
+   * uses the fact that M*N = GCD*LCM.
+ *
+ * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html
+ */
+ int ilcm_(const int *M, const int *N);
+
+ /**
+ * Return the ceiling of the division of two integers.
+ *
+ * http://www.netlib.org/scalapack/explore-html/df/d07/iceil_8f_source.html
+ */
+ int iceil_(const int *i1, const int *i2);
+
+ /**
+   * Initialize the array descriptor @p desc of a distributed matrix with the 8 input arguments.
+ */
+ void descinit_ (int *desc,
+ const int *m, const int *n, const int *mb, const int *nb,
+ const int *irsrc, const int *icsrc,
+ const int *ictxt, const int *lld, int *info);
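+
+  // A sketch of how numroc_ and descinit_ are typically combined to set up
+  // the array descriptor of a distributed matrix (all variable names are
+  // illustrative; ctx, my_row, my_col, grid_rows and grid_cols are assumed
+  // to come from Cblacs_gridinfo above):
+  //
+  //   const int m = 640, n = 640, mb = 32, nb = 32, zero = 0;
+  //   const int loc_rows = numroc_(&m, &mb, &my_row, &zero, &grid_rows);
+  //   const int loc_cols = numroc_(&n, &nb, &my_col, &zero, &grid_cols);
+  //   const int lld      = (loc_rows > 0 ? loc_rows : 1);
+  //   int desc[9], info;
+  //   descinit_(desc, &m, &n, &mb, &nb, &zero, &zero, &ctx, &lld, &info);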
+
+ /**
+ * Compute the global index of a distributed matrix entry
+ * pointed to by the local index @p indxloc of the process indicated by
+ * @p iproc.
+ *
+ * @param indxloc The local index of the distributed matrix entry.
+ * @param nb Block size, size of the blocks the distributed matrix is split into.
+   * @param iproc The coordinate of the process whose local array row or column is to be determined.
+   * @param isrcproc The coordinate of the process that possesses the first row/column of the distributed matrix.
+   * @param nprocs The total number of processes over which the distributed matrix is distributed.
+ */
+ int indxl2g_ (const int *indxloc, const int *nb, const int *iproc, const int *isrcproc, const int *nprocs);
+
+ /**
+   * Compute the solution to a real system of linear equations A*X=B.
+ */
+ void pdgesv_(const int *n, const int *nrhs,
+ double *A, const int *ia, const int *ja, const int *desca,
+ int *ipiv,
+ double *B, const int *ib, const int *jb, const int *descb,
+ int *info);
+
+ /**
+ * Perform one of the matrix-matrix operations:
+ * sub( C ) := alpha*op( sub( A ) )*op( sub( B ) ) + beta*sub( C ),
+ * where
+ * sub( C ) denotes C(IC:IC+M-1,JC:JC+N-1), and, op( X ) is one of
+ * op( X ) = X or op( X ) = X'.
+ */
+ void pdgemm_(const char *transa, const char *transb,
+ const int *m, const int *n, const int *k,
+ const double *alpha,
+ double *A, const int *IA, const int *JA, const int *DESCA,
+ double *B, const int *IB, const int *JB, const int *DESCB,
+ const double *beta,
+ double *C, const int *IC, const int *JC, const int *DESCC);
+
+ /**
+ * Return the value of the one norm, or the Frobenius norm, or the infinity norm,
+ * or the element of largest absolute value of a distributed matrix
+ */
+ double pdlange_(char const *norm,
+ int const &m, int const &n,
+ double *A, int const &ia, int const &ja, int *desca,
+ double *work);
+
+ /**
+ * Compute the process coordinate which possesses the entry of a
+ * distributed matrix specified by a global index
+ */
+ int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
+
+ /**
+ * Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A
+ * by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system
+ * and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes.
+ * Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages.
+ *
+ * http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html
+ * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev
+ */
+ void pdsyev_(const char *jobz, const char *uplo,
+ const int *m, double *A, const int *ia, const int *ja, int *desca,
+ double *w,
+ double *z, const int *iz, const int *jz, int *descz,
+ double *work, const int *lwork, int *info);
+
+ /**
+ * Copy all or a part of a distributed matrix A to another
+   * distributed matrix B. No communication is performed; pdlacpy
+   * performs a local copy sub(B) := sub(A), where sub(A) denotes
+   * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1).
+ */
+ void pdlacpy_(const char *uplo,
+ const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca,
+ double *B, const int *ib, const int *jb, int *descb);
+}
+
+#endif // DEAL_II_WITH_SCALAPACK
+
+#endif // dealii_scalapack_templates_h
polynomials_piecewise.cc
polynomials_rannacher_turek.cc
polynomials_raviart_thomas.cc
+ process_grid.cc
quadrature.cc
quadrature_lib.cc
quadrature_selector.cc
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/base/process_grid.h>
+
+#ifdef DEAL_II_WITH_SCALAPACK
+
+#include <deal.II/lac/scalapack.templates.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+namespace
+{
+ /**
+   * Internal function to determine the dimensions of the process grid based on
+   * the total number of cores, the matrix dimensions and the matrix block sizes.
+ *
+ * Amesos heuristics:
+ * https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166
+ *
+ * Elemental default grid: El::Grid::Grid(mpi_comm,...)
+ * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
+ */
+ inline
+ std::pair<int,int> compute_processor_grid_sizes(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
+ const unsigned int block_size_m, const unsigned int block_size_n)
+ {
+ // Few notes from the ScaLAPACK user guide:
+ // It is possible to predict the best grid shape given the number of processes available:
+ // Pr x Pc <= P
+ // This, however, depends on the task to be done.
+    // LU, QR and QL factorizations perform better for “flat” process grids (Pr < Pc).
+    // For large N, Pc = 2*Pr is a good choice, whereas for small N, one should choose a small Pr.
+    // Square or nearly square grids are optimal for Cholesky factorization.
+    // LQ and RQ factorizations take advantage of “tall” grids (Pr > Pc).
+
+ // Below we always try to create 2D processor grids:
+
+ int n_processes;
+ MPI_Comm_size(mpi_comm, &n_processes);
+
+ // Get the total number of cores we can occupy in a rectangular dense matrix
+ // with rectangular blocks when every core owns only a single block:
+ const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
+ int(std::ceil((1.*n)/block_size_n));
+ const int Np = std::min(n_processes_heuristic, n_processes);
+
+    // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
+    //   Pc =: ratio * Pr,
+    // therefore
+    //   Np = Pr * Pc = Pc * Pc / ratio.
+    // For square matrices the ratio equals 1.
+ const double ratio = double(n)/m;
+ int Pc = std::floor(std::sqrt(ratio * Np));
+
+    // One could round Pc up to the nearest number that divides Np without remainder,
+    //   while ( Np % Pc != 0 )
+    //     ++Pc;
+    // but this affects the grid shape dramatically, e.g. with 10 cores a 3x3 grid becomes 2x5.
+
+ // limit our estimate to be in [2, Np]
+ int n_process_columns = std::min (Np, std::max(2, Pc));
+ // finally, get the rows:
+ int n_process_rows = Np / n_process_columns ;
+
+ Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns,
+ ExcMessage("error in process grid: "+
+ std::to_string(n_process_rows)+"x"+
+ std::to_string(n_process_columns)+
+ "="+
+ std::to_string(n_process_rows*n_process_columns)+
+ " out of " +
+ std::to_string(n_processes)));
+
+ return std::make_pair(n_process_rows,n_process_columns);
+
+ // For example,
+ // 320x320 with 32x32 blocks and 16 cores:
+ // Pc = 1.0 * Pr => 4x4 grid
+ // Pc = 0.5 * Pr => 8x2 grid
+ // Pc = 2.0 * Pr => 3x5 grid
+ }
+}
+
+namespace Utilities
+{
+ namespace MPI
+ {
+
+ ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const std::pair<unsigned int,unsigned int> &grid_dimensions)
+ :
+ mpi_communicator(mpi_comm),
+ this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)),
+ n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)),
+ n_process_rows(grid_dimensions.first),
+ n_process_columns(grid_dimensions.second)
+ {
+ Assert (grid_dimensions.first > 0,
+ ExcMessage("Number of process grid rows has to be positive."));
+ Assert (grid_dimensions.second > 0,
+ ExcMessage("Number of process grid columns has to be positive."));
+
+ Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
+ ExcMessage("Size of process grid is larger than number of available MPI processes."));
+
+ // processor grid order.
+ const bool column_major = false;
+
+      // Initialize the BLACS context from the provided communicator
+ blacs_context = Csys2blacs_handle(mpi_communicator);
+ const char *order = ( column_major ? "Col" : "Row" );
+ // FIXME: blacs_context can be modified below. Thus Cblacs2sys_handle
+ // may not return the same MPI communicator
+ Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);
+
+ // Blacs may modify the grid size on processes which are not used
+ // in the grid. So provide copies below:
+ int procrows_ = n_process_rows;
+ int proccols_ = n_process_columns;
+ Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column );
+
+ // If this MPI core is not on the grid, flag it as inactive and
+ // skip all jobs
+ // FIXME: different condition is used here
+ // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
+ if (this_process_row < 0 || this_process_column < 0)
+ mpi_process_is_active = false;
+ else
+ mpi_process_is_active = true;
+
+ // Create an auxiliary communicator which has root and all inactive cores
+ // Assume that inactive cores start with id=n_process_rows*n_process_columns
+ Assert (mpi_process_is_active || this_mpi_process >= n_process_rows*n_process_columns,
+ ExcInternalError());
+
+ std::vector<int> inactive_with_root_ranks;
+ inactive_with_root_ranks.push_back(0);
+ for (int i = n_process_rows*n_process_columns; i < n_mpi_processes; ++i)
+ inactive_with_root_ranks.push_back(i);
+
+ // Get the group of processes in mpi_communicator
+ int ierr = 0;
+ MPI_Group all_group;
+ ierr = MPI_Comm_group(mpi_communicator, &all_group);
+ AssertThrowMPI(ierr);
+
+ // Construct the group containing all ranks we need:
+ MPI_Group inactive_with_root_group;
+ const int n = inactive_with_root_ranks.size();
+ ierr = MPI_Group_incl(all_group,
+ n, inactive_with_root_ranks.data(),
+ &inactive_with_root_group);
+ AssertThrowMPI(ierr);
+
+ // Create the communicator based on the group
+      // Note that on most processes the communicator will be MPI_COMM_NULL.
+ // FIXME: switch to MPI_Comm_create_group for MPI-3 so that only processes within the to-be subgroup call this
+ ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group,
+ &mpi_communicator_inactive_with_root);
+ AssertThrowMPI(ierr);
+
+ MPI_Group_free(&all_group);
+ MPI_Group_free(&inactive_with_root_group);
+
+ // Double check that the process with rank 0 in subgroup is active:
+#ifdef DEBUG
+ if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
+ Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0)
+ Assert (mpi_process_is_active, ExcInternalError());
+#endif
+ }
+
+
+
+ ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const unsigned int n_rows_matrix,
+ const unsigned int n_columns_matrix,
+ const unsigned int row_block_size,
+ const unsigned int column_block_size)
+ :
+ ProcessGrid(mpi_comm,
+ compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix,
+ row_block_size, column_block_size) )
+ {}
+
+
+
+ ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
+ const unsigned int n_rows,
+ const unsigned int n_columns)
+ :
+ ProcessGrid(mpi_comm,
+ std::make_pair(n_rows,n_columns))
+ {}
+
+
+
+
+ ProcessGrid::~ProcessGrid()
+ {
+ if (mpi_process_is_active)
+ Cblacs_gridexit(blacs_context);
+
+ MPI_Comm_free(&mpi_communicator_inactive_with_root);
+ }
+
+
+
+ template <typename NumberType>
+ void ProcessGrid::send_to_inactive(NumberType *value, const int count) const
+ {
+ Assert (count>0, ExcInternalError());
+ if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
+ {
+ const int ierr =
+ MPI_Bcast(value,count,
+ Utilities::MPI::internal::mpi_type_id (value),
+ 0/*from root*/,
+ mpi_communicator_inactive_with_root);
+ AssertThrowMPI(ierr);
+ }
+ }
+
+ }
+}
+
+// instantiations
+
+template void Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *, const int) const;
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_SCALAPACK
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
-
-#include <deal.II/base/conditional_ostream.h>
-
-// useful examples:
-// https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864
-// http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139 // second post by Julien Langou
-// https://andyspiros.wordpress.com/2011/07/08/an-example-of-blacs-with-c/
-// http://qboxcode.org/trac/browser/qb/tags/rel1_63_4/src/Matrix.C
-// https://gitlab.phys.ethz.ch/lwossnig/lecture/blob/a534f562dfb2ad5c564abe5c2356d5d956fb7218/examples/mpi/scalapack.cpp
-// https://github.com/elemental/Elemental/blob/master/src/core/imports/scalapack.cpp
-// https://scicomp.stackexchange.com/questions/7766/performance-optimization-or-tuning-possible-for-scalapack-gemm
-//
-// info:
-// http://www.netlib.org/scalapack/slug/index.html // User guide
-// http://www.netlib.org/scalapack/slug/node135.html // How to Measure Errors
-
-extern "C"
-{
- /* Basic Linear Algebra Communication Subprograms (BLACS) declarations */
- // https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dinitb.htm#dinitb
-
- /**
- * Determine how many processes are available and the current process rank.
- *
- * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbpnf.htm
- */
- void Cblacs_pinfo(int *rank, int *nprocs);
-
- /**
- * Return internal BLACS value in @p val based on the input @p what and @p icontxt.
- * The most common use is in retrieving a default system context (@p what = 0, @p icontxt is ignored)
- * to be used in BLACS_GRIDINIT or BLACS_GRIDMAP.
- *
- * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbget.htm
- */
- void Cblacs_get(int icontxt, int what, int *val);
-
- /**
- * Map the processes sequentially in row-major or column-major order
- * into the process grid. Input arguments must be the same on every process.
- *
- * On return, @p context is the integer handle to the BLACS context,
- * whereas on entry it is a system context to be used in creating the
- * BLACS context.
- *
- * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbint.htm
- */
- void Cblacs_gridinit(int *context, const char *order, int grid_height, int grid_width);
-
- /**
- * Return the process row and column index.
- *
- * https://www.ibm.com/support/knowledgecenter/en/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dbinfo.htm
- */
- void Cblacs_gridinfo(int context, int *grid_height, int *grid_width, int *grid_row, int *grid_col);
-
- /**
- * Given the system process number, return the row and column coordinates in the BLACS' process grid.
- */
- void Cblacs_pcoord(int ictxt, int pnum, int *prow, int *pcol);
-
- /**
- * Release a BLACS context.
- */
- void Cblacs_gridexit(int context);
-
- /**
- * This routines holds up execution of all processes within the indicated
- * scope until they have all called the routine.
- */
- void Cblacs_barrier(int, const char *);
-
- /**
- * Free all BLACS contexts and releases all allocated memory.
- */
- void Cblacs_exit(int error_code);
-
- /**
- * Receives a message from a process @prsrc, @p csrc into a general rectangular matrix.
- *
- * https://software.intel.com/en-us/mkl-developer-reference-c-gerv2d
- */
- void Cdgerv2d(int context, int M, int N, double *A, int lda, int rsrc, int csrc);
-
- /**
- * Sends the general rectangular matrix A to the destination
- * process @p rdest @p cdest in the process grid.
- *
- * https://software.intel.com/en-us/mkl-developer-reference-c-2018-beta-gesd2d
- */
- void Cdgesd2d(int context , int M, int N, double *A, int lda, int rdest, int cdest);
-
- /**
- * Get BLACS context from MPI @p comm.
- */
- int Csys2blacs_handle(MPI_Comm comm);
-
- /**
- * Compute how many rows and columns each process owns (NUMber of Rows Or Columns).
- *
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_dnumy.htm
- */
- int numroc_ (const int *n, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
-
- /**
- * Compute the Cholesky factorization of an N-by-N real
- * symmetric positive definite distributed matrix sub( A ) denoting
- * A(IA:IA+N-1, JA:JA+N-1).
- *
- * http://www.netlib.org/scalapack/explore-html/d5/d9e/pdpotrf_8f_source.html
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotrf.htm
- */
- void pdpotrf_(const char *UPLO,
- const int *N,
- double *A, const int *IA, const int *JA, const int *DESCA,
- int *INFO);
-
- /**
- * Compute the inverse of a real symmetric positive definite
- * distributed matrix sub( A ) = A(IA:IA+N-1,JA:JA+N-1) using the
- * Cholesky factorization sub( A ) = U**T*U or L*L**T computed by
- * PDPOTRF.
- *
- * http://www.netlib.org/scalapack/explore-html/d2/d44/pdpotri_8f_source.html
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpotri.htm
- * https://software.intel.com/en-us/mkl-developer-reference-c-p-potri
- */
- void pdpotri_(const char *UPLO,
- const int *N,
- double *A, const int *IA, const int *JA, const int *DESCA,
- int *INFO);
-
- /**
- * Estimate the reciprocal of the condition number (in the
- * l1-norm) of a real symmetric positive definite distributed matrix
- * using the Cholesky factorization.
- *
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lpocon.htm#lpocon
- * http://www.netlib.org/scalapack/explore-html/d4/df7/pdpocon_8f.html
- * https://software.intel.com/en-us/mkl-developer-reference-fortran-pocon
- */
- void pdpocon_(const char *uplo,
- const int *N,
- const double *A, const int *IA, const int *JA, const int *DESCA,
- const double *ANORM, double *RCOND,
- double *WORK, const int *LWORK,
- int *IWORK, const int *LIWORK,
- int *INFO);
-
- /**
- * Norm of a real symmetric matrix
- *
- * http://www.netlib.org/scalapack/explore-html/dd/d12/pdlansy_8f_source.html
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_pdlansy.htm#pdlansy
- */
- double pdlansy_(const char *norm,
- const char *uplo,
- const int *N,
- const double *A, const int *IA, const int *JA, const int *DESCA,
- double *work);
-
- /**
- * Compute the Least Common Multiple (LCM) of two positive integers @p M and @p N.
- * In fact the routine Compute the greatest common divisor (GCD) and
- * use the fact that M*N = GCD*LCM.
- *
- * http://www.netlib.org/scalapack/explore-html/d0/d9b/ilcm_8f_source.html
- */
- int ilcm_(const int *M, const int *N);
-
- /**
- * Return the ceiling of the division of two integers.
- *
- * http://www.netlib.org/scalapack/explore-html/df/d07/iceil_8f_source.html
- */
- int iceil_(const int *i1, const int *i2);
-
- /**
- * Initialize the descriptor vector with the 8 input arguments
- */
- void descinit_ (int *desc,
- const int *m, const int *n, const int *mb, const int *nb,
- const int *irsrc, const int *icsrc,
- const int *ictxt, const int *lld, int *info);
-
- /**
- * Compute the global index of a distributed matrix entry
- * pointed to by the local index @p indxloc of the process indicated by
- * @p iproc.
- *
- * @param indxloc The local index of the distributed matrix entry.
- * @param nb Block size, size of the blocks the distributed matrix is split into.
- * @param iproc The coordinate of the process whose local array row or column is to be determined
- * @param isrcproc The coordinate of the process that possesses the first row/column of the distributed matrix
- * @param nprocs The total number processes over which the distributed matrix is distributed
- */
- int indxl2g_ (const int *indxloc, const int *nb, const int *iproc, const int *isrcproc, const int *nprocs);
-
- /**
- * Compute the solution to a real system of linear equations
- */
- void pdgesv_(const int *n, const int *nrhs,
- double *A, const int *ia, const int *ja, const int *desca,
- int *ipiv,
- double *B, const int *ib, const int *jb, const int *descb,
- int *info);
-
- /**
- * Perform one of the matrix-matrix operations:
- * sub( C ) := alpha*op( sub( A ) )*op( sub( B ) ) + beta*sub( C ),
- * where
- * sub( C ) denotes C(IC:IC+M-1,JC:JC+N-1), and, op( X ) is one of
- * op( X ) = X or op( X ) = X'.
- */
- void pdgemm_(const char *transa, const char *transb,
- const int *m, const int *n, const int *k,
- const double *alpha,
- double *A, const int *IA, const int *JA, const int *DESCA,
- double *B, const int *IB, const int *JB, const int *DESCB,
- const double *beta,
- double *C, const int *IC, const int *JC, const int *DESCC);
-
- /**
- * Return the value of the one norm, or the Frobenius norm, or the infinity norm,
- * or the element of largest absolute value of a distributed matrix
- */
- double pdlange_(char const *norm,
- int const &m, int const &n,
- double *A, int const &ia, int const &ja, int *desca,
- double *work);
-
- /**
- * Compute the process coordinate which possesses the entry of a
- * distributed matrix specified by a global index
- */
- int indxg2p_(const int *glob, const int *nb, const int *iproc, const int *isproc, const int *nprocs);
-
- /**
- * Compute all eigenvalues and, optionally, eigenvectors of a real symmetric matrix A
- * by calling the recommended sequence of ScaLAPACK routines. In its present form, the routine assumes a homogeneous system
- * and makes no checks for consistency of the eigenvalues or eigenvectors across the different processes.
- * Because of this, it is possible that a heterogeneous system may return incorrect results without any error messages.
- *
- * http://www.netlib.org/scalapack/explore-html/d0/d1a/pdsyev_8f.html
- * https://www.ibm.com/support/knowledgecenter/SSNR5K_4.2.0/com.ibm.cluster.pessl.v4r2.pssl100.doc/am6gr_lsyev.htm#lsyev
- */
- void pdsyev_(const char *jobz, const char *uplo,
- const int *m, double *A, const int *ia, const int *ja, int *desca,
- double *w,
- double *z, const int *iz, const int *jz, int *descz,
- double *work, const int *lwork, int *info);
-
- /**
- * Copy all or a part of a distributed matrix A to another
- * distributed matrix B. No communication is performed, pdlacpy
- * performs a local copy sub(A) := sub(B), where sub(A) denotes
- * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1)
- */
- void pdlacpy_(const char *uplo,
- const int *m, const int *n, double *A, const int *ia, const int *ja, int *desca,
- double *B, const int *ib, const int *jb, int *descb);
-}
-
-
+#include <deal.II/lac/scalapack.templates.h>
DEAL_II_NAMESPACE_OPEN
-namespace
-{
- /**
- * Internal function to determine dimension of process grid based on the total
- * number of cores, matrix dimensions and matrix block sizes
- *
- * Amesos heuristics:
- * https://github.com/trilinos/Trilinos/blob/master/packages/amesos/src/Amesos_Scalapack.cpp#L142-L166
- *
- * Elemental default grid: El::Grid::Grid(mpi_comm,...)
- * https://github.com/elemental/Elemental/blob/master/src/core/Grid.cpp#L67-L91
- */
- inline
- std::pair<int,int> compute_processor_grid_sizes(MPI_Comm mpi_comm, const unsigned int m, const unsigned int n,
- const unsigned int block_size_m, const unsigned int block_size_n)
- {
- // Few notes from the ScaLAPACK user guide:
- // It is possible to predict the best grid shape given the number of processes available:
- // Pr x Pc <= P
- // This, however, depends on the task to be done.
- // LU , QR and QL factorizations perform better for “flat” process grids (Pr < Pc )
- // For large N, Pc = 2*Pr is a good choice, whereas for small N, one should choose small Pr
- // Square or near square grids are more optimal for Cholesky factorization.
- // LQ and RQ factorizations take advantage of “tall” grids (Pr > Pc )
-
- // Below we always try to create 2D processor grids:
-
- int n_processes;
- MPI_Comm_size(mpi_comm, &n_processes);
-
- // Get the total number of cores we can occupy in a rectangular dense matrix
- // with rectangular blocks when every core owns only a single block:
- const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
- int(std::ceil((1.*n)/block_size_n));
- const int Np = std::min(n_processes_heuristic, n_processes);
-
- // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
- // Pc =: ratio * Pr
- // therefore
- // Np = Pc * Pc / ratio
- // for quadratic matrices the ratio equals 1
- const double ratio = double(n)/m;
- int Pc = std::floor(std::sqrt(ratio * Np));
-
- // one could rounds up Pc to the number which has zero remainder from the division of Np
- // while ( Np % Pc != 0 )
- // ++Pc;
- // but this affects the grid shape dramatically, i.e. 10 cores 3x3 becomes 2x5.
-
- // limit our estimate to be in [2, Np]
- int n_process_columns = std::min (Np, std::max(2, Pc));
- // finally, get the rows:
- int n_process_rows = Np / n_process_columns ;
-
- Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns,
- ExcMessage("error in process grid: "+
- std::to_string(n_process_rows)+"x"+
- std::to_string(n_process_columns)+
- "="+
- std::to_string(n_process_rows*n_process_columns)+
- " out of " +
- std::to_string(n_processes)));
-
- return std::make_pair(n_process_rows,n_process_columns);
-
- // For example,
- // 320x320 with 32x32 blocks and 16 cores:
- // Pc = 1.0 * Pr => 4x4 grid
- // Pc = 0.5 * Pr => 8x2 grid
- // Pc = 2.0 * Pr => 3x5 grid
- }
-}
-
-
-
-ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
- const std::pair<unsigned int,unsigned int> &grid_dimensions)
- :
- mpi_communicator(mpi_comm),
- this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)),
- n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)),
- n_process_rows(grid_dimensions.first),
- n_process_columns(grid_dimensions.second)
-{
- Assert (grid_dimensions.first > 0,
- ExcMessage("Number of process grid rows has to be positive."));
- Assert (grid_dimensions.second > 0,
- ExcMessage("Number of process grid columns has to be positive."));
-
- Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
- ExcMessage("Size of process grid is larger than number of available MPI processes."));
-
- // processor grid order.
- const bool column_major = false;
-
- // Initialize Cblas context from the provided communicator
- blacs_context = Csys2blacs_handle(mpi_communicator);
- const char *order = ( column_major ? "Col" : "Row" );
- // FIXME: blacs_context can be modified below. Thus Cblacs2sys_handle
- // may not return the same MPI communicator
- Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);
-
- // Blacs may modify the grid size on processes which are not used
- // in the grid. So provide copies below:
- int procrows_ = n_process_rows;
- int proccols_ = n_process_columns;
- Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column );
-
- // If this MPI core is not on the grid, flag it as inactive and
- // skip all jobs
- // FIXME: different condition is used here
- // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
- if (this_process_row < 0 || this_process_column < 0)
- mpi_process_is_active = false;
- else
- mpi_process_is_active = true;
-
- // Create an auxiliary communicator which has root and all inactive cores
- // Assume that inactive cores start with id=n_process_rows*n_process_columns
- Assert (mpi_process_is_active || this_mpi_process >= n_process_rows*n_process_columns,
- ExcInternalError());
-
- std::vector<int> inactive_with_root_ranks;
- inactive_with_root_ranks.push_back(0);
- for (int i = n_process_rows*n_process_columns; i < n_mpi_processes; ++i)
- inactive_with_root_ranks.push_back(i);
-
- // Get the group of processes in mpi_communicator
- int ierr = 0;
- MPI_Group all_group;
- ierr = MPI_Comm_group(mpi_communicator, &all_group);
- AssertThrowMPI(ierr);
-
- // Construct the group containing all ranks we need:
- MPI_Group inactive_with_root_group;
- const int n = inactive_with_root_ranks.size();
- ierr = MPI_Group_incl(all_group,
- n, inactive_with_root_ranks.data(),
- &inactive_with_root_group);
- AssertThrowMPI(ierr);
-
- // Create the communicator based on the group
- // Note that, on most cores the communicator will be MPI_COMM_NULL
- // FIXME: switch to MPI_Comm_create_group for MPI-3 so that only processes within the to-be subgroup call this
- ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group,
- &mpi_communicator_inactive_with_root);
- AssertThrowMPI(ierr);
-
- MPI_Group_free(&all_group);
- MPI_Group_free(&inactive_with_root_group);
-
- // Double check that the process with rank 0 in subgroup is active:
-#ifdef DEBUG
- if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
- Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0)
- Assert (mpi_process_is_active, ExcInternalError());
-#endif
-}
-
-
-
-ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
- const unsigned int n_rows_matrix,
- const unsigned int n_columns_matrix,
- const unsigned int row_block_size,
- const unsigned int column_block_size)
- :
- ProcessGrid(mpi_comm,
- compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix,
- row_block_size, column_block_size) )
-{}
-
-
-
-ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
- const unsigned int n_rows,
- const unsigned int n_columns)
- :
- ProcessGrid(mpi_comm,
- std::make_pair(n_rows,n_columns))
-{}
-
-
-
-
-ProcessGrid::~ProcessGrid()
-{
- if (mpi_process_is_active)
- Cblacs_gridexit(blacs_context);
-
- MPI_Comm_free(&mpi_communicator_inactive_with_root);
-}
-
-
-
-unsigned int ProcessGrid::get_process_grid_rows() const
-{
- return n_process_rows;
-}
-
-
-
-unsigned int ProcessGrid::get_process_grid_columns() const
-{
- return n_process_columns;
-}
-
-
-
-bool ProcessGrid::is_process_active() const
-{
- return mpi_process_is_active;
-}
-
-
-
-template <typename NumberType>
-void ProcessGrid::send_to_inactive(NumberType *value, const int count) const
-{
- Assert (count>0, ExcInternalError());
- if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
- {
- const int ierr =
- MPI_Bcast(value,count,
- Utilities::MPI::internal::mpi_type_id (value),
- 0/*from root*/,
- mpi_communicator_inactive_with_root);
- AssertThrowMPI(ierr);
- }
-}
-
-
-
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
const size_type n_columns_,
- const std::shared_ptr<const ProcessGrid> process_grid,
+ const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
const size_type row_block_size_,
const size_type column_block_size_,
const LAPACKSupport::Property property)
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type size,
- const std::shared_ptr<const ProcessGrid> process_grid,
+ const std::shared_ptr<const Utilities::MPI::ProcessGrid> process_grid,
const size_type block_size,
const LAPACKSupport::Property property)
:
// instantiations
template class ScaLAPACKMatrix<double>;
-template void ProcessGrid::send_to_inactive<double>(double *, const int) const;
DEAL_II_NAMESPACE_CLOSE
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), inverse(size), full_out(size), diff(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_matrix (size, grid, block_size,
LAPACKSupport::Property::symmetric);
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), full_out(size), diff(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_matrix (size, grid, block_size);
pcout << size << " " << block_size << " " << grid->get_process_grid_rows() << " " << grid->get_process_grid_columns() << std::endl;
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), inverse(size), full_out(size), diff(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_matrix (size, grid, block_size,
LAPACKSupport::Property::symmetric);
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_in(size), inverse(size), full_out(size), diff(size), prod1(size), prod2(size), one(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_matrix (size, grid, block_size,
LAPACKSupport::Property::symmetric);
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_A (size, grid, block_size,
LAPACKSupport::Property::symmetric);
// Create SPD matrices of requested size:
FullMatrix<NumberType> full_A(size), inv_A(size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_A (size, grid, block_size,
LAPACKSupport::Property::symmetric);
FullMatrix<NumberType> full_A(size);
std::vector<NumberType> lapack_A(size*size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_A (size, grid, block_size,
LAPACKSupport::Property::symmetric);
FullMatrix<NumberType> full_A(size);
std::vector<NumberType> lapack_A(size*size);
- std::shared_ptr<ProcessGrid> grid = std::make_shared<ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size,block_size);
ScaLAPACKMatrix<NumberType> scalapack_A (size, grid, block_size);
scalapack_A.set_property(LAPACKSupport::Property::symmetric);