--- /dev/null
+New: Add save/load functions for ScaLAPACKMatrix to save/load a distributed matrix to/from disk using HDF5. If HDF5 is configured with MPI, parallel I/O is used to save/load the matrix.
+<br>
+(Benjamin Brands, 2018/01/25)
+
* .
*
* If it is necessary to copy complete matrices with an identical block-cyclic distribution,
- * use copy_to(ScaLAPACKMatrix<NumberType> &dest) with only one argument to avoid communication
+ * use copy_to(ScaLAPACKMatrix<NumberType> &dest) with only one argument to avoid communication.
+ *
+ * The underlying process grids of the matrices @p A and @p B must have been built
+ * with the same MPI communicator.
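+ *
+ * For illustration only (the matrix dimensions, the process grid @p grid and
+ * the block sizes are made up for this sketch), a submatrix copy might look
+ * like:
+ * @code
+ * // copy the 100x100 block of A starting at global index (50,100)
+ * // into the upper left corner of B
+ * ScaLAPACKMatrix<double> A(500, 500, grid, 32, 32);
+ * ScaLAPACKMatrix<double> B(100, 100, grid, 32, 32);
+ * A.copy_to(B,
+ *           std::make_pair(50U, 100U),
+ *           std::make_pair(100U, 100U));
+ * @endcode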
*/
void copy_to(ScaLAPACKMatrix<NumberType> &B,
const std::pair<unsigned int,unsigned int> &offset_A,
const std::pair<unsigned int,unsigned int> &submatrix_size) const;
/**
- * Stores the distributed matrix in @p filename using HDF5
+ * Stores the distributed matrix in @p filename using HDF5.
*
* If HDF5 was built with MPI, parallel I/O is used to save the matrix.
- * Otherwise, just one process will do the output.
+ * Otherwise, just one process will do the output. This means that
+ * internally the distributed matrix is copied to one process, which
+ * does the output. Therefore, the matrix has to fit into the memory
+ * of one process.
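+ *
+ * As an illustration only (file name, dimensions, process grid @p grid and
+ * block sizes are made up for this sketch), a save/load round trip might
+ * look like:
+ * @code
+ * ScaLAPACKMatrix<double> A(1000, 1000, grid, 32, 32);
+ * // ... fill A ...
+ * A.save("matrix.h5");
+ *
+ * ScaLAPACKMatrix<double> B(1000, 1000, grid, 32, 32);
+ * B.load("matrix.h5"); // B must have the dimensions of the stored matrix
+ * @endcode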
*/
void save(const char *filename) const;
/**
* Loads the distributed matrix from file @p filename using HDF5.
*
- * The matrix must have the same dimensions as the matrix in stored in the file.
+ * The matrix must have the same dimensions as the matrix stored in the file.
*
* If HDF5 was built with MPI, parallel I/O is used to load the matrix.
* Otherwise, just one process will load the matrix from storage
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
-# ifdef DEAL_II_WITH_HDF5
-#include <hdf5.h>
-# endif
-
// useful examples:
// https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864
// http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139 // second post by Julien Langou
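  // p?gels solves overdetermined or underdetermined real linear systems
  // A*X = B using a QR or LQ factorization of A; A is assumed to have full rank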
psgels_(trans,m,n,nrhs,A,ia,ja,desca,B,ib,jb,descb,work,lwork,info);
}
-
-# ifdef DEAL_II_WITH_HDF5
-
-template<typename number>
-inline hid_t hdf5_type_id (const number *)
-{
- Assert (false, dealii::ExcNotImplemented());
- //don't know what to put here; it does not matter
- return -1;
-}
-
-inline hid_t hdf5_type_id (const double *)
-{
- return H5T_NATIVE_DOUBLE;
-}
-
-inline hid_t hdf5_type_id (const float *)
-{
- return H5T_NATIVE_FLOAT;
-}
-
-inline hid_t hdf5_type_id (const int *)
-{
- return H5T_NATIVE_INT;
-}
-
-inline hid_t hdf5_type_id (const unsigned int *)
-{
- return H5T_NATIVE_UINT;
-}
-
-inline hid_t hdf5_type_id (const char *)
-{
- return H5T_NATIVE_CHAR;
-}
-
-# endif // DEAL_II_WITH_HDF5
-
#endif // DEAL_II_WITH_SCALAPACK
#endif // dealii_scalapack_templates_h
#include <deal.II/base/mpi.templates.h>
#include <deal.II/lac/scalapack.templates.h>
-# ifdef DEAL_II_WITH_HDF5
+#ifdef DEAL_II_WITH_HDF5
#include <hdf5.h>
-# endif
+#endif
DEAL_II_NAMESPACE_OPEN
+#ifdef DEAL_II_WITH_HDF5
+
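+// return the HDF5 native datatype id matching a C++ number type; the generic
+// template is never called, only the overloads below for the concrete types
+// used in the HDF5 I/O routines are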
+template<typename number>
+inline hid_t hdf5_type_id (const number *)
+{
+ Assert (false, dealii::ExcNotImplemented());
+ // this generic template is never called thanks to the assertion above; the return value does not matter
+ return -1;
+}
+
+inline hid_t hdf5_type_id (const double *)
+{
+ return H5T_NATIVE_DOUBLE;
+}
+
+inline hid_t hdf5_type_id (const float *)
+{
+ return H5T_NATIVE_FLOAT;
+}
+
+inline hid_t hdf5_type_id (const int *)
+{
+ return H5T_NATIVE_INT;
+}
+
+inline hid_t hdf5_type_id (const unsigned int *)
+{
+ return H5T_NATIVE_UINT;
+}
+
+inline hid_t hdf5_type_id (const char *)
+{
+ return H5T_NATIVE_CHAR;
+}
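+
+// An illustrative sketch (not the exact call sequence of save()/load()) of how
+// hdf5_type_id() is meant to be used: the returned id selects the matching HDF5
+// datatype when creating and writing a dataset, e.g. for a double array 'data':
+//
+//   const hid_t type_id = hdf5_type_id(data);   // -> H5T_NATIVE_DOUBLE
+//   const hid_t dataset = H5Dcreate2(file_id, "/matrix", type_id, dataspace_id,
+//                                    H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+//   status = H5Dwrite(dataset, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);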
+#endif // DEAL_II_WITH_HDF5
+
+
+
template <typename NumberType>
ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
const size_type n_columns_,
// Currently, copying of matrices is only supported if A and B share the same MPI communicator.
int ierr, comparison;
ierr = MPI_Comm_compare(grid->mpi_communicator,B.grid->mpi_communicator,&comparison);
+ AssertThrowMPI(ierr);
Assert (comparison == MPI_IDENT,ExcMessage("Matrices A and B must have a common MPI communicator"));
/*
* The routine pgemr2d requires a BLACS context resembling at least the union of process grids
- * described by the BLACS contexts of matrix A and B
+ * described by the BLACS contexts held by the ProcessGrids of matrix A and B.
+ * As A and B share the same MPI communicator, there is no need to create a union MPI
+ * communicator to initialize the BLACS context.
*/
int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
const char *order = "Col";
int union_n_process_columns = 1;
Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);
-
int n_grid_rows_A,n_grid_columns_A,my_row_A,my_column_A;
Cblacs_gridinfo(this->grid->blacs_context,&n_grid_rows_A,&n_grid_columns_A,&my_row_A,&my_column_A);
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
- AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+ Assert(false,ExcInternalError());
# else
/*
* The content of the distributed matrix is copied to a matrix using a 1x1 process grid.
* Therefore, one process has all the data and can write it to a file.
+ *
+ * Create a 1x1 column grid which will be used to initialize
+ * an effectively serial ScaLAPACK matrix to gather the contents from the current object.
*/
- //create a 1x1 column grid with P being the number of MPI processes
std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
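  // on the 1x1 process grid the single process stores the complete matrix; choosing
  // the block sizes equal to the matrix dimensions makes it one contiguous block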
const int MB=n_rows, NB=n_columns;
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
- AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+ Assert(false,ExcInternalError());
# else
const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
/*
* The content of the distributed matrix is copied to a matrix using a 1xn_processes process grid.
* Therefore, the processes hold contiguous chunks of the matrix, which they can write to the file.
- */
- //create a 1xP column grid with P being the number of MPI processes
+ *
+ * Create a 1xn_processes column grid.
+ */
std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
const int MB=n_rows, NB=std::ceil(static_cast<double>(n_columns)/n_mpi_processes);
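  // e.g. for n_columns=10 and 4 processes NB=ceil(10/4)=3, so on the 1x4 grid the
  // processes own 3, 3, 3 and 1 columns respectively, each as one contiguous chunk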
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
- AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+ Assert(false,ExcInternalError());
# else
/*
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
- AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+ Assert(false,ExcInternalError());
# else
# ifndef H5_HAVE_PARALLEL
- AssertThrow(false, ExcMessage ("HDF5 was not built with MPI."));
+ Assert(false,ExcInternalError());
# else
const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
//copying submatrices
unsigned int sub_size=100;
std::pair<unsigned int,unsigned int> offset_A = std::make_pair(49,99);
- std::pair<unsigned int,unsigned int> offset_B = std::make_pair(0,0);
+ std::pair<unsigned int,unsigned int> offset_B = std::make_pair(4,7);
std::pair<unsigned int,unsigned int> submatrix_size = std::make_pair(sub_size,sub_size);
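// the destination matrix must be large enough to hold the copied block placed at
// offset_B, i.e. at least (offset_B.first+sub_size) x (offset_B.second+sub_size)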
- ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size,sub_size,grid_2d,block_size_j,block_size_i);
+ ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size+offset_B.first,sub_size+offset_B.second,grid_2d,block_size_j,block_size_i);
scalapack_matrix_2d.copy_to(scalapack_matrix_dest,offset_A,offset_B,submatrix_size);
- FullMatrix<NumberType> dest (sub_size,sub_size);
+ FullMatrix<NumberType> dest (sub_size+offset_B.first,sub_size+offset_B.second);
scalapack_matrix_dest.copy_to(dest);
- for (unsigned int i=0; i<dest.m(); ++i)
- for (unsigned int j=0; j<dest.n(); ++j)
- dest(i,j) -= full(offset_A.first+i,offset_A.second+j);
+ for (unsigned int i=0; i<sub_size; ++i)
+ for (unsigned int j=0; j<sub_size; ++j)
+ dest(i+offset_B.first,j+offset_B.second) -= full(offset_A.first+i,offset_A.second+j);
AssertThrow(dest.frobenius_norm() < 1e-12,ExcInternalError());
}