From 5706afb106c43a271655a8227488a620c1a5118b Mon Sep 17 00:00:00 2001
From: Benjamin Brands
Date: Wed, 24 Jan 2018 09:14:49 +0100
Subject: [PATCH] corrections_I

---
 .../changes/minor/20180125BenjaminBrands_b |  4 ++
 include/deal.II/lac/scalapack.h            | 14 ++--
 include/deal.II/lac/scalapack.templates.h  | 42 ------------
 source/lac/scalapack.cc                    | 67 +++++++++++++++----
 tests/scalapack/scalapack_07.cc            | 12 ++--
 5 files changed, 75 insertions(+), 64 deletions(-)
 create mode 100644 doc/news/changes/minor/20180125BenjaminBrands_b

diff --git a/doc/news/changes/minor/20180125BenjaminBrands_b b/doc/news/changes/minor/20180125BenjaminBrands_b
new file mode 100644
index 0000000000..260aa2a3d2
--- /dev/null
+++ b/doc/news/changes/minor/20180125BenjaminBrands_b
@@ -0,0 +1,4 @@
+New: Add save/load functions for ScaLAPACKMatrix to save/load a distributed matrix to/from disk using HDF5. If HDF5 is configured with MPI, parallel I/O is used to save/load the matrix.
+
+(Benjamin Brands, 2018/01/25)
+
diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h
index 5abfedd106..dbfd1c7abc 100644
--- a/include/deal.II/lac/scalapack.h
+++ b/include/deal.II/lac/scalapack.h
@@ -179,7 +179,10 @@ public:
    * .
    *
    * If it is necessary to copy complete matrices with an identical block-cyclic distribution,
-   * use copy_to(ScaLAPACKMatrix<NumberType> &dest) with only one argument to avoid communication
+   * use copy_to(ScaLAPACKMatrix<NumberType> &dest) with only one argument to avoid communication.
+   *
+   * The underlying process grids of the matrices @p A and @p B must have been built
+   * with the same MPI communicator.
    */
   void copy_to(ScaLAPACKMatrix<NumberType> &B,
                const std::pair<unsigned int,unsigned int> &offset_A,
@@ -187,17 +190,20 @@
                const std::pair<unsigned int,unsigned int> &submatrix_size) const;
 
   /**
-   * Stores the distributed matrix in @p filename using HDF5
+   * Stores the distributed matrix in @p filename using HDF5.
    *
    * If HDF5 was build with MPI, parallel I/O is used to save the matrix.
-   * Otherwise, just one process will do the output.
+   * Otherwise, just one process will do the output. This means that
+   * internally the distributed matrix is copied to one process, which
+   * does the output. Therefore, the matrix has to fit into the memory
+   * of one process.
    */
   void save(const char *filename) const;
 
   /**
    * Loads the distributed matrix from file @p filename using HDF5.
    *
-   * The matrix must have the same dimensions as the matrix in stored in the file.
+   * The matrix must have the same dimensions as the matrix stored in the file.
    *
    * If HDF5 was build with MPI, parallel I/O is used to load the matrix.
    * Otherwise, just one process will load the matrix from storage
diff --git a/include/deal.II/lac/scalapack.templates.h b/include/deal.II/lac/scalapack.templates.h
index 1e57ba1d30..cf532069ef 100644
--- a/include/deal.II/lac/scalapack.templates.h
+++ b/include/deal.II/lac/scalapack.templates.h
@@ -24,10 +24,6 @@
 #include
 #include
 
-# ifdef DEAL_II_WITH_HDF5
-#include <hdf5.h>
-# endif
-
 // useful examples:
 // https://stackoverflow.com/questions/14147705/cholesky-decomposition-scalapack-error/14203864
 // http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=139   // second post by Julien Langou
@@ -1492,44 +1488,6 @@ inline void pgels(const char *trans,
   psgels_(trans,m,n,nrhs,A,ia,ja,desca,B,ib,jb,descb,work,lwork,info);
 }
 
-
-# ifdef DEAL_II_WITH_HDF5
-
-template <typename number>
-inline hid_t hdf5_type_id (const number *)
-{
-  Assert (false, dealii::ExcNotImplemented());
-  //don't know what to put here; it does not matter
-  return -1;
-}
-
-inline hid_t hdf5_type_id (const double *)
-{
-  return H5T_NATIVE_DOUBLE;
-}
-
-inline hid_t hdf5_type_id (const float *)
-{
-  return H5T_NATIVE_FLOAT;
-}
-
-inline hid_t hdf5_type_id (const int *)
-{
-  return H5T_NATIVE_INT;
-}
-
-inline hid_t hdf5_type_id (const unsigned int *)
-{
-  return H5T_NATIVE_UINT;
-}
-
-inline hid_t hdf5_type_id (const char *)
-{
-  return H5T_NATIVE_CHAR;
-}
-
-# endif // DEAL_II_WITH_HDF5
-
 #endif // DEAL_II_WITH_SCALAPACK
 
 #endif // dealii_scalapack_templates_h
diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index ffafdf5d76..55d592b31a 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -24,12 +24,50 @@
 #include
 #include
 
-# ifdef DEAL_II_WITH_HDF5
+#ifdef DEAL_II_WITH_HDF5
 #include <hdf5.h>
-# endif
+#endif
 
 DEAL_II_NAMESPACE_OPEN
 
+#ifdef DEAL_II_WITH_HDF5
+
+template <typename number>
+inline hid_t hdf5_type_id (const number *)
+{
+  Assert (false, dealii::ExcNotImplemented());
+  //don't know what to put here; it does not matter
+  return -1;
+}
+
+inline hid_t hdf5_type_id (const double *)
+{
+  return H5T_NATIVE_DOUBLE;
+}
+
+inline hid_t hdf5_type_id (const float *)
+{
+  return H5T_NATIVE_FLOAT;
+}
+
+inline hid_t hdf5_type_id (const int *)
+{
+  return H5T_NATIVE_INT;
+}
+
+inline hid_t hdf5_type_id (const unsigned int *)
+{
+  return H5T_NATIVE_UINT;
+}
+
+inline hid_t hdf5_type_id (const char *)
+{
+  return H5T_NATIVE_CHAR;
+}
+#endif // DEAL_II_WITH_HDF5
+
+
+
 template <typename NumberType>
 ScaLAPACKMatrix<NumberType>::ScaLAPACKMatrix(const size_type n_rows_,
                                              const size_type n_columns_,
@@ -237,11 +275,14 @@ ScaLAPACKMatrix<NumberType>::copy_to(ScaLAPACKMatrix<NumberType> &B,
   //Currently, copying of matrices will only be supported if A and B share the same MPI communicator
   int ierr, comparison;
   ierr = MPI_Comm_compare(grid->mpi_communicator,B.grid->mpi_communicator,&comparison);
+  AssertThrowMPI(ierr);
   Assert (comparison == MPI_IDENT,ExcMessage("Matrix A and B must have a common MPI Communicator"));
 
   /*
    * The routine pgemr2d requires a BLACS context resembling at least the union of process grids
-   * described by the BLACS contexts of matrix A and B
+   * described by the BLACS contexts held by the ProcessGrids of matrix A and B.
+   * As A and B share the same MPI communicator, there is no need to create a union MPI
+   * communicator to initialise the BLACS context.
    */
   int union_blacs_context = Csys2blacs_handle(this->grid->mpi_communicator);
   const char *order = "Col";
@@ -249,7 +290,6 @@
   int union_n_process_columns = 1;
   Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);
 
-
   int n_grid_rows_A,n_grid_columns_A,my_row_A,my_column_A;
   Cblacs_gridinfo(this->grid->blacs_context,&n_grid_rows_A,&n_grid_columns_A,&my_row_A,&my_column_A);
 
@@ -899,14 +939,16 @@ void ScaLAPACKMatrix<NumberType>::save_serial(const char *filename) const
 {
 # ifndef DEAL_II_WITH_HDF5
   (void)filename;
-  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+  Assert(false,ExcInternalError());
 # else
 
   /*
    * The content of the distributed matrix is copied to a matrix using a 1x1 process grid.
    * Therefore, one process has all the data and can write it to a file.
+   *
+   * Create a 1x1 column grid which will be used to initialize
+   * an effectively serial ScaLAPACK matrix to gather the contents from the current object.
    */
-  //create a 1x1 column grid with P being the number of MPI processes
   std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,1);
 
   const int MB=n_rows, NB=n_columns;
@@ -963,7 +1005,7 @@ void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename) const
 {
 # ifndef DEAL_II_WITH_HDF5
   (void)filename;
-  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+  Assert(false,ExcInternalError());
 # else
 
   const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
@@ -971,8 +1013,9 @@
   /*
    * The content of the distributed matrix is copied to a matrix using a 1xn_processes process grid.
    * Therefore, the processes hold contiguous chunks of the matrix, which they can write to the file.
-   */
-  //create a 1xP column grid with P being the number of MPI processes
+   *
+   * Create a 1xn_processes column grid.
+   */
   std::shared_ptr<Utilities::MPI::ProcessGrid> column_grid = std::make_shared<Utilities::MPI::ProcessGrid>(this->grid->mpi_communicator,1,n_mpi_processes);
 
   const int MB=n_rows, NB=std::ceil(n_columns/n_mpi_processes);
@@ -1089,7 +1132,7 @@ void ScaLAPACKMatrix<NumberType>::load_serial(const char *filename)
 {
 # ifndef DEAL_II_WITH_HDF5
   (void)filename;
-  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+  Assert(false,ExcInternalError());
 # else
 
   /*
@@ -1169,10 +1212,10 @@ void ScaLAPACKMatrix<NumberType>::load_parallel(const char *filename)
 {
 # ifndef DEAL_II_WITH_HDF5
   (void)filename;
-  AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
+  Assert(false,ExcInternalError());
 # else
 # ifndef H5_HAVE_PARALLEL
-  AssertThrow(false, ExcMessage ("HDF5 was not built with MPI."));
+  Assert(false,ExcInternalError());
 # else
 
   const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(this->grid->mpi_communicator));
diff --git a/tests/scalapack/scalapack_07.cc b/tests/scalapack/scalapack_07.cc
index 19f508613b..83dc712be2 100644
--- a/tests/scalapack/scalapack_07.cc
+++ b/tests/scalapack/scalapack_07.cc
@@ -94,15 +94,15 @@ void test(const unsigned int block_size_i, const unsigned int block_size_j)
   //copying submatrices
   unsigned int sub_size=100;
   std::pair<unsigned int,unsigned int> offset_A = std::make_pair(49,99);
-  std::pair<unsigned int,unsigned int> offset_B = std::make_pair(0,0);
+  std::pair<unsigned int,unsigned int> offset_B = std::make_pair(4,7);
   std::pair<unsigned int,unsigned int> submatrix_size = std::make_pair(sub_size,sub_size);
-  ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size,sub_size,grid_2d,block_size_j,block_size_i);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size+offset_B.first,sub_size+offset_B.second,grid_2d,block_size_j,block_size_i);
 
   scalapack_matrix_2d.copy_to(scalapack_matrix_dest,offset_A,offset_B,submatrix_size);
 
-  FullMatrix<NumberType> dest (sub_size,sub_size);
+  FullMatrix<NumberType> dest (sub_size+offset_B.first,sub_size+offset_B.second);
   scalapack_matrix_dest.copy_to(dest);
 
-  for (unsigned int i=0; i
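Usage sketch (not part of this patch): the following stand-alone program illustrates the save()/load() round trip that the patch adds. It is a sketch under assumptions: deal.II configured with SCALAPACK and HDF5, the Utilities::MPI::ProcessGrid and ScaLAPACKMatrix constructors as used in the files touched above, and the header paths of the deal.II tree of that period, so adjust those if your tree differs; the file name "matrix.h5" is arbitrary.

#include <deal.II/base/mpi.h>
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

using namespace dealii;

int main (int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  const unsigned int size = 200, block_size = 32;

  // process grid sized for the matrix and its block-cyclic distribution
  std::shared_ptr<Utilities::MPI::ProcessGrid> grid =
    std::make_shared<Utilities::MPI::ProcessGrid> (MPI_COMM_WORLD,
                                                   size, size,
                                                   block_size, block_size);

  // fill a serial matrix and distribute it across the grid
  FullMatrix<double> full (size);
  for (unsigned int i = 0; i < size; ++i)
    for (unsigned int j = 0; j < size; ++j)
      full (i, j) = static_cast<double> (i + j);

  ScaLAPACKMatrix<double> A (size, size, grid, block_size, block_size);
  A = full;

  // with MPI-enabled HDF5 this writes with parallel I/O; otherwise the
  // matrix is first gathered to a single process, so it must fit into
  // the memory of that process
  A.save ("matrix.h5");

  // the reading matrix must have the same dimensions as the stored one
  ScaLAPACKMatrix<double> B (size, size, grid, block_size, block_size);
  B.load ("matrix.h5");

  return 0;
}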
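A second sketch, again not part of the patch, shows the four-argument copy_to() that the updated scalapack_07 test exercises: a sub_size x sub_size block of A starting at offset_A is copied into B starting at offset_B, with the same offsets the test now uses. The helper name copy_submatrix_example is made up for this illustration; grid and block_size are as in the previous sketch, whose includes it reuses.

// Hypothetical helper mirroring the updated scalapack_07 test: copy the
// 100x100 block of A that starts at (49,99) into B, placing it at (4,7).
void copy_submatrix_example (const ScaLAPACKMatrix<double> &A,
                             const std::shared_ptr<Utilities::MPI::ProcessGrid> &grid,
                             const unsigned int block_size)
{
  const unsigned int sub_size = 100;
  const std::pair<unsigned int,unsigned int> offset_A (49, 99);
  const std::pair<unsigned int,unsigned int> offset_B (4, 7);
  const std::pair<unsigned int,unsigned int> submatrix_size (sub_size, sub_size);

  // B must be large enough to hold the block at the requested offset; its
  // process grid must stem from the same MPI communicator as A's, which
  // the patched copy_to() now checks via MPI_Comm_compare
  ScaLAPACKMatrix<double> B (sub_size + offset_B.first,
                             sub_size + offset_B.second,
                             grid, block_size, block_size);

  A.copy_to (B, offset_A, offset_B, submatrix_size);
}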