/**
* Stores the distributed matrix in @p filename using HDF5.
+ *
* If deal.II was built without HDF5,
* a call to this function will cause an exception to be thrown.
*
* Internally, the distributed matrix is copied to one process, which
* then does the output. The matrix therefore has to fit into the memory
* of a single process; as a rough estimate, a 100,000 x 100,000 matrix
* of doubles already requires about 80 GB.
+ *
+ * To tweak the I/O performance, especially for parallel I/O, the user may define the optional parameter @p chunk_size.
+ * All MPI processes need to call the function with the same value.
+ * The matrix is written to the file in chunks; the optimal chunk size therefore depends on the properties of the system.
+ * Internally, HDF5 splits the matrix into blocks of <tt>chunk_size.first</tt> x <tt>chunk_size.second</tt> entries,
+ * where <tt>chunk_size.first</tt> is the number of rows of a chunk and <tt>chunk_size.second</tt> the number of columns.
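+ *
+ * As a usage sketch (the matrix setup and the 64 x 64 chunk size below are
+ * illustrative assumptions, not requirements):
+ * @code
+ * // assume a distributed matrix that has already been set up and filled
+ * ScaLAPACKMatrix<double> matrix (n_rows, n_columns, grid, block_size, block_size);
+ * // every MPI process passes the same chunk size
+ * matrix.save ("matrix.h5", std::make_pair (64U, 64U));
+ * @endcode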
*/
- void save(const char *filename) const;
+ void save(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size=std::make_pair(numbers::invalid_unsigned_int,numbers::invalid_unsigned_int)) const;
/**
* Stores the distributed matrix in @p filename
* using serial HDF5 routines.
*/
- void save_serial(const char *filename) const;
+ void save_serial(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size) const;
/*
* Stores the distributed matrix in @p filename
* using parallel HDF5 routines.
*/
- void save_parallel(const char *filename) const;
+ void save_parallel(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size) const;
template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::save(const char *filename) const
+void ScaLAPACKMatrix<NumberType>::save(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size) const
{
#ifndef DEAL_II_WITH_HDF5
(void)filename;
+ (void)chunk_size;
AssertThrow(false, ExcMessage ("HDF5 support is disabled."));
#else
+
+ std::pair<unsigned int,unsigned int> chunks_size_ = chunk_size;
+
+ if (chunks_size_.first==numbers::invalid_unsigned_int || chunks_size_.second==numbers::invalid_unsigned_int)
+ {
+ // default: store the matrix in chunks of columns, i.e. each chunk
+ // holds one full column of the matrix
+ chunks_size_.first = n_rows;
+ chunks_size_.second = 1;
+ }
+
+ // check the chunk size only after the default has been substituted;
+ // otherwise a call with the default argument would trip these assertions
+ Assert((chunks_size_.first <= (unsigned int)n_rows) && (chunks_size_.first>0),ExcIndexRange(chunks_size_.first,1,n_rows+1));
+ Assert((chunks_size_.second <= (unsigned int)n_columns) && (chunks_size_.second>0),ExcIndexRange(chunks_size_.second,1,n_columns+1));
+
# ifdef H5_HAVE_PARALLEL
//implementation for configurations equipped with a parallel file system
- save_parallel(filename);
+ save_parallel(filename,chunks_size_);
# else
//implementation for configurations with no parallel file system
- save_serial(filename);
+ save_serial(filename,chunks_size_);
# endif
#endif
}
template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::save_serial(const char *filename) const
+void ScaLAPACKMatrix<NumberType>::save_serial(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size) const
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
+ (void)chunk_size;
Assert(false,ExcInternalError());
# else
// create a new file using default properties
hid_t file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ // modify dataset creation properties, i.e. enable chunking
+ hsize_t chunk_dims[2];
+ //reverse the order of rows and columns because ScaLAPACK uses column-major ordering
+ chunk_dims[0] = chunk_size.second;
+ chunk_dims[1] = chunk_size.first;
+ hid_t property = H5Pcreate (H5P_DATASET_CREATE);
+ status = H5Pset_chunk (property, 2, chunk_dims);
+ AssertThrow(status >= 0, ExcIO());
+
// create the data space for the dataset
hsize_t dims[2];
//change the order of rows and columns as ScaLAPACKMatrix uses column-major ordering
dims[0] = n_columns;
dims[1] = n_rows;
hid_t dataspace_id = H5Screate_simple(2, dims, nullptr);
- // create the dataset
+ // create the dataset within the file using chunk creation properties
hid_t type_id = hdf5_type_id(&tmp.values[0]);
hid_t dataset_id = H5Dcreate2(file_id, "/matrix",
type_id, dataspace_id,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5P_DEFAULT, property, H5P_DEFAULT);
// write the dataset
status = H5Dwrite(dataset_id, type_id, H5S_ALL, H5S_ALL,
H5P_DEFAULT, &tmp.values[0]);
AssertThrow(status >= 0, ExcIO());
// close the dataset
status = H5Dclose(dataset_id);
AssertThrow(status >= 0, ExcIO());
// close the dataspace
status = H5Sclose(dataspace_id);
AssertThrow(status >= 0, ExcIO());
+ // release the creation property
+ status = H5Pclose (property);
+ AssertThrow(status >= 0, ExcIO());
+
// close the file.
status = H5Fclose(file_id);
AssertThrow(status >= 0, ExcIO());
# endif
}
template <typename NumberType>
-void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename) const
+void ScaLAPACKMatrix<NumberType>::save_parallel(const char *filename,
+ const std::pair<unsigned int,unsigned int> &chunk_size) const
{
# ifndef DEAL_II_WITH_HDF5
(void)filename;
+ (void)chunk_size;
Assert(false,ExcInternalError());
# else
hid_t filespace = H5Screate_simple(2, dims, nullptr);
- // create the dataset with default properties and close filespace
+ // create the chunked dataset with default properties and close filespace
+ hsize_t chunk_dims[2];
+ //reverse the order of rows and columns because ScaLAPACK uses column-major ordering
+ chunk_dims[0] = chunk_size.second;
+ chunk_dims[1] = chunk_size.first;
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ H5Pset_chunk(plist_id, 2, chunk_dims);
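+ // note: the chunked layout is a dataset *creation* property; it has to be
+ // set before H5Dcreate2 is called and cannot be changed afterwards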
hid_t type_id = hdf5_type_id(data);
hid_t dset_id = H5Dcreate2(file_id, "/matrix", type_id,
- filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ filespace, H5P_DEFAULT, plist_id, H5P_DEFAULT);
status = H5Sclose(filespace);
AssertThrow(status >= 0, ExcIO());
+ status = H5Pclose(plist_id);
+ AssertThrow(status >= 0, ExcIO());
// gather the number of local rows and columns from all processes
std::vector<int> proc_n_local_rows(n_mpi_processes), proc_n_local_columns(n_mpi_processes);
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test serial saving and loading of distributed ScaLAPACKMatrices with prescribed chunk sizes
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <cstdio>
+
+
+template <typename NumberType>
+void test(const std::pair<unsigned int,unsigned int> &size, const unsigned int block_size, const std::pair<unsigned int,unsigned int> &chunk_size)
+{
+ const std::string filename ("scalapack_10_b_test.h5");
+
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+ ConditionalOStream pcout (std::cout, (this_mpi_process == 0));
+
+ FullMatrix<NumberType> full(size.first,size.second);
+ create_random(full);
+
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size.first,
+ size.second,block_size,block_size);
+
+ ScaLAPACKMatrix<NumberType> scalapack_matrix(size.first,size.second,grid,block_size,block_size);
+ ScaLAPACKMatrix<NumberType> scalapack_matrix_copy(size.first,size.second,grid,block_size,block_size);
+
+ scalapack_matrix = full;
+ scalapack_matrix.save(filename.c_str(),chunk_size);
+ scalapack_matrix_copy.load(filename.c_str());
+
+ FullMatrix<NumberType> copy(size.first,size.second);
+ scalapack_matrix_copy.copy_to(copy);
+ copy.add(-1,full);
+
+ pcout << size.first << "x" << size.second << " & "
+ << block_size << " & "
+ << chunk_size.first << "x" << chunk_size.second << std::endl;
+ AssertThrow(copy.frobenius_norm()<1e-12,ExcInternalError());
+ std::remove(filename.c_str());
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ std::vector<std::pair<unsigned int,unsigned int>> sizes;
+ sizes.push_back(std::make_pair(100,75));
+ sizes.push_back(std::make_pair(200,225));
+ sizes.push_back(std::make_pair(300,250));
+
+ const std::vector<unsigned int> block_sizes = {1,16,32};
+
+ std::vector<std::pair<unsigned int,unsigned int>> chunk_sizes;
+ chunk_sizes.push_back(std::make_pair(1,1));
+ chunk_sizes.push_back(std::make_pair(10,10));
+ chunk_sizes.push_back(std::make_pair(50,50));
+ chunk_sizes.push_back(std::make_pair(100,75));
+
+ for (unsigned int i=0; i<sizes.size(); ++i)
+ for (unsigned int j=0; j<block_sizes.size(); ++j)
+ for (unsigned int k=0; k<chunk_sizes.size(); ++k)
+ test<double>(sizes[i],block_sizes[j],chunk_sizes[k]);
+
+ for (unsigned int i=0; i<sizes.size(); ++i)
+ for (unsigned int j=0; j<block_sizes.size(); ++j)
+ for (unsigned int k=0; k<chunk_sizes.size(); ++k)
+ test<float>(sizes[i],block_sizes[j],chunk_sizes[k]);
+}
--- /dev/null
+100x75 & 1 & 1x1
+100x75 & 1 & 10x10
+100x75 & 1 & 50x50
+100x75 & 1 & 100x75
+100x75 & 16 & 1x1
+100x75 & 16 & 10x10
+100x75 & 16 & 50x50
+100x75 & 16 & 100x75
+100x75 & 32 & 1x1
+100x75 & 32 & 10x10
+100x75 & 32 & 50x50
+100x75 & 32 & 100x75
+200x225 & 1 & 1x1
+200x225 & 1 & 10x10
+200x225 & 1 & 50x50
+200x225 & 1 & 100x75
+200x225 & 16 & 1x1
+200x225 & 16 & 10x10
+200x225 & 16 & 50x50
+200x225 & 16 & 100x75
+200x225 & 32 & 1x1
+200x225 & 32 & 10x10
+200x225 & 32 & 50x50
+200x225 & 32 & 100x75
+300x250 & 1 & 1x1
+300x250 & 1 & 10x10
+300x250 & 1 & 50x50
+300x250 & 1 & 100x75
+300x250 & 16 & 1x1
+300x250 & 16 & 10x10
+300x250 & 16 & 50x50
+300x250 & 16 & 100x75
+300x250 & 32 & 1x1
+300x250 & 32 & 10x10
+300x250 & 32 & 50x50
+300x250 & 32 & 100x75
+100x75 & 1 & 1x1
+100x75 & 1 & 10x10
+100x75 & 1 & 50x50
+100x75 & 1 & 100x75
+100x75 & 16 & 1x1
+100x75 & 16 & 10x10
+100x75 & 16 & 50x50
+100x75 & 16 & 100x75
+100x75 & 32 & 1x1
+100x75 & 32 & 10x10
+100x75 & 32 & 50x50
+100x75 & 32 & 100x75
+200x225 & 1 & 1x1
+200x225 & 1 & 10x10
+200x225 & 1 & 50x50
+200x225 & 1 & 100x75
+200x225 & 16 & 1x1
+200x225 & 16 & 10x10
+200x225 & 16 & 50x50
+200x225 & 16 & 100x75
+200x225 & 32 & 1x1
+200x225 & 32 & 10x10
+200x225 & 32 & 50x50
+200x225 & 32 & 100x75
+300x250 & 1 & 1x1
+300x250 & 1 & 10x10
+300x250 & 1 & 50x50
+300x250 & 1 & 100x75
+300x250 & 16 & 1x1
+300x250 & 16 & 10x10
+300x250 & 16 & 50x50
+300x250 & 16 & 100x75
+300x250 & 32 & 1x1
+300x250 & 32 & 10x10
+300x250 & 32 & 50x50
+300x250 & 32 & 100x75