From: Timo Heister
Date: Wed, 6 Jul 2022 20:30:28 +0000 (-0400)
Subject: implement write_deal_II_intermediate_in_parallel
X-Git-Tag: v9.5.0-rc1~1100^2~10
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=759d17f00d061b8c9e82370480daea1eaef4d254;p=dealii.git

implement write_deal_II_intermediate_in_parallel
---

diff --git a/include/deal.II/base/data_out_base.h b/include/deal.II/base/data_out_base.h
index 7dc0447392..1db8385a99 100644
--- a/include/deal.II/base/data_out_base.h
+++ b/include/deal.II/base/data_out_base.h
@@ -2343,6 +2343,30 @@ namespace DataOutBase
     const Deal_II_IntermediateFlags &flags,
     std::ostream &                   out);

+  /**
+   * Like write_deal_II_intermediate() but write all patches from all ranks
+   * using MPI I/O into a single file with name @p filename. Compression
+   * using zlib is optional and controlled using @p compression.
+   *
+   * The files typically have the extension .d2_par_patches.
+   */
+  template <int dim, int spacedim>
+  void
+  write_deal_II_intermediate_in_parallel(
+    const std::vector<Patch<dim, spacedim>> &patches,
+    const std::vector<std::string> &         data_names,
+    const std::vector<
+      std::tuple<unsigned int,
+                 unsigned int,
+                 std::string,
+                 DataComponentInterpretation::DataComponentInterpretation>>
+      &                                  nonscalar_data_ranges,
+    const Deal_II_IntermediateFlags &    flags,
+    const std::string &                  filename,
+    const MPI_Comm &                     comm,
+    const VtkFlags::ZlibCompressionLevel compression);
+
   /**
    * Write the data in @p data_filter to a single HDF5 file containing both the
    * mesh and solution values.
@@ -2826,6 +2850,19 @@ public:
   void
   write_deal_II_intermediate(std::ostream &out) const;

+  /**
+   * Obtain data through get_patches() and write it using MPI I/O in parallel
+   * to the file @p filename in the parallel deal.II intermediate format. See
+   * DataOutBase::write_deal_II_intermediate_in_parallel().
+   */
+  void
+  write_deal_II_intermediate_in_parallel(
+    const std::string &                               filename,
+    const MPI_Comm &                                  comm,
+    const DataOutBase::VtkFlags::ZlibCompressionLevel compression) const;
+
   /**
    * Create an XDMFEntry based on the data in the data_filter. This assumes
    * the mesh and solution data were written to a single file.
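
For context, a minimal usage sketch of the new DataOutInterface member function declared above. This is not part of the commit: the DoFHandler, solution vector, function name, and output file name are made-up placeholders, and no_compression is used only because it is the enumerator the implementation below tests against.

#include <deal.II/base/mpi.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/data_out.h>

using namespace dealii;

template <int dim>
void
write_parallel_intermediate(const DoFHandler<dim> &dof_handler,
                            const Vector<double> & solution,
                            const MPI_Comm &       comm)
{
  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(solution, "solution");
  data_out.build_patches();

  // Every rank contributes its own patches; MPI I/O merges them into a
  // single file on disk.
  data_out.write_deal_II_intermediate_in_parallel(
    "solution.d2_par_patches", comm, DataOutBase::VtkFlags::no_compression);
}
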
diff --git a/source/base/data_out_base.cc b/source/base/data_out_base.cc
index 9c13da3b6b..9e566d75a5 100644
--- a/source/base/data_out_base.cc
+++ b/source/base/data_out_base.cc
@@ -43,6 +43,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -50,12 +51,11 @@
 #include
 #include
 #include
-
-// we use std::uint32_t and std::uint8_t below, which are declared here:
-#include <cstdint>
 #include

 #ifdef DEAL_II_WITH_ZLIB
+#  include
+
 #  include <zlib.h>
 #endif
@@ -63,6 +63,9 @@
 #  include
 #endif

+#include
+#include
+
 DEAL_II_NAMESPACE_OPEN

@@ -7540,6 +7543,136 @@ namespace DataOutBase
   }


+  template <int dim, int spacedim>
+  void
+  write_deal_II_intermediate_in_parallel(
+    const std::vector<Patch<dim, spacedim>> &patches,
+    const std::vector<std::string> &         data_names,
+    const std::vector<
+      std::tuple<unsigned int,
+                 unsigned int,
+                 std::string,
+                 DataComponentInterpretation::DataComponentInterpretation>>
+      &                                  nonscalar_data_ranges,
+    const Deal_II_IntermediateFlags &    flags,
+    const std::string &                  filename,
+    const MPI_Comm &                     comm,
+    const VtkFlags::ZlibCompressionLevel compression)
+  {
+#ifndef DEAL_II_WITH_MPI
+
+    (void)comm;
+#else
+    // First generate my data by writing (optionally compressed) data into
+    // my_buffer:
+    std::vector<char> my_buffer;
+    {
+      boost::iostreams::filtering_ostream f;
+
+      if (compression != VtkFlags::no_compression)
+        f.push(boost::iostreams::zlib_compressor());
+
+      boost::iostreams::back_insert_device<std::vector<char>> inserter(
+        my_buffer);
+      f.push(inserter);
+
+      write_deal_II_intermediate(
+        patches, data_names, nonscalar_data_ranges, flags, f);
+    }
+    const std::uint64_t my_size = my_buffer.size();
+
+    const unsigned int  my_rank     = Utilities::MPI::this_mpi_process(comm);
+    const std::uint64_t num_ranks   = Utilities::MPI::n_mpi_processes(comm);
+    const std::uint64_t num_patches = Utilities::MPI::sum(patches.size(), comm);
+
+    struct HeaderType
+    {
+      std::uint64_t magic;
+      std::uint64_t version;
+      std::uint64_t compression;
+      std::uint64_t dimension;
+      std::uint64_t space_dimension;
+      std::uint64_t num_ranks;
+      std::uint64_t num_patches;
+    };
+
+    const HeaderType header{0x00dea111,
+                            Deal_II_IntermediateFlags::format_version,
+                            compression,
+                            dim,
+                            spacedim,
+                            num_ranks,
+                            num_patches};
+
+    // rank 0 also collects and writes the size of the data from each rank in
+    // bytes:
+    std::vector<std::uint64_t> chunk_sizes(num_ranks);
+    int ierr = MPI_Gather(
+      &my_size, 1, MPI_UINT64_T, chunk_sizes.data(), 1, MPI_UINT64_T, 0, comm);
+    AssertThrowMPI(ierr);
+
+    MPI_Info info;
+    ierr = MPI_Info_create(&info);
+    AssertThrowMPI(ierr);
+    MPI_File fh;
+    ierr = MPI_File_open(
+      comm, filename.c_str(), MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
+    AssertThrow(ierr == MPI_SUCCESS, ExcFileNotOpen(filename));
+
+    ierr = MPI_File_set_size(fh, 0); // delete the file contents
+    AssertThrowMPI(ierr);
+    // this barrier is necessary, because otherwise others might already write
+    // while one core is still setting the size to zero.
+    ierr = MPI_Barrier(comm);
+    AssertThrowMPI(ierr);
+    ierr = MPI_Info_free(&info);
+    AssertThrowMPI(ierr);
+
+    // write header
+    if (my_rank == 0)
+      {
+        ierr = Utilities::MPI::LargeCount::File_write_at_c(
+          fh, 0, &header, sizeof(header), MPI_CHAR, MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        ierr = Utilities::MPI::LargeCount::File_write_at_c(fh,
                                                            sizeof(header),
                                                            chunk_sizes.data(),
                                                            chunk_sizes.size(),
                                                            MPI_UINT64_T,
                                                            MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+      }
+
+    // write main part
+    {
+      std::uint64_t prefix_sum = 0;
+      ierr = MPI_Exscan(&my_size, &prefix_sum, 1, MPI_UINT64_T, MPI_SUM, comm);
+      AssertThrowMPI(ierr);
+
+      // Locate specific offset for each processor.
+      const MPI_Offset offset = static_cast<MPI_Offset>(sizeof(header)) +
+                                num_ranks * sizeof(std::uint64_t) + prefix_sum;
+
+      ierr = Utilities::MPI::LargeCount::File_write_at_all_c(
+        fh, offset, my_buffer.data(), my_size, MPI_CHAR, MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
+    }
+
+    // Make sure we sync to disk. As written in the standard,
+    // MPI_File_close() actually already implies a sync but there seems
+    // to be a bug on at least one configuration (running with multiple
+    // nodes using OpenMPI 4.1) that requires it. Without this call, the
+    // footer is sometimes missing.
+    ierr = MPI_File_sync(fh);
+    AssertThrowMPI(ierr);
+
+    ierr = MPI_File_close(&fh);
+    AssertThrowMPI(ierr);
+#endif
+  }
+
+
   std::pair<unsigned int, unsigned int>
   determine_intermediate_format_dimensions(std::istream &input)
@@ -7924,6 +8057,26 @@ DataOutInterface<dim, spacedim>::write_deal_II_intermediate(
 }


+
+template <int dim, int spacedim>
+void
+DataOutInterface<dim, spacedim>::write_deal_II_intermediate_in_parallel(
+  const std::string &                               filename,
+  const MPI_Comm &                                  comm,
+  const DataOutBase::VtkFlags::ZlibCompressionLevel compression) const
+{
+  DataOutBase::write_deal_II_intermediate_in_parallel(
+    get_patches(),
+    get_dataset_names(),
+    get_nonscalar_data_ranges(),
+    deal_II_intermediate_flags,
+    filename,
+    comm,
+    compression);
+}
+
+
+
 template <int dim, int spacedim>
 XDMFEntry
 DataOutInterface<dim, spacedim>::create_xdmf_entry(
diff --git a/source/base/data_out_base.inst.in b/source/base/data_out_base.inst.in
index 9218d409d1..56730ced65 100644
--- a/source/base/data_out_base.inst.in
+++ b/source/base/data_out_base.inst.in
@@ -203,6 +203,22 @@ for (deal_II_dimension : OUTPUT_DIMENSIONS;
       const Deal_II_IntermediateFlags &flags,
       std::ostream &                   out);

+    template void
+    write_deal_II_intermediate_in_parallel(
+      const std::vector<Patch<deal_II_dimension, deal_II_space_dimension>>
+        &                              patches,
+      const std::vector<std::string> &data_names,
+      const std::vector<
+        std::tuple<unsigned int,
+                   unsigned int,
+                   std::string,
+                   DataComponentInterpretation::DataComponentInterpretation>>
+        &                                  nonscalar_data_ranges,
+      const Deal_II_IntermediateFlags &    flags,
+      const std::string &                  filename,
+      const MPI_Comm &                     comm,
+      const VtkFlags::ZlibCompressionLevel compression);
+
     template void
     write_hdf5_parallel(
       const std::vector<Patch<deal_II_dimension, deal_II_space_dimension>>
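
For orientation, a small sketch of how the header of a file produced by the code above could be read back with plain C++ I/O. This is not part of the commit: it assumes the seven-field HeaderType layout shown in the implementation (seven std::uint64_t values, so no padding), assumes the file is read on a machine with the same endianness as the writer, and reuses the hypothetical file name from the usage sketch further up.

#include <cstdint>
#include <fstream>
#include <iostream>
#include <vector>

int main()
{
  // Mirrors the local HeaderType struct used by the writer above.
  struct Header
  {
    std::uint64_t magic, version, compression, dimension, space_dimension,
      num_ranks, num_patches;
  };

  std::ifstream in("solution.d2_par_patches", std::ios::binary);
  Header        header;
  in.read(reinterpret_cast<char *>(&header), sizeof(header));

  if (!in || header.magic != 0x00dea111)
    {
      std::cerr << "Not a parallel deal.II intermediate file.\n";
      return 1;
    }

  // The header is followed by one std::uint64_t per rank holding the size in
  // bytes of that rank's (optionally zlib-compressed) data chunk; the chunks
  // themselves are concatenated directly afterwards.
  std::vector<std::uint64_t> chunk_sizes(header.num_ranks);
  in.read(reinterpret_cast<char *>(chunk_sizes.data()),
          chunk_sizes.size() * sizeof(std::uint64_t));

  std::cout << "dim=" << header.dimension
            << " spacedim=" << header.space_dimension
            << " ranks=" << header.num_ranks
            << " patches=" << header.num_patches << '\n';
}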