* into a single file with name @p name. Compression using zlib is optional and controlled
* using @p compression.
*
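* For example, using the DataOutInterface wrapper (a sketch, assuming a
* DataOut object @p data_out with patches already built and an MPI
* communicator @p mpi_communicator; this mirrors the test further below):
* @code
*   data_out.write_deal_II_intermediate_in_parallel(
*     "solution.pd2", mpi_communicator, DataOutBase::VtkFlags::no_compression);
* @endcode
*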
- * The files typically have the extension <tt>.d2_par_patches</tt>.
+ * The files typically have the extension <tt>.pd2</tt>.
*/
template <int dim, int spacedim>
void
write_deal_II_intermediate_in_parallel(/* ... */);

void
read(std::istream &in);
+ /**
+ * Read all data previously written using
+ * DataOutBase::write_deal_II_intermediate_in_parallel() from all
+ * MPI ranks into this data structure.
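+ *
+ * A minimal usage sketch (assuming @p solution.pd2 was previously
+ * written by write_deal_II_intermediate_in_parallel()):
+ * @code
+ *   std::ifstream in("solution.pd2", std::ios::binary);
+ *   DataOutReader<2, 2> reader;
+ *   reader.read_whole_parallel_file(in);
+ *   reader.write_vtu(std::cout);
+ * @endcode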
+ */
+ void
+ read_whole_parallel_file(std::istream &in);
+
/**
* This function can be used to merge the patches read by the other object
* into the patches that this present object stores. This is sometimes handy
# include <hdf5.h>
#endif
+#include <boost/iostreams/copy.hpp>
#include <boost/iostreams/device/back_inserter.hpp>
+#include <boost/iostreams/filter/zlib.hpp> // for zlib_decompressor(), if not already included
#include <boost/iostreams/filtering_stream.hpp>
}
}
#endif
+
+
+ /**
+ * The header in binary format that the parallel intermediate files
+ * start with.
+ */
+ struct ParallelIntermediateHeaderType
+ {
+ std::uint64_t magic;           // magic number, always 0x00dea111
+ std::uint64_t version;         // version of the intermediate format
+ std::uint64_t compression;     // nonzero if the per-rank chunks are compressed
+ std::uint64_t dimension;       // the 'dim' of the stored patches
+ std::uint64_t space_dimension; // the 'spacedim' of the stored patches
+ std::uint64_t num_ranks;       // number of MPI ranks that wrote the file
+ std::uint64_t num_patches;     // global number of patches across all ranks
+ };
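+
+ // Because all seven members are std::uint64_t, the struct is laid
+ // out without padding on common ABIs and occupies 56 bytes; this
+ // allows the header to be written and read with a single raw I/O
+ // call of sizeof(header) bytes. Note that the fields are stored in
+ // the byte order of the machine that wrote the file.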
} // namespace
(void)comm;
#else
+
+ // We write a simple format based on the text format of
+ // write_deal_II_intermediate() on each MPI rank. The text format
+ // is quite verbose and we should probably change this to a more
+ // efficient binary representation at some point. The file layout
+ // is as follows:
+ //
+ // 1. A binary header with layout struct
+ // ParallelIntermediateHeaderType.
+ // 2. A list of uint64_t with one value per rank denoting the
+ // compressed size of the chunks of the next step.
+ // 3. The (potentially compressed) chunks as generated by
+ // write_deal_II_intermediate() on each MPI rank.
+
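+ // As an illustration of the layout above (not used by the code
+ // below): the chunk written by rank r starts at byte offset
+ //
+ //   sizeof(ParallelIntermediateHeaderType)
+ //   + num_ranks * sizeof(std::uint64_t)
+ //   + chunk_sizes[0] + ... + chunk_sizes[r-1]
+ //
+ // so a reader can seek directly to the chunk of any rank once it
+ // has read the header and the size list.
+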
// First generate my data by writing (optionally compressed) data into
// my_buffer:
std::vector<char> my_buffer;
const std::uint64_t num_ranks = Utilities::MPI::n_mpi_processes(comm);
const std::uint64_t num_patches = Utilities::MPI::sum(patches.size(), comm);
- struct HeaderType
- {
- std::uint64_t magic;
- std::uint64_t version;
- std::uint64_t compression;
- std::uint64_t dimension;
- std::uint64_t space_dimension;
- std::uint64_t num_ranks;
- std::uint64_t num_patches;
- };
-
- const HeaderType header{0x00dea111,
- Deal_II_IntermediateFlags::format_version,
- compression,
- dim,
- spacedim,
- num_ranks,
- num_patches};
+ const ParallelIntermediateHeaderType header{
+ 0x00dea111,
+ Deal_II_IntermediateFlags::format_version,
+ compression,
+ dim,
+ spacedim,
+ num_ranks,
+ num_patches};
// rank 0 also collects and writes the size of the data from each rank in
// bytes:
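
// One way to collect these sizes (a sketch, not necessarily the exact
// implementation of this patch; Utilities::MPI::gather() is an existing
// deal.II utility that collects one value from each rank on the root):
//
//   const std::uint64_t my_size = my_buffer.size();
//   const std::vector<std::uint64_t> chunk_sizes =
//     Utilities::MPI::gather(comm, my_size, /*root_process=*/0);
//
// Rank 0 then writes the header and this size list; the (possibly
// compressed) per-rank chunks follow in rank order.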
+template <int dim, int spacedim>
+void
+DataOutReader<dim, spacedim>::read_whole_parallel_file(std::istream &in)
+{
+ AssertThrow(in.fail() == false, ExcIO());
+
+ ParallelIntermediateHeaderType header;
+ in.read(reinterpret_cast<char *>(&header), sizeof(header));
+
+ std::vector<std::uint64_t> chunk_sizes(header.num_ranks);
+ in.read(reinterpret_cast<char *>(chunk_sizes.data()),
+ header.num_ranks * sizeof(std::uint64_t));
+
+ for (unsigned int n = 0; n < header.num_ranks; ++n)
+ {
+ // Read the (potentially compressed) chunk of rank n into
+ // temp_buffer, then decompress it into datastream:
+ std::vector<char> temp_buffer(chunk_sizes[n]);
+ in.read(temp_buffer.data(), chunk_sizes[n]);
+
+ boost::iostreams::filtering_istreambuf f;
+ if (header.compression)
+ f.push(boost::iostreams::zlib_decompressor());
+
+ boost::iostreams::basic_array_source<char> source(temp_buffer.data(),
+ temp_buffer.size());
+ f.push(source);
+
+ std::stringstream datastream;
+ boost::iostreams::copy(f, datastream);
+
+ // Now we can load the data and merge this chunk into *this
+ if (n == 0)
+ {
+ read(datastream);
+ }
+ else
+ {
+ DataOutReader<dim, spacedim> temp_reader;
+ temp_reader.read(datastream);
+ merge(temp_reader);
+ }
+ }
+}
+
+
+
template <int dim, int spacedim>
void
DataOutReader<dim, spacedim>::merge(const DataOutReader<dim, spacedim> &source)
//
// ---------------------------------------------------------------------
-// Test DataOut::write_deal_II_intermediate_in_parallel()
+// Test DataOut::write_deal_II_intermediate_in_parallel() and
+// DataOutReader::read_whole_parallel_file()
#include <deal.II/base/mpi.h>
data_out.build_patches();
data_out.write_deal_II_intermediate_in_parallel(
- "test.d2p", MPI_COMM_WORLD, DataOutBase::VtkFlags::no_compression);
+ "test.pd2", MPI_COMM_WORLD, DataOutBase::VtkFlags::no_compression);
const unsigned int my_rank =
dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
if (my_rank == 0)
{
- // can't checksum as the file contains "[written by deal.II
- // 9.5.0-pre]" which would change often, so we just look at the
- // size for now. This will of course fail once we go to
- // 10.0. :-)
- std::ifstream in("test.d2p", std::ifstream::ate | std::ifstream::binary);
+ // Read the data back in and dump it into the deallog:
+ std::ifstream in("test.pd2", std::ios::binary);
Assert(in, dealii::ExcIO());
- deallog << "size: " << in.tellg() << std::endl;
+ DataOutReader<dim, dim> reader;
+ reader.read_whole_parallel_file(in);
+ reader.write_deal_II_intermediate(deallog.get_file_stream());
}
deallog << "OK" << std::endl;
-DEAL:0::size: 1244
+2 2
+[deal.II intermediate format graphics data]
+[written by deal.II x.y.z]
+[Version: 4]
+2
+solution
+cell_data
+8
+[deal.II intermediate Patch<2,2>]
+3
+0.00000 0.00000 0.500000 0.00000 0.00000 0.500000 0.500000 0.500000
+4294967295 1 4294967295 2
+0 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.500000 0.00000 1.00000 0.00000 0.500000 0.500000 1.00000 0.500000
+0 4294967295 4294967295 3
+1 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 1.00000 1.00000 1.00000 1.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.00000 0.500000 0.500000 0.500000 0.00000 1.00000 0.500000 1.00000
+4294967295 3 0 4294967295
+2 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 2.00000 2.00000 2.00000 2.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.500000 0.500000 1.00000 0.500000 0.500000 1.00000 1.00000 1.00000
+2 4294967295 1 4294967295
+3 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 3.00000 3.00000 3.00000 3.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.00000 0.00000 0.500000 0.00000 0.00000 0.500000 0.500000 0.500000
+4294967295 5 4294967295 6
+4 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.500000 0.00000 1.00000 0.00000 0.500000 0.500000 1.00000 0.500000
+4 4294967295 4294967295 7
+5 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 1.00000 1.00000 1.00000 1.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.00000 0.500000 0.500000 0.500000 0.00000 1.00000 0.500000 1.00000
+4294967295 7 4 4294967295
+6 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 2.00000 2.00000 2.00000 2.00000
+
+
+[deal.II intermediate Patch<2,2>]
+3
+0.500000 0.500000 1.00000 0.500000 0.500000 1.00000 1.00000 1.00000
+6 4294967295 5 4294967295
+7 1
+0
+2 4
+0.00000 0.00000 0.00000 0.00000 3.00000 3.00000 3.00000 3.00000
+
+
+0
+
DEAL:0::OK
DEAL:1::OK