From: Marc Fehling
Date: Wed, 2 May 2018 21:47:13 +0000 (-0600)
Subject: Introduce option to force no compression in pack/unpack.
X-Git-Tag: v9.1.0-rc1~1115^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3c9ee750289b76eb183e3ff2222f3d52df1feb87;p=dealii.git

Introduce option to force no compression in pack/unpack.
---

diff --git a/include/deal.II/base/utilities.h b/include/deal.II/base/utilities.h
index ad70c706ac..24525103d6 100644
--- a/include/deal.II/base/utilities.h
+++ b/include/deal.II/base/utilities.h
@@ -436,7 +436,8 @@ namespace Utilities
    * function below.
    *
    * If the library has been compiled with ZLIB enabled, then the output buffer
-   * is compressed.
+   * can be compressed. This can be controlled with the parameter
+   * @p allow_compression, which only has an effect if ZLIB is enabled.
    *
    * If many consecutive calls with the same buffer are considered, it is
    * recommended for reasons of performance to ensure that its capacity is
@@ -445,7 +446,9 @@ namespace Utilities
    * @author Timo Heister, Wolfgang Bangerth, 2017.
    */
   template <typename T>
-  size_t pack (const T &object, std::vector<char> &dest_buffer);
+  size_t pack (const T &object,
+               std::vector<char> &dest_buffer,
+               const bool allow_compression = true);
 
   /**
    * Creates and returns a buffer solely for the given object, using the
@@ -491,11 +494,16 @@ namespace Utilities
    * Same unpack function as above, but takes constant iterators on
    * (a fraction of) a given packed buffer of type std::vector<char> instead.
    *
+   * The @p allow_compression parameter indicates whether the buffer to
+   * read from may have been compressed with ZLIB before, and
+   * only has an effect if ZLIB is enabled.
+   *
    * @author Timo Heister, Wolfgang Bangerth, 2017.
    */
   template <typename T>
-  T unpack (const std::vector<char>::iterator &begin,
-            const std::vector<char>::iterator &end);
+  T unpack (const std::vector<char>::const_iterator &cbegin,
+            const std::vector<char>::const_iterator &cend,
+            const bool allow_compression = true);
 
   /**
    * Given a vector of characters, obtained through a call to the function
@@ -535,12 +543,17 @@ namespace Utilities
    * Same unpack function as above, but takes constant iterators on
    * (a fraction of) a given packed buffer of type std::vector<char> instead.
    *
+   * The @p allow_compression parameter indicates whether the buffer to
+   * read from may have been compressed with ZLIB before, and
+   * only has an effect if ZLIB is enabled.
+   *
    * @author Timo Heister, Wolfgang Bangerth, 2017.
    */
   template <typename T, int N>
-  void unpack (const std::vector<char>::iterator &begin,
-               const std::vector<char>::iterator &end,
-               T (&unpacked_object)[N]);
+  void unpack (const std::vector<char>::const_iterator &cbegin,
+               const std::vector<char>::const_iterator &cend,
+               T (&unpacked_object)[N],
+               const bool allow_compression = true);
 
   /**
    * Convert an object of type `std::unique_ptr` to an object of
@@ -1000,7 +1013,9 @@ namespace Utilities
   // --------------------- non-inline functions
 
   template <typename T>
-  size_t pack (const T &object, std::vector<char> &dest_buffer)
+  size_t pack (const T &object,
+               std::vector<char> &dest_buffer,
+               const bool allow_compression)
   {
     // see if the object is small and copyable via memcpy. if so, use
     // this fast path. otherwise, we have to go through the BOOST
@@ -1030,27 +1045,31 @@ namespace Utilities
         // use buffer as the target of a compressing
         // stream into which we serialize the current object
         const size_t previous_size = dest_buffer.size();
-        {
 #ifdef DEAL_II_WITH_ZLIB
-          boost::iostreams::filtering_ostream out;
-          out.push(boost::iostreams::gzip_compressor
-                   (boost::iostreams::gzip_params
-                    (boost::iostreams::gzip::best_compression)));
-          out.push(boost::iostreams::back_inserter(dest_buffer));
-
-          boost::archive::binary_oarchive archive(out);
-          archive << object;
-          out.flush();
-#else
-          std::ostringstream out;
-          boost::archive::binary_oarchive archive(out);
-          archive << object;
-
-          const std::string &s = out.str();
-          dest_buffer.reserve (dest_buffer.size() + s.size());
-          std::move (s.begin(), s.end(), std::back_inserter(dest_buffer));
+        if (allow_compression)
+          {
+            boost::iostreams::filtering_ostream out;
+            out.push(boost::iostreams::gzip_compressor
+                     (boost::iostreams::gzip_params
+                      (boost::iostreams::gzip::best_compression)));
+            out.push(boost::iostreams::back_inserter(dest_buffer));
+
+            boost::archive::binary_oarchive archive(out);
+            archive << object;
+            out.flush();
+          }
+        else
 #endif
-        }
+          {
+            std::ostringstream out;
+            boost::archive::binary_oarchive archive(out);
+            archive << object;
+
+            const std::string &s = out.str();
+            dest_buffer.reserve (dest_buffer.size() + s.size());
+            std::move (s.begin(), s.end(), std::back_inserter(dest_buffer));
+          }
+
         return (dest_buffer.size() - previous_size);
       }
   }
@@ -1067,7 +1086,8 @@ namespace Utilities
 
   template <typename T>
   T unpack (const std::vector<char>::const_iterator &cbegin,
-            const std::vector<char>::const_iterator &cend)
+            const std::vector<char>::const_iterator &cend,
+            const bool allow_compression)
   {
     // see if the object is small and copyable via memcpy. if so, use
     // this fast path. otherwise, we have to go through the BOOST
@@ -1096,16 +1116,19 @@ namespace Utilities
       T object;
 
       // first decompress the buffer
-      {
 #ifdef DEAL_II_WITH_ZLIB
-        boost::iostreams::filtering_ostream decompressing_stream;
-        decompressing_stream.push(boost::iostreams::gzip_decompressor());
-        decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
-        decompressing_stream.write (&*cbegin, std::distance(cbegin, cend));
-#else
-        decompressed_buffer.assign (cbegin, cend);
+      if (allow_compression)
+        {
+          boost::iostreams::filtering_ostream decompressing_stream;
+          decompressing_stream.push(boost::iostreams::gzip_decompressor());
+          decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+          decompressing_stream.write (&*cbegin, std::distance(cbegin, cend));
+        }
+      else
 #endif
-      }
+        {
+          decompressed_buffer.assign (cbegin, cend);
+        }
 
       // then restore the object from the buffer
       std::istringstream in(decompressed_buffer);
@@ -1127,7 +1150,8 @@ namespace Utilities
   template <typename T, int N>
   void unpack (const std::vector<char>::const_iterator &cbegin,
                const std::vector<char>::const_iterator &cend,
-               T (&unpacked_object)[N])
+               T (&unpacked_object)[N],
+               const bool allow_compression)
   {
     // see if the object is small and copyable via memcpy. if so, use
     // this fast path. otherwise, we have to go through the BOOST
@@ -1153,16 +1177,19 @@ namespace Utilities
       std::string decompressed_buffer;
 
       // first decompress the buffer
-      {
 #ifdef DEAL_II_WITH_ZLIB
-        boost::iostreams::filtering_ostream decompressing_stream;
-        decompressing_stream.push(boost::iostreams::gzip_decompressor());
-        decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
-        decompressing_stream.write (&*cbegin, std::distance(cbegin, cend));
-#else
-        decompressed_buffer.assign (cbegin, cend);
+      if (allow_compression)
+        {
+          boost::iostreams::filtering_ostream decompressing_stream;
+          decompressing_stream.push(boost::iostreams::gzip_decompressor());
+          decompressing_stream.push(boost::iostreams::back_inserter(decompressed_buffer));
+          decompressing_stream.write (&*cbegin, std::distance(cbegin, cend));
+        }
+      else
 #endif
-      }
+        {
+          decompressed_buffer.assign (cbegin, cend);
+        }
 
       // then restore the object from the buffer
       std::istringstream in(decompressed_buffer);
diff --git a/tests/base/utilities_pack_unpack_06.cc b/tests/base/utilities_pack_unpack_06.cc
new file mode 100644
index 0000000000..26c0026052
--- /dev/null
+++ b/tests/base/utilities_pack_unpack_06.cc
@@ -0,0 +1,158 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// tests the 'allow_compression' option of
+// Utilities::pack/unpack
+// (based upon "utilities_pack_unpack_04" and "*_05")
+
+
+#include "../tests.h"
+
+#include <deal.II/base/point.h>
+#include <deal.II/base/utilities.h>
+
+
+using namespace dealii;
+
+
+template <int dim, int N>
+void check (const double (&array)[N], const Point<dim> (&point))
+{
+  // ----- PACK -----
+  std::vector<char> array_compressed, array_uncompressed;
+  std::vector<char> point_compressed, point_uncompressed;
+
+  // default option should work for compression
+  Utilities::pack (array, array_compressed);
+  Utilities::pack (point, point_compressed);
+
+  Utilities::pack (array, array_uncompressed, false);
+  Utilities::pack (point, point_uncompressed, false);
+
+  // check if compression has been invoked by comparing sizes
+  deallog << "check packed array with compression: "
+          << (sizeof(array) > sizeof(char)*array_compressed.size() ? "OK" : "Failed") << std::endl;
+  deallog << "check packed point with compression: "
+          << (sizeof(point) > sizeof(char)*point_compressed.size() ? "OK" : "Failed") << std::endl;
+
+  deallog << "check packed array without compression: "
+          << (sizeof(array) <= sizeof(char)*array_uncompressed.size() ? "OK" : "Failed") << std::endl;
+  deallog << "check packed point without compression: "
+          << (sizeof(point) <= sizeof(char)*point_uncompressed.size() ? "OK" : "Failed") << std::endl;
"OK" : "Failed") << std::endl; + + + + // ----- UNPACK ----- + // default option should work for compression + double unpacked_array_compressed[N]; + Utilities::unpack (array_compressed.cbegin(), array_compressed.cend(), + unpacked_array_compressed); + Point unpacked_point_compressed = + Utilities::unpack> (point_compressed.cbegin(), point_compressed.cend()); + + double unpacked_array_uncompressed[N]; + Utilities::unpack (array_uncompressed.cbegin(), array_uncompressed.cend(), + unpacked_array_uncompressed, false); + Point unpacked_point_uncompressed = + Utilities::unpack> (point_uncompressed.cbegin(), point_uncompressed.cend(), false); + + // check if unpacked results are okay + bool equal_array = true; + for (unsigned int i=0; i forbidden + = Utilities::unpack> (point_compressed.cbegin(), point_compressed.cend(), false); + } + catch (boost::archive::archive_exception) + { + deallog << "unpacking compressed point without decompression failed!" << std::endl; + } + try + { + double forbidden[N]; + Utilities::unpack (array_compressed.cbegin(), array_compressed.cend(), + forbidden, false); + } + catch (boost::archive::archive_exception) + { + deallog << "unpacking compressed array without decompression failed!" << std::endl; + } + + try + { + Point forbidden + = Utilities::unpack> (point_uncompressed.cbegin(), point_uncompressed.cend(), true); + } + catch (boost::archive::archive_exception) + { + deallog << "unpacking uncompressed point with decompression failed!" << std::endl; + } + try + { + double forbidden[N]; + Utilities::unpack (array_uncompressed.cbegin(), array_uncompressed.cend(), + forbidden, true); + } + catch (boost::archive::archive_exception) + { + deallog << "unpacking uncompressed array with decompression failed!" << std::endl; + } +} + + +void test() +{ + // pick large data types and arrays that could be compressed, + // and check for both compression options + const unsigned int N=10000; + Point p2 = random_point(); + double x2[N]; + for (unsigned int i=0; i