From f176784460cbacf8a5e1488d7613264de3dfc442 Mon Sep 17 00:00:00 2001 From: Marc Fehling Date: Fri, 6 Jul 2018 15:11:33 -0600 Subject: [PATCH] Implements variable size serialization. --- include/deal.II/distributed/tria.h | 17 +- source/distributed/tria.cc | 420 +++++++++++++----- tests/mpi/attach_data_02.cc | 2 +- tests/mpi/p4est_save_05.cc | 187 ++++++++ ...st_save_05.with_p4est=true.mpirun=5.output | 66 +++ 5 files changed, 565 insertions(+), 127 deletions(-) create mode 100644 tests/mpi/p4est_save_05.cc create mode 100644 tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index b093783fde..1f09092bf2 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -1032,9 +1032,10 @@ namespace parallel * * The data will be written in a separate file, whose name * consists of the stem @p filename and an attached identifier - * -fixed.data. + * -fixed.data for fixed size data and -variable.data + * for variable size data. * - * All processors write into that file simultaneously via MPIIO. + * All processors write into these files simultaneously via MPIIO. * Each processor's position to write to will be determined * from the provided @p parallel_forest. * @@ -1050,10 +1051,13 @@ namespace parallel * * The data will be read from separate file, whose name * consists of the stem @p filename and an attached identifier - * -fixed.data. The @p n_attached_deserialize parameter - * is required to gather the memory offsets for each callback. + * -fixed.data for fixed size data and -variable.data + * for variable size data. + * The @p n_attached_deserialize_fixed and @p n_attached_deserialize_variable + * parameters are required to gather the memory offsets for each + * callback. * - * All processors read from that file simultaneously via MPIIO. + * All processors read from these files simultaneously via MPIIO. * Each processor's position to read from will be determined * from the provided @p parallel_forest. * @@ -1064,7 +1068,8 @@ namespace parallel load(const typename dealii::internal::p4est::types::forest * parallel_forest, const char * filename, - const unsigned int n_attached_deserialize); + const unsigned int n_attached_deserialize_fixed, + const unsigned int n_attached_deserialize_variable); /** * Clears all containers and associated data, and resets member diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index 2609b936de..48988eeec5 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1788,72 +1788,160 @@ namespace parallel * parallel_forest, const char *filename) const { - Assert(variable_size_data_stored == false, ExcNotImplemented()); + // Large fractions of this function have been copied from + // DataOutInterface::write_vtu_in_parallel. + // TODO: Write general MPIIO interface. 
       Assert(sizes_fixed_cumulative.size() > 0,
              ExcMessage("No data has been packed!"));
 
-      const std::string fname_fixed = std::string(filename) + "_fixed.data";
-
-      // ----- copied -----
-      // from DataOutInterface::write_vtu_in_parallel
-      // TODO: write general MPIIO interface
       const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
 
-      MPI_Info info;
-      int      ierr = MPI_Info_create(&info);
-      AssertThrowMPI(ierr);
-      MPI_File fh;
-      ierr = MPI_File_open(mpi_communicator,
-                           const_cast<char *>(fname_fixed.c_str()),
-                           MPI_MODE_CREATE | MPI_MODE_WRONLY,
-                           info,
-                           &fh);
-      AssertThrowMPI(ierr);
-
-      ierr = MPI_File_set_size(fh, 0); // delete the file contents
-      AssertThrowMPI(ierr);
-      // this barrier is necessary, because otherwise others might already
-      // write while one core is still setting the size to zero.
-      ierr = MPI_Barrier(mpi_communicator);
-      AssertThrowMPI(ierr);
-      ierr = MPI_Info_free(&info);
-      AssertThrowMPI(ierr);
-      // ------------------
-
-      // Check if number of processors is lined up with p4est partitioning.
-      Assert(myrank < parallel_forest->mpisize, ExcInternalError());
-
-      // Write cumulative sizes to file.
-      // Since each processor owns the same information about the data sizes,
-      // it is sufficient to let only the first processor perform this task.
-      if (myrank == 0)
+      //
+      // ---------- Fixed size data ----------
+      //
+      {
+        const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+        MPI_Info info;
+        int      ierr = MPI_Info_create(&info);
+        AssertThrowMPI(ierr);
+
+        MPI_File fh;
+        ierr = MPI_File_open(mpi_communicator,
+                             fname_fixed.c_str(),
+                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
+                             info,
+                             &fh);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_set_size(fh, 0); // delete the file contents
+        AssertThrowMPI(ierr);
+        // this barrier is necessary, because otherwise others might already
+        // write while one core is still setting the size to zero.
+        ierr = MPI_Barrier(mpi_communicator);
+        AssertThrowMPI(ierr);
+        ierr = MPI_Info_free(&info);
+        AssertThrowMPI(ierr);
+        // ------------------
+
+        // Check if number of processors is lined up with p4est partitioning.
+        Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+        // Write cumulative sizes to file.
+        // Since each processor owns the same information about the data sizes,
+        // it is sufficient to let only the first processor perform this task.
+        if (myrank == 0)
+          {
+            ierr = MPI_File_write_at(fh,
+                                     0,
+                                     sizes_fixed_cumulative.data(),
+                                     sizes_fixed_cumulative.size(),
+                                     MPI_UNSIGNED,
+                                     MPI_STATUS_IGNORE);
+            AssertThrowMPI(ierr);
+          }
+
+        // Write packed data to file simultaneously.
+        const unsigned int offset_fixed =
+          sizes_fixed_cumulative.size() * sizeof(unsigned int);
+
+        ierr = MPI_File_write_at(
+          fh,
+          offset_fixed +
+            parallel_forest->global_first_quadrant[myrank] *
+              sizes_fixed_cumulative.back(), // global position in file
+          src_data_fixed.data(),
+          src_data_fixed.size(), // local buffer
+          MPI_CHAR,
+          MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_close(&fh);
+        AssertThrowMPI(ierr);
+      }
+
+      //
+      // ---------- Variable size data ----------
+      //
+      if (variable_size_data_stored)
         {
-          ierr = MPI_File_write_at(fh,
-                                   0,
-                                   sizes_fixed_cumulative.data(),
-                                   sizes_fixed_cumulative.size(),
-                                   MPI_UNSIGNED,
-                                   MPI_STATUS_IGNORE);
+          const std::string fname_variable =
+            std::string(filename) + "_variable.data";
+
+          const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+          MPI_Info info;
+          int      ierr = MPI_Info_create(&info);
           AssertThrowMPI(ierr);
-        }
 
-      // Write packed data to file simultaneously.
- const unsigned int offset = - sizes_fixed_cumulative.size() * sizeof(unsigned int); - - ierr = MPI_File_write_at( - fh, - offset + parallel_forest->global_first_quadrant[myrank] * - sizes_fixed_cumulative.back(), // global position in file - src_data_fixed.data(), - src_data_fixed.size(), // local buffer - MPI_CHAR, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - ierr = MPI_File_close(&fh); - AssertThrowMPI(ierr); + MPI_File fh; + ierr = MPI_File_open(mpi_communicator, + fname_variable.c_str(), + MPI_MODE_CREATE | MPI_MODE_WRONLY, + info, + &fh); + AssertThrowMPI(ierr); + + ierr = MPI_File_set_size(fh, 0); // delete the file contents + AssertThrowMPI(ierr); + // this barrier is necessary, because otherwise others might already + // write while one core is still setting the size to zero. + ierr = MPI_Barrier(mpi_communicator); + AssertThrowMPI(ierr); + ierr = MPI_Info_free(&info); + AssertThrowMPI(ierr); + + // Write sizes of each cell into file simultaneously. + ierr = + MPI_File_write_at(fh, + parallel_forest->global_first_quadrant[myrank] * + sizeof(int), // global position in file + src_sizes_variable.data(), + src_sizes_variable.size(), // local buffer + MPI_INT, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + + const unsigned int offset_variable = + parallel_forest->global_num_quadrants * sizeof(int); + + // Gather size of data in bytes we want to store from this processor. + const unsigned int size_on_proc = src_data_variable.size(); + + // Share information among all processors. + std::vector sizes_on_all_procs(n_procs); + ierr = MPI_Allgather(&size_on_proc, + 1, + MPI_UNSIGNED, + sizes_on_all_procs.data(), + 1, + MPI_UNSIGNED, + mpi_communicator); + AssertThrowMPI(ierr); + + // Generate accumulated sum to get an offset for writing variable + // size data. + std::partial_sum(sizes_on_all_procs.begin(), + sizes_on_all_procs.end(), + sizes_on_all_procs.begin()); + + // Write data consecutively into file. + ierr = MPI_File_write_at( + fh, + offset_variable + + ((myrank == 0) ? + 0 : + sizes_on_all_procs[myrank - 1]), // global position in file + src_data_variable.data(), + src_data_variable.size(), // local buffer + MPI_CHAR, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + + ierr = MPI_File_close(&fh); + AssertThrowMPI(ierr); + } } @@ -1864,68 +1952,152 @@ namespace parallel const typename dealii::internal::p4est::types::forest * parallel_forest, const char * filename, - const unsigned int n_attached_deserialize) + const unsigned int n_attached_deserialize_fixed, + const unsigned int n_attached_deserialize_variable) { + // Large fractions of this function have been copied from + // DataOutInterface::write_vtu_in_parallel. + // TODO: Write general MPIIO interface. + Assert(dest_data_fixed.size() == 0, ExcMessage("Previously loaded data has not been released yet!")); - const std::string fname_fixed = std::string(filename) + "_fixed.data"; + variable_size_data_stored = (n_attached_deserialize_variable > 0); - // ----- copied ----- - // from DataOutInterface::write_vtu_in_parallel - // TODO: write general MPIIO interface const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator); - MPI_Info info; - int ierr = MPI_Info_create(&info); - AssertThrowMPI(ierr); - MPI_File fh; - ierr = MPI_File_open(mpi_communicator, - const_cast(fname_fixed.c_str()), - MPI_MODE_RDONLY, - info, - &fh); - AssertThrowMPI(ierr); - - ierr = MPI_Info_free(&info); - AssertThrowMPI(ierr); - // ------------------ - - // Check if number of processors is lined up with p4est partitioning. 
- Assert(myrank < parallel_forest->mpisize, ExcInternalError()); - - // Read cumulative sizes from file. - // Since all processors need the same information about the data sizes, - // let each of them gain it by reading from the same location in the file. - sizes_fixed_cumulative.resize(1 + n_attached_deserialize); - ierr = MPI_File_read_at(fh, - 0, - sizes_fixed_cumulative.data(), - sizes_fixed_cumulative.size(), - MPI_UNSIGNED, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); + // + // ---------- Fixed size data ---------- + // + { + const std::string fname_fixed = std::string(filename) + "_fixed.data"; + + MPI_Info info; + int ierr = MPI_Info_create(&info); + AssertThrowMPI(ierr); + + MPI_File fh; + ierr = MPI_File_open( + mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh); + AssertThrowMPI(ierr); + + ierr = MPI_Info_free(&info); + AssertThrowMPI(ierr); + + // Check if number of processors is lined up with p4est partitioning. + Assert(myrank < parallel_forest->mpisize, ExcInternalError()); + + // Read cumulative sizes from file. + // Since all processors need the same information about the data sizes, + // let each of them retrieve it by reading from the same location in + // the file. + sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed + + (variable_size_data_stored ? 1 : 0)); + ierr = MPI_File_read_at(fh, + 0, + sizes_fixed_cumulative.data(), + sizes_fixed_cumulative.size(), + MPI_UNSIGNED, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); + + // Allocate sufficient memory. + dest_data_fixed.resize(parallel_forest->local_num_quadrants * + sizes_fixed_cumulative.back()); + + // Read packed data from file simultaneously. + const unsigned int offset = + sizes_fixed_cumulative.size() * sizeof(unsigned int); + + ierr = MPI_File_read_at( + fh, + offset + parallel_forest->global_first_quadrant[myrank] * + sizes_fixed_cumulative.back(), // global position in file + dest_data_fixed.data(), + dest_data_fixed.size(), // local buffer + MPI_CHAR, + MPI_STATUS_IGNORE); + AssertThrowMPI(ierr); - // Allocate sufficient memory. - dest_data_fixed.resize(parallel_forest->local_num_quadrants * - sizes_fixed_cumulative.back()); + ierr = MPI_File_close(&fh); + AssertThrowMPI(ierr); + } + + // + // ---------- Variable size data ---------- + // + if (variable_size_data_stored) + { + const std::string fname_variable = + std::string(filename) + "_variable.data"; + + const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator); + + MPI_Info info; + int ierr = MPI_Info_create(&info); + AssertThrowMPI(ierr); - // Read packed data from file simultaneously. - const unsigned int offset = - sizes_fixed_cumulative.size() * sizeof(unsigned int); - - ierr = MPI_File_read_at( - fh, - offset + parallel_forest->global_first_quadrant[myrank] * - sizes_fixed_cumulative.back(), // global position in file - dest_data_fixed.data(), - dest_data_fixed.size(), // local buffer - MPI_CHAR, - MPI_STATUS_IGNORE); - AssertThrowMPI(ierr); - - ierr = MPI_File_close(&fh); - AssertThrowMPI(ierr); + MPI_File fh; + ierr = MPI_File_open(mpi_communicator, + fname_variable.c_str(), + MPI_MODE_RDONLY, + info, + &fh); + AssertThrowMPI(ierr); + + ierr = MPI_Info_free(&info); + AssertThrowMPI(ierr); + + // Read sizes of all locally owned cells. 
+          dest_sizes_variable.resize(parallel_forest->local_num_quadrants);
+          ierr =
+            MPI_File_read_at(fh,
+                             parallel_forest->global_first_quadrant[myrank] *
+                               sizeof(int),
+                             dest_sizes_variable.data(),
+                             dest_sizes_variable.size(),
+                             MPI_INT,
+                             MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          const unsigned int offset =
+            parallel_forest->global_num_quadrants * sizeof(int);
+
+          const unsigned int size_on_proc =
+            std::accumulate(dest_sizes_variable.begin(),
+                            dest_sizes_variable.end(),
+                            0);
+
+          // share information among all processors
+          std::vector<unsigned int> sizes_on_all_procs(n_procs);
+          ierr = MPI_Allgather(&size_on_proc,
+                               1,
+                               MPI_UNSIGNED,
+                               sizes_on_all_procs.data(),
+                               1,
+                               MPI_UNSIGNED,
+                               mpi_communicator);
+          AssertThrowMPI(ierr);
+
+          // generate accumulated sum
+          std::partial_sum(sizes_on_all_procs.begin(),
+                           sizes_on_all_procs.end(),
+                           sizes_on_all_procs.begin());
+
+          dest_data_variable.resize(size_on_proc);
+          ierr = MPI_File_read_at(fh,
+                                  offset + ((myrank == 0) ?
+                                              0 :
+                                              sizes_on_all_procs[myrank - 1]),
+                                  dest_data_variable.data(),
+                                  dest_data_variable.size(),
+                                  MPI_CHAR,
+                                  MPI_STATUS_IGNORE);
+          AssertThrowMPI(ierr);
+
+          ierr = MPI_File_close(&fh);
+          AssertThrowMPI(ierr);
+        }
     }
@@ -2530,10 +2702,12 @@ namespace parallel
       {
         std::string   fname = std::string(filename) + ".info";
         std::ofstream f(fname.c_str());
-        f << "version nproc n_attached_objs n_coarse_cells" << std::endl
-          << 3 << " "
+        f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_coarse_cells"
+          << std::endl
+          << 4 << " "
           << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
           << cell_attached_data.pack_callbacks_fixed.size() << " "
+          << cell_attached_data.pack_callbacks_variable.size() << " "
           << this->n_cells(0) << std::endl;
       }
 
@@ -2613,25 +2787,28 @@ namespace parallel
                                          connectivity);
       connectivity = nullptr;
 
-      unsigned int version, numcpus, attached_count, n_coarse_cells;
+      unsigned int version, numcpus, attached_count_fixed,
+        attached_count_variable, n_coarse_cells;
       {
         std::string   fname = std::string(filename) + ".info";
         std::ifstream f(fname.c_str());
         AssertThrow(f, ExcIO());
         std::string firstline;
         getline(f, firstline); // skip first line
-        f >> version >> numcpus >> attached_count >> n_coarse_cells;
+        f >> version >> numcpus >> attached_count_fixed >>
+          attached_count_variable >> n_coarse_cells;
       }
 
-      Assert(version == 3,
-             ExcMessage("Incompatible version found in .info file."));
+      AssertThrow(version == 4,
+                  ExcMessage("Incompatible version found in .info file."));
       Assert(this->n_cells(0) == n_coarse_cells,
              ExcMessage("Number of coarse cells differ!"));
 
       // clear all of the callback data, as explained in the documentation of
       // register_data_attach()
-      cell_attached_data.n_attached_data_sets   = 0;
-      cell_attached_data.n_attached_deserialize = attached_count;
+      cell_attached_data.n_attached_data_sets = 0;
+      cell_attached_data.n_attached_deserialize =
+        attached_count_fixed + attached_count_variable;
 
       parallel_forest = dealii::internal::p4est::functions<dim, spacedim>::load_ext(
         filename,
@@ -2665,9 +2842,12 @@ namespace parallel
         }
 
       // load saved data, if any was stored
-      if (attached_count > 0)
+      if (cell_attached_data.n_attached_deserialize > 0)
         {
-          data_transfer.load(parallel_forest, filename, attached_count);
+          data_transfer.load(parallel_forest,
+                             filename,
+                             attached_count_fixed,
+                             attached_count_variable);
 
           data_transfer.unpack_cell_status(local_quadrant_cell_relations);
 
diff --git a/tests/mpi/attach_data_02.cc b/tests/mpi/attach_data_02.cc
index 0c6c4f4fb8..68d02d7602 100644
--- a/tests/mpi/attach_data_02.cc
+++ b/tests/mpi/attach_data_02.cc
@@ -170,7 +170,7 @@ test()
 
   unsigned int handle =
     tr.register_data_attach(pack_function<dim>,
-                            /*packs_variable_size_data=*/true);
+                            /*returns_variable_size_data=*/true);
   deallog << "handle=" << handle << std::endl;
 
   tr.execute_coarsening_and_refinement();
diff --git a/tests/mpi/p4est_save_05.cc b/tests/mpi/p4est_save_05.cc
new file mode 100644
index 0000000000..e45bddb808
--- /dev/null
+++ b/tests/mpi/p4est_save_05.cc
@@ -0,0 +1,187 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// save and load a triangulation with a different number of cpus
+// with variable size data attach
+// this is a combination of tests p4est_save_04 and attach_data_02
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+std::vector<char>
+pack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+    status)
+{
+  static int some_number = 1;
+
+  std::vector<unsigned int> some_vector(some_number);
+  for (unsigned int i = 0; i < some_number; ++i)
+    some_vector[i] = i;
+
+  std::vector<char> buffer;
+  buffer.reserve(some_number * sizeof(unsigned int));
+  for (auto vector_it = some_vector.cbegin(); vector_it != some_vector.cend();
+       ++vector_it)
+    {
+      Utilities::pack(*vector_it, buffer, /*allow_compression=*/false);
+    }
+
+  deallog << "packing cell " << cell->id()
+          << " with data size=" << buffer.size() << " accumulated data="
+          << std::accumulate(some_vector.begin(), some_vector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+
+  ++some_number;
+  return buffer;
+}
+
+
+
+template <int dim>
+void
+unpack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+    status,
+  const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
+{
+  const unsigned int data_in_bytes =
+    std::distance(data_range.begin(), data_range.end());
+
+  std::vector<unsigned int> intdatavector(data_in_bytes / sizeof(unsigned int));
+
+  auto vector_it = intdatavector.begin();
+  auto data_it   = data_range.begin();
+  for (; data_it != data_range.end();
+       ++vector_it, data_it += sizeof(unsigned int))
+    {
+      *vector_it =
+        Utilities::unpack<unsigned int>(data_it,
+                                        data_it + sizeof(unsigned int),
+                                        /*allow_compression=*/false);
+    }
+
+  deallog << "unpacking cell " << cell->id() << " with data size="
+          << std::distance(data_range.begin(), data_range.end())
+          << " accumulated data="
+          << std::accumulate(intdatavector.begin(), intdatavector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+}
+
+
+
+template <int dim>
+void
+test()
+{
+  unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  MPI_Comm     com_all = MPI_COMM_WORLD;
+  MPI_Comm     com_small;
+
+  // split the communicator in proc 0,1,2 and 3,4
+  MPI_Comm_split(com_all, (myid < 3) ? 0 : 1, myid, &com_small);
+
+  // write with small com
+  if (myid < 3)
+    {
+      deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small)
+              << std::endl;
+
+      parallel::distributed::Triangulation<dim> tr(com_small);
+      GridGenerator::subdivided_hyper_cube(tr, 2);
+      tr.refine_global(1);
+
+      typename Triangulation<dim>::active_cell_iterator cell;
+      for (cell = tr.begin_active(); cell != tr.end(); ++cell)
+        {
+          if (cell->is_locally_owned())
+            {
+              if (cell->id().to_string() == "0_1:0")
+                cell->set_refine_flag();
+              else if (cell->parent()->id().to_string() == "3_0:")
+                cell->set_coarsen_flag();
+            }
+        }
+      tr.execute_coarsening_and_refinement();
+
+      unsigned int handle =
+        tr.register_data_attach(pack_function<dim>,
+                                /*returns_variable_size_data=*/true);
+
+      tr.save("file");
+      deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+      deallog << "Checksum: " << tr.get_checksum() << std::endl;
+    }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all)
+          << std::endl;
+
+  {
+    parallel::distributed::Triangulation<dim> tr(com_all);
+
+    GridGenerator::subdivided_hyper_cube(tr, 2);
+    tr.load("file");
+
+    unsigned int handle =
+      tr.register_data_attach(pack_function<dim>,
+                              /*returns_variable_size_data=*/true);
+
+    tr.notify_ready_to_unpack(handle, unpack_function<dim>);
+
+    deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+    deallog << "Checksum: " << tr.get_checksum() << std::endl;
+  }
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  test<2>();
+}
diff --git a/tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output b/tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output
new file mode 100644
index 0000000000..a11cc69425
--- /dev/null
+++ b/tests/mpi/p4est_save_05.with_p4est=true.mpirun=5.output
@@ -0,0 +1,66 @@
+
+DEAL:0::writing with 3
+DEAL:0::packing cell 0_2:00 with data size=4 accumulated data=0
+DEAL:0::packing cell 0_2:01 with data size=8 accumulated data=1
+DEAL:0::packing cell 0_2:02 with data size=12 accumulated data=3
+DEAL:0::packing cell 0_2:03 with data size=16 accumulated data=6
+DEAL:0::packing cell 0_1:1 with data size=20 accumulated data=10
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::reading with 5
+DEAL:0::unpacking cell 0_2:00 with data size=4 accumulated data=0
+DEAL:0::unpacking cell 0_2:01 with data size=8 accumulated data=1
+DEAL:0::unpacking cell 0_2:02 with data size=12 accumulated data=3
+DEAL:0::unpacking cell 0_2:03 with data size=16 accumulated data=6
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::OK
+
+DEAL:1::writing with 3
+DEAL:1::packing cell 0_1:2 with data size=4 accumulated data=0
+DEAL:1::packing cell 0_1:3 with data size=8 accumulated data=1
+DEAL:1::packing cell 1_1:0 with data size=12 accumulated data=3
+DEAL:1::packing cell 1_1:1 with data size=16 accumulated data=6
+DEAL:1::packing cell 1_1:2 with data size=20 accumulated data=10
+DEAL:1::packing cell 1_1:3 with data size=24 accumulated data=15
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+DEAL:1::reading with 5
+DEAL:1::unpacking cell 0_1:1 with data size=20 accumulated data=10
+DEAL:1::unpacking cell 0_1:2 with data size=4 accumulated data=0
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+
+
+DEAL:2::writing with 3
+DEAL:2::packing cell 2_1:0 with data size=4 accumulated data=0
+DEAL:2::packing cell 2_1:1 with data size=8 accumulated data=1
+DEAL:2::packing cell 2_1:2 with data size=12 accumulated data=3
+DEAL:2::packing cell 2_1:3 with data size=16 accumulated data=6
+DEAL:2::packing cell 3_0: with data size=20 accumulated data=10
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+DEAL:2::reading with 5
+DEAL:2::unpacking cell 0_1:3 with data size=8 accumulated data=1
+DEAL:2::unpacking cell 1_1:0 with data size=12 accumulated data=3
+DEAL:2::unpacking cell 1_1:1 with data size=16 accumulated data=6
+DEAL:2::unpacking cell 1_1:2 with data size=20 accumulated data=10
+DEAL:2::unpacking cell 1_1:3 with data size=24 accumulated data=15
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+
+
+DEAL:3::reading with 5
+DEAL:3::#cells = 16
+DEAL:3::Checksum: 0
+
+
+DEAL:4::reading with 5
+DEAL:4::unpacking cell 2_1:0 with data size=4 accumulated data=0
+DEAL:4::unpacking cell 2_1:1 with data size=8 accumulated data=1
+DEAL:4::unpacking cell 2_1:2 with data size=12 accumulated data=3
+DEAL:4::unpacking cell 2_1:3 with data size=16 accumulated data=6
+DEAL:4::unpacking cell 3_0: with data size=20 accumulated data=10
+DEAL:4::#cells = 16
+DEAL:4::Checksum: 0
+
-- 
2.39.5
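
Reviewer note (not part of the patch): with this change a checkpoint written by Triangulation::save() consists of the p4est forest file, the "<filename>.info" header, "<filename>_fixed.data", and, if any callback returned variable size data, "<filename>_variable.data". The sketch below mirrors the version-4 .info parsing done in load() above, as a quick orientation for what the new header looks like on disk. The function name and the sample values in the comment are assumptions for illustration only; the values correspond to what the p4est_save_05 test above would write (3 writing ranks, no fixed size callbacks, one variable size callback, 4 coarse cells), not to an actual run.

// Sketch only: reads the version-4 .info header the same way load() does.
#include <fstream>
#include <string>

void
read_info_header(const std::string &filename)
{
  // Example content written by save() for the p4est_save_05 test above:
  //   version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_coarse_cells
  //   4 3 0 1 4
  std::ifstream f(filename + ".info");

  std::string firstline;
  std::getline(f, firstline); // skip the textual header line

  unsigned int version, numcpus, attached_count_fixed, attached_count_variable,
    n_coarse_cells;
  f >> version >> numcpus >> attached_count_fixed >> attached_count_variable >>
    n_coarse_cells;

  // Checkpoints written with this patch applied report version 4; older
  // version-3 checkpoints lack the variable size callback count and are
  // rejected by load().
}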