From: Rene Gassmoeller
Date: Thu, 31 Jan 2019 19:45:03 +0000 (-0800)
Subject: Fix serialization of variable data attachments
X-Git-Tag: v9.1.0-rc1~382^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1e94813d078896e77e318f8ec92977073e40580e;p=dealii.git

Fix serialization of variable data attachments
---

diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index 99068c23da..5fe3afb1c8 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -2751,6 +2751,7 @@ namespace parallel
           tria->cell_attached_data.n_attached_data_sets = 0;
           tria->cell_attached_data.pack_callbacks_fixed.clear();
+          tria->cell_attached_data.pack_callbacks_variable.clear();
         }
     }
diff --git a/tests/mpi/p4est_save_07.cc b/tests/mpi/p4est_save_07.cc
new file mode 100644
index 0000000000..e409ce4fec
--- /dev/null
+++ b/tests/mpi/p4est_save_07.cc
@@ -0,0 +1,205 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Save and load a triangulation with a different number of CPUs, with
+// variable-size data attached. This is a variation of p4est_save_05,
+// but it saves the triangulation multiple times before loading, to
+// catch a bug where the bookkeeping for attached data was not properly
+// cleared upon save (i.e., multiple saves before a load would produce
+// corrupted checkpoints for variable-size data attachments).
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+std::vector<char>
+pack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+    status)
+{
+  static unsigned int some_number = 1;
+  std::vector<unsigned int> some_vector(some_number);
+  for (unsigned int i = 0; i < some_number; ++i)
+    some_vector[i] = i;
+
+  std::vector<char> buffer;
+  buffer.reserve(some_number * sizeof(unsigned int));
+  for (auto vector_it = some_vector.cbegin(); vector_it != some_vector.cend();
+       ++vector_it)
+    {
+      Utilities::pack(*vector_it, buffer, /*allow_compression=*/false);
+    }
+
+  deallog << "packing cell " << cell->id()
+          << " with data size=" << buffer.size() << " accumulated data="
+          << std::accumulate(some_vector.begin(), some_vector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+
+  ++some_number;
+  return buffer;
+}
+
+
+
+template <int dim>
+void
+unpack_function(
+  const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+    &cell,
+  const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+    status,
+  const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
+{
+  const unsigned int data_in_bytes =
+    std::distance(data_range.begin(), data_range.end());
+
+  std::vector<unsigned int> intdatavector(data_in_bytes /
+                                          sizeof(unsigned int));
+
+  auto vector_it = intdatavector.begin();
+  auto data_it   = data_range.begin();
+  for (; data_it != data_range.end();
+       ++vector_it, data_it += sizeof(unsigned int))
+    {
+      *vector_it =
+        Utilities::unpack<unsigned int>(data_it,
+                                        data_it + sizeof(unsigned int),
+                                        /*allow_compression=*/false);
+    }
+
+  deallog << "unpacking cell " << cell->id() << " with data size="
+          << std::distance(data_range.begin(), data_range.end())
+          << " accumulated data="
+          << std::accumulate(intdatavector.begin(), intdatavector.end(), 0)
+          << std::endl;
+
+  Assert((status ==
+          parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+         ExcInternalError());
+}
+
+
+
+template <int dim>
+void
+save(MPI_Comm com_small, parallel::distributed::Triangulation<dim> &tr)
+{
+  deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small)
+          << std::endl;
+
+  unsigned int handle =
+    tr.register_data_attach(pack_function<dim>,
+                            /*returns_variable_size_data=*/true);
+
+  tr.save("file");
+  deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+  deallog << "Checksum: " << tr.get_checksum() << std::endl;
+}
+
+
+
+template <int dim>
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  MPI_Comm     com_all = MPI_COMM_WORLD;
+  MPI_Comm     com_small;
+
+  // split the communicator into procs 0,1,2 and 3,4
+  MPI_Comm_split(com_all, (myid < 3) ? 0 : 1, myid, &com_small);
+
+  // write with small com
+  if (myid < 3)
+    {
+      parallel::distributed::Triangulation<dim> tr(com_small);
+      GridGenerator::subdivided_hyper_cube(tr, 2);
+      tr.refine_global(1);
+
+      typename Triangulation<dim>::active_cell_iterator cell;
+      for (cell = tr.begin_active(); cell != tr.end(); ++cell)
+        {
+          if (cell->is_locally_owned())
+            {
+              if (cell->id().to_string() == "0_1:0")
+                cell->set_refine_flag();
+              else if (cell->parent()->id().to_string() == "3_0:")
+                cell->set_coarsen_flag();
+            }
+        }
+      tr.execute_coarsening_and_refinement();
+
+      // save twice into the same file, overwriting it the second time
+      save(com_small, tr);
+      save(com_small, tr);
+    }
+
+  MPI_Barrier(MPI_COMM_WORLD);
+
+  deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all)
+          << std::endl;
+
+  {
+    parallel::distributed::Triangulation<dim> tr(com_all);
+
+    GridGenerator::subdivided_hyper_cube(tr, 2);
+    tr.load("file");
+
+    unsigned int handle =
+      tr.register_data_attach(pack_function<dim>,
+                              /*returns_variable_size_data=*/true);
+
+    tr.notify_ready_to_unpack(handle, unpack_function<dim>);
+
+    deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+    deallog << "Checksum: " << tr.get_checksum() << std::endl;
+
+    // now check that saving again succeeds; this verifies that tr holds the
+    // correct number of attached data packs, and is what caught the bug
+    save(com_all, tr);
+  }
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    log;
+
+  test<2>();
+}
diff --git a/tests/mpi/p4est_save_07.with_p4est=true.mpirun=5.output b/tests/mpi/p4est_save_07.with_p4est=true.mpirun=5.output
new file mode 100644
index 0000000000..06bf11bda4
--- /dev/null
+++ b/tests/mpi/p4est_save_07.with_p4est=true.mpirun=5.output
@@ -0,0 +1,122 @@
+
+DEAL:0::writing with 3
+DEAL:0::packing cell 0_2:00 with data size=4 accumulated data=0
+DEAL:0::packing cell 0_2:01 with data size=8 accumulated data=1
+DEAL:0::packing cell 0_2:02 with data size=12 accumulated data=3
+DEAL:0::packing cell 0_2:03 with data size=16 accumulated data=6
+DEAL:0::packing cell 0_1:1 with data size=20 accumulated data=10
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::writing with 3
+DEAL:0::packing cell 0_2:00 with data size=24 accumulated data=15
+DEAL:0::packing cell 0_2:01 with data size=28 accumulated data=21
+DEAL:0::packing cell 0_2:02 with data size=32 accumulated data=28
+DEAL:0::packing cell 0_2:03 with data size=36 accumulated data=36
+DEAL:0::packing cell 0_1:1 with data size=40 accumulated data=45
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::reading with 5
+DEAL:0::unpacking cell 0_2:00 with data size=24 accumulated data=15
+DEAL:0::unpacking cell 0_2:01 with data size=28 accumulated data=21
+DEAL:0::unpacking cell 0_2:02 with data size=32 accumulated data=28
+DEAL:0::unpacking cell 0_2:03 with data size=36 accumulated data=36
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::writing with 5
+DEAL:0::packing cell 0_2:00 with data size=44 accumulated data=55
+DEAL:0::packing cell 0_2:01 with data size=48 accumulated data=66
+DEAL:0::packing cell 0_2:02 with data size=52 accumulated data=78
+DEAL:0::packing cell 0_2:03 with data size=56 accumulated data=91
+DEAL:0::#cells = 16
+DEAL:0::Checksum: 2822439380
+DEAL:0::OK
+
+DEAL:1::writing with 3
+DEAL:1::packing cell 0_1:2 with data size=4 accumulated data=0
+DEAL:1::packing cell 0_1:3 with data size=8 accumulated data=1
+DEAL:1::packing cell 1_1:0 with data size=12 accumulated data=3
+DEAL:1::packing cell 1_1:1 with data size=16 accumulated data=6
+DEAL:1::packing cell 1_1:2 with data size=20 accumulated data=10
+DEAL:1::packing cell 1_1:3 with data size=24 accumulated data=15
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+DEAL:1::writing with 3
+DEAL:1::packing cell 0_1:2 with data size=28 accumulated data=21
+DEAL:1::packing cell 0_1:3 with data size=32 accumulated data=28
+DEAL:1::packing cell 1_1:0 with data size=36 accumulated data=36
+DEAL:1::packing cell 1_1:1 with data size=40 accumulated data=45
+DEAL:1::packing cell 1_1:2 with data size=44 accumulated data=55
+DEAL:1::packing cell 1_1:3 with data size=48 accumulated data=66
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+DEAL:1::reading with 5
+DEAL:1::unpacking cell 0_1:1 with data size=40 accumulated data=45
+DEAL:1::unpacking cell 0_1:2 with data size=28 accumulated data=21
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+DEAL:1::writing with 5
+DEAL:1::packing cell 0_1:1 with data size=52 accumulated data=78
+DEAL:1::packing cell 0_1:2 with data size=56 accumulated data=91
+DEAL:1::#cells = 16
+DEAL:1::Checksum: 0
+
+
+DEAL:2::writing with 3
+DEAL:2::packing cell 2_1:0 with data size=4 accumulated data=0
+DEAL:2::packing cell 2_1:1 with data size=8 accumulated data=1
+DEAL:2::packing cell 2_1:2 with data size=12 accumulated data=3
+DEAL:2::packing cell 2_1:3 with data size=16 accumulated data=6
+DEAL:2::packing cell 3_0: with data size=20 accumulated data=10
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+DEAL:2::writing with 3
+DEAL:2::packing cell 2_1:0 with data size=24 accumulated data=15
+DEAL:2::packing cell 2_1:1 with data size=28 accumulated data=21
+DEAL:2::packing cell 2_1:2 with data size=32 accumulated data=28
+DEAL:2::packing cell 2_1:3 with data size=36 accumulated data=36
+DEAL:2::packing cell 3_0: with data size=40 accumulated data=45
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+DEAL:2::reading with 5
+DEAL:2::unpacking cell 0_1:3 with data size=32 accumulated data=28
+DEAL:2::unpacking cell 1_1:0 with data size=36 accumulated data=36
+DEAL:2::unpacking cell 1_1:1 with data size=40 accumulated data=45
+DEAL:2::unpacking cell 1_1:2 with data size=44 accumulated data=55
+DEAL:2::unpacking cell 1_1:3 with data size=48 accumulated data=66
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+DEAL:2::writing with 5
+DEAL:2::packing cell 0_1:3 with data size=44 accumulated data=55
+DEAL:2::packing cell 1_1:0 with data size=48 accumulated data=66
+DEAL:2::packing cell 1_1:1 with data size=52 accumulated data=78
+DEAL:2::packing cell 1_1:2 with data size=56 accumulated data=91
+DEAL:2::packing cell 1_1:3 with data size=60 accumulated data=105
+DEAL:2::#cells = 16
+DEAL:2::Checksum: 0
+
+
+DEAL:3::reading with 5
+DEAL:3::#cells = 16
+DEAL:3::Checksum: 0
+DEAL:3::writing with 5
+DEAL:3::#cells = 16
+DEAL:3::Checksum: 0
+
+
+DEAL:4::reading with 5
+DEAL:4::unpacking cell 2_1:0 with data size=24 accumulated data=15
+DEAL:4::unpacking cell 2_1:1 with data size=28 accumulated data=21
+DEAL:4::unpacking cell 2_1:2 with data size=32 accumulated data=28
+DEAL:4::unpacking cell 2_1:3 with data size=36 accumulated data=36
+DEAL:4::unpacking cell 3_0: with data size=40 accumulated data=45
+DEAL:4::#cells = 16
+DEAL:4::Checksum: 0
+DEAL:4::writing with 5
+DEAL:4::packing cell 2_1:0 with data size=4 accumulated data=0
+DEAL:4::packing cell 2_1:1 with data size=8 accumulated data=1
+DEAL:4::packing cell 2_1:2 with data size=12 accumulated data=3
+DEAL:4::packing cell 2_1:3 with data size=16 accumulated data=6
+DEAL:4::packing cell 3_0: with data size=20 accumulated data=10
+DEAL:4::#cells = 16
+DEAL:4::Checksum: 0
+
diff --git a/tests/mpi/p4est_save_07.with_petsc=true.mpirun=4.output b/tests/mpi/p4est_save_07.with_petsc=true.mpirun=4.output
new file mode 100644
index 0000000000..bf97dfd8ef
--- /dev/null
+++ b/tests/mpi/p4est_save_07.with_petsc=true.mpirun=4.output
@@ -0,0 +1,10 @@
+
+DEAL:0:2d::hyper_cube
+DEAL:0:2d::#cells = 19
+DEAL:0:2d::cells(0) = 10
+DEAL:0:2d::Checksum: 136119115
+DEAL:0:2d::#cells = 19
+DEAL:0:2d::cells(0) = 10
+DEAL:0:2d::Checksum: 136119115
+DEAL:0:2d::sum: 435.000 870.000
+DEAL:0:2d::OK
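
Editor's note: to make the effect of the one-line change to tria.cc concrete, here is a minimal, self-contained C++ sketch of the bookkeeping involved. It is not deal.II code: CellAttachedData, save(), and the callback types below are simplified stand-ins for the library internals, and the real save() additionally writes the packed data to disk. The point it illustrates is the same as the commit's: save() resets the bookkeeping, but before the fix only the fixed-size callback list was cleared, so a second save() before a load() still held the stale variable-size callbacks and packed their data again.

// Standalone sketch (plain C++) of the bookkeeping bug; all names are
// simplified stand-ins modeled on the deal.II internals touched above.
#include <cassert>
#include <functional>
#include <vector>

struct CellAttachedData
{
  unsigned int n_attached_data_sets = 0;
  std::vector<std::function<void()>> pack_callbacks_fixed;
  std::vector<std::function<void()>> pack_callbacks_variable;
};

// mimics the cleanup at the end of save(); 'with_fix' toggles the added line
void save(CellAttachedData &data, const bool with_fix)
{
  // ... data for all registered callbacks would be written to disk here ...
  data.n_attached_data_sets = 0;
  data.pack_callbacks_fixed.clear();
  if (with_fix)
    data.pack_callbacks_variable.clear(); // the line added by this commit
}

int main()
{
  for (const bool with_fix : {false, true})
    {
      CellAttachedData data;

      // first checkpoint: register one callback of each kind, then save
      data.pack_callbacks_fixed.emplace_back([] {});
      data.pack_callbacks_variable.emplace_back([] {});
      save(data, with_fix);

      // second checkpoint: the caller registers its callbacks again
      data.pack_callbacks_fixed.emplace_back([] {});
      data.pack_callbacks_variable.emplace_back([] {});

      // without the fix, the stale variable-size callback survives the first
      // save, so the second checkpoint packs the variable data twice and its
      // layout no longer matches what notify_ready_to_unpack() expects
      assert(data.pack_callbacks_variable.size() == (with_fix ? 1u : 2u));
    }
}

This is exactly the save-save-load sequence the new test exercises: the corrupted checkpoint only appears on the second save, which is why p4est_save_05 alone never triggered the bug.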
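A second note on the test's serialization pattern: pack_function() and unpack_function() round-trip unsigned ints through a raw byte buffer, packing each value and later walking the byte range in sizeof(unsigned int) strides. The sketch below shows the same byte-level technique in plain C++, with std::memcpy standing in for Utilities::pack()/unpack(); for a trivially copyable type with compression disabled these amount to essentially the same byte copy. It is illustrative only and uses no deal.II API.

// Standalone sketch of the pack/unpack byte round trip used by the test.
#include <cassert>
#include <cstring>
#include <numeric>
#include <vector>

int main()
{
  const std::vector<unsigned int> input = {0, 1, 2, 3, 4};

  // pack: append each value's bytes to a char buffer
  std::vector<char> buffer;
  buffer.reserve(input.size() * sizeof(unsigned int));
  for (const unsigned int value : input)
    {
      const char *bytes = reinterpret_cast<const char *>(&value);
      buffer.insert(buffer.end(), bytes, bytes + sizeof(unsigned int));
    }

  // unpack: walk the byte range in sizeof(unsigned int) strides
  std::vector<unsigned int> output(buffer.size() / sizeof(unsigned int));
  auto data_it = buffer.cbegin();
  for (auto vector_it = output.begin(); vector_it != output.end();
       ++vector_it, data_it += sizeof(unsigned int))
    std::memcpy(&*vector_it, &*data_it, sizeof(unsigned int));

  assert(output == input);
  // matches the "accumulated data" bookkeeping printed by the test
  assert(std::accumulate(output.begin(), output.end(), 0u) == 10u);
}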