From 8d7df273d8db86e1feacab3310012825f6a67bf4 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Thu, 14 Oct 2021 23:05:53 -0400
Subject: [PATCH] fix checkpointing for >4GB files

We incorrectly compute the MPI_Offset for MPI IO when checkpointing with
SolutionTransfer, because the computation is done with 32 bit indices.
This means that files larger than 4GB end up being corrupted.

This manifests in errors like

  An error occurred in line <749> of file <../source/distributed/tria_base.cc> in function
    void dealii::parallel::DistributedTriangulationBase<dim, spacedim>::load_attached_data(unsigned int, unsigned int, unsigned int, const string&, unsigned int, unsigned int) [with int dim = 3; int spacedim = 3; std::string = std::__cxx11::basic_string<char>]
  The violated condition was:
    (cell_rel.second == parallel::DistributedTriangulationBase<dim, spacedim>::CELL_PERSIST)

part of #12752
---
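
The root cause is the usual 32-bit overflow pattern: global_first_cell and
sizes_fixed_cumulative.back() are both 32 bit unsigned values, so their
product is evaluated in 32 bits and wraps around before it is widened to
MPI_Offset. Below is a minimal standalone sketch of the failure mode and of
the fix applied in this patch; the cell and byte counts are made-up numbers,
and std::int64_t stands in for MPI_Offset so that the snippet compiles
without MPI:

  #include <cstdint>
  #include <cstdio>

  int main()
  {
    // Both operands are 32 bit, as in the old code:
    const unsigned int global_first_cell = 9u << 20; // ~9.4e6 cells on previous ranks
    const unsigned int bytes_per_cell    = 512u;     // sizes_fixed_cumulative.back()

    // Old computation: the product is evaluated in 32 bits and wraps
    // around once the byte offset exceeds 4GB; only the already-wrapped
    // result is converted to the 64 bit offset type.
    const std::int64_t bad = global_first_cell * bytes_per_cell;

    // Fixed computation: widen one operand first (the patch uses
    // static_cast<MPI_Offset>), so the multiplication happens in 64 bits.
    const std::int64_t good =
      static_cast<std::int64_t>(global_first_cell) * bytes_per_cell;

    std::printf("bad:  %lld\ngood: %lld\n",
                static_cast<long long>(bad),
                static_cast<long long>(good));
    // bad:  536870912    (wrapped)
    // good: 4831838208   (~4.8GB, the intended offset)
  }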
 source/distributed/tria_base.cc               |  45 ++--
 tests/distributed_grids/checkpointing_02.cc   | 246 ++++++++++++++++++
 ...ting_02.with_trilinos=true.mpirun=2.output |  19 ++
 3 files changed, 291 insertions(+), 19 deletions(-)
 create mode 100644 tests/distributed_grids/checkpointing_02.cc
 create mode 100644 tests/distributed_grids/checkpointing_02.with_trilinos=true.mpirun=2.output

diff --git a/source/distributed/tria_base.cc b/source/distributed/tria_base.cc
index 5ffb512adf..95e9266bc4 100644
--- a/source/distributed/tria_base.cc
+++ b/source/distributed/tria_base.cc
@@ -1441,20 +1441,23 @@ namespace parallel
       }
 
       // Write packed data to file simultaneously.
-      const unsigned int offset_fixed =
+      const MPI_Offset size_header =
         sizes_fixed_cumulative.size() * sizeof(unsigned int);
 
+      // Make sure we do the following computation in 64bit integers to be able
+      // to handle 4GB+ files:
+      const MPI_Offset my_offset =
+        size_header + static_cast<MPI_Offset>(global_first_cell) *
+                        sizes_fixed_cumulative.back();
+
       const char *data = src_data_fixed.data();
 
-      ierr = MPI_File_write_at(
-        fh,
-        offset_fixed +
-          global_first_cell *
-            sizes_fixed_cumulative.back(), // global position in file
-        DEAL_II_MPI_CONST_CAST(data),
-        src_data_fixed.size(), // local buffer
-        MPI_CHAR,
-        MPI_STATUS_IGNORE);
+      ierr = MPI_File_write_at(fh,
+                               my_offset,
+                               DEAL_II_MPI_CONST_CAST(data),
+                               src_data_fixed.size(), // local buffer
+                               MPI_CHAR,
+                               MPI_STATUS_IGNORE);
       AssertThrowMPI(ierr);
 
       ierr = MPI_File_close(&fh);
@@ -1606,17 +1609,21 @@ namespace parallel
         dest_data_fixed.resize(local_num_cells * sizes_fixed_cumulative.back());
 
       // Read packed data from file simultaneously.
-      const unsigned int offset =
+      const MPI_Offset size_header =
         sizes_fixed_cumulative.size() * sizeof(unsigned int);
 
-      ierr = MPI_File_read_at(
-        fh,
-        offset + global_first_cell *
-                   sizes_fixed_cumulative.back(), // global position in file
-        dest_data_fixed.data(),
-        dest_data_fixed.size(), // local buffer
-        MPI_CHAR,
-        MPI_STATUS_IGNORE);
+      // Make sure we do the following computation in 64bit integers to be able
+      // to handle 4GB+ files:
+      const MPI_Offset my_offset =
+        size_header + static_cast<MPI_Offset>(global_first_cell) *
+                        sizes_fixed_cumulative.back();
+
+      ierr = MPI_File_read_at(fh,
+                              my_offset,
+                              dest_data_fixed.data(),
+                              dest_data_fixed.size(), // local buffer
+                              MPI_CHAR,
+                              MPI_STATUS_IGNORE);
       AssertThrowMPI(ierr);
 
       ierr = MPI_File_close(&fh);
diff --git a/tests/distributed_grids/checkpointing_02.cc b/tests/distributed_grids/checkpointing_02.cc
new file mode 100644
index 0000000000..e3704d7ad9
--- /dev/null
+++ b/tests/distributed_grids/checkpointing_02.cc
@@ -0,0 +1,246 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2021 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+// Test (de)serialization of Trilinos vectors with checkpointing files >4GB. The
+// test here is of course simplified to run quickly.
+
+// set to true to run a test that generates a 5GB file:
+const bool big = false;
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iostream>
+
+#include "../tests.h"
+
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+  LaplaceProblem();
+
+  void
+  run(unsigned int n_cycles_global, unsigned int n_cycles_adaptive);
+
+private:
+  void
+  setup_system();
+  void
+  refine_grid();
+  void
+  output_results(const unsigned int cycle) const;
+
+  MPI_Comm mpi_communicator;
+
+  parallel::distributed::Triangulation<dim> triangulation;
+
+  FE_Q<dim>       fe;
+  DoFHandler<dim> dof_handler;
+
+  IndexSet locally_owned_dofs;
+  IndexSet locally_relevant_dofs;
+};
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem()
+  : mpi_communicator(MPI_COMM_WORLD)
+  , triangulation(
+      mpi_communicator,
+      typename Triangulation<dim>::MeshSmoothing(
+        Triangulation<dim>::smoothing_on_refinement |
+        Triangulation<dim>::smoothing_on_coarsening),
+      parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy)
+  , fe(2)
+  , dof_handler(triangulation)
+{}
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::setup_system()
+{
+  dof_handler.distribute_dofs(fe);
+  locally_owned_dofs = dof_handler.locally_owned_dofs();
+  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+}
+
+template <int dim>
+void
+LaplaceProblem<dim>::refine_grid()
+{
+  // refine into a corner
+  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
+  for (auto &cell : triangulation.active_cell_iterators())
+    {
+      if (cell->is_locally_owned())
+        estimated_error_per_cell(cell->active_cell_index()) =
+          cell->center().norm() * std::pow(cell->diameter(), 0.5);
+    }
+
+  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
+    triangulation, estimated_error_per_cell, 0.20, 0.0);
+
+  triangulation.execute_coarsening_and_refinement();
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::run(unsigned int n_cycles_global,
+                         unsigned int n_cycles_adaptive)
+{
+  using VectorType = TrilinosWrappers::MPI::Vector;
+
+  for (unsigned int cycle = 0; cycle < n_cycles_adaptive; ++cycle)
+    {
+      deallog << "Cycle " << 1 + cycle << " / " << n_cycles_adaptive << ':'
+              << std::endl;
+
+      if (cycle == 0)
+        {
+          GridGenerator::subdivided_hyper_cube(triangulation, 10);
+          triangulation.refine_global(n_cycles_global);
+        }
+      else
+        refine_grid();
+
+      deallog << "n_global_active_cells: "
+              << triangulation.n_global_active_cells()
+              << " n_global_levels: " << triangulation.n_global_levels()
+              << " ghost_owners.size: " << triangulation.ghost_owners().size()
+              << " level_ghost_owners.size: "
+              << triangulation.level_ghost_owners().size() << std::endl;
+
+      setup_system();
+
+      const unsigned int n_vectors = (big) ? 50 : 2;
+      {
+        deallog << "checkpointing..." << std::endl;
+        std::vector<VectorType> vectors(n_vectors);
+        VectorType              x(locally_owned_dofs, mpi_communicator);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          {
+            vectors[i].reinit(locally_owned_dofs,
+                              locally_relevant_dofs,
+                              mpi_communicator);
+            vectors[i] = x;
+            x.add(1.0);
+          }
+
+        std::vector<const VectorType *> x_system(n_vectors);
+        int                             i = 0;
+        for (auto &v : x_system)
+          {
+            v = &vectors[i];
+            ++i;
+          }
+
+        VectorType y(locally_owned_dofs,
+                     locally_relevant_dofs,
+                     mpi_communicator);
+        y = x;
+
+        // To be sure, use two SolutionTransfer objects, because the second one
+        // will get a large offset
+        parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+          dof_handler);
+        parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+          dof_handler);
+
+        system_trans.prepare_for_serialization(x_system);
+        trans2.prepare_for_serialization(y);
+        triangulation.save("restart.mesh");
+      }
+
+      {
+        deallog << "resume..." << std::endl;
+        std::vector<VectorType> vectors(n_vectors);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          vectors[i].reinit(locally_owned_dofs, mpi_communicator);
+
+        std::vector<VectorType *> x_system(n_vectors);
+        int                       i = 0;
+        for (auto &v : x_system)
+          {
+            v = &vectors[i];
+            ++i;
+          }
+
+        VectorType y(locally_owned_dofs, mpi_communicator);
+
+        triangulation.coarsen_global(99);
+        triangulation.load("restart.mesh");
+
+        parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+          dof_handler);
+        parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+          dof_handler);
+        system_trans.deserialize(x_system);
+        trans2.deserialize(y);
+
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          deallog << "vec " << i << ": " << vectors[i].linfty_norm()
+                  << std::endl;
+        deallog << "vec y: " << y.linfty_norm() << std::endl;
+      }
+
+      deallog << std::endl;
+    }
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  unsigned int n_cycles_global   = (big) ? 3 : 1;
+  unsigned int n_cycles_adaptive = 1;
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  LaplaceProblem<3> laplace_problem;
+  laplace_problem.run(n_cycles_global, n_cycles_adaptive);
+}
diff --git a/tests/distributed_grids/checkpointing_02.with_trilinos=true.mpirun=2.output b/tests/distributed_grids/checkpointing_02.with_trilinos=true.mpirun=2.output
new file mode 100644
index 0000000000..42abe5c2a7
--- /dev/null
+++ b/tests/distributed_grids/checkpointing_02.with_trilinos=true.mpirun=2.output
@@ -0,0 +1,19 @@
+
+DEAL:0::Cycle 1 / 1:
+DEAL:0::n_global_active_cells: 8000 n_global_levels: 2 ghost_owners.size: 1 level_ghost_owners.size: 1
+DEAL:0::checkpointing...
+DEAL:0::resume...
+DEAL:0::vec 0: 0.00000
+DEAL:0::vec 1: 1.00000
+DEAL:0::vec y: 2.00000
+DEAL:0::
+
+DEAL:1::Cycle 1 / 1:
+DEAL:1::n_global_active_cells: 8000 n_global_levels: 2 ghost_owners.size: 1 level_ghost_owners.size: 1
+DEAL:1::checkpointing...
+DEAL:1::resume...
+DEAL:1::vec 0: 0.00000
+DEAL:1::vec 1: 1.00000
+DEAL:1::vec y: 2.00000
+DEAL:1::
+
-- 
2.39.5