--- /dev/null
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2021 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+// Test (de)serialization of Trilinos vectors with checkpointing files
+// larger than 4GB. The test is deliberately scaled down so that it runs
+// quickly by default. This is the hp (variable transfer) version of
+// checkpointing_02.cc.
+
+// Set to true to run a test that generates a roughly 5GB checkpoint file:
+const bool big = false;
+
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/function.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/grid_refinement.h>
+#include <deal.II/distributed/solution_transfer.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_values.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+
+#include <deal.II/hp/fe_collection.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/error_estimator.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include <fstream>
+#include <iostream>
+
+#include "../tests.h"
+
+
+template <int dim>
+class LaplaceProblem
+{
+public:
+ LaplaceProblem();
+
+ void
+ run(unsigned int n_cycles_global, unsigned int n_cycles_adaptive);
+
+private:
+  void
+  setup_system();
+  void
+  refine_grid();
+
+ MPI_Comm mpi_communicator;
+
+ parallel::distributed::Triangulation<dim> triangulation;
+
+ hp::FECollection<dim> fe;
+ DoFHandler<dim> dof_handler;
+
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+};
+
+template <int dim>
+LaplaceProblem<dim>::LaplaceProblem()
+ : mpi_communicator(MPI_COMM_WORLD)
+ , triangulation(
+ mpi_communicator,
+ typename Triangulation<dim>::MeshSmoothing(
+ Triangulation<dim>::smoothing_on_refinement |
+ Triangulation<dim>::smoothing_on_coarsening),
+ parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy)
+ , dof_handler(triangulation)
+{
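+  // Two different elements make this an hp problem. No active FE
+  // indices are assigned explicitly, so all cells default to fe[0],
+  // but an hp DoFHandler already exercises the variable-size data
+  // transfer path that this test targets.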
+ fe.push_back(FE_Q<dim>(2));
+ fe.push_back(FE_Q<dim>(1));
+}
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::setup_system()
+{
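+  // Enumerate DoFs for the FE collection and cache the index sets
+  // needed to set up distributed and ghosted vectors.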
+ dof_handler.distribute_dofs(fe);
+ locally_owned_dofs = dof_handler.locally_owned_dofs();
+ DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+}
+
+template <int dim>
+void
+LaplaceProblem<dim>::refine_grid()
+{
+ // refine into a corner
+ Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
+ for (auto &cell : triangulation.active_cell_iterators())
+ {
+ if (cell->is_locally_owned())
+ estimated_error_per_cell(cell->active_cell_index()) =
+ cell->center().norm() * std::pow(cell->diameter(), 0.5);
+ }
+
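+  // Refine the 20% of cells with the largest indicator and coarsen
+  // none.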
+ parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
+ triangulation, estimated_error_per_cell, 0.20, 0.0);
+
+ triangulation.execute_coarsening_and_refinement();
+}
+
+
+
+template <int dim>
+void
+LaplaceProblem<dim>::run(unsigned int n_cycles_global,
+ unsigned int n_cycles_adaptive)
+{
+ using VectorType = TrilinosWrappers::MPI::Vector;
+
+ for (unsigned int cycle = 0; cycle < n_cycles_adaptive; ++cycle)
+ {
+ deallog << "Cycle " << 1 + cycle << " / " << n_cycles_adaptive << ':'
+ << std::endl;
+
+ if (cycle == 0)
+ {
+ GridGenerator::subdivided_hyper_cube(triangulation, 10);
+ triangulation.refine_global(n_cycles_global);
+ }
+ else
+ refine_grid();
+
+ deallog << "n_global_active_cells: "
+ << triangulation.n_global_active_cells()
+ << " n_global_levels: " << triangulation.n_global_levels()
+ << " ghost_owners.size: " << triangulation.ghost_owners().size()
+ << " level_ghost_owners.size: "
+ << triangulation.level_ghost_owners().size() << std::endl;
+
+ setup_system();
+
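+      // With big == true, 50 attached vectors on the globally refined
+      // mesh push the checkpoint past the 4GB mark; the default of 2
+      // keeps the test fast.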
+ const unsigned int n_vectors = (big) ? 50 : 2;
+ {
+ deallog << "checkpointing..." << std::endl;
+ std::vector<VectorType> vectors(n_vectors);
+ VectorType x(locally_owned_dofs, mpi_communicator);
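+        // Fill vector i with the constant value i so that its content
+        // can be checked after deserialization via the l-infinity
+        // norm.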
+ for (unsigned int i = 0; i < n_vectors; ++i)
+ {
+ vectors[i].reinit(locally_owned_dofs,
+ locally_relevant_dofs,
+ mpi_communicator);
+ vectors[i] = x;
+ x.add(1.0);
+ }
+
+        std::vector<const VectorType *> x_system(n_vectors);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          x_system[i] = &vectors[i];
+
+ VectorType y(locally_owned_dofs,
+ locally_relevant_dofs,
+ mpi_communicator);
+ y = x;
+
+        // Use two separate SolutionTransfer objects to make sure that
+        // the second one receives a large offset into the checkpoint
+        // file.
+ parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+ dof_handler);
+ parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+ dof_handler);
+
+
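+        // In the hp case, the active FE indices have to be prepared
+        // for serialization alongside the attached vectors.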
+ dof_handler.prepare_for_serialization_of_active_fe_indices();
+ system_trans.prepare_for_serialization(x_system);
+ trans2.prepare_for_serialization(y);
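+
+        // save() writes the mesh together with all attached data; with
+        // big == true this is where the >4GB checkpoint file is
+        // created.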
+ triangulation.save("restart.mesh");
+ }
+
+ {
+ deallog << "resume..." << std::endl;
+ std::vector<VectorType> vectors(n_vectors);
+ for (unsigned int i = 0; i < n_vectors; ++i)
+ vectors[i].reinit(locally_owned_dofs, mpi_communicator);
+
+        std::vector<VectorType *> x_system(n_vectors);
+        for (unsigned int i = 0; i < n_vectors; ++i)
+          x_system[i] = &vectors[i];
+
+ VectorType y(locally_owned_dofs, mpi_communicator);
+
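+        // Return to the coarse mesh first: load() expects the same
+        // coarse mesh that save() started from.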
+ triangulation.coarsen_global(99);
+ triangulation.load("restart.mesh");
+
+ parallel::distributed::SolutionTransfer<dim, VectorType> system_trans(
+ dof_handler);
+ parallel::distributed::SolutionTransfer<dim, VectorType> trans2(
+ dof_handler);
+
+ dof_handler.deserialize_active_fe_indices();
+ system_trans.deserialize(x_system);
+ trans2.deserialize(y);
+
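+        // Verify the restored data: vector i must again hold the
+        // constant value i, and y the value n_vectors.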
+ for (unsigned int i = 0; i < n_vectors; ++i)
+ deallog << "vec " << i << ": " << vectors[i].linfty_norm()
+ << std::endl;
+ deallog << "vec y: " << y.linfty_norm() << std::endl;
+ }
+
+ deallog << std::endl;
+ }
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ unsigned int n_cycles_global = (big) ? 3 : 1;
+ unsigned int n_cycles_adaptive = 1;
+
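+  // Limit each MPI process to a single thread.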
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ LaplaceProblem<3> laplace_problem;
+ laplace_problem.run(n_cycles_global, n_cycles_adaptive);
+}