From: Timo Heister
Date: Tue, 31 May 2022 19:53:58 +0000 (-0400)
Subject: add large vtu test
X-Git-Tag: v9.4.0-rc1~89^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=5a5c7a56005faae7c00e1893d100480ef6af7016;p=dealii.git

add large vtu test

Add a test that writes a .vtu file with parallel IO that is larger than 4 GB.
---

diff --git a/tests/distributed_grids/large_vtu_01.cc b/tests/distributed_grids/large_vtu_01.cc
new file mode 100644
index 0000000000..18b6131537
--- /dev/null
+++ b/tests/distributed_grids/large_vtu_01.cc
@@ -0,0 +1,131 @@
+/* ---------------------------------------------------------------------
+ *
+ * Copyright (C) 2021 by the deal.II authors
+ *
+ * This file is part of the deal.II library.
+ *
+ * The deal.II library is free software; you can use it, redistribute
+ * it, and/or modify it under the terms of the GNU Lesser General
+ * Public License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ * The full text of the license can be found in the file LICENSE.md at
+ * the top level directory of deal.II.
+ *
+ * ---------------------------------------------------------------------
+ */
+
+// Test writing of large vtu files with a total file size above 4 GB
+
+// This test runs a tiny version by default. Set the following
+// flag to true to run a test that generates a file larger than 4 GB
+// when running with 2 MPI ranks. The total file size is about
+// 7 GB. Warning: you need quite a bit of RAM and patience to run this.
+const bool run_big = false;
+
+#include <deal.II/base/function_lib.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_dgq.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/numerics/vector_tools.h>
+
+#include "../tests.h"
+
+template <int dim>
+void
+run()
+{
+  MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+  parallel::distributed::Triangulation<dim> triangulation(mpi_communicator);
+
+  GridGenerator::subdivided_hyper_cube(triangulation, 1);
+  unsigned int n_cycles_global = (run_big) ? 4 : 1;
+  triangulation.refine_global(n_cycles_global);
+
+  deallog << "n_global_active_cells: " << triangulation.n_global_active_cells()
+          << " n_global_levels: " << triangulation.n_global_levels()
+          << std::endl;
+
+  const unsigned int n_vectors = 1;
+
+  FE_DGQ<dim>     fe(2);
+  DoFHandler<dim> dof_handler(triangulation);
+  dof_handler.distribute_dofs(fe);
+
+  // Make FE vector
+  IndexSet locally_relevant_dofs;
+  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+
+  using VectorType = LinearAlgebra::distributed::Vector<double>;
+  VectorType global_dof_vector;
+  global_dof_vector.reinit(dof_handler.locally_owned_dofs(),
+                           locally_relevant_dofs,
+                           MPI_COMM_WORLD);
+
+  VectorTools::interpolate(dof_handler,
+                           Functions::SquareFunction<dim>(),
+                           global_dof_vector);
+
+  {
+    DataOut<dim> data_out;
+    data_out.attach_dof_handler(dof_handler);
+    for (unsigned int i = 0; i < n_vectors; ++i)
+      data_out.add_data_vector(
+        dof_handler,
+        global_dof_vector,
+        std::vector<std::string>(1, "data" + Utilities::to_string(i)));
+
+    data_out.build_patches((run_big) ? 42 : 2);
+    data_out.write_vtu_in_parallel("base.vtu", MPI_COMM_WORLD);
+  }
+
+  if (dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    {
+      // Let's print 10 lines from the vtu (exclude the header and the data):
+      std::ifstream in("base.vtu");
+      for (int i = 0; i < 10; ++i)
+        {
+          std::string line;
+          std::getline(in, line);
+          if (line[0] == '<')
+            deallog << line << '\n';
+        }
+    }
+  deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  run<3>();
+}
diff --git a/tests/distributed_grids/large_vtu_01.with_p4est=true.mpirun=4.output b/tests/distributed_grids/large_vtu_01.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..40c58b1ba0
--- /dev/null
+++ b/tests/distributed_grids/large_vtu_01.with_p4est=true.mpirun=4.output
@@ -0,0 +1,12 @@
+
+DEAL:0::n_global_active_cells: 8 n_global_levels: 2
+DEAL:0::
+
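
For reference, the core pattern the test exercises is writing a single .vtu file collectively from all MPI ranks via DataOut::write_vtu_in_parallel(). A minimal sketch of that pattern is given below; it is an illustration only, and the names dof_handler and solution stand in for a DoFHandler and a ghosted solution vector set up as in the test above.

    // Minimal sketch (assumes dof_handler and solution exist as in the test).
    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "solution"); // one output field
    data_out.build_patches(2);                      // subdivide each cell for output
    // All ranks write into the same file using MPI IO:
    data_out.write_vtu_in_parallel("solution.vtu", MPI_COMM_WORLD);

With large patch counts (build_patches(42) in the run_big configuration), the single shared file grows beyond 4 GB, which is what this test is meant to cover.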