From e3ad28b7ff2b41f3c46716a7572575ab0589f600 Mon Sep 17 00:00:00 2001
From: Timo Heister
Date: Sun, 12 Jun 2022 20:18:45 -0400
Subject: [PATCH] add test

---
 tests/data_out/data_out_hdf5_03.cc            | 91 +++++++++++++++++++
 ..._hdf5=true.with_p4est=true.mpirun=2.output |  5 +
 ..._hdf5=true.with_p4est=true.mpirun=3.output |  5 +
 3 files changed, 101 insertions(+)
 create mode 100644 tests/data_out/data_out_hdf5_03.cc
 create mode 100644 tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
 create mode 100644 tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output

diff --git a/tests/data_out/data_out_hdf5_03.cc b/tests/data_out/data_out_hdf5_03.cc
new file mode 100644
index 0000000000..a345f165d8
--- /dev/null
+++ b/tests/data_out/data_out_hdf5_03.cc
@@ -0,0 +1,91 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// test parallel DataOut with HDF5
+//
+// When running with 3 MPI ranks, one of the ranks will have 0
+// cells. This tests a corner case that used to fail because the code
+// assumed that all ranks have at least one cell.
+
+#include <deal.II/base/mpi.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/vector.h>
+
+#include <deal.II/numerics/data_out.h>
+
+#include <fstream>
+#include <vector>
+
+#include "../tests.h"
+
+template <int dim>
+void
+test()
+{
+  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
+  std::vector<unsigned int> repetitions(dim, 1);
+  repetitions[0] = 2; // 2x1x1 cells
+  Point<dim> p1;
+  Point<dim> p2;
+  for (int i = 0; i < dim; ++i)
+    p2[i] = 1.0;
+  GridGenerator::subdivided_hyper_rectangle(tria, repetitions, p1, p2);
+  tria.refine_global(1);
+
+  FE_Q<dim> fe(1);
+
+  DoFHandler<dim> dof(tria);
+  dof.distribute_dofs(fe);
+
+  Vector<double> v1(dof.n_dofs());
+  for (unsigned int i = 0; i < v1.size(); ++i)
+    v1(i) = i;
+
+  DataOut<dim> data_out;
+  data_out.add_data_vector(dof, v1, "bla");
+  data_out.build_patches(1);
+
+  DataOutBase::DataOutFilter data_filter(
+    DataOutBase::DataOutFilterFlags(false, false));
+  data_out.write_filtered_data(data_filter);
+  data_out.write_hdf5_parallel(data_filter, "out.h5", MPI_COMM_WORLD);
+
+  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+    {
+      // Sadly, HDF5 is binary and we cannot use h5dump because it might
+      // not be in the path.
+      std::ifstream f("out.h5");
+      AssertThrow(f.good(), ExcIO());
+      deallog << "ok" << std::endl;
+    }
+}
+
+
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+
+  test<2>();
+  test<3>();
+}
diff --git a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
new file mode 100644
index 0000000000..1b4b05f75a
--- /dev/null
+++ b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
@@ -0,0 +1,5 @@
+
+DEAL:0::ok
+DEAL:0::ok
+
+
diff --git a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output
new file mode 100644
index 0000000000..1b4b05f75a
--- /dev/null
+++ b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output
@@ -0,0 +1,5 @@
+
+DEAL:0::ok
+DEAL:0::ok
+
+
-- 
2.39.5
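
For readers who want to observe the empty-rank corner case outside the test harness, below is a minimal standalone sketch. It is not part of the patch: the main() driver and its printed output format are this note's own construction, assuming a deal.II build with p4est; it uses only documented deal.II calls that also appear in the test above. It builds the same 2x1 coarse mesh, refines it once, and prints how many active cells each rank owns.

// Standalone sketch (not from the patch): report the per-rank cell count
// for the mesh used in data_out_hdf5_03.cc.
#include <deal.II/base/mpi.h>
#include <deal.II/base/point.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>
#include <vector>

int
main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  constexpr int dim = 2;
  parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);

  // Same 2x1 coarse mesh as in the test, refined once globally
  // (8 active cells in 2d).
  std::vector<unsigned int> repetitions(dim, 1);
  repetitions[0] = 2;
  Point<dim> p1, p2;
  for (int i = 0; i < dim; ++i)
    p2[i] = 1.0;
  GridGenerator::subdivided_hyper_rectangle(tria, repetitions, p1, p2);
  tria.refine_global(1);

  // With 3 ranks, p4est's partition of this mesh can leave one rank
  // without any locally owned cells -- the situation the test covers.
  std::cout << "rank " << Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)
            << ": " << tria.n_locally_owned_active_cells()
            << " locally owned active cells" << std::endl;
}

Running this with mpirun -np 3 should show one rank reporting 0 cells, which is exactly the partition that write_hdf5_parallel() used to mishandle.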