From: Timo Heister
Date: Fri, 2 Sep 2022 19:18:49 +0000 (-0400)
Subject: support XDMF writing with empty ranks
X-Git-Tag: v9.5.0-rc1~938^2~6
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=31fad920825c5ed82a563d54bcc44038779fc8c4;p=dealii.git

support XDMF writing with empty ranks

followup to #13962 (hdf writing with no cells on rank 0)

fixes #13404
---

diff --git a/source/base/data_out_base.cc b/source/base/data_out_base.cc
index a1c23ca2b2..12ae2678ff 100644
--- a/source/base/data_out_base.cc
+++ b/source/base/data_out_base.cc
@@ -8196,7 +8196,10 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
          ExcMessage("XDMF only supports 2 or 3 space dimensions."));
 
   local_node_cell_count[0] = data_filter.n_nodes();
-  local_node_cell_count[1] = data_filter.n_cells();
+  // n_cells returns an invalid unsigned int if the object is empty:
+  local_node_cell_count[1] =
+    (data_filter.n_nodes() > 0) ? data_filter.n_cells() : 0;
+
   // And compute the global total
 #ifdef DEAL_II_WITH_MPI
@@ -8215,11 +8218,36 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
   global_node_cell_count[1] = local_node_cell_count[1];
 #endif
 
-  // Output the XDMF file only on the root process
-  if (myrank == 0)
+  // The implementation is a bit complicated because we are supposed to
+  // return the correct data on rank 0 and an empty object on all other
+  // ranks, but all the information (for example the attributes) is only
+  // available on ranks that have any cells.
+  // We will identify the smallest rank that has data and then communicate
+  // from this rank to rank 0 (if they are different ranks).
+
+  const bool have_data = (data_filter.n_nodes() > 0);
+  MPI_Comm   split_comm;
+  {
+    const int key   = myrank;
+    const int color = (have_data ? 1 : 0);
+    const int ierr  = MPI_Comm_split(comm, color, key, &split_comm);
+    AssertThrowMPI(ierr);
+  }
+
+  const bool am_i_first_rank_with_data =
+    have_data && (Utilities::MPI::this_mpi_process(split_comm) == 0);
+
+  ierr = MPI_Comm_free(&split_comm);
+  AssertThrowMPI(ierr);
+
+  const int tag = 47381;
+
+  // Output the XDMF file only on the root process of all ranks with data:
+  if (am_i_first_rank_with_data)
     {
       const auto &patches = get_patches();
       Assert(patches.size() > 0, DataOutBase::ExcNoPatches());
 
+      // We currently don't support writing mixed meshes:
 #ifdef DEBUG
       for (const auto &patch : patches)
@@ -8227,8 +8255,7 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
                ExcNotImplemented());
 #endif
 
-
-      XDMFEntry entry(h5_mesh_filename,
+      XDMFEntry entry(h5_mesh_filename,
                       h5_solution_filename,
                       cur_time,
                       global_node_cell_count[0],
@@ -8236,23 +8263,56 @@ DataOutInterface<dim, spacedim>::create_xdmf_entry(
                       dim,
                       spacedim,
                       patches[0].reference_cell);
-      unsigned int n_data_sets = data_filter.n_data_sets();
+      const unsigned int n_data_sets = data_filter.n_data_sets();
 
-      // The vector names generated here must match those generated in the HDF5
-      // file
-      unsigned int i;
-      for (i = 0; i < n_data_sets; ++i)
+      // The vector names generated here must match those generated in
+      // the HDF5 file
+      for (unsigned int i = 0; i < n_data_sets; ++i)
         {
           entry.add_attribute(data_filter.get_data_set_name(i),
                               data_filter.get_data_set_dim(i));
         }
 
-      return entry;
+      if (myrank != 0)
+        {
+          // send to rank 0
+          const std::vector<char> buffer = Utilities::pack(entry, false);
+          ierr = MPI_Send(buffer.data(), buffer.size(), MPI_CHAR, 0, tag, comm);
+          AssertThrowMPI(ierr);
+
+          return {};
+        }
+      else
+        return entry;
     }
-  else
+
+  if (myrank == 0 && !am_i_first_rank_with_data)
     {
-      return {};
+      // receive the XDMF data on rank 0 if we don't have it...
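+      // Neither the sending rank nor the size of the packed buffer is
+      // known here, so first probe for a matching message, then size the
+      // receive buffer from the probe's status, and only then receive from
+      // whichever rank the probe matched: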
+
+      MPI_Status status;
+      int        ierr = MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status);
+      AssertThrowMPI(ierr);
+
+      int len;
+      ierr = MPI_Get_count(&status, MPI_BYTE, &len);
+      AssertThrowMPI(ierr);
+
+      std::vector<char> buffer(len);
+      ierr = MPI_Recv(buffer.data(),
+                      len,
+                      MPI_BYTE,
+                      status.MPI_SOURCE,
+                      tag,
+                      comm,
+                      MPI_STATUS_IGNORE);
+      AssertThrowMPI(ierr);
+
+      return Utilities::unpack<XDMFEntry>(buffer, false);
     }
+
+  // default case for any other rank is to return an empty object
+  return {};
 }
 
 template <int dim, int spacedim>
diff --git a/tests/data_out/data_out_hdf5_03.cc b/tests/data_out/data_out_hdf5_03.cc
index a345f165d8..b732c5b9a7 100644
--- a/tests/data_out/data_out_hdf5_03.cc
+++ b/tests/data_out/data_out_hdf5_03.cc
@@ -13,7 +13,7 @@
 //
 // ---------------------------------------------------------------------
 
-// test parallel DataOut with HDF5
+// test parallel DataOut with HDF5 + xdmf
 //
 // When running with 3 MPI ranks, one of the ranks will have 0
 // cells. This tests a corner case that used to fail because the code
@@ -41,7 +41,7 @@ test()
 {
   parallel::distributed::Triangulation<dim> tria(MPI_COMM_WORLD);
   std::vector<unsigned int>                 repetitions(dim, 1);
-  repetitions[0] = 2; // 2x1x1 cells
+  repetitions[0] = 1; // 1x1x1 cells
   Point<dim> p1;
   Point<dim> p2;
   for (int i = 0; i < dim; ++i)
@@ -65,10 +65,19 @@ test()
   DataOutBase::DataOutFilter data_filter(
     DataOutBase::DataOutFilterFlags(false, false));
   data_out.write_filtered_data(data_filter);
+  deallog << "n_cells on my rank: " << data_filter.n_cells() << std::endl;
   data_out.write_hdf5_parallel(data_filter, "out.h5", MPI_COMM_WORLD);
 
+  std::vector<XDMFEntry> xdmf_entries;
+  xdmf_entries.push_back(
+    data_out.create_xdmf_entry(data_filter, "out.h5", 0, MPI_COMM_WORLD));
+
+  data_out.write_xdmf_file(xdmf_entries, "out.xdmf", MPI_COMM_WORLD);
+
   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
     {
+      cat_file("out.xdmf");
+
       // Sadly hdf5 is binary and we cannot use h5dump because it might
       // not be in the path.
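+      // At least opening the file verifies that it was created: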
       std::ifstream f("out.h5");
diff --git a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
index 1b4b05f75a..001e715fdb 100644
--- a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
+++ b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=2.output
@@ -1,5 +1,63 @@
+DEAL:0::n_cells on my rank: 4
+
+
+
+
+
+
+
+
+
+
+
 DEAL:0::ok
+DEAL:0::n_cells on my rank: 8
+
+
+
+
+
+
+
+
+
+
+
 DEAL:0::ok
+DEAL:1::n_cells on my rank: 4294967295
+DEAL:1::n_cells on my rank: 4294967295
diff --git a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output
index 1b4b05f75a..807a068c37 100644
--- a/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output
+++ b/tests/data_out/data_out_hdf5_03.with_hdf5=true.with_p4est=true.mpirun=3.output
@@ -1,5 +1,67 @@
+DEAL:0::n_cells on my rank: 4294967295
+
+
+
+
+
+
+
+
+
+
+
 DEAL:0::ok
+DEAL:0::n_cells on my rank: 4294967295
+
+
+
+
+
+
+
+
+
+
+
 DEAL:0::ok
+DEAL:1::n_cells on my rank: 4294967295
+DEAL:1::n_cells on my rank: 8
+
+
+DEAL:2::n_cells on my rank: 4
+DEAL:2::n_cells on my rank: 4294967295
diff --git a/tests/serialization/xdmf_entry.cc b/tests/serialization/xdmf_entry.cc
index 80ae61ea60..55464d15ad 100644
--- a/tests/serialization/xdmf_entry.cc
+++ b/tests/serialization/xdmf_entry.cc
@@ -32,8 +32,14 @@ test()
   const unsigned int dim      = 2;
   const unsigned int spacedim = 3;
 
-  XDMFEntry entry1(
-    mesh_filename, solution_filename, time, nodes, cells, dim, spacedim);
+  XDMFEntry entry1(mesh_filename,
+                   solution_filename,
+                   time,
+                   nodes,
+                   cells,
+                   dim,
+                   spacedim,
+                   ReferenceCells::Quadrilateral);
   XDMFEntry entry2;
 
   // save data to archive
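The communicator-splitting idiom introduced in data_out_base.cc above can be illustrated in isolation: rank 0 of the color-1 sub-communicator is, by construction, the smallest global rank that owns any data. The sketch below is illustrative only and not part of the commit; have_data stands in for data_filter.n_nodes() > 0 and is faked so that only even ranks claim data.

    #include <mpi.h>

    #include <cstdio>

    int main(int argc, char **argv)
    {
      MPI_Init(&argc, &argv);

      int myrank;
      MPI_Comm_rank(MPI_COMM_WORLD, &myrank);

      // Stand-in for data_filter.n_nodes() > 0: pretend only even ranks
      // own any nodes.
      const bool have_data = (myrank % 2 == 0);

      // Ranks with data get color 1, empty ranks color 0. Keying by the
      // global rank keeps the sub-communicator ordered by global rank.
      MPI_Comm split_comm;
      MPI_Comm_split(MPI_COMM_WORLD, have_data ? 1 : 0, myrank, &split_comm);

      int split_rank;
      MPI_Comm_rank(split_comm, &split_rank);
      const bool am_i_first_rank_with_data = have_data && (split_rank == 0);
      MPI_Comm_free(&split_comm);

      if (am_i_first_rank_with_data)
        std::printf("rank %d builds (and, if it is not rank 0, sends) the "
                    "XDMF entry\n",
                    myrank);

      MPI_Finalize();
      return 0;
    }

Keying the split by the global rank makes the ordering of the sub-communicator explicit, so "rank 0 within color 1" is exactly the smallest global rank with data.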