* parallel_forest,
const char *filename) const
{
- Assert(variable_size_data_stored == false, ExcNotImplemented());
+ // Large fractions of this function have been copied from
+ // DataOutInterface::write_vtu_in_parallel.
+ // TODO: Write general MPIIO interface.
Assert(sizes_fixed_cumulative.size() > 0,
ExcMessage("No data has been packed!"));
- const std::string fname_fixed = std::string(filename) + "_fixed.data";
-
- // ----- copied -----
- // from DataOutInterface::write_vtu_in_parallel
- // TODO: write general MPIIO interface
const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
- MPI_Info info;
- int ierr = MPI_Info_create(&info);
- AssertThrowMPI(ierr);
- MPI_File fh;
- ierr = MPI_File_open(mpi_communicator,
- const_cast<char *>(fname_fixed.c_str()),
- MPI_MODE_CREATE | MPI_MODE_WRONLY,
- info,
- &fh);
- AssertThrowMPI(ierr);
-
- ierr = MPI_File_set_size(fh, 0); // delete the file contents
- AssertThrowMPI(ierr);
- // this barrier is necessary, because otherwise others might already
- // write while one core is still setting the size to zero.
- ierr = MPI_Barrier(mpi_communicator);
- AssertThrowMPI(ierr);
- ierr = MPI_Info_free(&info);
- AssertThrowMPI(ierr);
- // ------------------
-
- // Check if number of processors is lined up with p4est partitioning.
- Assert(myrank < parallel_forest->mpisize, ExcInternalError());
-
- // Write cumulative sizes to file.
- // Since each processor owns the same information about the data sizes,
- // it is sufficient to let only the first processor perform this task.
- if (myrank == 0)
+ //
+ // ---------- Fixed size data ----------
+ //
+ {
+ const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
+
+ MPI_File fh;
+ ierr = MPI_File_open(mpi_communicator,
+ fname_fixed.c_str(),
+ MPI_MODE_CREATE | MPI_MODE_WRONLY,
+ info,
+ &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_set_size(fh, 0); // delete the file contents
+ AssertThrowMPI(ierr);
+ // This barrier is necessary: otherwise, other ranks might already start
+ // writing while one rank is still setting the file size to zero.
+ ierr = MPI_Barrier(mpi_communicator);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+
+ // Check if number of processors is lined up with p4est partitioning.
+ Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+ // Write cumulative sizes to file.
+ // Since each processor owns the same information about the data sizes,
+ // it is sufficient to let only the first processor perform this task.
+ if (myrank == 0)
+ {
+ ierr = MPI_File_write_at(fh,
+ 0,
+ sizes_fixed_cumulative.data(),
+ sizes_fixed_cumulative.size(),
+ MPI_UNSIGNED,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+
+ // Write packed data to file simultaneously.
+ const unsigned int offset_fixed =
+ sizes_fixed_cumulative.size() * sizeof(unsigned int);
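+ // Every cell contributes exactly sizes_fixed_cumulative.back() bytes,
+ // so each rank can compute its own write position from the global
+ // quadrant numbering without further communication.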
+
+ ierr = MPI_File_write_at(
+ fh,
+ offset_fixed +
+ parallel_forest->global_first_quadrant[myrank] *
+ sizes_fixed_cumulative.back(), // global position in file
+ src_data_fixed.data(),
+ src_data_fixed.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+ }
+
+ //
+ // ---------- Variable size data ----------
+ //
+ if (variable_size_data_stored)
{
- ierr = MPI_File_write_at(fh,
- 0,
- sizes_fixed_cumulative.data(),
- sizes_fixed_cumulative.size(),
- MPI_UNSIGNED,
- MPI_STATUS_IGNORE);
+ const std::string fname_variable =
+ std::string(filename) + "_variable.data";
+
+ const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
AssertThrowMPI(ierr);
- }
- // Write packed data to file simultaneously.
- const unsigned int offset =
- sizes_fixed_cumulative.size() * sizeof(unsigned int);
-
- ierr = MPI_File_write_at(
- fh,
- offset + parallel_forest->global_first_quadrant[myrank] *
- sizes_fixed_cumulative.back(), // global position in file
- src_data_fixed.data(),
- src_data_fixed.size(), // local buffer
- MPI_CHAR,
- MPI_STATUS_IGNORE);
- AssertThrowMPI(ierr);
-
- ierr = MPI_File_close(&fh);
- AssertThrowMPI(ierr);
+ MPI_File fh;
+ ierr = MPI_File_open(mpi_communicator,
+ fname_variable.c_str(),
+ MPI_MODE_CREATE | MPI_MODE_WRONLY,
+ info,
+ &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_set_size(fh, 0); // delete the file contents
+ AssertThrowMPI(ierr);
+ // This barrier is necessary: otherwise, other ranks might already start
+ // writing while one rank is still setting the file size to zero.
+ ierr = MPI_Barrier(mpi_communicator);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+
+ // Write sizes of each cell into file simultaneously.
+ ierr =
+ MPI_File_write_at(fh,
+ parallel_forest->global_first_quadrant[myrank] *
+ sizeof(int), // global position in file
+ src_sizes_variable.data(),
+ src_sizes_variable.size(), // local buffer
+ MPI_INT,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ const unsigned int offset_variable =
+ parallel_forest->global_num_quadrants * sizeof(int);
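+ // The packed variable size data starts right after the table that
+ // stores one size entry per global quadrant.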
+
+ // Gather size of data in bytes we want to store from this processor.
+ const unsigned int size_on_proc = src_data_variable.size();
+
+ // Share information among all processors.
+ std::vector<unsigned int> sizes_on_all_procs(n_procs);
+ ierr = MPI_Allgather(&size_on_proc,
+ 1,
+ MPI_UNSIGNED,
+ sizes_on_all_procs.data(),
+ 1,
+ MPI_UNSIGNED,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ // Generate accumulated sum to get an offset for writing variable
+ // size data.
+ std::partial_sum(sizes_on_all_procs.begin(),
+ sizes_on_all_procs.end(),
+ sizes_on_all_procs.begin());
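+ // After the inclusive prefix sum, sizes_on_all_procs[myrank - 1] equals
+ // the number of bytes written by all lower ranks, i.e. this rank's
+ // offset within the variable size data region; rank 0 starts at zero.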
+
+ // Write data consecutively into file.
+ ierr = MPI_File_write_at(
+ fh,
+ offset_variable +
+ ((myrank == 0) ?
+ 0 :
+ sizes_on_all_procs[myrank - 1]), // global position in file
+ src_data_variable.data(),
+ src_data_variable.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+ }
}
const typename dealii::internal::p4est::types<dim>::forest
* parallel_forest,
const char * filename,
- const unsigned int n_attached_deserialize)
+ const unsigned int n_attached_deserialize_fixed,
+ const unsigned int n_attached_deserialize_variable)
{
+ // Large fractions of this function have been copied from
+ // DataOutInterface::write_vtu_in_parallel.
+ // TODO: Write general MPIIO interface.
+
Assert(dest_data_fixed.size() == 0,
ExcMessage("Previously loaded data has not been released yet!"));
- const std::string fname_fixed = std::string(filename) + "_fixed.data";
+ variable_size_data_stored = (n_attached_deserialize_variable > 0);
- // ----- copied -----
- // from DataOutInterface::write_vtu_in_parallel
- // TODO: write general MPIIO interface
const int myrank = Utilities::MPI::this_mpi_process(mpi_communicator);
- MPI_Info info;
- int ierr = MPI_Info_create(&info);
- AssertThrowMPI(ierr);
- MPI_File fh;
- ierr = MPI_File_open(mpi_communicator,
- const_cast<char *>(fname_fixed.c_str()),
- MPI_MODE_RDONLY,
- info,
- &fh);
- AssertThrowMPI(ierr);
-
- ierr = MPI_Info_free(&info);
- AssertThrowMPI(ierr);
- // ------------------
-
- // Check if number of processors is lined up with p4est partitioning.
- Assert(myrank < parallel_forest->mpisize, ExcInternalError());
-
- // Read cumulative sizes from file.
- // Since all processors need the same information about the data sizes,
- // let each of them gain it by reading from the same location in the file.
- sizes_fixed_cumulative.resize(1 + n_attached_deserialize);
- ierr = MPI_File_read_at(fh,
- 0,
- sizes_fixed_cumulative.data(),
- sizes_fixed_cumulative.size(),
- MPI_UNSIGNED,
- MPI_STATUS_IGNORE);
- AssertThrowMPI(ierr);
+ //
+ // ---------- Fixed size data ----------
+ //
+ {
+ const std::string fname_fixed = std::string(filename) + "_fixed.data";
+
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
+
+ MPI_File fh;
+ ierr = MPI_File_open(
+ mpi_communicator, fname_fixed.c_str(), MPI_MODE_RDONLY, info, &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+
+ // Check if number of processors is lined up with p4est partitioning.
+ Assert(myrank < parallel_forest->mpisize, ExcInternalError());
+
+ // Read cumulative sizes from file.
+ // Since all processors need the same information about the data sizes,
+ // let each of them retrieve it by reading from the same location in
+ // the file.
+ sizes_fixed_cumulative.resize(1 + n_attached_deserialize_fixed +
+ (variable_size_data_stored ? 1 : 0));
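+ // The number of header entries has to match the layout written by
+ // save(): one leading entry, one per fixed size callback, and one
+ // more if variable size data is attached.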
+ ierr = MPI_File_read_at(fh,
+ 0,
+ sizes_fixed_cumulative.data(),
+ sizes_fixed_cumulative.size(),
+ MPI_UNSIGNED,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ // Allocate sufficient memory.
+ dest_data_fixed.resize(parallel_forest->local_num_quadrants *
+ sizes_fixed_cumulative.back());
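+ // Each local quadrant occupies sizes_fixed_cumulative.back() bytes in
+ // the fixed size section, matching the layout written by save().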
+
+ // Read packed data from file simultaneously.
+ const unsigned int offset =
+ sizes_fixed_cumulative.size() * sizeof(unsigned int);
+
+ ierr = MPI_File_read_at(
+ fh,
+ offset + parallel_forest->global_first_quadrant[myrank] *
+ sizes_fixed_cumulative.back(), // global position in file
+ dest_data_fixed.data(),
+ dest_data_fixed.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
- // Allocate sufficient memory.
- dest_data_fixed.resize(parallel_forest->local_num_quadrants *
- sizes_fixed_cumulative.back());
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+ }
+
+ //
+ // ---------- Variable size data ----------
+ //
+ if (variable_size_data_stored)
+ {
+ const std::string fname_variable =
+ std::string(filename) + "_variable.data";
+
+ const int n_procs = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
- // Read packed data from file simultaneously.
- const unsigned int offset =
- sizes_fixed_cumulative.size() * sizeof(unsigned int);
-
- ierr = MPI_File_read_at(
- fh,
- offset + parallel_forest->global_first_quadrant[myrank] *
- sizes_fixed_cumulative.back(), // global position in file
- dest_data_fixed.data(),
- dest_data_fixed.size(), // local buffer
- MPI_CHAR,
- MPI_STATUS_IGNORE);
- AssertThrowMPI(ierr);
-
- ierr = MPI_File_close(&fh);
- AssertThrowMPI(ierr);
+ MPI_File fh;
+ ierr = MPI_File_open(mpi_communicator,
+ fname_variable.c_str(),
+ MPI_MODE_RDONLY,
+ info,
+ &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+
+ // Read sizes of all locally owned cells.
+ dest_sizes_variable.resize(parallel_forest->local_num_quadrants);
+ ierr =
+ MPI_File_read_at(fh,
+ parallel_forest->global_first_quadrant[myrank] *
+ sizeof(int),
+ dest_sizes_variable.data(),
+ dest_sizes_variable.size(),
+ MPI_INT,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ const unsigned int offset =
+ parallel_forest->global_num_quadrants * sizeof(int);
+
+ const unsigned int size_on_proc =
+ std::accumulate(dest_sizes_variable.begin(),
+ dest_sizes_variable.end(),
+ 0);
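+ // The local receive buffer has to hold exactly the sum of the
+ // per-cell sizes just read for this rank.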
+
+ // Share information among all processors.
+ std::vector<unsigned int> sizes_on_all_procs(n_procs);
+ ierr = MPI_Allgather(&size_on_proc,
+ 1,
+ MPI_UNSIGNED,
+ sizes_on_all_procs.data(),
+ 1,
+ MPI_UNSIGNED,
+ mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ // Generate accumulated sum to get an offset for reading variable
+ // size data.
+ std::partial_sum(sizes_on_all_procs.begin(),
+ sizes_on_all_procs.end(),
+ sizes_on_all_procs.begin());
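+ // As on the writing side, sizes_on_all_procs[myrank - 1] now holds
+ // this rank's byte offset into the variable size data region.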
+
+ dest_data_variable.resize(size_on_proc);
+ ierr = MPI_File_read_at(fh,
+ offset + ((myrank == 0) ?
+ 0 :
+ sizes_on_all_procs[myrank - 1]),
+ dest_data_variable.data(),
+ dest_data_variable.size(),
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+ }
}
{
std::string fname = std::string(filename) + ".info";
std::ofstream f(fname.c_str());
- f << "version nproc n_attached_objs n_coarse_cells" << std::endl
- << 3 << " "
+ f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_coarse_cells"
+ << std::endl
+ << 4 << " "
<< Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
<< cell_attached_data.pack_callbacks_fixed.size() << " "
+ << cell_attached_data.pack_callbacks_variable.size() << " "
<< this->n_cells(0) << std::endl;
}
connectivity);
connectivity = nullptr;
- unsigned int version, numcpus, attached_count, n_coarse_cells;
+ unsigned int version, numcpus, attached_count_fixed,
+ attached_count_variable, n_coarse_cells;
{
std::string fname = std::string(filename) + ".info";
std::ifstream f(fname.c_str());
AssertThrow(f, ExcIO());
std::string firstline;
getline(f, firstline); // skip first line
- f >> version >> numcpus >> attached_count >> n_coarse_cells;
+ f >> version >> numcpus >> attached_count_fixed >>
+ attached_count_variable >> n_coarse_cells;
}
- Assert(version == 3,
- ExcMessage("Incompatible version found in .info file."));
+ AssertThrow(version == 4,
+ ExcMessage("Incompatible version found in .info file."));
Assert(this->n_cells(0) == n_coarse_cells,
ExcMessage("Number of coarse cells differ!"));
// clear all of the callback data, as explained in the documentation of
// register_data_attach()
- cell_attached_data.n_attached_data_sets = 0;
- cell_attached_data.n_attached_deserialize = attached_count;
+ cell_attached_data.n_attached_data_sets = 0;
+ cell_attached_data.n_attached_deserialize =
+ attached_count_fixed + attached_count_variable;
parallel_forest = dealii::internal::p4est::functions<dim>::load_ext(
filename,
}
// load saved data, if any was stored
- if (attached_count > 0)
+ if (cell_attached_data.n_attached_deserialize > 0)
{
- data_transfer.load(parallel_forest, filename, attached_count);
+ data_transfer.load(parallel_forest,
+ filename,
+ attached_count_fixed,
+ attached_count_variable);
data_transfer.unpack_cell_status(local_quadrant_cell_relations);
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// save and load a triangulation with a different number of cpus
+// with variable size data attached
+// this is a combination of tests p4est_save_04 and attach_data_02
+
+#include <deal.II/base/tensor.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+
+#include "../tests.h"
+
+
+
+template <int dim>
+std::vector<char>
+pack_function(
+ const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+ &cell,
+ const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+ status)
+{
+ static int some_number = 1;
+ std::vector<unsigned int> some_vector(some_number);
+ for (unsigned int i = 0; i < some_number; ++i)
+ some_vector[i] = i;
+
+ std::vector<char> buffer;
+ buffer.reserve(some_number * sizeof(unsigned int));
+ for (auto vector_it = some_vector.cbegin(); vector_it != some_vector.cend();
+ ++vector_it)
+ {
+ Utilities::pack(*vector_it, buffer, /*allow_compression=*/false);
+ }
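+ // Note: this test relies on Utilities::pack() appending exactly
+ // sizeof(unsigned int) bytes per value when compression is disabled;
+ // unpack_function() below walks the byte range in the same increments.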
+
+ deallog << "packing cell " << cell->id()
+ << " with data size=" << buffer.size() << " accumulated data="
+ << std::accumulate(some_vector.begin(), some_vector.end(), 0)
+ << std::endl;
+
+ Assert((status ==
+ parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+ ExcInternalError());
+
+ ++some_number;
+ return buffer;
+}
+
+
+
+template <int dim>
+void
+unpack_function(
+ const typename parallel::distributed::Triangulation<dim, dim>::cell_iterator
+ &cell,
+ const typename parallel::distributed::Triangulation<dim, dim>::CellStatus
+ status,
+ const boost::iterator_range<std::vector<char>::const_iterator> &data_range)
+{
+ const unsigned int data_in_bytes =
+ std::distance(data_range.begin(), data_range.end());
+
+ std::vector<unsigned int> intdatavector(data_in_bytes / sizeof(unsigned int));
+
+ auto vector_it = intdatavector.begin();
+ auto data_it = data_range.begin();
+ for (; data_it != data_range.end();
+ ++vector_it, data_it += sizeof(unsigned int))
+ {
+ *vector_it =
+ Utilities::unpack<unsigned int>(data_it,
+ data_it + sizeof(unsigned int),
+ /*allow_compression=*/false);
+ }
+
+ deallog << "unpacking cell " << cell->id() << " with data size="
+ << std::distance(data_range.begin(), data_range.end())
+ << " accumulated data="
+ << std::accumulate(intdatavector.begin(), intdatavector.end(), 0)
+ << std::endl;
+
+ Assert((status ==
+ parallel::distributed::Triangulation<dim, dim>::CELL_PERSIST),
+ ExcInternalError());
+}
+
+
+
+template <int dim>
+void
+test()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+ MPI_Comm com_all = MPI_COMM_WORLD;
+ MPI_Comm com_small;
+
+ // split the communicator in proc 0,1,2 and 3,4
+ MPI_Comm_split(com_all, (myid < 3) ? 0 : 1, myid, &com_small);
+
+ // write with small com
+ if (myid < 3)
+ {
+ deallog << "writing with " << Utilities::MPI::n_mpi_processes(com_small)
+ << std::endl;
+
+ parallel::distributed::Triangulation<dim> tr(com_small);
+ GridGenerator::subdivided_hyper_cube(tr, 2);
+ tr.refine_global(1);
+
+ typename Triangulation<dim, dim>::active_cell_iterator cell;
+ for (cell = tr.begin_active(); cell != tr.end(); ++cell)
+ {
+ if (cell->is_locally_owned())
+ {
+ if (cell->id().to_string() == "0_1:0")
+ cell->set_refine_flag();
+ else if (cell->parent()->id().to_string() == "3_0:")
+ cell->set_coarsen_flag();
+ }
+ }
+ tr.execute_coarsening_and_refinement();
+
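+ // pack_function() returns a buffer whose length grows with every call,
+ // so each cell carries a different amount of data and the variable
+ // size transfer path is exercised.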
+ unsigned int handle =
+ tr.register_data_attach(pack_function<dim>,
+ /*returns_variable_size_data=*/true);
+
+ tr.save("file");
+ deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+ deallog << "Checksum: " << tr.get_checksum() << std::endl;
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ deallog << "reading with " << Utilities::MPI::n_mpi_processes(com_all)
+ << std::endl;
+
+ {
+ parallel::distributed::Triangulation<dim> tr(com_all);
+
+ GridGenerator::subdivided_hyper_cube(tr, 2);
+ tr.load("file");
+
+ unsigned int handle =
+ tr.register_data_attach(pack_function<dim>,
+ /*returns_variable_size_data=*/true);
+
+ tr.notify_ready_to_unpack(handle, unpack_function<dim>);
+
+ deallog << "#cells = " << tr.n_global_active_cells() << std::endl;
+ deallog << "Checksum: " << tr.get_checksum() << std::endl;
+ }
+
+ if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+ deallog << "OK" << std::endl;
+}
+
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll log;
+
+ test<2>();
+}