--- /dev/null
+New: The function DataOutInterface::write_vtu_with_pvtu_record() combines
+write_vtu() and write_pvtu_record() into a single function and generates
+the filenames with counter, processor ID, and .vtu/.pvtu ending
+automatically.
+<br>
+(Niklas Fehn, 2019/10/14)
write_pvtu_record(std::ostream & out,
const std::vector<std::string> &piece_names) const;
+ /**
+ * This function combines DataOutInterface::write_vtu(), or
+ * DataOutInterface::write_vtu_in_parallel(), with
+ * DataOutInterface::write_pvtu_record(). It automatically constructs
+ * the filenames of the generated output files and writes a pvtu file.
+ * A specified @p directory and a @p filename_without_extension
+ * form the first part of the filename. The filename is then extended with
+ * a @p counter labeling the current timestep/iteration/etc., the processor ID,
+ * and finally the .vtu/.pvtu ending. Since the number of timesteps to be
+ * written depends on the application, the number of digits to be reserved in
+ * the filename can be specified as parameter @p n_digits_for_counter, and the number
+ * is not padded with leading zeros if this parameter is left at its default
+ * value numbers::invalid_unsigned_int. If more than one file identifier
+ * is needed (e.g. time step number and iteration counter of solver), the
+ * last identifier is used as @p counter, while all other identifiers have to be
+ * added to @p filename_without_extension when calling this function.
+ *
+ * In a parallel setting, several files are typically written per time step. The
+ * number of files written in parallel depends on the number of MPI processes
+ * (see parameter @p mpi_communicator with default value MPI_COMM_WORLD) and on
+ * the parameter @p n_groups with default value 0. The background is that
+ * VTU file output supports grouping files from several CPUs into a given
+ * number of files using MPI I/O when writing on a parallel filesystem. The
+ * default value of @p n_groups is 0, meaning that every MPI rank will write one
+ * file. A value of 1 will generate one big file containing the solution over
+ * the whole domain, while a larger value will create @p n_groups files (but not
+ * more than there are MPI ranks). Note that only one processor needs to
+ * generate the .pvtu file; processor zero is chosen to take over this
+ * job.
+ *
+ * The return value is the filename of the master file for the pvtu record
+ * (not including the @p directory prefix).
+ *
+ * @note The code simply combines the strings @p directory and
+ * @p filename_without_extension, i.e., the user has to make sure that
+ * @p directory ends with a separator character, e.g. "/", that separates the
+ * directory from the filename.
+ *
+ * @note Use an empty string "" for the first argument if output is to be
+ * written in the current working directory.
+ *
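+ * As an illustrative sketch (assuming a DataOut object called data_out whose
+ * data has already been attached and patches built, and an unsigned integer
+ * time step counter called timestep; both names are hypothetical), a call
+ * that groups the output into at most 8 files per time step could look like
+ * @code
+ * const std::string pvtu_filename = data_out.write_vtu_with_pvtu_record(
+ *   "output/", "solution", timestep, 4, MPI_COMM_WORLD, 8);
+ * @endcode
+ * which, when run on at least 8 MPI ranks, writes the files
+ * output/solution_0000.0.vtu, ..., output/solution_0000.7.vtu and
+ * output/solution_0000.pvtu for timestep equal to zero.
+ *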
+ * @author Niklas Fehn, Martin Kronbichler, 2019
+ */
+ std::string
+ write_vtu_with_pvtu_record(
+ const std::string &directory,
+ const std::string &filename_without_extension,
+ const unsigned int counter,
+ const unsigned int n_digits_for_counter = numbers::invalid_unsigned_int,
+ const MPI_Comm & mpi_communicator = MPI_COMM_WORLD,
+ const unsigned int n_groups = 0) const;
+
/**
* Obtain data through get_patches() and write it to <tt>out</tt> in SVG
* format. See DataOutBase::write_svg.
}
+template <int dim, int spacedim>
+std::string
+DataOutInterface<dim, spacedim>::write_vtu_with_pvtu_record(
+ const std::string &directory,
+ const std::string &filename_without_extension,
+ const unsigned int counter,
+ const unsigned int n_digits_for_counter,
+ const MPI_Comm & mpi_communicator,
+ const unsigned int n_groups) const
+{
+ const unsigned int rank = Utilities::MPI::this_mpi_process(mpi_communicator);
+ const unsigned int n_ranks =
+ Utilities::MPI::n_mpi_processes(mpi_communicator);
+ const unsigned int n_files_written =
+ (n_groups == 0 || n_groups > n_ranks) ? n_ranks : n_groups;
+
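+ // determine how many digits to use for the file index in the .vtu filenames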
+ const unsigned int n_digits =
+ static_cast<int>(std::ceil(std::log10(std::fabs(n_files_written))));
+
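+ // each rank contributes to the file whose index is given by 'color';
+ // the local filename encodes the counter and this file index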
+ const unsigned int color = rank % n_files_written;
+ const std::string filename =
+ directory + filename_without_extension + "_" +
+ Utilities::int_to_string(counter, n_digits_for_counter) + "." +
+ Utilities::int_to_string(color, n_digits) + ".vtu";
+
+ if (n_groups == 0 || n_groups > n_ranks)
+ {
+ // every processor writes one file
+ std::ofstream output(filename.c_str());
+ this->write_vtu(output);
+ }
+ else if (n_groups == 1)
+ {
+ // write only a single data file in parallel
+ this->write_vtu_in_parallel(filename.c_str(), mpi_communicator);
+ }
+ else
+ {
+#ifdef DEAL_II_WITH_MPI
+ // write n_groups data files
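+ // split the communicator such that all ranks writing to the same
+ // .vtu file (i.e., ranks sharing the same 'color') end up in one
+ // sub-communicator, within which the file is written collectively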
+ MPI_Comm comm_group;
+ int ierr = MPI_Comm_split(mpi_communicator, color, rank, &comm_group);
+ AssertThrowMPI(ierr);
+ this->write_vtu_in_parallel(filename.c_str(), comm_group);
+ ierr = MPI_Comm_free(&comm_group);
+ AssertThrowMPI(ierr);
+#else
+ AssertThrow(false, ExcMessage("Logical error. Should not arrive here."));
+#endif
+ }
+
+ // write pvtu record
+ const std::string filename_master =
+ filename_without_extension + "_" +
+ Utilities::int_to_string(counter, n_digits_for_counter) + ".pvtu";
+
+ if (rank == 0)
+ {
+ std::vector<std::string> filename_vector;
+ for (unsigned int i = 0; i < n_files_written; ++i)
+ {
+ const std::string filename =
+ filename_without_extension + "_" +
+ Utilities::int_to_string(counter, n_digits_for_counter) + "." +
+ Utilities::int_to_string(i, n_digits) + ".vtu";
+
+ filename_vector.emplace_back(filename);
+ }
+
+ std::ofstream master_output((directory + filename_master).c_str());
+ this->write_pvtu_record(master_output, filename_vector);
+ }
+
+ return filename_master;
+}
+
+
template <int dim, int spacedim>
void