// this single time step:
const std::string pvtu_master_filename =
data_out.write_vtu_with_pvtu_record(
- "./", "solution", timestep_no, 4, mpi_communicator);
+ "./", "solution", timestep_no, mpi_communicator, 4);
// The record files must be written only once and not by each processor,
// so we do this on processor 0:
static int out_index = 0;
data_out.write_vtu_with_pvtu_record(
- "./", "solution", out_index, 5, MPI_COMM_WORLD);
+ "./", "solution", out_index, MPI_COMM_WORLD, 5);
out_index++;
}
flags.compression_level = DataOutBase::VtkFlags::best_speed;
data_out.set_flags(flags);
data_out.write_vtu_with_pvtu_record(
- "./", "solution", cycle, 3, MPI_COMM_WORLD);
+ "./", "solution", cycle, MPI_COMM_WORLD, 3);
time_details << "Time write output (CPU/wall) " << time.cpu_time()
<< "s/" << time.wall_time() << "s\n";
// in parallel with the help of MPI-IO. Additionally, a PVTU record is
// generated, which groups the written VTU files.
data_out.write_vtu_with_pvtu_record(
- "./", "solution", cycle, 2, mpi_communicator, 8);
+ "./", "solution", cycle, mpi_communicator, 2, 8);
}
// ParaView, the VisIt visualization program, by creating a matching
// <code>.visit</code> file.
const std::string master_name = data_out.write_vtu_with_pvtu_record(
- output_dir, "solution", current_refinement_cycle, 2, mpi_communicator);
+ output_dir, "solution", current_refinement_cycle, mpi_communicator, 2);
pcout << master_name << std::endl;
TrilinosWrappers::MPI::Vector tmp(solution);
data_out.build_patches(mapping, degree + 1);
data_out.write_vtu_with_pvtu_record(
- "./", "solution", refinement_cycle, 2, MPI_COMM_WORLD);
+ "./", "solution", refinement_cycle, MPI_COMM_WORLD, 2);
}
data_out.build_patches();
data_out.write_vtu_with_pvtu_record(
- "./", "solution", timestep_number, 3, MPI_COMM_WORLD);
+ "./", "solution", timestep_number, MPI_COMM_WORLD, 3);
}
data_out.build_patches(0);
const auto filename = data_out.write_vtu_with_pvtu_record(
- "", "solution", cycle, 5, mpi_communicator, 8);
+ "", "solution", cycle, mpi_communicator, 5, 8);
pcout << " wrote " << filename << std::endl;
}
data_out.build_patches();
data_out.write_vtu_with_pvtu_record(
- "./", "solution", cycle, 2, mpi_communicator);
+ "./", "solution", cycle, mpi_communicator, 2);
}
flags.compression_level = DataOutBase::VtkFlags::best_speed;
data_out.set_flags(flags);
data_out.write_vtu_with_pvtu_record(
- "./", "solution", cycle, 2, mpi_communicator);
+ "./", "solution", cycle, mpi_communicator, 2);
Vector<float> cellwise_norm(triangulation.n_active_cells());
VectorTools::integrate_difference(dof_handler,
* DataOutInterface::write_pvtu_record().
*
* For example, running
- * <code> write_vtu_with_pvtu_record("output/", "solution", 3, 4, comm, 2)
+ * <code> write_vtu_with_pvtu_record("output/", "solution", 3, comm, 4, 2)
* </code> on 10 processes generates the files
* @code
* output/solution_0003.0.vtu
* In a
* parallel setting, several files are typically written per time step. The
* number of files written in parallel depends on the number of MPI processes
- * (see parameter @p mpi_communicator with default value MPI_COMM_WORLD), and a
+ * (see parameter @p mpi_communicator), and a
 * specified number of file groups, @p n_groups, with default value 0. The background is that
* VTU file output supports grouping files from several CPUs into a given
* number of files using MPI I/O when writing on a parallel filesystem. The
const std::string &directory,
const std::string &filename_without_extension,
const unsigned int counter,
+ const MPI_Comm & mpi_communicator,
const unsigned int n_digits_for_counter = numbers::invalid_unsigned_int,
- const MPI_Comm & mpi_communicator = MPI_COMM_WORLD,
const unsigned int n_groups = 0) const;
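A minimal usage sketch of the new argument order (the communicator now precedes the
number of counter digits and the group count), assuming dim, dof_handler, solution,
cycle, and mpi_communicator are set up as in the tutorial snippets above:

  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(solution, "solution");
  data_out.build_patches();

  // Writes the .vtu pieces (grouped into 2 files via MPI I/O) and the .pvtu
  // record that references them; the returned string is the record's name.
  const std::string pvtu_filename =
    data_out.write_vtu_with_pvtu_record("./",
                                        "solution",
                                        cycle,
                                        mpi_communicator,
                                        /*n_digits_for_counter=*/4,
                                        /*n_groups=*/2);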
/**
const std::string &directory,
const std::string &filename_without_extension,
const unsigned int counter,
- const unsigned int n_digits_for_counter,
const MPI_Comm & mpi_communicator,
+ const unsigned int n_digits_for_counter,
const unsigned int n_groups) const
{
const unsigned int rank = Utilities::MPI::this_mpi_process(mpi_communicator);