--- /dev/null
+New: Implemented parallel::fullydistributed::Triangulation::load()/save(), which can also be used together with parallel::distributed::SolutionTransfer.
+<br>
+(Pasquale Claudio Africa, Peter Munch, 2021/01/18)
* must be empty before calling this function.
*
* You need to load with the same number of MPI processes that
- * you saved with. Cell-based data that was saved with
- * register_data_attach() can be read in with notify_ready_to_unpack()
- * after calling load().
+ * you saved with, hence autopartition is disabled.
+ *
+ * Cell-based data that was saved with register_data_attach() can be read
+ * in with notify_ready_to_unpack() after calling load().
*/
virtual void
load(const std::string &filename,
- const bool autopartition = true) override;
+ const bool autopartition = false) override;
private:
virtual unsigned int
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::update_cell_relations()
{
- AssertThrow(false, ExcNotImplemented());
+ // Reorganize memory for local_cell_relations.
+ this->local_cell_relations.resize(this->n_locally_owned_active_cells());
+ this->local_cell_relations.shrink_to_fit();
+
+ unsigned int cell_id = 0;
+
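+  // Every locally owned cell is marked as persisting: a serialized
+  // fully-distributed triangulation is written and read back unchanged,
+  // so no cell is refined or coarsened in the process.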
+ for (auto cell = this->begin_active(); cell != this->end(); ++cell)
+ {
+ if (!cell->is_locally_owned())
+ continue;
+
+ this->local_cell_relations[cell_id] =
+ std::make_tuple(Triangulation<dim, spacedim>::CELL_PERSIST, cell);
+ ++cell_id;
+ }
}
+
+
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::save(const std::string &filename) const
{
+#ifdef DEAL_II_WITH_MPI
+ AssertThrow(this->cell_attached_data.pack_callbacks_variable.size() == 0,
+ ExcNotImplemented());
+
+ Assert(
+ this->cell_attached_data.n_attached_deserialize == 0,
+ ExcMessage(
+ "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
+ Assert(this->n_cells() > 0,
+ ExcMessage("Can not save() an empty Triangulation."));
+
+ const int myrank =
+ Utilities::MPI::this_mpi_process(this->mpi_communicator);
+ const int mpisize =
+ Utilities::MPI::n_mpi_processes(this->mpi_communicator);
+
+ // Compute global offset for each rank.
+ unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
+
+ unsigned int global_first_cell = 0;
+
+ int ierr = MPI_Exscan(&n_locally_owned_cells,
+ &global_first_cell,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ this->mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ global_first_cell *= sizeof(unsigned int);
+
+
+ if (myrank == 0)
+ {
+ std::string fname = std::string(filename) + ".info";
+ std::ofstream f(fname.c_str());
+ f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
+ << std::endl
+ << 4 << " "
+ << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
+ << this->cell_attached_data.pack_callbacks_fixed.size() << " "
+ << this->cell_attached_data.pack_callbacks_variable.size() << " "
+ << this->n_global_active_cells() << std::endl;
+ }
+
+ // Save cell attached data.
+ this->save_attached_data(global_first_cell,
+ this->n_global_active_cells(),
+ filename);
+
+ // Save triangulation description.
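+  // The resulting file layout is: a header of one unsigned int per rank
+  // holding the size in bytes of that rank's packed buffer, followed by
+  // the packed buffers themselves, concatenated in rank order.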
+ {
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
+
+ const std::string fname_tria = filename + "_triangulation.data";
+
+ // Open file.
+ MPI_File fh;
+ ierr = MPI_File_open(this->mpi_communicator,
+ DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
+ MPI_MODE_CREATE | MPI_MODE_WRONLY,
+ info,
+ &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_set_size(fh, 0); // delete the file contents
+ AssertThrowMPI(ierr);
+ // this barrier is necessary, because otherwise others might already
+ // write while one core is still setting the size to zero.
+ ierr = MPI_Barrier(this->mpi_communicator);
+ AssertThrowMPI(ierr);
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+ // ------------------
+
+ // Create construction data.
+ const auto construction_data = TriangulationDescription::Utilities::
+ create_description_from_triangulation(*this,
+ this->mpi_communicator,
+ this->settings);
+
+ // Pack.
+ std::vector<char> buffer;
+ dealii::Utilities::pack(construction_data, buffer, false);
+
+      // Compute the offset of this rank's buffer within the file.
+ unsigned int buffer_size = buffer.size();
+
+ unsigned int offset = 0;
+
+ ierr = MPI_Exscan(&buffer_size,
+ &offset,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ this->mpi_communicator);
+ AssertThrowMPI(ierr);
+
+      // Write this rank's buffer size into the file header.
+ ierr = MPI_File_write_at(fh,
+ myrank * sizeof(unsigned int),
+ DEAL_II_MPI_CONST_CAST(&buffer_size),
+ 1,
+ MPI_UNSIGNED,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ // Write buffers to file.
+ ierr = MPI_File_write_at(fh,
+ mpisize * sizeof(unsigned int) +
+ offset, // global position in file
+ DEAL_II_MPI_CONST_CAST(buffer.data()),
+ buffer.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+ }
+#else
(void)filename;
- AssertThrow(false, ExcNotImplemented());
+ AssertThrow(false, ExcNeedsMPI());
+#endif
}


template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::load(const std::string &filename,
const bool autopartition)
{
+#ifdef DEAL_II_WITH_MPI
+ AssertThrow(
+ autopartition == false,
+ ExcMessage(
+ "load() only works if run with the same number of MPI processes used for saving the triangulation, hence autopartition is disabled."));
+
+ Assert(this->n_cells() == 0,
+ ExcMessage("load() only works if the Triangulation is empty!"));
+
+ unsigned int version, numcpus, attached_count_fixed,
+ attached_count_variable, n_global_active_cells;
+ {
+ std::string fname = std::string(filename) + ".info";
+ std::ifstream f(fname.c_str());
+ AssertThrow(f, ExcIO());
+ std::string firstline;
+ getline(f, firstline); // skip first line
+ f >> version >> numcpus >> attached_count_fixed >>
+ attached_count_variable >> n_global_active_cells;
+ }
+
+ AssertThrow(version == 4,
+ ExcMessage("Incompatible version found in .info file."));
+
+ // Load description and construct the triangulation.
+ {
+ const int myrank =
+ Utilities::MPI::this_mpi_process(this->mpi_communicator);
+ const int mpisize =
+ Utilities::MPI::n_mpi_processes(this->mpi_communicator);
+
+ AssertDimension(numcpus, mpisize);
+
+ // Open file.
+ MPI_Info info;
+ int ierr = MPI_Info_create(&info);
+ AssertThrowMPI(ierr);
+
+ const std::string fname_tria = filename + "_triangulation.data";
+
+ MPI_File fh;
+ ierr = MPI_File_open(this->mpi_communicator,
+ DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
+ MPI_MODE_RDONLY,
+ info,
+ &fh);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_Info_free(&info);
+ AssertThrowMPI(ierr);
+
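+    // The file layout mirrors save(): a header of one unsigned int per
+    // rank with each rank's buffer size, followed by the concatenated
+    // packed buffers.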
+    // Read the size of this rank's packed buffer from the file header.
+ unsigned int buffer_size;
+
+ ierr = MPI_File_read_at(fh,
+ myrank * sizeof(unsigned int),
+ DEAL_II_MPI_CONST_CAST(&buffer_size),
+ 1,
+ MPI_UNSIGNED,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ unsigned int offset = 0;
+
+ ierr = MPI_Exscan(&buffer_size,
+ &offset,
+ 1,
+ MPI_UNSIGNED,
+ MPI_SUM,
+ this->mpi_communicator);
+ AssertThrowMPI(ierr);
+
+ // Read buffers from file.
+ std::vector<char> buffer(buffer_size);
+ ierr = MPI_File_read_at(fh,
+ mpisize * sizeof(unsigned int) +
+ offset, // global position in file
+ DEAL_II_MPI_CONST_CAST(buffer.data()),
+ buffer.size(), // local buffer
+ MPI_CHAR,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+
+ ierr = MPI_File_close(&fh);
+ AssertThrowMPI(ierr);
+
+ auto construction_data = dealii::Utilities::template unpack<
+ TriangulationDescription::Description<dim, spacedim>>(buffer, false);
+
+ // WARNING: serialization cannot handle the MPI communicator
+ // which is the reason why we have to set it here explicitly
+ construction_data.comm = this->mpi_communicator;
+
+ this->create_triangulation(construction_data);
+ }
+
+    // Compute the global offset of this rank's cell data. This is only
+    // possible now that the triangulation has been created and each rank
+    // knows how many cells it owns.
+    unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
+
+    unsigned int global_first_cell = 0;
+
+    int ierr = MPI_Exscan(&n_locally_owned_cells,
+                          &global_first_cell,
+                          1,
+                          MPI_UNSIGNED,
+                          MPI_SUM,
+                          this->mpi_communicator);
+    AssertThrowMPI(ierr);
+
+    global_first_cell *= sizeof(unsigned int);
+
+    Assert(this->n_global_active_cells() == n_global_active_cells,
+           ExcMessage("Number of global active cells differ!"));
+
+    // clear all of the callback data, as explained in the documentation of
+    // register_data_attach()
+    this->cell_attached_data.n_attached_data_sets = 0;
+    this->cell_attached_data.n_attached_deserialize =
+      attached_count_fixed + attached_count_variable;
+
+ // Load attached cell data, if any was stored.
+ this->load_attached_data(global_first_cell,
+ this->n_global_active_cells(),
+ this->n_locally_owned_active_cells(),
+ filename,
+ attached_count_fixed,
+ attached_count_variable);
+
+ this->update_cell_relations();
+ this->update_periodic_face_map();
+ this->update_number_cache();
+#else
(void)filename;
(void)autopartition;
- AssertThrow(false, ExcNotImplemented());
+ AssertThrow(false, ExcNeedsMPI());
+#endif
}
*
* Given that the elements of @p dof_values are stored in consecutive
* locations, we can just memcpy them. Since floating point values don't
- * compress well, we also forgo the compression the default
+ * compress well, we also forgo the compression that the default
* Utilities::pack() and Utilities::unpack() functions offer.
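+ *
+ * A sketch of the idea (illustrative, not the actual implementation):
+ * @code
+ * std::vector<char> buffer(dof_values.size() * sizeof(value_type));
+ * std::memcpy(buffer.data(), dof_values.data(), buffer.size());
+ * @endcode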
*/
template <typename value_type>
, handle(numbers::invalid_unsigned_int)
{
Assert(
- (dynamic_cast<const parallel::distributed::
- Triangulation<dim, DoFHandlerType::space_dimension> *>(
+ (dynamic_cast<const parallel::DistributedTriangulationBase<
+ dim,
+ DoFHandlerType::space_dimension> *>(
&dof_handler->get_triangulation()) != nullptr),
ExcMessage(
"parallel::distributed::SolutionTransfer requires a parallel::distributed::Triangulation object."));
SolutionTransfer<dim, VectorType, DoFHandlerType>::register_data_attach()
{
// TODO: casting away constness is bad
- parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension>
- *tria = (dynamic_cast<parallel::distributed::Triangulation<
- dim,
- DoFHandlerType::space_dimension> *>(
- const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
- *>(&dof_handler->get_triangulation())));
+ auto *tria = (dynamic_cast<parallel::DistributedTriangulationBase<
+ dim,
+ DoFHandlerType::space_dimension> *>(
+ const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
+ *>(&dof_handler->get_triangulation())));
Assert(tria != nullptr, ExcInternalError());
handle = tria->register_data_attach(
ExcDimensionMismatch(input_vectors.size(), all_out.size()));
// TODO: casting away constness is bad
- parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension>
- *tria = (dynamic_cast<parallel::distributed::Triangulation<
- dim,
- DoFHandlerType::space_dimension> *>(
- const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
- *>(&dof_handler->get_triangulation())));
+ auto *tria = (dynamic_cast<parallel::DistributedTriangulationBase<
+ dim,
+ DoFHandlerType::space_dimension> *>(
+ const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
+ *>(&dof_handler->get_triangulation())));
Assert(tria != nullptr, ExcInternalError());
tria->notify_ready_to_unpack(
Assert(
this->cell_attached_data.n_attached_deserialize == 0,
ExcMessage(
- "not all SolutionTransfer's got deserialized after the last load()"));
+ "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
Assert(this->n_cells() > 0,
ExcMessage("Can not save() an empty Triangulation."));
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test fullydistributed::Triangulation::load()/save().
+// Create a triangulation, save it and load it.
+// The initial and the loaded triangulations must be the same.
+
+#include <deal.II/distributed/fully_distributed_tria.h>
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_description.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include "./tests.h"
+
+using namespace dealii;
+
+template <int dim, typename TriangulationType>
+void
+test(TriangulationType &triangulation)
+{
+ const std::string filename = "save_load_" + std::to_string(dim) + "d_out";
+
+ triangulation.save(filename);
+ deallog.push("save");
+ print_statistics(triangulation, false);
+ deallog.pop();
+
+ triangulation.clear();
+
+ triangulation.load(filename);
+ deallog.push("load");
+ print_statistics(triangulation, false);
+ deallog.pop();
+}
+
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll all;
+
+ deallog.push("2d");
+ {
+ constexpr int dim = 2;
+
+ parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global(3);
+
+ const auto description = TriangulationDescription::Utilities::
+ create_description_from_triangulation(triangulation, MPI_COMM_WORLD);
+
+ parallel::fullydistributed::Triangulation<dim> triangulation_pft(
+ MPI_COMM_WORLD);
+ triangulation_pft.create_triangulation(description);
+
+ test<dim>(triangulation_pft);
+ }
+ deallog.pop();
+
+ deallog.push("3d");
+ {
+ constexpr int dim = 3;
+
+ parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global(3);
+
+ const auto description = TriangulationDescription::Utilities::
+ create_description_from_triangulation(triangulation, MPI_COMM_WORLD);
+
+ parallel::fullydistributed::Triangulation<dim> triangulation_pft(
+ MPI_COMM_WORLD);
+ triangulation_pft.create_triangulation(description);
+
+ test<dim>(triangulation_pft);
+ }
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:0:2d:save::n_levels: 4
+DEAL:0:2d:save::n_cells: 85
+DEAL:0:2d:save::n_active_cells: 64
+DEAL:0:2d:save::
+DEAL:0:2d:load::n_levels: 4
+DEAL:0:2d:load::n_cells: 85
+DEAL:0:2d:load::n_active_cells: 64
+DEAL:0:2d:load::
+DEAL:0:3d:save::n_levels: 4
+DEAL:0:3d:save::n_cells: 585
+DEAL:0:3d:save::n_active_cells: 512
+DEAL:0:3d:save::
+DEAL:0:3d:load::n_levels: 4
+DEAL:0:3d:load::n_cells: 585
+DEAL:0:3d:load::n_active_cells: 512
+DEAL:0:3d:load::
--- /dev/null
+
+DEAL:0:2d:save::n_levels: 4
+DEAL:0:2d:save::n_cells: 57
+DEAL:0:2d:save::n_active_cells: 43
+DEAL:0:2d:save::
+DEAL:0:2d:load::n_levels: 4
+DEAL:0:2d:load::n_cells: 57
+DEAL:0:2d:load::n_active_cells: 43
+DEAL:0:2d:load::
+DEAL:0:3d:save::n_levels: 4
+DEAL:0:3d:save::n_cells: 361
+DEAL:0:3d:save::n_active_cells: 316
+DEAL:0:3d:save::
+DEAL:0:3d:load::n_levels: 4
+DEAL:0:3d:load::n_cells: 361
+DEAL:0:3d:load::n_active_cells: 316
+DEAL:0:3d:load::
+
+DEAL:1:2d:save::n_levels: 4
+DEAL:1:2d:save::n_cells: 57
+DEAL:1:2d:save::n_active_cells: 43
+DEAL:1:2d:save::
+DEAL:1:2d:load::n_levels: 4
+DEAL:1:2d:load::n_cells: 57
+DEAL:1:2d:load::n_active_cells: 43
+DEAL:1:2d:load::
+DEAL:1:3d:save::n_levels: 4
+DEAL:1:3d:save::n_cells: 361
+DEAL:1:3d:save::n_active_cells: 316
+DEAL:1:3d:save::
+DEAL:1:3d:load::n_levels: 4
+DEAL:1:3d:load::n_cells: 361
+DEAL:1:3d:load::n_active_cells: 316
+DEAL:1:3d:load::
+
+
+DEAL:2:2d:save::n_levels: 4
+DEAL:2:2d:save::n_cells: 57
+DEAL:2:2d:save::n_active_cells: 43
+DEAL:2:2d:save::
+DEAL:2:2d:load::n_levels: 4
+DEAL:2:2d:load::n_cells: 57
+DEAL:2:2d:load::n_active_cells: 43
+DEAL:2:2d:load::
+DEAL:2:3d:save::n_levels: 4
+DEAL:2:3d:save::n_cells: 361
+DEAL:2:3d:save::n_active_cells: 316
+DEAL:2:3d:save::
+DEAL:2:3d:load::n_levels: 4
+DEAL:2:3d:load::n_cells: 361
+DEAL:2:3d:load::n_active_cells: 316
+DEAL:2:3d:load::
+
+
+DEAL:3:2d:save::n_levels: 4
+DEAL:3:2d:save::n_cells: 57
+DEAL:3:2d:save::n_active_cells: 43
+DEAL:3:2d:save::
+DEAL:3:2d:load::n_levels: 4
+DEAL:3:2d:load::n_cells: 57
+DEAL:3:2d:load::n_active_cells: 43
+DEAL:3:2d:load::
+DEAL:3:3d:save::n_levels: 4
+DEAL:3:3d:save::n_cells: 361
+DEAL:3:3d:save::n_active_cells: 316
+DEAL:3:3d:save::
+DEAL:3:3d:load::n_levels: 4
+DEAL:3:3d:load::n_cells: 361
+DEAL:3:3d:load::n_active_cells: 316
+DEAL:3:3d:load::
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test distributed solution transfer with fullydistributed triangulations.
+
+#include <deal.II/distributed/fully_distributed_tria.h>
+#include <deal.II/distributed/solution_transfer.h>
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+
+#include <deal.II/fe/fe_q.h>
+
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/tria_description.h>
+
+#include <deal.II/lac/la_parallel_vector.h>
+
+#include <deal.II/numerics/vector_tools.h>
+
+#include "../tests.h"
+
+using namespace dealii;
+
+template <int dim>
+class InterpolationFunction : public Function<dim>
+{
+public:
+ InterpolationFunction()
+ : Function<dim>(1)
+ {}
+
+  virtual double
+  value(const Point<dim> &p, const unsigned int component = 0) const override
+ {
+ return p.norm();
+ }
+};
+
+template <int dim, typename TriangulationType>
+void
+test(TriangulationType &triangulation)
+{
+ DoFHandler<dim> dof_handler(triangulation);
+ dof_handler.distribute_dofs(FE_Q<dim>(2));
+
+ IndexSet locally_relevant_dofs;
+ DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+
+ using VectorType = LinearAlgebra::distributed::Vector<double>;
+
+ std::shared_ptr<Utilities::MPI::Partitioner> partitioner =
+ std::make_shared<Utilities::MPI::Partitioner>(
+ dof_handler.locally_owned_dofs(), locally_relevant_dofs, MPI_COMM_WORLD);
+
+ VectorType vector(partitioner);
+
+ VectorTools::interpolate(dof_handler, InterpolationFunction<dim>(), vector);
+ vector.update_ghost_values();
+
+ VectorType vector_loaded(partitioner);
+
+ const std::string filename =
+ "solution_transfer_" + std::to_string(dim) + "d_out";
+
+ {
+ parallel::distributed::SolutionTransfer<dim, VectorType> solution_transfer(
+ dof_handler);
+ solution_transfer.prepare_for_serialization(vector);
+
+ triangulation.save(filename);
+ }
+
+ triangulation.clear();
+
+ {
+    triangulation.load(filename);
+
+    // The DoFHandler attached to the triangulation was invalidated by
+    // clear() above, so enumerate its DoFs again before deserializing.
+    dof_handler.distribute_dofs(FE_Q<dim>(2));
+
+ parallel::distributed::SolutionTransfer<dim, VectorType> solution_transfer(
+ dof_handler);
+ solution_transfer.deserialize(vector_loaded);
+
+ vector_loaded.update_ghost_values();
+ }
+
+ // Verify that error is 0.
+ VectorType error(vector);
+ error.add(-1, vector_loaded);
+
+ deallog << (error.linfty_norm() < 1e-16 ? "PASSED" : "FAILED") << std::endl;
+}
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  initlog();
+
+ deallog.push("2d");
+ {
+ constexpr int dim = 2;
+
+ parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global(3);
+
+ const auto description = TriangulationDescription::Utilities::
+ create_description_from_triangulation(triangulation, MPI_COMM_WORLD);
+
+ parallel::fullydistributed::Triangulation<dim> triangulation_pft(
+ MPI_COMM_WORLD);
+ triangulation_pft.create_triangulation(description);
+
+ test<dim>(triangulation_pft);
+ }
+ deallog.pop();
+
+ deallog.push("3d");
+ {
+ constexpr int dim = 3;
+
+ parallel::distributed::Triangulation<dim> triangulation(MPI_COMM_WORLD);
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global(3);
+
+ const auto description = TriangulationDescription::Utilities::
+ create_description_from_triangulation(triangulation, MPI_COMM_WORLD);
+
+ parallel::fullydistributed::Triangulation<dim> triangulation_pft(
+ MPI_COMM_WORLD);
+ triangulation_pft.create_triangulation(description);
+
+ test<dim>(triangulation_pft);
+ }
+ deallog.pop();
+}
--- /dev/null
+
+DEAL:2d::PASSED
+DEAL:3d::PASSED
--- /dev/null
+
+DEAL:2d::PASSED
+DEAL:3d::PASSED