From bdd93f06d5293228f2142b900337e3db73990367 Mon Sep 17 00:00:00 2001
From: Pasquale Africa
Date: Mon, 18 Jan 2021 19:06:37 +0000
Subject: [PATCH] Implement p::f::T::load()/save()

---
 .../changes/minor/20210118PasqualeAfrica-2    |   3 +
 .../distributed/fully_distributed_tria.h      |   9 +-
 source/distributed/fully_distributed_tria.cc  | 279 +++++++++++++++++-
 source/distributed/solution_transfer.cc       |  29 +-
 source/distributed/tria.cc                    |   2 +-
 tests/fullydistributed_grids/save_load_01.cc  | 103 +++++++
 .../save_load_01.mpirun=1.output              |  17 ++
 .../save_load_01.mpirun=4.output              |  71 +++++
 .../solution_transfer_01.cc                   | 152 ++++++++++
 .../solution_transfer_01.mpirun=1.output      |   3 +
 .../solution_transfer_01.mpirun=4.output      |   3 +
 11 files changed, 648 insertions(+), 23 deletions(-)
 create mode 100644 doc/news/changes/minor/20210118PasqualeAfrica-2
 create mode 100644 tests/fullydistributed_grids/save_load_01.cc
 create mode 100644 tests/fullydistributed_grids/save_load_01.mpirun=1.output
 create mode 100644 tests/fullydistributed_grids/save_load_01.mpirun=4.output
 create mode 100644 tests/fullydistributed_grids/solution_transfer_01.cc
 create mode 100644 tests/fullydistributed_grids/solution_transfer_01.mpirun=1.output
 create mode 100644 tests/fullydistributed_grids/solution_transfer_01.mpirun=4.output

diff --git a/doc/news/changes/minor/20210118PasqualeAfrica-2 b/doc/news/changes/minor/20210118PasqualeAfrica-2
new file mode 100644
index 0000000000..0885f1162e
--- /dev/null
+++ b/doc/news/changes/minor/20210118PasqualeAfrica-2
@@ -0,0 +1,3 @@
+New: Implemented parallel::fullydistributed::Triangulation::load()/save() for use with parallel::distributed::SolutionTransfer.
+
+(Pasquale Claudio Africa, Peter Munch, 2021/01/18)
diff --git a/include/deal.II/distributed/fully_distributed_tria.h b/include/deal.II/distributed/fully_distributed_tria.h
index b7d8d1acda..9f2ad34d6c 100644
--- a/include/deal.II/distributed/fully_distributed_tria.h
+++ b/include/deal.II/distributed/fully_distributed_tria.h
@@ -246,13 +246,14 @@ namespace parallel
      * must be empty before calling this function.
      *
      * You need to load with the same number of MPI processes that
-     * you saved with. Cell-based data that was saved with
-     * register_data_attach() can be read in with notify_ready_to_unpack()
-     * after calling load().
+     * you saved with, hence autopartition is disabled.
+     *
+     * Cell-based data that was saved with register_data_attach() can be read
+     * in with notify_ready_to_unpack() after calling load().
      */
     virtual void
     load(const std::string &filename,
-         const bool         autopartition = true) override;
+         const bool         autopartition = false) override;
 
   private:
     virtual unsigned int
diff --git a/source/distributed/fully_distributed_tria.cc b/source/distributed/fully_distributed_tria.cc
index 7922563df6..e01ea7cd48 100644
--- a/source/distributed/fully_distributed_tria.cc
+++ b/source/distributed/fully_distributed_tria.cc
@@ -407,16 +407,156 @@ namespace parallel
     void
     Triangulation<dim, spacedim>::update_cell_relations()
     {
-      AssertThrow(false, ExcNotImplemented());
+      // Reorganize memory for local_cell_relations.
+      this->local_cell_relations.resize(this->n_locally_owned_active_cells());
+      this->local_cell_relations.shrink_to_fit();
+
+      unsigned int cell_id = 0;
+
+      for (auto cell = this->begin_active(); cell != this->end(); ++cell)
+        {
+          if (!cell->is_locally_owned())
+            continue;
+
+          this->local_cell_relations[cell_id] =
+            std::make_tuple(Triangulation<dim, spacedim>::CELL_PERSIST, cell);
+          ++cell_id;
+        }
     }
 
 
 
     template <int dim, int spacedim>
     void
     Triangulation<dim, spacedim>::save(const std::string &filename) const
     {
+#ifdef DEAL_II_WITH_MPI
+      AssertThrow(this->cell_attached_data.pack_callbacks_variable.size() == 0,
+                  ExcNotImplemented());
+
+      Assert(
+        this->cell_attached_data.n_attached_deserialize == 0,
+        ExcMessage(
+          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
+      Assert(this->n_cells() > 0,
+             ExcMessage("Can not save() an empty Triangulation."));
+
+      const int myrank =
+        Utilities::MPI::this_mpi_process(this->mpi_communicator);
+      const int mpisize =
+        Utilities::MPI::n_mpi_processes(this->mpi_communicator);
+
+      // Compute global offset for each rank.
+      unsigned int n_locally_owned_cells =
+        this->n_locally_owned_active_cells();
+
+      unsigned int global_first_cell = 0;
+
+      int ierr = MPI_Exscan(&n_locally_owned_cells,
+                            &global_first_cell,
+                            1,
+                            MPI_UNSIGNED,
+                            MPI_SUM,
+                            this->mpi_communicator);
+      AssertThrowMPI(ierr);
+
+      global_first_cell *= sizeof(unsigned int);
+
+
+      if (myrank == 0)
+        {
+          std::string   fname = std::string(filename) + ".info";
+          std::ofstream f(fname.c_str());
+          f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
+            << std::endl
+            << 4 << " "
+            << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " "
+            << this->cell_attached_data.pack_callbacks_fixed.size() << " "
+            << this->cell_attached_data.pack_callbacks_variable.size() << " "
+            << this->n_global_active_cells() << std::endl;
+        }
+
+      // Save cell attached data.
+      this->save_attached_data(global_first_cell,
+                               this->n_global_active_cells(),
+                               filename);
+
+      // Save triangulation description.
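+      //
+      // The helper file <filename>_triangulation.data written below has a
+      // simple layout: first one unsigned int per rank holding the size of
+      // that rank's packed TriangulationDescription, then the packed
+      // buffers themselves, concatenated in rank order. load() below
+      // assumes exactly this layout when reading the file back in.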
+      {
+        MPI_Info info;
+        int      ierr = MPI_Info_create(&info);
+        AssertThrowMPI(ierr);
+
+        const std::string fname_tria = filename + "_triangulation.data";
+
+        // Open file.
+        MPI_File fh;
+        ierr = MPI_File_open(this->mpi_communicator,
+                             DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
+                             MPI_MODE_CREATE | MPI_MODE_WRONLY,
+                             info,
+                             &fh);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_set_size(fh, 0); // delete the file contents
+        AssertThrowMPI(ierr);
+        // this barrier is necessary, because otherwise others might already
+        // write while one core is still setting the size to zero.
+        ierr = MPI_Barrier(this->mpi_communicator);
+        AssertThrowMPI(ierr);
+        ierr = MPI_Info_free(&info);
+        AssertThrowMPI(ierr);
+        // ------------------
+
+        // Create construction data.
+        const auto construction_data = TriangulationDescription::Utilities::
+          create_description_from_triangulation(*this,
+                                                this->mpi_communicator,
+                                                this->settings);
+
+        // Pack.
+        std::vector<char> buffer;
+        dealii::Utilities::pack(construction_data, buffer, false);
+
+        // Compute the offset of this rank's buffer within the file.
+        unsigned int buffer_size = buffer.size();
+
+        unsigned int offset = 0;
+
+        ierr = MPI_Exscan(&buffer_size,
+                          &offset,
+                          1,
+                          MPI_UNSIGNED,
+                          MPI_SUM,
+                          this->mpi_communicator);
+        AssertThrowMPI(ierr);
+
+        // Write offsets to file.
+        ierr = MPI_File_write_at(fh,
+                                 myrank * sizeof(unsigned int),
+                                 DEAL_II_MPI_CONST_CAST(&buffer_size),
+                                 1,
+                                 MPI_UNSIGNED,
+                                 MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        // Write buffers to file.
+        ierr = MPI_File_write_at(fh,
+                                 mpisize * sizeof(unsigned int) +
+                                   offset, // global position in file
+                                 DEAL_II_MPI_CONST_CAST(buffer.data()),
+                                 buffer.size(), // local buffer
+                                 MPI_CHAR,
+                                 MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_close(&fh);
+        AssertThrowMPI(ierr);
+      }
+#else
       (void)filename;
-      AssertThrow(false, ExcNotImplemented());
+      AssertThrow(false, ExcNeedsMPI());
+#endif
     }
 
 
@@ -426,10 +566,143 @@ namespace parallel
     Triangulation<dim, spacedim>::load(const std::string &filename,
                                        const bool         autopartition)
     {
+#ifdef DEAL_II_WITH_MPI
+      AssertThrow(
+        autopartition == false,
+        ExcMessage(
+          "load() only works if run with the same number of MPI processes used for saving the triangulation, hence autopartition is disabled."));
+
+      Assert(this->n_cells() == 0,
+             ExcMessage("load() only works if the Triangulation is empty!"));
+
+      // Compute global offset for each rank.
+      unsigned int n_locally_owned_cells =
+        this->n_locally_owned_active_cells();
+
+      unsigned int global_first_cell = 0;
+
+      int ierr = MPI_Exscan(&n_locally_owned_cells,
+                            &global_first_cell,
+                            1,
+                            MPI_UNSIGNED,
+                            MPI_SUM,
+                            this->mpi_communicator);
+      AssertThrowMPI(ierr);
+
+      global_first_cell *= sizeof(unsigned int);
+
+
+      unsigned int version, numcpus, attached_count_fixed,
+        attached_count_variable, n_global_active_cells;
+      {
+        std::string   fname = std::string(filename) + ".info";
+        std::ifstream f(fname.c_str());
+        AssertThrow(f, ExcIO());
+        std::string firstline;
+        getline(f, firstline); // skip first line
+        f >> version >> numcpus >> attached_count_fixed >>
+          attached_count_variable >> n_global_active_cells;
+      }
+
+      AssertThrow(version == 4,
+                  ExcMessage("Incompatible version found in .info file."));
+      Assert(this->n_global_active_cells() == n_global_active_cells,
+             ExcMessage("Number of global active cells differ!"));
+
+      // Load description and construct the triangulation.
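+      //
+      // The reads below mirror the layout written by save(): this rank's
+      // buffer size is taken from the per-rank size table at the beginning
+      // of the file, the offset into the buffer section is obtained via
+      // MPI_Exscan, and the packed TriangulationDescription is then read
+      // and unpacked.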
+      {
+        const int myrank =
+          Utilities::MPI::this_mpi_process(this->mpi_communicator);
+        const int mpisize =
+          Utilities::MPI::n_mpi_processes(this->mpi_communicator);
+
+        AssertDimension(numcpus, mpisize);
+
+        // Open file.
+        MPI_Info info;
+        int      ierr = MPI_Info_create(&info);
+        AssertThrowMPI(ierr);
+
+        const std::string fname_tria = filename + "_triangulation.data";
+
+        MPI_File fh;
+        ierr = MPI_File_open(this->mpi_communicator,
+                             DEAL_II_MPI_CONST_CAST(fname_tria.c_str()),
+                             MPI_MODE_RDONLY,
+                             info,
+                             &fh);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_Info_free(&info);
+        AssertThrowMPI(ierr);
+
+        // Read offsets from file.
+        unsigned int buffer_size;
+
+        ierr = MPI_File_read_at(fh,
+                                myrank * sizeof(unsigned int),
+                                DEAL_II_MPI_CONST_CAST(&buffer_size),
+                                1,
+                                MPI_UNSIGNED,
+                                MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        unsigned int offset = 0;
+
+        ierr = MPI_Exscan(&buffer_size,
+                          &offset,
+                          1,
+                          MPI_UNSIGNED,
+                          MPI_SUM,
+                          this->mpi_communicator);
+        AssertThrowMPI(ierr);
+
+        // Read buffers from file.
+        std::vector<char> buffer(buffer_size);
+        ierr = MPI_File_read_at(fh,
+                                mpisize * sizeof(unsigned int) +
+                                  offset, // global position in file
+                                DEAL_II_MPI_CONST_CAST(buffer.data()),
+                                buffer.size(), // local buffer
+                                MPI_CHAR,
+                                MPI_STATUS_IGNORE);
+        AssertThrowMPI(ierr);
+
+        ierr = MPI_File_close(&fh);
+        AssertThrowMPI(ierr);
+
+        auto construction_data = dealii::Utilities::template unpack<
+          TriangulationDescription::Description<dim, spacedim>>(buffer, false);
+
+        // WARNING: serialization cannot handle the MPI communicator
+        // which is the reason why we have to set it here explicitly
+        construction_data.comm = this->mpi_communicator;
+
+        this->create_triangulation(construction_data);
+      }
+
+      // clear all of the callback data, as explained in the documentation of
+      // register_data_attach()
+      this->cell_attached_data.n_attached_data_sets = 0;
+      this->cell_attached_data.n_attached_deserialize =
+        attached_count_fixed + attached_count_variable;
+
+      // Load attached cell data, if any was stored.
+      this->load_attached_data(global_first_cell,
+                               this->n_global_active_cells(),
+                               this->n_locally_owned_active_cells(),
+                               filename,
+                               attached_count_fixed,
+                               attached_count_variable);
+
+      this->update_cell_relations();
+      this->update_periodic_face_map();
+      this->update_number_cache();
+#else
       (void)filename;
       (void)autopartition;
-      AssertThrow(false, ExcNotImplemented());
+      AssertThrow(false, ExcNeedsMPI());
+#endif
     }
 
 
diff --git a/source/distributed/solution_transfer.cc b/source/distributed/solution_transfer.cc
index 779cbe1620..6f8ac3e08f 100644
--- a/source/distributed/solution_transfer.cc
+++ b/source/distributed/solution_transfer.cc
@@ -52,7 +52,7 @@ namespace
    *
    * Given that the elements of @p dof_values are stored in consecutive
    * locations, we can just memcpy them. Since floating point values don't
-   * compress well, we also forgo the compression the default
+   * compress well, we also waive the compression that the default
    * Utilities::pack() and Utilities::unpack() functions offer.
   */
  template <typename value_type>
@@ -121,8 +121,9 @@ namespace parallel
       , handle(numbers::invalid_unsigned_int)
     {
       Assert(
-        (dynamic_cast<const parallel::distributed::Triangulation<
-           dim, DoFHandlerType::space_dimension> *>(
+        (dynamic_cast<
+           const parallel::DistributedTriangulationBase<
+             dim, DoFHandlerType::space_dimension> *>(
           &dof_handler->get_triangulation()) != nullptr),
         ExcMessage(
           "parallel::distributed::SolutionTransfer requires a parallel::distributed::Triangulation object."));
@@ -147,12 +148,11 @@ namespace parallel
     SolutionTransfer<dim, VectorType, DoFHandlerType>::register_data_attach()
     {
       // TODO: casting away constness is bad
-      parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension>
-        *tria = (dynamic_cast<parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension> *>(
-          const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
-            *>(&dof_handler->get_triangulation())));
+      auto *tria = (dynamic_cast<parallel::DistributedTriangulationBase<dim, DoFHandlerType::space_dimension> *>(
+        const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
+          *>(&dof_handler->get_triangulation())));
       Assert(tria != nullptr, ExcInternalError());
 
       handle = tria->register_data_attach(
@@ -232,12 +232,11 @@ namespace parallel
              ExcDimensionMismatch(input_vectors.size(), all_out.size()));
 
       // TODO: casting away constness is bad
-      parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension>
-        *tria = (dynamic_cast<parallel::distributed::Triangulation<dim, DoFHandlerType::space_dimension> *>(
-          const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
-            *>(&dof_handler->get_triangulation())));
+      auto *tria = (dynamic_cast<parallel::DistributedTriangulationBase<dim, DoFHandlerType::space_dimension> *>(
+        const_cast<dealii::Triangulation<dim, DoFHandlerType::space_dimension>
+          *>(&dof_handler->get_triangulation())));
       Assert(tria != nullptr, ExcInternalError());
 
       tria->notify_ready_to_unpack(
diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc
index c8b4e24eee..b0d447cf9f 100644
--- a/source/distributed/tria.cc
+++ b/source/distributed/tria.cc
@@ -1427,7 +1427,7 @@ namespace parallel
       Assert(
         this->cell_attached_data.n_attached_deserialize == 0,
         ExcMessage(
-          "not all SolutionTransfer's got deserialized after the last load()"));
+          "Not all SolutionTransfer objects have been deserialized after the last call to load()."));
       Assert(this->n_cells() > 0,
              ExcMessage("Can not save() an empty Triangulation."));
 
diff --git a/tests/fullydistributed_grids/save_load_01.cc b/tests/fullydistributed_grids/save_load_01.cc
new file mode 100644
index 0000000000..c47123c6d0
--- /dev/null
+++ b/tests/fullydistributed_grids/save_load_01.cc
@@ -0,0 +1,103 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2008 - 2021 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test fullydistributed::Triangulation::load()/save().
+// Create a triangulation, save it and load it.
+// The initial and the loaded triangulations must be the same.
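+//
+// The flow is: build a p::d::Triangulation, convert it into a
+// p::f::Triangulation via create_description_from_triangulation(),
+// call save(), clear() the triangulation, call load(), and print
+// n_levels/n_cells/n_active_cells before and after so that the two
+// states can be compared in the expected output.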
+ +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#include "./tests.h" + +using namespace dealii; + +template +void +test(TriangulationType &triangulation) +{ + const std::string filename = "save_load_" + std::to_string(dim) + "d_out"; + + triangulation.save(filename); + deallog.push("save"); + print_statistics(triangulation, false); + deallog.pop(); + + triangulation.clear(); + + triangulation.load(filename); + deallog.push("load"); + print_statistics(triangulation, false); + deallog.pop(); +} + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + MPILogInitAll all; + + deallog.push("2d"); + { + constexpr int dim = 2; + + parallel::distributed::Triangulation triangulation(MPI_COMM_WORLD); + GridGenerator::hyper_cube(triangulation); + triangulation.refine_global(3); + + const auto description = TriangulationDescription::Utilities:: + create_description_from_triangulation(triangulation, MPI_COMM_WORLD); + + parallel::fullydistributed::Triangulation triangulation_pft( + MPI_COMM_WORLD); + triangulation_pft.create_triangulation(description); + + test(triangulation_pft); + } + deallog.pop(); + + deallog.push("3d"); + { + constexpr int dim = 3; + + parallel::distributed::Triangulation triangulation(MPI_COMM_WORLD); + GridGenerator::hyper_cube(triangulation); + triangulation.refine_global(3); + + const auto description = TriangulationDescription::Utilities:: + create_description_from_triangulation(triangulation, MPI_COMM_WORLD); + + parallel::fullydistributed::Triangulation triangulation_pft( + MPI_COMM_WORLD); + triangulation_pft.create_triangulation(description); + + test(triangulation_pft); + } + deallog.pop(); +} diff --git a/tests/fullydistributed_grids/save_load_01.mpirun=1.output b/tests/fullydistributed_grids/save_load_01.mpirun=1.output new file mode 100644 index 0000000000..7629869469 --- /dev/null +++ b/tests/fullydistributed_grids/save_load_01.mpirun=1.output @@ -0,0 +1,17 @@ + +DEAL:0:2d:save::n_levels: 4 +DEAL:0:2d:save::n_cells: 85 +DEAL:0:2d:save::n_active_cells: 64 +DEAL:0:2d:save:: +DEAL:0:2d:load::n_levels: 4 +DEAL:0:2d:load::n_cells: 85 +DEAL:0:2d:load::n_active_cells: 64 +DEAL:0:2d:load:: +DEAL:0:3d:save::n_levels: 4 +DEAL:0:3d:save::n_cells: 585 +DEAL:0:3d:save::n_active_cells: 512 +DEAL:0:3d:save:: +DEAL:0:3d:load::n_levels: 4 +DEAL:0:3d:load::n_cells: 585 +DEAL:0:3d:load::n_active_cells: 512 +DEAL:0:3d:load:: diff --git a/tests/fullydistributed_grids/save_load_01.mpirun=4.output b/tests/fullydistributed_grids/save_load_01.mpirun=4.output new file mode 100644 index 0000000000..48def76790 --- /dev/null +++ b/tests/fullydistributed_grids/save_load_01.mpirun=4.output @@ -0,0 +1,71 @@ + +DEAL:0:2d:save::n_levels: 4 +DEAL:0:2d:save::n_cells: 57 +DEAL:0:2d:save::n_active_cells: 43 +DEAL:0:2d:save:: +DEAL:0:2d:load::n_levels: 4 +DEAL:0:2d:load::n_cells: 57 +DEAL:0:2d:load::n_active_cells: 43 +DEAL:0:2d:load:: +DEAL:0:3d:save::n_levels: 4 +DEAL:0:3d:save::n_cells: 361 +DEAL:0:3d:save::n_active_cells: 316 +DEAL:0:3d:save:: +DEAL:0:3d:load::n_levels: 4 +DEAL:0:3d:load::n_cells: 361 +DEAL:0:3d:load::n_active_cells: 316 +DEAL:0:3d:load:: + +DEAL:1:2d:save::n_levels: 4 +DEAL:1:2d:save::n_cells: 57 +DEAL:1:2d:save::n_active_cells: 43 +DEAL:1:2d:save:: +DEAL:1:2d:load::n_levels: 4 +DEAL:1:2d:load::n_cells: 57 +DEAL:1:2d:load::n_active_cells: 43 +DEAL:1:2d:load:: +DEAL:1:3d:save::n_levels: 4 +DEAL:1:3d:save::n_cells: 361 +DEAL:1:3d:save::n_active_cells: 316 +DEAL:1:3d:save:: 
+DEAL:1:3d:load::n_levels: 4 +DEAL:1:3d:load::n_cells: 361 +DEAL:1:3d:load::n_active_cells: 316 +DEAL:1:3d:load:: + + +DEAL:2:2d:save::n_levels: 4 +DEAL:2:2d:save::n_cells: 57 +DEAL:2:2d:save::n_active_cells: 43 +DEAL:2:2d:save:: +DEAL:2:2d:load::n_levels: 4 +DEAL:2:2d:load::n_cells: 57 +DEAL:2:2d:load::n_active_cells: 43 +DEAL:2:2d:load:: +DEAL:2:3d:save::n_levels: 4 +DEAL:2:3d:save::n_cells: 361 +DEAL:2:3d:save::n_active_cells: 316 +DEAL:2:3d:save:: +DEAL:2:3d:load::n_levels: 4 +DEAL:2:3d:load::n_cells: 361 +DEAL:2:3d:load::n_active_cells: 316 +DEAL:2:3d:load:: + + +DEAL:3:2d:save::n_levels: 4 +DEAL:3:2d:save::n_cells: 57 +DEAL:3:2d:save::n_active_cells: 43 +DEAL:3:2d:save:: +DEAL:3:2d:load::n_levels: 4 +DEAL:3:2d:load::n_cells: 57 +DEAL:3:2d:load::n_active_cells: 43 +DEAL:3:2d:load:: +DEAL:3:3d:save::n_levels: 4 +DEAL:3:3d:save::n_cells: 361 +DEAL:3:3d:save::n_active_cells: 316 +DEAL:3:3d:save:: +DEAL:3:3d:load::n_levels: 4 +DEAL:3:3d:load::n_cells: 361 +DEAL:3:3d:load::n_active_cells: 316 +DEAL:3:3d:load:: + diff --git a/tests/fullydistributed_grids/solution_transfer_01.cc b/tests/fullydistributed_grids/solution_transfer_01.cc new file mode 100644 index 0000000000..1750f1a1c1 --- /dev/null +++ b/tests/fullydistributed_grids/solution_transfer_01.cc @@ -0,0 +1,152 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2008 - 2021 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + + +// Test distributed solution transfer with fullydistributed triangulations. 
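+//
+// The flow is: interpolate a radial function into a
+// LinearAlgebra::distributed::Vector, register it with
+// parallel::distributed::SolutionTransfer::prepare_for_serialization(),
+// save() the p::f::Triangulation, clear() and load() it again,
+// deserialize() into a second vector, and check that the difference
+// between the original and the loaded vector is zero.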
+ +#include +#include + +#include +#include + +#include + +#include +#include + +#include + +#include + +#include "../tests.h" + +using namespace dealii; + +template +class InterpolationFunction : public Function +{ +public: + InterpolationFunction() + : Function(1) + {} + + virtual double + value(const Point &p, const unsigned int component = 0) const + { + return p.norm(); + } +}; + +template +void +test(TriangulationType &triangulation) +{ + DoFHandler dof_handler(triangulation); + dof_handler.distribute_dofs(FE_Q(2)); + + IndexSet locally_relevant_dofs; + DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs); + + using VectorType = LinearAlgebra::distributed::Vector; + + std::shared_ptr partitioner = + std::make_shared( + dof_handler.locally_owned_dofs(), locally_relevant_dofs, MPI_COMM_WORLD); + + VectorType vector(partitioner); + + VectorTools::interpolate(dof_handler, InterpolationFunction(), vector); + vector.update_ghost_values(); + + VectorType vector_loaded(partitioner); + + const std::string filename = + "solution_transfer_" + std::to_string(dim) + "d_out"; + + { + parallel::distributed::SolutionTransfer solution_transfer( + dof_handler); + solution_transfer.prepare_for_serialization(vector); + + triangulation.save(filename); + } + + triangulation.clear(); + + { + triangulation.load(filename); + + parallel::distributed::SolutionTransfer solution_transfer( + dof_handler); + solution_transfer.deserialize(vector_loaded); + + vector_loaded.update_ghost_values(); + } + + // Verify that error is 0. + VectorType error(vector); + error.add(-1, vector_loaded); + + deallog << (error.linfty_norm() < 1e-16 ? "PASSED" : "FAILED") << std::endl; +} + + +int +main(int argc, char **argv) +{ + initlog(); + + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + deallog.push("2d"); + { + constexpr int dim = 2; + + parallel::distributed::Triangulation triangulation(MPI_COMM_WORLD); + GridGenerator::hyper_cube(triangulation); + triangulation.refine_global(3); + + const auto description = TriangulationDescription::Utilities:: + create_description_from_triangulation(triangulation, MPI_COMM_WORLD); + + parallel::fullydistributed::Triangulation triangulation_pft( + MPI_COMM_WORLD); + triangulation_pft.create_triangulation(description); + + test(triangulation_pft); + } + deallog.pop(); + + deallog.push("3d"); + { + constexpr int dim = 3; + + parallel::distributed::Triangulation triangulation(MPI_COMM_WORLD); + GridGenerator::hyper_cube(triangulation); + triangulation.refine_global(3); + + const auto description = TriangulationDescription::Utilities:: + create_description_from_triangulation(triangulation, MPI_COMM_WORLD); + + parallel::fullydistributed::Triangulation triangulation_pft( + MPI_COMM_WORLD); + triangulation_pft.create_triangulation(description); + + test(triangulation_pft); + } + deallog.pop(); +} diff --git a/tests/fullydistributed_grids/solution_transfer_01.mpirun=1.output b/tests/fullydistributed_grids/solution_transfer_01.mpirun=1.output new file mode 100644 index 0000000000..7e803b74e4 --- /dev/null +++ b/tests/fullydistributed_grids/solution_transfer_01.mpirun=1.output @@ -0,0 +1,3 @@ + +DEAL:2d::PASSED +DEAL:3d::PASSED diff --git a/tests/fullydistributed_grids/solution_transfer_01.mpirun=4.output b/tests/fullydistributed_grids/solution_transfer_01.mpirun=4.output new file mode 100644 index 0000000000..7e803b74e4 --- /dev/null +++ b/tests/fullydistributed_grids/solution_transfer_01.mpirun=4.output @@ -0,0 +1,3 @@ + +DEAL:2d::PASSED 
+DEAL:3d::PASSED
-- 
2.39.5