public:
using VectorType = LinearAlgebra::distributed::Vector<Number>;
+ /**
+ * Default constructor.
+ */
+ MGTwoLevelTransferBase();
+
/**
* Perform prolongation.
*/
const std::shared_ptr<const Utilities::MPI::Partitioner>
&partitioner_coarse,
const std::shared_ptr<const Utilities::MPI::Partitioner> &partitioner_fine,
+ bool &vec_fine_needs_ghost_update,
internal::MatrixFreeFunctions::ConstraintInfo<
dim,
VectorizedArray<Number, width>> &constraint_info_coarse,
std::shared_ptr<const Utilities::MPI::Partitioner> partitioner_fine;
protected:
+ /**
+ * Internal vector on which the actual prolongation/restriction is performed.
+ */
+ mutable LinearAlgebra::distributed::Vector<Number> vec_coarse;
+
/**
* Internal vector needed for collecting all degrees of freedom of the fine
* cells. It is only initialized if the fine-level DoF indices touch DoFs
mutable LinearAlgebra::distributed::Vector<Number> vec_fine;
/**
- * Internal vector on that the actual prolongation/restriction is performed.
+ * Bool indicating whether the fine vector has relevant ghost values and
+ * therefore needs ghost-value exchange during prolongation and restriction.
*/
- mutable LinearAlgebra::distributed::Vector<Number> vec_coarse;
+ bool vec_fine_needs_ghost_update;
/**
* Embedded partitioner for efficient communication if locally relevant DoFs
+template <typename Number>
+MGTwoLevelTransferBase<
+ LinearAlgebra::distributed::Vector<Number>>::MGTwoLevelTransferBase()
+ : vec_fine_needs_ghost_update(true)
+{}
+
+
+
template <typename Number>
void
MGTwoLevelTransferBase<LinearAlgebra::distributed::Vector<Number>>::
this->prolongate_and_add_internal(*vec_fine_ptr, *vec_coarse_ptr);
- if (fine_element_is_continuous || use_dst_inplace == false)
+ if (this->vec_fine_needs_ghost_update || use_dst_inplace == false)
this->compress(*vec_fine_ptr, VectorOperation::add);
if (use_dst_inplace == false)
this->vec_fine.copy_locally_owned_data_from(src);
if ((use_src_inplace == false) ||
- (fine_element_is_continuous && (src_ghosts_have_been_set == false)))
+ (vec_fine_needs_ghost_update && (src_ghosts_have_been_set == false)))
this->update_ghost_values(*vec_fine_ptr);
if (use_dst_inplace == false)
this->restrict_and_add_internal(*vec_coarse_ptr, *vec_fine_ptr);
// clean up related to update_ghost_values()
- if (fine_element_is_continuous == false && use_src_inplace == false)
+ if (vec_fine_needs_ghost_update == false && use_src_inplace == false)
this->zero_out_ghost_values(*vec_fine_ptr); // internal vector (DG)
- else if (fine_element_is_continuous && use_src_inplace == false)
+ else if (vec_fine_needs_ghost_update && use_src_inplace == false)
vec_fine_ptr->set_ghost_state(false); // internal vector (CG)
- else if (fine_element_is_continuous && (src_ghosts_have_been_set == false))
+ else if (vec_fine_needs_ghost_update && (src_ghosts_have_been_set == false))
this->zero_out_ghost_values(*vec_fine_ptr); // external vector
this->compress(*vec_coarse_ptr, VectorOperation::add);
if (use_src_inplace == false)
this->vec_fine.copy_locally_owned_data_from(src);
- if ((use_src_inplace == false) ||
- (this->fine_element_is_continuous && (src_ghosts_have_been_set == false)))
+ if ((use_src_inplace == false) || (this->vec_fine_needs_ghost_update &&
+ (src_ghosts_have_been_set == false)))
this->update_ghost_values(*vec_fine_ptr);
*vec_coarse_ptr = 0.0;
const std::shared_ptr<const Utilities::MPI::Partitioner>
&external_partitioner_coarse,
const std::shared_ptr<const Utilities::MPI::Partitioner>
- &external_partitioner_fine,
+ &external_partitioner_fine,
+ bool &vec_fine_needs_ghost_update,
internal::MatrixFreeFunctions::ConstraintInfo<
dim,
VectorizedArray<Number, width>> &constraint_info_coarse,
this->partitioner_coarse = external_partitioner_coarse;
}
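+  // check on all processes whether the fine partitioner has any ghost
+  // indices; only in that case does the fine vector need ghost-value
+  // updates during prolongation and restriction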
+  vec_fine_needs_ghost_update =
+    Utilities::MPI::max(this->partitioner_fine->ghost_indices().n_elements(),
+                        this->partitioner_fine->get_mpi_communicator()) != 0;
+
if (this->partitioner_fine->is_globally_compatible(
*external_partitioner_fine))
{
this->internal_enable_inplace_operations_if_possible(
external_partitioner_coarse,
external_partitioner_fine,
+ this->vec_fine_needs_ghost_update,
constraint_info_coarse,
constraint_info_fine.dof_indices);
}
this->internal_enable_inplace_operations_if_possible(
external_partitioner_coarse,
external_partitioner_fine,
+ this->vec_fine_needs_ghost_update,
constraint_info,
this->level_dof_indices_fine);
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+/**
+ * Test the fix for a bug (missing ghost-value update in the case of DG
+ * elements) exposed in https://github.com/dealii/dealii/pull/16049.
+ */
+
+#include <deal.II/base/partitioner.h>
+
+#include <deal.II/distributed/shared_tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_dgq.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/multigrid/mg_transfer_global_coarsening.h>
+
+#include "../tests.h"
+
+int
+main(int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+ MPILogInitAll all;
+
+ const int dim = 2;
+ using Number = double;
+
+ // fine mesh
+ parallel::shared::Triangulation<dim> tria_fine(
+ MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ true,
+ parallel::shared::Triangulation<dim>::partition_custom_signal);
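+  // custom partitioning: assign cell i of the fine mesh to process i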
+ tria_fine.signals.create.connect([&]() {
+ for (const auto &cell : tria_fine.active_cell_iterators())
+ cell->set_subdomain_id(cell->active_cell_index());
+ });
+ GridGenerator::subdivided_hyper_rectangle(tria_fine,
+ {2, 1},
+ Point<dim>(0, 0),
+ Point<dim>(2, 1));
+
+ DoFHandler<dim> dof_handler_fine(tria_fine);
+ dof_handler_fine.distribute_dofs(FE_DGQ<dim>(1));
+
+ // coarse mesh
+ parallel::shared::Triangulation<dim> tria_coarse(
+ MPI_COMM_WORLD,
+ ::Triangulation<dim>::none,
+ true,
+ parallel::shared::Triangulation<dim>::partition_custom_signal);
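+  // custom partitioning: assign cell i of the coarse mesh to process 1 - i,
+  // i.e., the opposite owner compared to the fine mesh, so that the level
+  // transfer requires data exchange between the processes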
+ tria_coarse.signals.create.connect([&]() {
+ for (const auto &cell : tria_coarse.active_cell_iterators())
+ cell->set_subdomain_id(1 - cell->active_cell_index());
+ });
+ GridGenerator::subdivided_hyper_rectangle(tria_coarse,
+ {2, 1},
+ Point<dim>(0, 0),
+ Point<dim>(2, 1));
+
+ DoFHandler<dim> dof_handler_coarse(tria_coarse);
+ dof_handler_coarse.distribute_dofs(FE_DGQ<dim>(1));
+
+ // set up two-level transfer
+ MGTwoLevelTransfer<dim, LinearAlgebra::distributed::Vector<Number>> transfer;
+ transfer.reinit(dof_handler_fine, dof_handler_coarse);
+
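+  // external partitioners in which every DoF is relevant on every process,
+  // so that the vectors carry ghost entries for all non-owned DoFs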
+ IndexSet is_all(dof_handler_coarse.n_dofs());
+ is_all.add_range(0, dof_handler_coarse.n_dofs());
+
+ const auto partitioner_coarse = std::make_shared<Utilities::MPI::Partitioner>(
+ dof_handler_coarse.locally_owned_dofs(), is_all, MPI_COMM_WORLD);
+
+ const auto partitioner_fine = std::make_shared<Utilities::MPI::Partitioner>(
+ dof_handler_fine.locally_owned_dofs(), is_all, MPI_COMM_WORLD);
+
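+  // let the transfer operator work directly on vectors based on these
+  // partitioners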
+ transfer.enable_inplace_operations_if_possible(partitioner_coarse,
+ partitioner_fine);
+
+ LinearAlgebra::distributed::Vector<Number> vec_fine(partitioner_fine);
+ LinearAlgebra::distributed::Vector<Number> vec_coarse(partitioner_coarse);
+
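+  // perform restriction from the fine to the coarse vector; this requires
+  // the ghost values of vec_fine to be updated also for the DG element used
+  // here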
+ transfer.restrict_and_add(vec_coarse, vec_fine);
+
+ deallog << "OK!" << std::endl;
+}
--- /dev/null
+
+DEAL:0::OK!
+
+DEAL:1::OK!
+