protected:
+ /**
+ * Internal function that performs the transfer of residuals or solutions,
+ * depending on the flag @p solution_transfer.
+ */
+ template <int dim, typename Number2, int spacedim>
+ void
+ copy_to_mg (const DoFHandler<dim,spacedim> &mg_dof,
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src,
+ const bool solution_transfer) const;
+
+
/**
* Internal function to fill the copy_indices* arrays. Called by derived classes.
*/
std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
copy_indices;
+
+ /**
+ * Same as above, but used to transfer solution vectors.
+ */
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ solution_copy_indices;
+
/**
* Additional degrees of freedom for the copy_to_mg() function. These are
* the ones where the global degree of freedom is locally owned and the
* level degree of freedom is not.
*/
std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
copy_indices_global_mine;
+ /**
+ * Same as above, but used to transfer solution vectors.
+ */
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ solution_copy_indices_global_mine;
+
/**
* Additional degrees of freedom for the copy_from_mg() function. These are
* the ones where the level degree of freedom is locally owned and the
* global degree of freedom is not.
*/
std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
copy_indices_level_mine;
+ /**
+ * Same as above, but used to transfer solution vectors.
+ */
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > >
+ solution_copy_indices_level_mine;
+
/**
* Stores whether the copy operation from the global to the level vector is
* actually a plain copy to the finest level. This means that the grid has
* no adaptive refinement and the numbering of the finest level and of the
* global degrees of freedom coincides.
*/
mutable LinearAlgebra::distributed::Vector<Number> ghosted_global_vector;
+ /**
+ * Same as above but used when working with solution vectors.
+ */
+ mutable LinearAlgebra::distributed::Vector<Number> solution_ghosted_global_vector;
+
/**
* In the function copy_from_mg, we access all level vectors with certain
* ghost entries for inserting the result into a global vector.
*/
mutable MGLevelObject<LinearAlgebra::distributed::Vector<Number> > ghosted_level_vector;
+
+ /**
+ * Same as above but used when working with solution vectors.
+ */
+ mutable MGLevelObject<LinearAlgebra::distributed::Vector<Number> > solution_ghosted_level_vector;
+
};
MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
const LinearAlgebra::distributed::Vector<Number2> &src) const
{
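+  // public entry point for residual transfer: forward to the internal
+  // version with solution_transfer == false; interpolate_to_mg() uses
+  // the solution path with solution_transfer == true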
+ copy_to_mg (mg_dof_handler,
+ dst,
+ src,
+ false);
+}
+
+
+template <typename Number>
+template <int dim, typename Number2, int spacedim>
+void
+MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::copy_to_mg
+(const DoFHandler<dim,spacedim> &mg_dof_handler,
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src,
+ const bool solution_transfer) const
+{
+ LinearAlgebra::distributed::Vector<Number> &this_ghosted_global_vector =
+ solution_transfer ? solution_ghosted_global_vector : ghosted_global_vector;
+ const std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &this_copy_indices =
+ solution_transfer ? solution_copy_indices : copy_indices;
+ const std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &this_copy_indices_level_mine =
+ solution_transfer ? solution_copy_indices_level_mine : copy_indices_level_mine;
+
AssertIndexRange(dst.max_level(), mg_dof_handler.get_triangulation().n_global_levels());
AssertIndexRange(dst.min_level(), dst.max_level()+1);
reinit_vector(mg_dof_handler, component_to_block_map, dst);
// copy the source vector to the temporary vector that we hold for the
// purpose of data exchange
- ghosted_global_vector = src;
- ghosted_global_vector.update_ghost_values();
+ this_ghosted_global_vector = src;
+ this_ghosted_global_vector.update_ghost_values();
for (unsigned int level=dst.max_level()+1; level != dst.min_level();)
{
--level;
LinearAlgebra::distributed::Vector<Number> &dst_level = dst[level];
// first copy local unknowns
- for (dof_pair_iterator i = copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first);
+ for (dof_pair_iterator i = this_copy_indices[level].begin();
+ i != this_copy_indices[level].end(); ++i)
+ dst_level.local_element(i->second) = this_ghosted_global_vector.local_element(i->first);
// Do the same for the indices where the level index is local, but the
// global index is not
- for (dof_pair_iterator i = copy_indices_level_mine[level].begin();
- i != copy_indices_level_mine[level].end(); ++i)
- dst_level.local_element(i->second) = ghosted_global_vector.local_element(i->first);
+ for (dof_pair_iterator i = this_copy_indices_level_mine[level].begin();
+ i != this_copy_indices_level_mine[level].end(); ++i)
+ dst_level.local_element(i->second) = this_ghosted_global_vector.local_element(i->first);
dst_level.compress(VectorOperation::insert);
}
/**
* Internal function for filling the copy indices from global to level
* indices
+ *
+ * If @p skip_interface_dofs is false, the mapping will also contain
+ * DoFs at the interface between levels. This is desirable when
+ * transferring solution vectors instead of residuals.
*/
template <int dim, int spacedim>
void fill_copy_indices(const dealii::DoFHandler<dim,spacedim> &mg_dof,
const MGConstrainedDoFs *mg_constrained_dofs,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_global_mine,
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine);
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine,
+ const bool skip_interface_dofs = true);
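+
+// Illustrative sketch (not part of this patch): a caller that needs the
+// interface DoFs kept, as in solution transfer, would pass the new flag
+// explicitly; the argument names here are assumed:
+//
+//   internal::MGTransfer::fill_copy_indices(mg_dof, &mg_constrained_dofs,
+//                                           copy_indices,
+//                                           copy_indices_global_mine,
+//                                           copy_indices_level_mine,
+//                                           /*skip_interface_dofs=*/false);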
#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/base/mg_level_object.h>
#include <deal.II/multigrid/mg_transfer.h>
+#include <deal.II/multigrid/mg_transfer_internal.h>
#include <deal.II/base/vectorization.h>
#include <deal.II/dofs/dof_handler.h>
LinearAlgebra::distributed::Vector<Number> &dst,
const LinearAlgebra::distributed::Vector<Number> &src) const;
+ /**
+ * Restrict the fine-mesh field @p src to each multigrid level in @p mg_dof
+ * and store the result in @p dst.
+ *
+ * If @p dst is empty or has the wrong locally owned size, it will be
+ * reinitialized on each level with ghosted vectors that include the
+ * locally relevant degrees of freedom.
+ */
+ template <typename Number2, int spacedim>
+ void interpolate_to_mg(const DoFHandler<dim,spacedim> &mg_dof,
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src) const;
+
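+ // Usage sketch (illustrative only; mirrors the test added by this patch,
+ // 'fine_solution' is an assumed fine-level vector):
+ //
+ //   MGTransferMatrixFree<dim,Number> transfer(mg_constrained_dofs);
+ //   transfer.build(dof_handler);
+ //   MGLevelObject<LinearAlgebra::distributed::Vector<Number>> levels
+ //     (0, dof_handler.get_triangulation().n_global_levels()-1);
+ //   transfer.interpolate_to_mg(dof_handler, levels, fine_solution);
+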
/**
* Finite element does not provide prolongation matrices.
*/
//------------------------ templated functions -------------------------
#ifndef DOXYGEN
+
+template <int dim, typename Number>
+template <typename Number2, int spacedim>
+void
+MGTransferMatrixFree<dim,Number>::
+interpolate_to_mg(const DoFHandler<dim,spacedim> &dof_handler,
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number>> &dst,
+ const LinearAlgebra::distributed::Vector<Number2> &src) const
+{
+ const unsigned int min_level = dst.min_level();
+ const unsigned int max_level = dst.max_level();
+
+ Assert (max_level == dof_handler.get_triangulation().n_global_levels()-1,
+ ExcDimensionMismatch(max_level,dof_handler.get_triangulation().n_global_levels()-1));
+
+ const parallel::Triangulation<dim,spacedim> *p_tria =
+ (dynamic_cast<const parallel::Triangulation<dim,spacedim>*>
+ (&dof_handler.get_triangulation()));
+ MPI_Comm mpi_communicator = p_tria != nullptr ? p_tria->get_communicator() : MPI_COMM_SELF;
+
+ // resize the dst vector if it's empty or has incorrect size
+ MGLevelObject<IndexSet> relevant_dofs(min_level,max_level);
+ for (unsigned int level = min_level; level <= max_level; ++level)
+ {
+ DoFTools::extract_locally_relevant_level_dofs(dof_handler, level,
+ relevant_dofs[level]);
+ if (dst[level].size() != dof_handler.locally_owned_mg_dofs(level).size() ||
+ dst[level].local_size() != dof_handler.locally_owned_mg_dofs(level).n_elements())
+ dst[level].reinit(dof_handler.locally_owned_mg_dofs(level),
+ relevant_dofs[level],
+ mpi_communicator);
+ }
+
+ const FiniteElement<dim,spacedim> &fe = dof_handler.get_fe();
+
+ // copy fine level vector to active cells in MG hierarchy
+ this->copy_to_mg (dof_handler, dst, src, true);
+
+ // FIXME: maybe we need to store hanging-node constraints per level?
+ // MGConstrainedDoFs does NOT keep this information right now, only
+ // periodicity constraints...
+ dst[max_level].update_ghost_values();
+ // do the transfer from level to level-1:
+ for (unsigned int level=max_level; level > min_level; --level)
+ {
+ // auxiliary vector which always has ghost elements
+ LinearAlgebra::distributed::Vector<Number>
+ ghosted_vector(dof_handler.locally_owned_mg_dofs(level),
+ relevant_dofs[level],
+ mpi_communicator);
+ ghosted_vector = dst[level];
+ ghosted_vector.update_ghost_values();
+
+ std::vector<Number> dof_values_coarse(fe.dofs_per_cell);
+ Vector<Number> dof_values_fine(fe.dofs_per_cell);
+ Vector<Number> tmp(fe.dofs_per_cell);
+ std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
+ typename DoFHandler<dim,spacedim>::cell_iterator cell=dof_handler.begin(level-1);
+ typename DoFHandler<dim,spacedim>::cell_iterator endc=dof_handler.end(level-1);
+ for ( ; cell != endc; ++cell)
+ if (cell->is_locally_owned_on_level())
+ {
+ // if we get to a cell without children (== active), we can
+ // skip it as its values should already have been set by the
+ // equivalent of copy_to_mg()
+ if (!cell->has_children())
+ continue;
+
+ std::fill(dof_values_coarse.begin(), dof_values_coarse.end(), 0.);
+ for (unsigned int child=0; child<cell->n_children(); ++child)
+ {
+ cell->child(child)->get_mg_dof_indices(dof_indices);
+ for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
+ dof_values_fine(i) = ghosted_vector(dof_indices[i]);
+ fe.get_restriction_matrix(child, cell->refinement_case()).vmult (tmp, dof_values_fine);
+ for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
+ if (fe.restriction_is_additive(i))
+ dof_values_coarse[i] += tmp(i);
+ else if (tmp(i) != 0.)
+ dof_values_coarse[i] = tmp(i);
+ }
+ cell->get_mg_dof_indices(dof_indices);
+ for (unsigned int i=0; i<fe.dofs_per_cell; ++i)
+ dst[level-1](dof_indices[i]) = dof_values_coarse[i];
+ }
+
+ dst[level-1].compress(VectorOperation::insert);
+ dst[level-1].update_ghost_values();
+ }
+}
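+
+// For reference (an illustrative summary of the loop above): the coarse
+// values are assembled from the children via the element restriction
+// matrices R_c,
+//
+//   u_coarse(i) = sum_c (R_c u_child_c)(i)   if restriction_is_additive(i),
+//   u_coarse(i) = (R_c u_child_c)(i)         otherwise (any nonzero child
+//                                            contribution sets the value).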
+
+
+
template <int dim, typename Number>
template <typename Number2, int spacedim>
void
/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
+namespace
+{
+ template <int dim, int spacedim, typename Number>
+ void fill_internal(const DoFHandler<dim,spacedim> &mg_dof,
+ SmartPointer<const MGConstrainedDoFs, MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> > > mg_constrained_dofs,
+ const MPI_Comm mpi_communicator,
+ const bool transfer_solution_vectors,
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices,
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices_global_mine,
+ std::vector<std::vector<std::pair<unsigned int, unsigned int> > > &copy_indices_level_mine,
+ LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
+ MGLevelObject<LinearAlgebra::distributed::Vector<Number> > &ghosted_level_vector)
+ {
+ // first go to the usual routine...
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+ my_copy_indices;
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+ my_copy_indices_global_mine;
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+ my_copy_indices_level_mine;
+
+ internal::MGTransfer::fill_copy_indices(mg_dof, mg_constrained_dofs, my_copy_indices,
+ my_copy_indices_global_mine, my_copy_indices_level_mine,
+ !transfer_solution_vectors);
+
+ // get all degrees of freedom that we need read access to in copy_to_mg
+ // and copy_from_mg, respectively. We fill an IndexSet once on each level
+ // (for the global_mine indices accessing remote level indices) and once
+ // globally (for the level_mine indices accessing remote global indices).
+
+ // the variables index_set and level_index_set are going to define the
+ // ghost indices of the respective vectors (due to construction, these are
+ // precisely the indices that we need)
+
+ IndexSet index_set(mg_dof.locally_owned_dofs().size());
+ std::vector<types::global_dof_index> accessed_indices;
+ ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1);
+ std::vector<IndexSet> level_index_set(mg_dof.get_triangulation().n_global_levels());
+ for (unsigned int l=0; l<mg_dof.get_triangulation().n_global_levels(); ++l)
+ {
+ for (unsigned int i=0; i<my_copy_indices_level_mine[l].size(); ++i)
+ accessed_indices.push_back(my_copy_indices_level_mine[l][i].first);
+ std::vector<types::global_dof_index> accessed_level_indices;
+ for (unsigned int i=0; i<my_copy_indices_global_mine[l].size(); ++i)
+ accessed_level_indices.push_back(my_copy_indices_global_mine[l][i].second);
+ std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
+ level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
+ level_index_set[l].add_indices(accessed_level_indices.begin(),
+ accessed_level_indices.end());
+ level_index_set[l].compress();
+ ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
+ level_index_set[l],
+ mpi_communicator);
+ }
+ std::sort(accessed_indices.begin(), accessed_indices.end());
+ index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
+ index_set.compress();
+ ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
+ index_set,
+ mpi_communicator);
+
+ // localize the copy indices for faster access: since all access will go
+ // through the ghosted vectors set up above, we can store local instead
+ // of global index pairs (a much faster option)
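+ // e.g. (illustrative): a pair of global numbers (g, l) is stored as the
+ // local pair (global_partitioner.global_to_local(g),
+ // level_partitioner.global_to_local(l)) below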
+ copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
+ copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels());
+ copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels());
+ for (unsigned int level=0; level<mg_dof.get_triangulation().n_global_levels(); ++level)
+ {
+ const Utilities::MPI::Partitioner &global_partitioner =
+ *ghosted_global_vector.get_partitioner();
+ const Utilities::MPI::Partitioner &level_partitioner =
+ *ghosted_level_vector[level].get_partitioner();
+ // owned-owned case: the locally owned indices are going to control
+ // the local index
+ copy_indices[level].resize(my_copy_indices[level].size());
+ for (unsigned int i=0; i<my_copy_indices[level].size(); ++i)
+ copy_indices[level][i] =
+ std::pair<unsigned int,unsigned int>
+ (global_partitioner.global_to_local(my_copy_indices[level][i].first),
+ level_partitioner.global_to_local(my_copy_indices[level][i].second));
+
+ // remote-owned case: the locally owned indices for the level and the
+ // ghost dofs for the global indices set the local index
+ copy_indices_level_mine[level].
+ resize(my_copy_indices_level_mine[level].size());
+ for (unsigned int i=0; i<my_copy_indices_level_mine[level].size(); ++i)
+ copy_indices_level_mine[level][i] =
+ std::pair<unsigned int,unsigned int>
+ (global_partitioner.global_to_local(my_copy_indices_level_mine[level][i].first),
+ level_partitioner.global_to_local(my_copy_indices_level_mine[level][i].second));
+
+ // owned-remote case: the locally owned indices for the global dofs
+ // and the ghost dofs for the level indices set the local index
+ copy_indices_global_mine[level].
+ resize(my_copy_indices_global_mine[level].size());
+ for (unsigned int i=0; i<my_copy_indices_global_mine[level].size(); ++i)
+ copy_indices_global_mine[level][i] =
+ std::pair<unsigned int,unsigned int>
+ (global_partitioner.global_to_local(my_copy_indices_global_mine[level][i].first),
+ level_partitioner.global_to_local(my_copy_indices_global_mine[level][i].second));
+ }
+ }
+}
template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number> >::fill_and_communicate_copy_indices
(const DoFHandler<dim,spacedim> &mg_dof)
{
- // first go to the usual routine...
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
- my_copy_indices;
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
- my_copy_indices_global_mine;
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
- my_copy_indices_level_mine;
-
- internal::MGTransfer::fill_copy_indices(mg_dof, mg_constrained_dofs, my_copy_indices,
- my_copy_indices_global_mine, my_copy_indices_level_mine);
-
- // get all degrees of freedom that we need read access to in copy_to_mg
- // and copy_from_mg, respectively. We fill an IndexSet once on each level
- // (for the global_mine indices accessing remote level indices) and once
- // globally (for the level_mine indices accessing remote global indices).
-
- // the variables index_set and level_index_set are going to define the
- // ghost indices of the respective vectors (due to construction, these are
- // precisely the indices that we need)
+
const parallel::Triangulation<dim, spacedim> *ptria =
dynamic_cast<const parallel::Triangulation<dim, spacedim> *>
(&mg_dof.get_triangulation());
const MPI_Comm mpi_communicator = ptria != nullptr ? ptria->get_communicator() :
MPI_COMM_SELF;
- IndexSet index_set(mg_dof.locally_owned_dofs().size());
- std::vector<types::global_dof_index> accessed_indices;
- ghosted_level_vector.resize(0, mg_dof.get_triangulation().n_global_levels()-1);
- std::vector<IndexSet> level_index_set(mg_dof.get_triangulation().n_global_levels());
- for (unsigned int l=0; l<mg_dof.get_triangulation().n_global_levels(); ++l)
- {
- for (unsigned int i=0; i<my_copy_indices_level_mine[l].size(); ++i)
- accessed_indices.push_back(my_copy_indices_level_mine[l][i].first);
- std::vector<types::global_dof_index> accessed_level_indices;
- for (unsigned int i=0; i<my_copy_indices_global_mine[l].size(); ++i)
- accessed_level_indices.push_back(my_copy_indices_global_mine[l][i].second);
- std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
- level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
- level_index_set[l].add_indices(accessed_level_indices.begin(),
- accessed_level_indices.end());
- level_index_set[l].compress();
- ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
- level_index_set[l],
- mpi_communicator);
- }
- std::sort(accessed_indices.begin(), accessed_indices.end());
- index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
- index_set.compress();
- ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
- index_set,
- mpi_communicator);
-
- // localize the copy indices for faster access. Since all access will be
- // through the ghosted vector in 'data', we can use this (much faster)
- // option
- this->copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
- this->copy_indices_level_mine.resize(mg_dof.get_triangulation().n_global_levels());
- this->copy_indices_global_mine.resize(mg_dof.get_triangulation().n_global_levels());
- for (unsigned int level=0; level<mg_dof.get_triangulation().n_global_levels(); ++level)
- {
- const Utilities::MPI::Partitioner &global_partitioner =
- *ghosted_global_vector.get_partitioner();
- const Utilities::MPI::Partitioner &level_partitioner =
- *ghosted_level_vector[level].get_partitioner();
- // owned-owned case: the locally owned indices are going to control
- // the local index
- this->copy_indices[level].resize(my_copy_indices[level].size());
- for (unsigned int i=0; i<my_copy_indices[level].size(); ++i)
- this->copy_indices[level][i] =
- std::pair<unsigned int,unsigned int>
- (global_partitioner.global_to_local(my_copy_indices[level][i].first),
- level_partitioner.global_to_local(my_copy_indices[level][i].second));
-
- // remote-owned case: the locally owned indices for the level and the
- // ghost dofs for the global indices set the local index
- this->copy_indices_level_mine[level].
- resize(my_copy_indices_level_mine[level].size());
- for (unsigned int i=0; i<my_copy_indices_level_mine[level].size(); ++i)
- this->copy_indices_level_mine[level][i] =
- std::pair<unsigned int,unsigned int>
- (global_partitioner.global_to_local(my_copy_indices_level_mine[level][i].first),
- level_partitioner.global_to_local(my_copy_indices_level_mine[level][i].second));
-
- // owned-remote case: the locally owned indices for the global dofs
- // and the ghost dofs for the level indices set the local index
- this->copy_indices_global_mine[level].
- resize(my_copy_indices_global_mine[level].size());
- for (unsigned int i=0; i<my_copy_indices_global_mine[level].size(); ++i)
- this->copy_indices_global_mine[level][i] =
- std::pair<unsigned int,unsigned int>
- (global_partitioner.global_to_local(my_copy_indices_global_mine[level][i].first),
- level_partitioner.global_to_local(my_copy_indices_global_mine[level][i].second));
- }
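+ // fill the internal data structures twice: once for residual transfer
+ // (skipping refinement-edge DoFs) and once for solution transfer
+ // (keeping them)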
+ fill_internal(mg_dof,
+ mg_constrained_dofs,
+ mpi_communicator,
+ false,
+ this->copy_indices,
+ this->copy_indices_global_mine,
+ this->copy_indices_level_mine,
+ ghosted_global_vector,
+ ghosted_level_vector);
+
+ fill_internal(mg_dof,
+ mg_constrained_dofs,
+ mpi_communicator,
+ true,
+ this->solution_copy_indices,
+ this->solution_copy_indices_global_mine,
+ this->solution_copy_indices_level_mine,
+ solution_ghosted_global_vector,
+ solution_ghosted_level_vector);
perform_plain_copy = this->copy_indices.back().size()
== mg_dof.locally_owned_dofs().n_elements();
{
ghosted_global_vector.reinit(0);
ghosted_level_vector.resize(0, 0);
+ solution_ghosted_global_vector.reinit(0);
+ solution_ghosted_level_vector.resize(0, 0);
}
}
template
void MGLevelGlobalTransfer< LinearAlgebra::distributed::Vector<float> >::fill_and_communicate_copy_indices<deal_II_dimension,deal_II_dimension>(
const DoFHandler<deal_II_dimension,deal_II_dimension> &mg_dof);
+ template
+ void MGLevelGlobalTransfer< LinearAlgebra::distributed::Vector<float> >::copy_to_mg (
+ const DoFHandler<deal_II_dimension>&, MGLevelObject<LinearAlgebra::distributed::Vector<float>>&, const LinearAlgebra::distributed::Vector<float>&, const bool) const;
+ template
+ void MGLevelGlobalTransfer< LinearAlgebra::distributed::Vector<double> >::copy_to_mg (
+ const DoFHandler<deal_II_dimension>&, MGLevelObject<LinearAlgebra::distributed::Vector<double>>&, const LinearAlgebra::distributed::Vector<double>&, const bool) const;
+
}
for (deal_II_dimension : DIMENSIONS; S1, S2 : REAL_SCALARS)
{}
};
- // Internal function for filling the copy indices from global to level
- // indices
+
+
+
template <int dim, int spacedim>
void fill_copy_indices(const dealii::DoFHandler<dim,spacedim> &mg_dof,
const MGConstrainedDoFs *mg_constrained_dofs,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_global_mine,
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine)
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &copy_indices_level_mine,
+ const bool skip_interface_dofs)
{
// Now we are filling the variables copy_indices*, which are essentially
// maps from global to mgdof for each level stored as a std::vector of
// pairs. We need to split this map on each level depending on the
- // ownership of the global and mgdof, so that we later not access
+ // ownership of the global and mgdof, so that we later do not access
// non-local elements in copy_to/from_mg.
// We keep track in the bitfield dof_touched which global dof has been
// processed already (on the current level). This is the same as the
for (unsigned int i=0; i<dofs_per_cell; ++i)
{
// we need to ignore if the DoF is on a refinement edge (hanging node)
- if (mg_constrained_dofs != nullptr
+ if (skip_interface_dofs &&
+ mg_constrained_dofs != nullptr
&& mg_constrained_dofs->at_refinement_edge(level, level_dof_indices[i]))
continue;
+
types::global_dof_index global_idx = globally_relevant.index_within_set(global_dof_indices[i]);
//skip if we did this global dof already (on this or a coarser level)
if (dof_touched[global_idx])
const MGConstrainedDoFs*,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &,
std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &,
- std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &);
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > > &,
+ const bool);
#endif
\}
\}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test MGTransferMatrixFree<dim,Number>::interpolate_to_mg()
+// for a scalar field
+
+#include "../tests.h"
+
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/tria_accessor.h>
+#include <deal.II/grid/tria_iterator.h>
+#include <deal.II/lac/full_matrix.h>
+#include <deal.II/lac/identity_matrix.h>
+#include <deal.II/base/quadrature_lib.h>
+#include <deal.II/base/thread_management.h>
+#include <deal.II/base/function.h>
+#include <deal.II/lac/vector.h>
+#include <deal.II/lac/vector_memory.h>
+#include <deal.II/lac/filtered_matrix.h>
+#include <deal.II/lac/precondition.h>
+#include <deal.II/lac/solver_cg.h>
+#include <deal.II/lac/sparse_matrix.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_reordering.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_accessor.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/petsc_parallel_vector.h>
+#include <deal.II/fe/mapping_q1.h>
+#include <deal.II/fe/mapping_q_eulerian.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/fe/fe_system.h>
+#include <deal.II/multigrid/mg_transfer_matrix_free.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/data_out.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/matrix_free/matrix_free.h>
+#include <deal.II/matrix_free/fe_evaluation.h>
+#include <deal.II/grid/grid_out.h>
+#include <iostream>
+#include <vector>
+
+
+using namespace dealii;
+
+
+template <int dim>
+class SimpleField : public Function<dim>
+{
+public:
+ SimpleField() :
+ Function<dim>(1)
+ {}
+
+ double value (const Point<dim> &p,
+ const unsigned int component = 0) const
+ {
+ (void)component;
+ return p[0]*2. + p[1] - 10.;
+ }
+};
+
+
+template <int dim, int fe_degree=2, int n_q_points=fe_degree+1, typename NumberType=double, typename LevelNumberType=NumberType>
+void test (const unsigned int n_glob_ref=2, const unsigned int n_ref = 0)
+{
+ SimpleField<dim> function;
+
+ deallog << "dim=" << dim << std::endl;
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ unsigned int myid = Utilities::MPI::this_mpi_process (mpi_communicator);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (mpi_communicator);
+
+ deallog << "numproc=" << numproc << std::endl;
+
+ parallel::distributed::Triangulation<dim>
+ triangulation (mpi_communicator,
+ Triangulation<dim>::limit_level_difference_at_vertices,
+ parallel::distributed::Triangulation<dim>::construct_multigrid_hierarchy);
+ GridGenerator::subdivided_hyper_cube (triangulation, 5, -1, 1);
+ // now do some refinement
+ triangulation.refine_global(n_glob_ref);
+ for (unsigned int ref=0; ref<n_ref; ++ref)
+ {
+ for (typename Triangulation<dim>::active_cell_iterator cell=triangulation.begin_active();
+ cell != triangulation.end(); ++cell)
+ if (cell->is_locally_owned() &&
+ ((cell->center().norm() < 0.5 && (cell->level() < 5 ||
+ cell->center().norm() > 0.45))
+ ||
+ (dim == 2 && cell->center().norm() > 1.2)))
+ cell->set_refine_flag();
+ triangulation.execute_coarsening_and_refinement();
+ }
+
+ FE_Q<dim> fe (fe_degree);
+ DoFHandler<dim> dof_handler (triangulation);
+
+ dof_handler.distribute_dofs(fe);
+ dof_handler.distribute_mg_dofs();
+
+ IndexSet locally_owned_dofs, locally_relevant_dofs;
+ locally_owned_dofs = dof_handler.locally_owned_dofs ();
+ DoFTools::extract_locally_relevant_dofs (dof_handler,locally_relevant_dofs);
+
+ // constraints:
+ ConstraintMatrix constraints;
+ constraints.reinit (locally_relevant_dofs);
+ DoFTools::make_hanging_node_constraints (dof_handler, constraints);
+ constraints.close ();
+
+
+ // interpolate:
+ LinearAlgebra::distributed::Vector<LevelNumberType> fine_projection;
+ fine_projection.reinit(locally_owned_dofs,
+ locally_relevant_dofs,
+ mpi_communicator);
+ VectorTools::project (dof_handler,
+ constraints,
+ QGauss<dim>(n_q_points+2),
+ function,
+ fine_projection);
+ fine_projection.update_ghost_values();
+
+ // output for debug purposes:
+ if (true)
+ {
+ DataOut<dim> data_out;
+ data_out.attach_dof_handler (dof_handler);
+ data_out.add_data_vector (fine_projection, "projection");
+
+ Vector<float> subdomain (triangulation.n_active_cells());
+ for (unsigned int i=0; i<subdomain.size(); ++i)
+ subdomain(i) = triangulation.locally_owned_subdomain();
+ data_out.add_data_vector (subdomain, "subdomain");
+ data_out.build_patches ();
+ const std::string filename = "output_" + Utilities::int_to_string(myid) + ".vtu";
+ std::ofstream output (filename.c_str());
+ data_out.write_vtu (output);
+
+ const std::string mg_mesh = "mg_mesh";
+ GridOut grid_out;
+ grid_out.write_mesh_per_processor_as_vtu (triangulation,mg_mesh,true,true);
+ }
+
+ // MG hanging node constraints
+ // we do not add extra zero Dirichlet BC here
+ MGConstrainedDoFs mg_constrained_dofs;
+ mg_constrained_dofs.initialize(dof_handler);
+
+ // MG transfer:
+ MGTransferMatrixFree<dim, LevelNumberType> mg_transfer(mg_constrained_dofs);
+ mg_transfer.build(dof_handler);
+
+ // now the core of the test:
+ const unsigned int max_level = dof_handler.get_triangulation().n_global_levels()-1;
+ const unsigned int min_level = 0;
+ MGLevelObject<LinearAlgebra::distributed::Vector<LevelNumberType>> level_projection(min_level, max_level);
+ mg_transfer.interpolate_to_mg(dof_handler,level_projection, fine_projection);
+
+ // now go through all GMG levels and make sure FE field can represent
+ // analytic function exactly:
+ QGauss<dim> quadrature(n_q_points);
+ std::vector<LevelNumberType> q_values(quadrature.size());
+
+ FEValues<dim> fe_values(fe, quadrature, update_values | update_quadrature_points);
+ for (unsigned int level = max_level+1; level != min_level;)
+ {
+ --level;
+
+ std::vector<types::global_dof_index> dof_indices(fe.dofs_per_cell);
+ typename DoFHandler<dim>::cell_iterator cell=dof_handler.begin(level);
+ typename DoFHandler<dim>::cell_iterator endc=dof_handler.end(level);
+ for ( ; cell != endc; ++cell)
+ if (cell->is_locally_owned_on_level())
+ {
+ fe_values.reinit(cell);
+ cell->get_mg_dof_indices(dof_indices);
+ fe_values.get_function_values(level_projection[level],
+ dof_indices,
+ q_values);
+
+ const std::vector<Point<dim>> &q_points = fe_values.get_quadrature_points();
+
+ for (unsigned int q = 0; q < q_points.size(); ++q)
+ {
+ const double diff = q_values[q] - function.value(q_points[q]);
+ if (std::abs(diff) > 1e-10)
+ {
+ std::cout<<"dofs: ";
+ for (const auto i : dof_indices)
+ std::cout << i << " ";
+ std::cout << std::endl << "values: ";
+ std::vector<LevelNumberType> local_values(dof_indices.size());
+ level_projection[level].extract_subvector_to(dof_indices, local_values);
+ for (const auto v : local_values)
+ std::cout << v << " ";
+ std::cout << std::endl
+ << "val(q)=" << q_values[q] << std::endl;
+ std::cout << "MGTransfer indices:" << std::endl;
+ mg_transfer.print_indices(std::cout);
+ AssertThrow (false,
+ ExcMessage("Level " + std::to_string(level) +
+ " Diff " + std::to_string(diff) +
+ " center_x " + std::to_string(cell->center()[0]) +
+ " center_y " + std::to_string(cell->center()[1])
+ ));
+ }
+ }
+ }
+ }
+
+ deallog << "Ok" << std::endl;
+}
+
+
+
+int main (int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+ MPILogInitAll log;
+
+ test<2> ();
+ test<2,1> (0,1);
+}
+
+
--- /dev/null
+
+DEAL:0::dim=2
+DEAL:0::numproc=1
+DEAL:0::Ok
+DEAL:0::dim=2
+DEAL:0::numproc=1
+DEAL:0::Ok
--- /dev/null
+
+DEAL:0::dim=2
+DEAL:0::numproc=3
+DEAL:0::Ok
+DEAL:0::dim=2
+DEAL:0::numproc=3
+DEAL:0::Ok
+
+DEAL:1::dim=2
+DEAL:1::numproc=3
+DEAL:1::Ok
+DEAL:1::dim=2
+DEAL:1::numproc=3
+DEAL:1::Ok
+
+
+DEAL:2::dim=2
+DEAL:2::numproc=3
+DEAL:2::Ok
+DEAL:2::dim=2
+DEAL:2::numproc=3
+DEAL:2::Ok
+