constraints.close ();
hanging_node_constraints.close ();
+ //check(constraints);
+ //check(hanging_node_constraints);
+
- CompressedSimpleSparsityPattern csp(mg_dof_handler.n_dofs(), mg_dof_handler.n_dofs());
- DoFTools::make_sparsity_pattern (mg_dof_handler, csp, constraints);
- system_matrix.reinit (mg_dof_handler.locally_owned_dofs(), csp, MPI_COMM_WORLD, true);
+ DynamicSparsityPattern dsp(mg_dof_handler.n_dofs(), mg_dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (mg_dof_handler, dsp, constraints);
+ system_matrix.reinit (mg_dof_handler.locally_owned_dofs(), dsp, MPI_COMM_WORLD, true);
+
// The multigrid constraints have to be
// initialized. They need to know about
// the boundary values as well, so we
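In the tutorial code this hunk comes from (the comment matches the one in deal.II's step-16/step-50 programs), the sentence continues into initializing an MGConstrainedDoFs object. A minimal sketch of what typically follows, assuming a member mg_constrained_dofs and a boundary-value function map dirichlet_boundary, neither of which is shown in this excerpt:

    mg_constrained_dofs.clear();
    mg_constrained_dofs.initialize(mg_dof_handler, dirichlet_boundary);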
extract_locally_relevant_dofs (const DH &dof_handler,
IndexSet &dof_set);
- IndexSet &dof_set,
- unsigned int level);
+ /**
+ * For each processor, determine the set of locally owned degrees of freedom
+ * as an IndexSet, and return a vector of such index sets, one per MPI
+ * process that participates in the DoF handler object.
+ *
+ * The function can be used for objects of type dealii::Triangulation or
+ * parallel::shared::Triangulation. It will not work for objects of type
+ * parallel::distributed::Triangulation since for such triangulations we do
+ * not have information about all cells of the triangulation available
+ * locally, and consequently cannot say anything definitive about the
+ * degrees of freedom active on other processors' locally owned cells.
+ *
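+ * A minimal usage sketch; the names dof_handler and my_id are illustrative,
+ * and a DoF handler built on a parallel::shared::Triangulation is assumed:
+ * @code
+ * const std::vector<IndexSet> owned =
+ *   DoFTools::locally_owned_dofs_per_subdomain (dof_handler);
+ * const unsigned int my_id =
+ *   Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ * // owned[my_id] is the set of DoFs owned by this process
+ * @endcode
+ *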
+ * @author Denis Davydov, 2015
+ */
+ template <class DH>
+ std::vector<IndexSet>
+ locally_owned_dofs_per_subdomain (const DH &dof_handler);
+
+ /**
+ * For each processor, determine the set of locally relevant degrees of
+ * freedom as an IndexSet, and return a vector of such index sets, one per
+ * MPI process that participates in the DoF handler object.
+ *
+ * The function can be used for objects of type dealii::Triangulation or
+ * parallel::shared::Triangulation. It will not work for objects of type
+ * parallel::distributed::Triangulation since for such triangulations we do
+ * not have information about all cells of the triangulation available
+ * locally, and consequently cannot say anything definitive about the
+ * degrees of freedom active on other processors' locally owned cells.
+ *
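+ * Building on the sketch above, the ghost DoFs of this process (relevant
+ * but not owned) could be obtained as a set difference:
+ * @code
+ * const std::vector<IndexSet> relevant =
+ *   DoFTools::locally_relevant_dofs_per_subdomain (dof_handler);
+ * IndexSet ghosts = relevant[my_id];
+ * ghosts.subtract_set (owned[my_id]);
+ * @endcode
+ *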
+ * @author Jean-Paul Pelteret, 2015
+ */
+ template <class DH>
+ std::vector<IndexSet>
+ locally_relevant_dofs_per_subdomain (const DH &dof_handler);
+
+
+ /**
+ * Same as extract_locally_relevant_dofs() but for multigrid DoFs
+ * for the given @p level.
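+ *
+ * A minimal sketch, assuming a DoF handler mg_dof_handler with distributed
+ * level DoFs and a valid level number:
+ * @code
+ * IndexSet relevant_level_dofs;
+ * DoFTools::extract_locally_relevant_mg_dofs (mg_dof_handler,
+ *                                             relevant_level_dofs,
+ *                                             level);
+ * @endcode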
+ */
+ template <class DH>
+ void
+ extract_locally_relevant_mg_dofs (const DH &dof_handler,
+ IndexSet &dof_set,
+ unsigned int level);
+
+
/**
* For each DoF, return in the output array to which subdomain (as given by
* the <tt>cell->subdomain_id()</tt> function) it belongs. The output array
*
* Organization of the data is like for @p copy_indices_mine.
*/
- std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
- copy_indices_to_me;
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+ copy_indices_global_mine;
/**
* Additional degrees of freedom for the copy_from_mg() function. These are
*
* Organization of the data is like for @p copy_indices_mine.
*/
- std::vector<std::vector<std::pair<types::global_dof_index, unsigned int> > >
- copy_indices_from_me;
+ std::vector<std::vector<std::pair<types::global_dof_index, types::global_dof_index> > >
+ copy_indices_level_mine;
/**
--level;
VECTOR &dst_level = dst[level];
- typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
+#ifdef DEBUG_OUTPUT
+ MPI_Barrier(MPI_COMM_WORLD);
+ int myid = -1;
+ MPI_Comm_rank (MPI_COMM_WORLD, &myid);
+#endif
+
+ typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
for (IT i= copy_indices[level].begin();
i != copy_indices[level].end(); ++i)
dst_level(i->second) = src(i->first);
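(In these index pairs the first entry is always the global index and the second the level index: copy_to_mg() above assigns dst_level(i->second) = src(i->first), while copy_from_mg() below inverts the two roles.)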
dst = 0;
for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
{
- typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
+ typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
+#ifdef DEBUG_OUTPUT
+ MPI_Barrier(MPI_COMM_WORLD);
+ std::cout << "copy_from_mg src " << level << " " << src[level].l2_norm() << std::endl;
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif
// First copy all indices local to this process
- if (constraints==0)
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) = src[level](i->second);
- else
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) = src[level](i->second);
// Do the same for the indices where the level index is local,
// but the global index is not
// functions
for (unsigned int level=0; level<mg_dof_handler.get_tria().n_global_levels(); ++level)
{
- typedef std::vector<std::pair<types::global_dof_index, unsigned int> >::const_iterator IT;
+ typedef std::vector<std::pair<types::global_dof_index, types::global_dof_index> >::const_iterator IT;
- if (constraints==0)
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- dst(i->first) += src[level](i->second);
- else
- for (IT i= copy_indices[level].begin();
- i != copy_indices[level].end(); ++i)
- constraints->distribute_local_to_global(i->first, src[level](i->second), dst);
+ for (IT i= copy_indices[level].begin();
+ i != copy_indices[level].end(); ++i)
+ dst(i->first) += src[level](i->second);
// Do the same for the indices where the level index is local,
// but the global index is not
bool global_mine = mg_dof.locally_owned_dofs().is_element(global_dof_indices[i]);
bool level_mine = mg_dof.locally_owned_mg_dofs(level).is_element(level_dof_indices[i]);
+
if (global_mine && level_mine)
-   copy_indices[level].push_back(
-     std::make_pair (global_dof_indices[i], level_dof_indices[i]));
- else if (level_mine)
-   copy_indices_from_me[level].push_back(
-     std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
- else if (global_mine)
-   copy_indices_to_me[level].push_back(
-     std::pair<unsigned int, unsigned int> (global_dof_indices[i], level_dof_indices[i]));
+   {
+     copy_indices[level].push_back(
+       std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+   }
+ else if (global_mine)
+   {
+     copy_indices_global_mine[level].push_back(
+       std::make_pair (global_dof_indices[i], level_dof_indices[i]));
+
+     // send this to the owner of the level_dof:
+     send_data_temp.push_back(dof_pair(level, global_dof_indices[i], level_dof_indices[i]));
+   }
else
-   continue;
+   {
+     // somebody will send those to me
+   }
dof_touched[global_idx] = true;
}
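The send_data_temp buffer and the dof_pair type it stores are declared outside this excerpt. A plausible minimal definition consistent with the constructor call above (an assumption, not taken verbatim from the patch):

    // hypothetical helper type; the field order matches the call
    // dof_pair(level, global_dof_indices[i], level_dof_indices[i])
    struct dof_pair
    {
      unsigned int            level;
      types::global_dof_index global_dof_index;
      types::global_dof_index level_dof_index;

      dof_pair (const unsigned int            level,
                const types::global_dof_index global_dof_index,
                const types::global_dof_index level_dof_index)
        : level (level),
          global_dof_index (global_dof_index),
          level_dof_index (level_dof_index)
      {}
    };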
// If we are in debugging mode, we order the copy indices, so we get
// more reliable output for regression tests
#ifdef DEBUG
- std::less<std::pair<types::global_dof_index, unsigned int> > compare;
+ std::less<std::pair<types::global_dof_index, types::global_dof_index> > compare;
for (unsigned int level=0; level<copy_indices.size(); ++level)
std::sort(copy_indices[level].begin(), copy_indices[level].end(), compare);
- for (unsigned int level=0; level<copy_indices_from_me.size(); ++level)
- std::sort(copy_indices_from_me[level].begin(), copy_indices_from_me[level].end(), compare);
- for (unsigned int level=0; level<copy_indices_to_me.size(); ++level)
- std::sort(copy_indices_to_me[level].begin(), copy_indices_to_me[level].end(), compare);
+ for (unsigned int level=0; level<copy_indices_level_mine.size(); ++level)
+ std::sort(copy_indices_level_mine[level].begin(), copy_indices_level_mine[level].end(), compare);
+ for (unsigned int level=0; level<copy_indices_global_mine.size(); ++level)
+ std::sort(copy_indices_global_mine[level].begin(), copy_indices_global_mine[level].end(), compare);
#endif
}