/**
* Return whether the IndexSets are ascending with respect to MPI process
- * number, i.e., each index is contained in exactly one IndexSet,
- * the first indices are contained in the IndexSet of the first MPI process,
- * the second indices are contained in the IndexSet of the second MPI process
- * and so on.
- * In case there is only one MPI process, this is always true.
+ * number and 1:1, i.e., each index is contained in exactly one IndexSet
+ * (among those stored on the different processes), each process stores a
+ * contiguous subset of indices, and the index set on process $p+1$ starts
+ * at the index one larger than the last one stored on process $p$.
+ * In case there is only one MPI process, this just means that the IndexSet
+ * is complete.
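+ *
+ * As a minimal illustration (a sketch assuming exactly two MPI processes
+ * and invented sizes; @p comm stands for the communicator in use):
+ * @code
+ *   IndexSet set(100);
+ *   if (Utilities::MPI::this_mpi_process(comm) == 0)
+ *     set.add_range(0, 50);    // rank 0 owns indices [0,50)
+ *   else
+ *     set.add_range(50, 100);  // rank 1 owns indices [50,100)
+ *   set.compress();
+ *   // every index is owned exactly once, in ascending rank order:
+ *   const bool linear = set.is_ascending_and_one_to_one(comm);
+ * @endcode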
*/
- bool is_globally_ascending() const;
+ bool is_ascending_and_one_to_one(const MPI_Comm &communicator) const;
/**
* Return the number of elements stored in this index set.
}
#endif
- // Find out if the IndexSet is ascending.
- // Overlapping IndexSets are never ascending.
- const bool ascending = overlapping ? false : is_globally_ascending();
+  // Find out if the IndexSet is ascending and 1:1. This corresponds to a
+  // linear Epetra_Map. Overlapping IndexSets are never 1:1.
+ const bool linear = overlapping ? false : is_ascending_and_one_to_one(communicator);
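+  // (a linear Epetra_Map stores indices contiguously and in ascending rank
+  //  order, so it can be built from the global and local sizes alone; any
+  //  other distribution requires handing Epetra the explicit list of
+  //  locally owned indices)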
- if (ascending)
+ if (linear)
return Epetra_Map (TrilinosWrappers::types::int_type(size()),
TrilinosWrappers::types::int_type(n_elements()),
0,
bool
-IndexSet::is_globally_ascending () const
+IndexSet::is_ascending_and_one_to_one (const MPI_Comm &communicator) const
{
- // If there is only one process, the IndexSet is always ascending.
- const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(communicator);
- if (n_ranks==1)
- return true;
-  // Non-contiguous IndexSets can't be linear.
+ // If the sum of local elements does not add up to the total size,
+ // the IndexSet can't be complete.
+ const size_type n_global_elements
+ = Utilities::MPI::sum (n_elements(), communicator);
+ if (n_global_elements != size())
+ return false;
+
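+  // (worked example: for size()==3, if process 0 owns {0,1} and process 1
+  //  owns {1,2}, the sum of local elements is 4 != 3, so some index must
+  //  be stored more than once; note the converse does not hold, which is
+  //  why the checks below are still needed)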
+ // Non-contiguous IndexSets can't be linear.
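+  // (taking the minimum of the 0/1 flags over all ranks implements a
+  //  global logical AND: the result is 1 only if every process stores a
+  //  contiguous range)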
const bool all_contiguous = (Utilities::MPI::min (is_contiguous() ? 1 : 0, communicator) == 1);
if (!all_contiguous)
return false;
bool is_globally_ascending = true;
// we know that there is only one interval
- types::global_dof_index local_dofs[2];
- local_dofs[0] = (n_elements()>0) ? *(begin_intervals()->begin())
- : numbers::invalid_dof_index ;
- local_dofs[1] = (n_elements()>0) ? begin_intervals()->last()
- : numbers::invalid_dof_index;
+ types::global_dof_index first_local_dof
+ = (n_elements()>0) ? *(begin_intervals()->begin())
+      : numbers::invalid_dof_index;
const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
+ const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(communicator);
// first gather all information on process 0
- const unsigned int gather_size = (my_rank==0)?2*n_ranks:1;
+ const unsigned int gather_size = (my_rank==0)?n_ranks:1;
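+  // (MPI_Gather only looks at the receive buffer on the root rank, but the
+  //  expression &(global_dofs[0]) below must not index an empty vector,
+  //  hence the dummy size of one on all other ranks)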
std::vector<types::global_dof_index> global_dofs(gather_size);
- MPI_Gather(local_dofs, 2, DEAL_II_DOF_INDEX_MPI_TYPE,
- &(global_dofs[0]), 2, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
+ MPI_Gather(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ &(global_dofs[0]), 1, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
communicator);
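  // (at this point only rank 0 holds the gathered first indices; the
  //  resulting flag still has to be made consistent across all ranks
  //  before the function can return it)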
if (my_rank == 0)
{
- // find out if the received std::vector is linear
+ // find out if the received std::vector is ascending
      types::global_dof_index index = 0;
      while (global_dofs[index] == numbers::invalid_dof_index)
        ++index;
      types::global_dof_index old_dof = global_dofs[index++], new_dof = 0;
      for (; index<global_dofs.size(); ++index)
        {
          new_dof = global_dofs[index];
if (new_dof == numbers::invalid_dof_index)
new_dof = old_dof;
- else if (new_dof<old_dof)
+ else if (new_dof<=old_dof)
{
is_globally_ascending = false;
break;
check_solver_within_range(
solver.solve (system_matrix, solution, system_rhs,
preconditioner),
- solver_control.last_step(), 25, 34);
+ solver_control.last_step(), 20, 34);
}
deallog.pop();
check_solver_within_range(
solver.solve (system_matrix, solution, system_rhs,
preconditioner),
- solver_control.last_step(), 24, 40);
+ solver_control.last_step(), 20, 40);
}
deallog.pop();
deallog << std::endl;