From 1aa955a36fec3f033828f7ad4a836bf99d0eec95 Mon Sep 17 00:00:00 2001
From: Daniel Arndt
Date: Sun, 11 Sep 2016 12:56:29 +0200
Subject: [PATCH] Move is_globally_ascending into an own function

---
 include/deal.II/base/index_set.h |   7 +-
 source/base/index_set.cc         | 112 +++++++++++++++++--------------
 2 files changed, 66 insertions(+), 53 deletions(-)

diff --git a/include/deal.II/base/index_set.h b/include/deal.II/base/index_set.h
index 8d2bc790bc..2ec7a30270 100644
--- a/include/deal.II/base/index_set.h
+++ b/include/deal.II/base/index_set.h
@@ -218,13 +218,14 @@ public:
   bool is_contiguous () const;
 
   /**
-   * Return whether the index set stored by this object defines a linear
-   * range, i.e., each index is contained in exactly one IndexSet,
+   * Return whether the IndexSets are ascending with respect to MPI process
+   * number, i.e., each index is contained in exactly one IndexSet,
    * the first indices are contained in the IndexSet of the first MPI process,
    * the second indices are contained in the IndexSet of the second MPI process
    * and so on.
+   * In case there is only one MPI process, this is always true.
    */
-  bool is_linear() const;
+  bool is_globally_ascending() const;
 
   /**
    * Return the number of elements stored in this index set.
diff --git a/source/base/index_set.cc b/source/base/index_set.cc
index b3ff1ab4b3..24d5c8ca93 100644
--- a/source/base/index_set.cc
+++ b/source/base/index_set.cc
@@ -512,56 +512,11 @@ IndexSet::make_trilinos_map (const MPI_Comm &communicator,
     }
 #endif
 
-  // Find out if the IndexSet is linear
-  // If there is only one process, the IndexSet is always linear
-  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(communicator);
-  bool is_linear = (n_ranks==1);
-  const bool all_contiguous = (Utilities::MPI::min (is_contiguous() ? 1 : 0, communicator) == 1);
-  // overlapping IndexSets or non_contiguous IndexSets can't be linear
-  if ((all_contiguous) && (!overlapping) && (!is_linear))
-    {
-      is_linear = true;
-      // we know that there is only one interval
-      types::global_dof_index local_dofs[2];
-      local_dofs[0] = (n_elements()>0) ? *(begin_intervals()->begin())
-                      : numbers::invalid_dof_index ;
-      local_dofs[1] = (n_elements()>0) ? begin_intervals()->last()
-                      : numbers::invalid_dof_index;
-
-      const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
-      // first gather all information on process 0
-      const unsigned int gather_size = (my_rank==0)?2*n_ranks:1;
-      std::vector<types::global_dof_index> global_dofs(gather_size);
-
-      MPI_Gather(local_dofs, 2, DEAL_II_DOF_INDEX_MPI_TYPE,
-                 &(global_dofs[0]), 2, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
-                 communicator);
-      if (my_rank == 0)
-        {
-          // find out if the received std::vector is linear
-          types::global_dof_index old_dof = global_dofs[0], new_dof = 0;
-          types::global_dof_index index = 0;
-          while (global_dofs[index] == numbers::invalid_dof_index)
-            ++index;
-          old_dof = global_dofs[index++];
-          for (; index

[The rest of this hunk — the remainder of the removed block, its replacement inside make_trilinos_map, and the header of the second hunk that adds the definition of IndexSet::is_globally_ascending(), whose body follows — was lost in extraction.]

+      local_dofs[0] = (n_elements()>0) ? *(begin_intervals()->begin())
+                      : numbers::invalid_dof_index ;
+      local_dofs[1] = (n_elements()>0) ? begin_intervals()->last()
+                      : numbers::invalid_dof_index;
+
+      const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
+      // first gather all information on process 0
+      const unsigned int gather_size = (my_rank==0)?2*n_ranks:1;
+      std::vector<types::global_dof_index> global_dofs(gather_size);
+
+      MPI_Gather(local_dofs, 2, DEAL_II_DOF_INDEX_MPI_TYPE,
+                 &(global_dofs[0]), 2, DEAL_II_DOF_INDEX_MPI_TYPE, 0,
+                 communicator);
+      if (my_rank == 0)
+        {
+          // find out if the received std::vector is linear
+          types::global_dof_index old_dof = global_dofs[0], new_dof = 0;
+          types::global_dof_index index = 0;
+          while (global_dofs[index] == numbers::invalid_dof_index)
+            ++index;
+          old_dof = global_dofs[index++];
+          for (; index

[The patch is truncated here.]
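A note for readers following the code being moved: the sketch below is a minimal, self-contained reconstruction of the gather-and-check pattern that this patch factors out of make_trilinos_map(). It is not deal.II code. It assumes each rank owns at most one contiguous [first, last] index range, substitutes unsigned long long for types::global_dof_index, MPI_UNSIGNED_LONG_LONG for DEAL_II_DOF_INDEX_MPI_TYPE, and ULLONG_MAX for numbers::invalid_dof_index; the function name and the main() driver are invented for illustration.

#include <climits>
#include <cstddef>
#include <cstdio>
#include <vector>
#include <mpi.h>

// Return whether the per-rank [first, last] ranges, ordered by rank,
// form a globally ascending sequence. Ranks owning no indices send a
// sentinel pair and are skipped in the comparison.
bool is_globally_ascending_sketch(unsigned long long first,
                                  unsigned long long last,
                                  bool owns_indices,
                                  MPI_Comm comm)
{
  const unsigned long long invalid = ULLONG_MAX; // stand-in for invalid_dof_index
  int my_rank = 0, n_ranks = 1;
  MPI_Comm_rank(comm, &my_rank);
  MPI_Comm_size(comm, &n_ranks);

  // As in the patch: pack the first and last locally owned index
  // (or the sentinel) and gather all pairs on rank 0.
  unsigned long long local_dofs[2] = {owns_indices ? first : invalid,
                                      owns_indices ? last : invalid};
  std::vector<unsigned long long> global_dofs(my_rank == 0 ? 2 * n_ranks : 1);
  MPI_Gather(local_dofs, 2, MPI_UNSIGNED_LONG_LONG,
             global_dofs.data(), 2, MPI_UNSIGNED_LONG_LONG, 0, comm);

  int ascending = 1;
  if (my_rank == 0)
    {
      // Skip leading sentinels, then require the remaining valid entries
      // of the flattened sequence [f0, l0, f1, l1, ...] to never decrease.
      std::size_t index = 0;
      while (index < global_dofs.size() && global_dofs[index] == invalid)
        ++index;
      if (index < global_dofs.size())
        {
          unsigned long long old_dof = global_dofs[index++];
          for (; index < global_dofs.size(); ++index)
            {
              const unsigned long long new_dof = global_dofs[index];
              if (new_dof == invalid)
                continue; // empty rank: nothing to compare
              if (new_dof < old_dof)
                {
                  ascending = 0; // a range is out of order w.r.t. rank
                  break;
                }
              old_dof = new_dof;
            }
        }
    }

  // Rank 0 decided; broadcast so every rank returns the same answer.
  MPI_Bcast(&ascending, 1, MPI_INT, 0, comm);
  return ascending == 1;
}

int main(int argc, char **argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  // Each rank owns [10*rank, 10*rank+9], so the partition is ascending.
  const bool ok = is_globally_ascending_sketch(10ull * rank, 10ull * rank + 9,
                                               /*owns_indices=*/true,
                                               MPI_COMM_WORLD);
  if (rank == 0)
    std::printf("globally ascending: %s\n", ok ? "yes" : "no");
  MPI_Finalize();
  return 0;
}

Built with mpicxx and run under, e.g., mpirun -np 4, rank 0 should print "globally ascending: yes" for this partition; swapping the ranges of two ranks makes the flattened-sequence check fail, which is the property the patched code uses to decide whether the IndexSets are ascending with respect to MPI process number.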