From c6d6552380875a1dde1247b6373e3032fb2f0a0a Mon Sep 17 00:00:00 2001
From: kronbichler
Date: Fri, 31 May 2013 09:48:26 +0000
Subject: [PATCH] Fix a few bugs with distribute. constraint_matrix_trilinos_bug/ncpu_10 still not working. Try to figure it out.

git-svn-id: https://svn.dealii.org/trunk@29679 0785d39b-7218-0410-832d-ea1e28bc413d
---
 .../deal.II/lac/constraint_matrix.templates.h | 60 ++++++++-----------
 tests/mpi/constraint_matrix_trilinos_bug.cc   | 46 --------------
 .../ncpu_10/cmp/generic                       | 10 ++--
 .../ncpu_4/cmp/generic                        | 10 ++--
 tests/mpi/p4est_2d_constraintmatrix_03.cc     |  2 -
 5 files changed, 36 insertions(+), 92 deletions(-)

diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
index ed19a1c0d6..24063ce28c 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
@@ -1016,6 +1016,8 @@ namespace internal
                                        parallel::distributed::Vector<number> &output,
                                        const internal::bool2type<false>       /*is_block_vector*/)
     {
+      // TODO: the in vector might already have all elements. need to find a
+      // way to efficiently avoid the copy then
       const_cast<parallel::distributed::Vector<number>&>(vec).zero_out_ghosts();
       output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
       output = vec;
@@ -1069,28 +1071,20 @@ template <class VectorType>
 void
 ConstraintMatrix::distribute (VectorType &vec) const
 {
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  // if the vector type supports parallel storage and if the
-  // vector actually does store only part of the vector, distributing
-  // is slightly more complicated. we can skip the complicated part
-  // if the local processor stores the *entire* vector and pretend
-  // that this is a sequential vector but we need to pay attention that
-  // in that case the other processors don't actually do anything (in
-  // particular that they do not call compress because the processor
-  // that owns everything doesn't do so either); this is the first if
-  // case here, the second is for the complicated case, the last else
-  // is for the simple case (sequential vector or distributed vector
-  // where the current processor stores everything)
-  if ((vec.supports_distributed_data == true)
-      &&
-      (vec.locally_owned_elements().n_elements() == 0))
-    {
-      // do nothing, in particular don't call compress()
-    }
-  else if ((vec.supports_distributed_data == true)
-           &&
-           (vec.locally_owned_elements() != complete_index_set(vec.size())))
+  Assert (sorted==true, ExcMatrixNotClosed());
+
+  // if the vector type supports parallel storage and if the vector actually
+  // does store only part of the vector, distributing is slightly more
+  // complicated. we might be able to skip the complicated part if one
+  // processor owns everything and pretend that this is a sequential vector,
+  // but it is difficult for the other processors to know whether they should
+  // not do anything or if other processors will create a temporary vector,
+  // exchange data (requiring communication, maybe even with the processors
+  // that do not own anything because of that particular parallel model), and
+  // call compress() finally. the first case here is for the complicated case,
+  // the last else is for the simple case (sequential vector)
+  const IndexSet vec_owned_elements = vec.locally_owned_elements();
+  if (vec.supports_distributed_data == true)
     {
       // This processor owns only part of the vector. one may think that
       // every processor should be able to simply communicate those elements
@@ -1105,7 +1099,6 @@ ConstraintMatrix::distribute (VectorType &vec) const
       // need to get a vector that has all the *sources* or constraints we
       // own locally, possibly as ghost vector elements, then read from them,
       // and finally throw away the ghosted vector. Implement this in the following.
-      const IndexSet vec_owned_elements = vec.locally_owned_elements();
       IndexSet needed_elements = vec_owned_elements;
 
       typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
@@ -1124,17 +1117,16 @@ ConstraintMatrix::distribute (VectorType &vec) const
 
       for (constraint_iterator it = lines.begin(); it != lines.end(); ++it)
         if (vec_owned_elements.is_element(it->line))
-          for (unsigned int i=0; i<it->entries.size(); ++i)
-            {
-              typename VectorType::value_type
+          {
+            typename VectorType::value_type
               new_value = it->inhomogeneity;
-              for (unsigned int i=0; i<it->entries.size(); ++i)
-                new_value += (static_cast<typename VectorType::value_type>
-                              (ghosted_vector(it->entries[i].first)) *
-                               it->entries[i].second);
-              Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
-              vec(it->line) = new_value;
-            }
+            for (unsigned int i=0; i<it->entries.size(); ++i)
+              new_value += (static_cast<typename VectorType::value_type>
+                            (ghosted_vector(it->entries[i].first)) *
+                             it->entries[i].second);
+            Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+            vec(it->line) = new_value;
+          }
 
       // now compress to communicate the entries that we added to
       // and that weren't to local processors to the owner
@@ -1146,7 +1138,7 @@ ConstraintMatrix::distribute (VectorType &vec) const
   else
     // purely sequential vector (either because the type doesn't
     // support anything else or because it's completely stored
-    // locally
+    // locally)
     {
      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
      for (; next_constraint != lines.end(); ++next_constraint)
diff --git a/tests/mpi/constraint_matrix_trilinos_bug.cc b/tests/mpi/constraint_matrix_trilinos_bug.cc
index 6872ef2c69..e0c397d24a 100644
--- a/tests/mpi/constraint_matrix_trilinos_bug.cc
+++ b/tests/mpi/constraint_matrix_trilinos_bug.cc
@@ -46,50 +46,6 @@
 #include
 
-template <int dim>
-class FilteredDataOut : public DataOut<dim>
-{
-  public:
-    FilteredDataOut (const unsigned int subdomain_id)
-      :
-      subdomain_id (subdomain_id)
-    {}
-
-    virtual typename DoFHandler<dim>::cell_iterator
-    first_cell ()
-    {
-      typename DoFHandler<dim>::active_cell_iterator
-      cell = this->dofs->begin_active();
-      while ((cell != this->dofs->end()) &&
-             (cell->subdomain_id() != subdomain_id))
-        ++cell;
-
-      return cell;
-    }
-
-    virtual typename DoFHandler<dim>::cell_iterator
-    next_cell (const typename DoFHandler<dim>::cell_iterator &old_cell)
-    {
-      if (old_cell != this->dofs->end())
-        {
-          const IteratorFilters::SubdomainEqualTo
-          predicate(subdomain_id);
-
-          return
-            ++(FilteredIterator
-               <typename DoFHandler<dim>::active_cell_iterator>
-               (predicate,old_cell));
-        }
-      else
-        return old_cell;
-    }
-
-  private:
-    const unsigned int subdomain_id;
-};
-
-
-
 template <int dim>
 void test()
@@ -131,11 +87,9 @@ void test()
   TrilinosWrappers::MPI::Vector x;
   x.reinit(owned_set, MPI_COMM_WORLD);
   x=2.0;
-  x.compress();
 
   TrilinosWrappers::MPI::Vector x_rel;
   x_rel.reinit(relevant_set, MPI_COMM_WORLD);
-  x_rel.compress();
 
   ConstraintMatrix cm(relevant_set);
   DoFTools::make_hanging_node_constraints (dofh, cm);
diff --git a/tests/mpi/constraint_matrix_trilinos_bug/ncpu_10/cmp/generic b/tests/mpi/constraint_matrix_trilinos_bug/ncpu_10/cmp/generic
index b3af4de688..c3ced6c764 100644
--- a/tests/mpi/constraint_matrix_trilinos_bug/ncpu_10/cmp/generic
+++ b/tests/mpi/constraint_matrix_trilinos_bug/ncpu_10/cmp/generic
@@ -1,13 +1,13 @@
 DEAL:0:2d::Exception:
 --------------------------------------------------------
-An error occurred in file in function
-    void dealii::ConstraintMatrix::distribute(VectorType&) const [with VectorType = dealii::TrilinosWrappers::MPI::Vector]
+An error occurred in file in function
+    void dealii::TrilinosWrappers::VectorBase::set(unsigned int, const unsigned int*, const double*)
 The violated condition was:
-    vec.vector_partitioner().IsOneToOne()
+    !has_ghost_elements()
 The name and call sequence of the exception was:
-    ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning.")
+    ExcGhostsPresent()
 Additional Information:
-Distribute does not work on vectors with overlapping parallel partitioning.
+(none)
 --------------------------------------------------------
diff --git a/tests/mpi/constraint_matrix_trilinos_bug/ncpu_4/cmp/generic b/tests/mpi/constraint_matrix_trilinos_bug/ncpu_4/cmp/generic
index b3af4de688..c3ced6c764 100644
--- a/tests/mpi/constraint_matrix_trilinos_bug/ncpu_4/cmp/generic
+++ b/tests/mpi/constraint_matrix_trilinos_bug/ncpu_4/cmp/generic
@@ -1,13 +1,13 @@
 DEAL:0:2d::Exception:
 --------------------------------------------------------
-An error occurred in file in function
-    void dealii::ConstraintMatrix::distribute(VectorType&) const [with VectorType = dealii::TrilinosWrappers::MPI::Vector]
+An error occurred in file in function
+    void dealii::TrilinosWrappers::VectorBase::set(unsigned int, const unsigned int*, const double*)
 The violated condition was:
-    vec.vector_partitioner().IsOneToOne()
+    !has_ghost_elements()
 The name and call sequence of the exception was:
-    ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning.")
+    ExcGhostsPresent()
 Additional Information:
-Distribute does not work on vectors with overlapping parallel partitioning.
+(none)
 --------------------------------------------------------
diff --git a/tests/mpi/p4est_2d_constraintmatrix_03.cc b/tests/mpi/p4est_2d_constraintmatrix_03.cc
index 8d147e39dd..a3a7bed44b 100644
--- a/tests/mpi/p4est_2d_constraintmatrix_03.cc
+++ b/tests/mpi/p4est_2d_constraintmatrix_03.cc
@@ -143,11 +143,9 @@ void test()
   TrilinosWrappers::MPI::Vector x;
   x.reinit(owned_set, MPI_COMM_WORLD);
   x=2.0;
-  x.compress();
 
   TrilinosWrappers::MPI::Vector x_rel;
   x_rel.reinit(relevant_set, MPI_COMM_WORLD);
-  x_rel.compress();
 
   ConstraintMatrix cm(relevant_set);
   DoFTools::make_hanging_node_constraints (dofh, cm);
-- 
2.39.5
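
The calling pattern exercised by the two MPI tests above (and the one the rewritten distribute() has to support) looks roughly like the following sketch. It is assembled from the test code in this patch; the function name distribute_example, the include list, and the assumption of an already set up distributed DoFHandler `dofh` are illustrative and not part of the commit.

// Sketch of the test pattern: distribute() is called on a vector with
// one-to-one partitioning; a ghosted vector is only used for reading.
#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/trilinos_vector.h>
#include <mpi.h>

template <int dim>
void distribute_example (const dealii::DoFHandler<dim> &dofh)
{
  using namespace dealii;

  const IndexSet owned_set = dofh.locally_owned_dofs();
  IndexSet relevant_set;
  DoFTools::extract_locally_relevant_dofs (dofh, relevant_set);

  // vector with non-overlapping (one-to-one) parallel partitioning;
  // this is the vector handed to distribute()
  TrilinosWrappers::MPI::Vector x;
  x.reinit (owned_set, MPI_COMM_WORLD);
  x = 2.0;

  // constraints stored for all locally relevant degrees of freedom
  ConstraintMatrix cm (relevant_set);
  DoFTools::make_hanging_node_constraints (dofh, cm);
  cm.close ();

  // here the patched code builds a ghosted temporary internally so that
  // locally owned constrained entries can read remote source values
  cm.distribute (x);

  // copy into a ghosted vector only afterwards, for read access
  TrilinosWrappers::MPI::Vector x_rel;
  x_rel.reinit (relevant_set, MPI_COMM_WORLD);
  x_rel = x;
}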