From 390c65b968b3d474c8c73a470ebc4927aa6522b6 Mon Sep 17 00:00:00 2001 From: Wolfgang Bangerth Date: Sat, 25 May 2013 12:29:14 +0000 Subject: [PATCH] Unify the various implementations of ConstraintMatrix::distribute using the new scheme. This includes making it now also work for objects of type PETScWrappers::MPI::BlockVector. Compared to the previous implementation, it now also works if constraints have inhomogeneities. git-svn-id: https://svn.dealii.org/trunk@29593 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/doc/news/changes.h | 6 + .../deal.II/lac/constraint_matrix.templates.h | 104 ++++++- deal.II/source/lac/constraint_matrix.cc | 273 +----------------- deal.II/source/lac/constraint_matrix.inst.in | 9 +- tests/mpi/petsc_distribute_01_inhomogenous.cc | 159 ++++++++++ .../ncpu_1/cmp/generic | 2 + .../ncpu_10/cmp/generic | 2 + .../ncpu_2/cmp/generic | 2 + .../ncpu_4/cmp/generic | 2 + .../trilinos_distribute_01_inhomogenous.cc | 164 +++++++++++ .../ncpu_1/cmp/generic | 2 + .../ncpu_10/cmp/generic | 2 + .../ncpu_2/cmp/generic | 2 + .../ncpu_4/cmp/generic | 2 + tests/mpi/trilinos_distribute_02.cc | 142 +++++++++ .../trilinos_distribute_02/ncpu_1/cmp/generic | 2 + .../ncpu_10/cmp/generic | 2 + .../trilinos_distribute_02/ncpu_2/cmp/generic | 2 + .../trilinos_distribute_02/ncpu_4/cmp/generic | 2 + 19 files changed, 592 insertions(+), 289 deletions(-) create mode 100644 tests/mpi/petsc_distribute_01_inhomogenous.cc create mode 100644 tests/mpi/petsc_distribute_01_inhomogenous/ncpu_1/cmp/generic create mode 100644 tests/mpi/petsc_distribute_01_inhomogenous/ncpu_10/cmp/generic create mode 100644 tests/mpi/petsc_distribute_01_inhomogenous/ncpu_2/cmp/generic create mode 100644 tests/mpi/petsc_distribute_01_inhomogenous/ncpu_4/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_01_inhomogenous.cc create mode 100644 tests/mpi/trilinos_distribute_01_inhomogenous/ncpu_1/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_01_inhomogenous/ncpu_10/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_01_inhomogenous/ncpu_2/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_01_inhomogenous/ncpu_4/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_02.cc create mode 100644 tests/mpi/trilinos_distribute_02/ncpu_1/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_02/ncpu_10/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_02/ncpu_2/cmp/generic create mode 100644 tests/mpi/trilinos_distribute_02/ncpu_4/cmp/generic diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h index f8fd69f022..a29684ee60 100644 --- a/deal.II/doc/news/changes.h +++ b/deal.II/doc/news/changes.h @@ -130,6 +130,12 @@ this function.

 <h3>Specific improvements</h3>

+<li> New: ConstraintMatrix::distribute is now also implemented for
+arguments of type PETScWrappers::MPI::BlockVector.
+<br>
+(Wolfgang Bangerth, 2013/05/25)
+</li>
+
 <li> Fixed: IndexSet::operator== returned the wrong results in some cases.
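For reference, this is what the newly supported call looks like in user code. This is a minimal sketch, assuming a closed ConstraintMatrix and a PETSc block vector that is distributed without ghost elements; the helper function name and the exact header paths are illustrative assumptions, not part of this patch:

  #include <deal.II/lac/constraint_matrix.h>
  #include <deal.II/lac/petsc_parallel_block_vector.h>

  // Overwrite every constrained entry x_i owned by this process with
  // sum_j c_ij * x_j + b_i; values x_j owned by other processes are
  // communicated automatically. Before this patch, this overload
  // aborted with ExcNotImplemented().
  void apply_constraints (const dealii::ConstraintMatrix          &constraints,
                          dealii::PETScWrappers::MPI::BlockVector &solution)
  {
    constraints.distribute (solution);
  }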
diff --git a/deal.II/include/deal.II/lac/constraint_matrix.templates.h b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
index 76268dcdcb..037b2f9535 100644
--- a/deal.II/include/deal.II/lac/constraint_matrix.templates.h
+++ b/deal.II/include/deal.II/lac/constraint_matrix.templates.h
@@ -982,22 +982,98 @@ template <class VectorType>
 void
 ConstraintMatrix::distribute (VectorType &vec) const
 {
-  Assert (sorted == true, ExcMatrixNotClosed());
+  Assert (sorted==true, ExcMatrixIsClosed());
+
+  // if the vector type supports parallel storage and if the
+  // vector actually does so, we need to be a bit more
+  // careful about how we do things
+  if ((vec.supports_distributed_data == true)
+      &&
+      (vec.locally_owned_elements() != complete_index_set(vec.size())))
+    {
+      const IndexSet vec_owned_elements = vec.locally_owned_elements();
 
-  std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
-  for (; next_constraint != lines.end(); ++next_constraint)
+      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
+
+#ifdef DEBUG
+      {
+        // the algorithm below pushes contributions c_ij x_j to a constrained
+        // DoF x_i *from the side of the owner of x_j*, as opposed to pulling
+        // it from the owner of the target side x_i. this relies on the
+        // assumption that both target and source know about the constraint
+        // on x_i.
+        //
+        // the disadvantage is that it is incredibly difficult to find out what
+        // is happening if the assumption is not satisfied. to help debug
+        // problems of this kind, we do the following in a first step if
+        // debugging is enabled:
+        // - every processor that owns an x_j where c_ij!=0 sends a one
+        //   to the owner of x_i to add up
+        // - the owner of x_i knows how many nonzero entries exist, so it can
+        //   verify that the correct number of ones has been added
+        // - if the sum is not correct, then apparently one of the owners
+        //   of the x_j's did not know to send its one and, consequently,
+        //   will also not know to send the correct c_ij*x_j later on,
+        //   leading to a bug
+        set_zero (vec);
+        for (constraint_iterator it = lines.begin();
+             it != lines.end(); ++it)
+          for (unsigned int i=0; i<it->entries.size(); ++i)
+            if (vec_owned_elements.is_element (it->entries[i].first))
+              vec(it->line) += 1;
+        vec.compress (VectorOperation::add);
+
+        for (constraint_iterator it = lines.begin();
+             it != lines.end(); ++it)
+          if (vec_owned_elements.is_element(it->line))
+            Assert (vec(it->line) == it->entries.size(),
+                    ExcIncorrectConstraint(it->line, it->entries.size()));
+      }
+#endif
+
+      // set the values of all constrained DoFs to zero, then add the
+      // contributions for each constrained DoF of all the ones we
+      // own locally
+      set_zero (vec);
+
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        for (unsigned int i=0; i<it->entries.size(); ++i)
+          if (vec_owned_elements.is_element(it->entries[i].first))
+            vec(it->line) += it->entries[i].second *
+                             vec(it->entries[i].first);
+
+      // in a final step, add the inhomogeneities to the elements we
+      // own locally
+      for (constraint_iterator it = lines.begin();
+           it != lines.end(); ++it)
+        if (vec_owned_elements.is_element(it->line))
+          vec(it->line) += it->inhomogeneity;
+
+      // now compress to communicate the entries that we added but that
+      // are owned by other processors to their owners
+      vec.compress (VectorOperation::add);
+    }
+  else
+    // purely sequential vector (either because the type doesn't
+    // support anything else or because it's completely stored
+    // locally)
     {
-      // fill entry in line
-      // next_constraint.line by adding the
-      // different contributions
-      typename VectorType::value_type
-      new_value = next_constraint->inhomogeneity;
-      for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
-        new_value += (static_cast<typename VectorType::value_type>
-                      (vec(next_constraint->entries[i].first)) *
-                      next_constraint->entries[i].second);
-      Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
-      vec(next_constraint->line) = new_value;
+      std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
+      for (; next_constraint != lines.end(); ++next_constraint)
+        {
+          // fill entry in line
+          // next_constraint.line by adding the
+          // different contributions
+          typename VectorType::value_type
+          new_value = next_constraint->inhomogeneity;
+          for (unsigned int i=0; i<next_constraint->entries.size(); ++i)
+            new_value += (static_cast<typename VectorType::value_type>
+                          (vec(next_constraint->entries[i].first)) *
+                          next_constraint->entries[i].second);
+          Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+          vec(next_constraint->line) = new_value;
+        }
     }
 }
diff --git a/deal.II/source/lac/constraint_matrix.cc b/deal.II/source/lac/constraint_matrix.cc
index 55f1bdf4d7..3e7c63c384 100644
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@ -1518,271 +1518,6 @@ void ConstraintMatrix::condense (BlockCompressedSimpleSparsityPattern &sparsity)
-#ifdef DEAL_II_WITH_TRILINOS
-
-// this is a specialization for a parallel (non-block) Trilinos vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  //TODO: not implemented yet, we need to fix LocalRange() first to only
-  //include "owned" indices. For this we need to keep track of the owned
-  //indices, because Trilinos doesn't. Use same constructor interface as in
-  //PETSc with two IndexSets!
-  AssertThrow (vec.vector_partitioner().IsOneToOne(),
-               ExcMessage ("Distribute does not work on vectors with overlapping parallel partitioning."));
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-  ConstraintLine index_comparison;
-  index_comparison.line = vec.local_range().first;
-  const constraint_iterator begin_my_constraints =
-    Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-  index_comparison.line = vec.local_range().second;
-  const constraint_iterator end_my_constraints
-    = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-  // Here we search all the indices that we need to have read-access to -
-  // the local nodes and all the nodes that the constraints indicate.
-  IndexSet my_indices (vec.size());
-  {
-    const std::pair<unsigned int, unsigned int>
-    local_range = vec.local_range();
-
-    my_indices.add_range (local_range.first, local_range.second);
-
-    std::set<unsigned int> individual_indices;
-    for (constraint_iterator it = begin_my_constraints;
-         it != end_my_constraints; ++it)
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        if ((it->entries[i].first < local_range.first)
-            ||
-            (it->entries[i].first >= local_range.second))
-          individual_indices.insert (it->entries[i].first);
-
-    my_indices.add_indices (individual_indices.begin(),
-                            individual_indices.end());
-  }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,false,true);
-
-  for (constraint_iterator it = begin_my_constraints;
-       it != end_my_constraints; ++it)
-    {
-      // fill entry in line next_constraint.line by adding the different
-      // contributions
-      double new_value = it->inhomogeneity;
-      for (unsigned int i=0; i<it->entries.size(); ++i)
-        new_value += (vec_distribute(it->entries[i].first) *
-                      it->entries[i].second);
-      vec(it->line) = new_value;
-    }
-
-  // some processes might not apply constraints, so we need to explicitly
-  // state that the others are doing an insert here:
-  vec.compress (::dealii::VectorOperation::insert);
-}
-
-
-
-template<>
-void
-ConstraintMatrix::distribute (TrilinosWrappers::MPI::BlockVector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-
-  IndexSet my_indices (vec.size());
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      // Here we search all the indices that we need to have read-access to
-      // - the local nodes and all the nodes that the constraints indicate.
-      // No caching done yet. This would need some more clever data structures
-      // for doing that.
-      const std::pair<unsigned int, unsigned int>
-      local_range = vec.block(block).local_range();
-
-      my_indices.add_range (local_range.first, local_range.second);
-
-      std::set<unsigned int> individual_indices;
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        for (unsigned int i=0; i<it->entries.size(); ++i)
-          if ((it->entries[i].first < local_range.first)
-              ||
-              (it->entries[i].first >= local_range.second))
-            individual_indices.insert (it->entries[i].first);
-
-      my_indices.add_indices (individual_indices.begin(),
-                              individual_indices.end());
-    }
-
-#ifdef DEAL_II_WITH_MPI
-  const Epetra_MpiComm *mpi_comm
-    = dynamic_cast<const Epetra_MpiComm *>(&vec.block(0).trilinos_vector().Comm());
-
-  Assert (mpi_comm != 0, ExcInternalError());
-
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (mpi_comm->Comm(), true));
-#else
-  TrilinosWrappers::MPI::Vector vec_distribute
-  (my_indices.make_trilinos_map (MPI_COMM_WORLD, true));
-#endif
-
-  // here we import the data
-  vec_distribute.reinit(vec,true);
-
-  for (unsigned int block=0; block<vec.n_blocks(); ++block)
-    {
-      typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-      ConstraintLine index_comparison;
-      index_comparison.line = vec.block(block).local_range().first
-                              +vec.get_block_indices().block_start(block);
-      const constraint_iterator begin_my_constraints =
-        Utilities::lower_bound (lines.begin(),lines.end(),index_comparison);
-
-      index_comparison.line = vec.block(block).local_range().second
-                              +vec.get_block_indices().block_start(block);
-
-      const constraint_iterator end_my_constraints
-        = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);
-
-      for (constraint_iterator it = begin_my_constraints;
-           it != end_my_constraints; ++it)
-        {
-          // fill entry in line next_constraint.line by adding the
-          // different contributions
-          double new_value = it->inhomogeneity;
-          for (unsigned int i=0; i<it->entries.size(); ++i)
-            new_value += (vec_distribute(it->entries[i].first) *
-                          it->entries[i].second);
-          vec(it->line) = new_value;
-        }
-      vec.block(block).compress(::dealii::VectorOperation::insert);
-    }
-}
-
-#endif
-
-#ifdef DEAL_II_WITH_PETSC
-
-// this is a specialization for a parallel (non-block) PETSc vector. The
-// basic idea is to just work on the local range of the vector. But we need
-// access to values that the local nodes are constrained to.
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::Vector &vec) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  Assert (vec.has_ghost_elements() == false,
-          ExcMessage ("This operation can only be performed on vectors without ghost elements."));
-
-  IndexSet vec_owned_elements (vec.size());
-  vec_owned_elements.add_range(vec.local_range().first,
-                               vec.local_range().second);
-
-  typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
-
-#ifdef DEBUG
-  if (vec.supports_distributed_data == true)
-    {
-      // the algorithm below pushes contributions c_ij x_j to a constrained
-      // DoF x_i *from the side of the owner of x_j*, as opposed to pulling
-      // it from the owner of the target side x_i. this relies on the
-      // assumption that both target and source know about the constraint
-      // on x_i.
-      //
-      // the disadvantage is that it is incredibly difficult to find out what
-      // is happening if the assumption is not satisfied.
-      // to help debug problems of this kind, we do the following in a first
-      // step if debugging is enabled:
-      // - every processor that owns an x_j where c_ij!=0 sends a one
-      //   to the owner of x_i to add up
-      // - the owner of x_i knows how many nonzero entries exist, so it can
-      //   verify that the correct number of ones has been added
-      // - if the sum is not correct, then apparently one of the owners
-      //   of the x_j's did not know to send its one and, consequently,
-      //   will also not know to send the correct c_ij*x_j later on,
-      //   leading to a bug
-      set_zero (vec);
-      for (constraint_iterator it = lines.begin();
-           it != lines.end(); ++it)
-        for (unsigned int i=0; i<it->entries.size(); ++i)
-          if (vec_owned_elements.is_element (it->entries[i].first))
-            vec(it->line) += 1;
-      vec.compress (VectorOperation::add);
-
-      for (constraint_iterator it = lines.begin();
-           it != lines.end(); ++it)
-        if (vec_owned_elements.is_element(it->line))
-          Assert (vec(it->line) == it->entries.size(),
-                  ExcIncorrectConstraint(it->line, it->entries.size()));
-    }
-#endif
-
-  // set the values of all constrained DoFs to zero, then add the
-  // contributions for each constrained DoF of all the ones we
-  // own locally
-  set_zero (vec);
-
-  for (constraint_iterator it = lines.begin();
-       it != lines.end(); ++it)
-    for (unsigned int i=0; i<it->entries.size(); ++i)
-      if (vec_owned_elements.is_element(it->entries[i].first))
-        vec(it->line) += it->entries[i].second *
-                         vec(it->entries[i].first);
-
-  // now compress to communicate the entries that we added but that
-  // are owned by other processors to their owners
-  vec.compress (VectorOperation::add);
-}
-
-
-template<>
-void
-ConstraintMatrix::distribute (PETScWrappers::MPI::BlockVector &/*vec*/) const
-{
-  Assert (sorted==true, ExcMatrixIsClosed());
-  AssertThrow (false, ExcNotImplemented());
-}
-
-#endif
-
-
-
 bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
 {
   if (is_constrained(index) == false)
@@ -1924,8 +1659,7 @@ ConstraintMatrix::memory_consumption () const
                                     VectorType                      &, \
                                     const FullMatrix<double>        &) const; \
   template void ConstraintMatrix::distribute<VectorType >(const VectorType &condensed,\
-                                                          VectorType       &uncondensed) const;\
-  template void ConstraintMatrix::distribute<VectorType >(VectorType &vec) const
+                                                          VectorType       &uncondensed) const
 
 #define PARALLEL_VECTOR_FUNCTIONS(VectorType) \
   template void ConstraintMatrix:: \
   distribute_local_to_global<VectorType > (const Vector<double>            &, \
                                            const std::vector<unsigned int> &, \
                                            VectorType                      &, \
                                            const FullMatrix<double>        &) const
 
 
-// TODO: Can PETSc really do all the operations required by the above
-// condense/distribute function etc also on distributed vectors? Trilinos
-// can't do that - we have to rewrite those functions by hand if we want to
-// use them. The key is to use local ranges etc., which still needs to be
-// implemented.
 #ifdef DEAL_II_WITH_PETSC
 VECTOR_FUNCTIONS(PETScWrappers::MPI::Vector);
 VECTOR_FUNCTIONS(PETScWrappers::MPI::BlockVector);
diff --git a/deal.II/source/lac/constraint_matrix.inst.in b/deal.II/source/lac/constraint_matrix.inst.in
index 400f44cd4f..1a69fff4a2 100644
--- a/deal.II/source/lac/constraint_matrix.inst.in
+++ b/deal.II/source/lac/constraint_matrix.inst.in
@@ -5,7 +5,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<T<S> > (
       const Vector<S>&, const std::vector<unsigned int> &, T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
-    template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
     template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
   }
 
@@ -17,7 +16,6 @@ for (S: REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
     template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
      const Vector<S>&, const std::vector<unsigned int> &, parallel::distributed::T<S> &, const FullMatrix<S>&) const;
     template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
-    template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
     template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
   }
 
@@ -29,7 +27,6 @@ for (V: EXTERNAL_SEQUENTIAL_VECTORS)
     template void ConstraintMatrix::distribute_local_to_global<V> (
       const Vector<double>&, const std::vector<unsigned int> &, V&, const FullMatrix<double>&) const;
     template void ConstraintMatrix::distribute<V>(const V&, V&) const;
-    template void ConstraintMatrix::distribute<V>(V&) const;
     template void ConstraintMatrix::set_zero<V>(V&) const;
   }
 
@@ -57,3 +54,9 @@ for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
     template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
       const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
   }
+
+
+for (Vec : SERIAL_VECTORS)
+  {
+    template void ConstraintMatrix::distribute<Vec>(Vec &) const;
+  }
diff --git a/tests/mpi/petsc_distribute_01_inhomogenous.cc b/tests/mpi/petsc_distribute_01_inhomogenous.cc
new file mode 100644
index 0000000000..d1f623311a
--- /dev/null
+++ b/tests/mpi/petsc_distribute_01_inhomogenous.cc
@@ -0,0 +1,159 @@
+//---------------------------------------------------------------------------
+//    $Id$
+//    Version: $Name$
+//
+//    Copyright (C) 2009, 2010, 2012, 2013 by the deal.II authors
+//
+//    This file is subject to QPL and may not be distributed
+//    without copyright and license information. Please refer
+//    to the file deal.II/doc/license.html for the text and
+//    further information on this license.
+// +//--------------------------------------------------------------------------- + + +// check ConstraintMatrix.distribute() for a petsc vector +// +// like _01, but with an inhomogeneity + +#include "../tests.h" +#include +#include +#include + +#include +#include + + + +void test() +{ + const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); + const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD); + + // create a vector that consists of elements indexed from 0 to n + PETScWrappers::MPI::Vector vec (MPI_COMM_WORLD, 100 * n_processes, 100); + Assert (vec.local_size() == 100, ExcInternalError()); + Assert (vec.local_range().first == 100*myid, ExcInternalError()); + Assert (vec.local_range().second == 100*myid+100, ExcInternalError()); + for (unsigned int i=vec.local_range().first; i (100*myid-50, 0), + std::min (100*myid+150, vec.size())); + ConstraintMatrix cm (locally_relevant_range); + + // add constraints that constrain an element in the middle of the + // local range of each processor against an element outside, both in + // the ghost range before and after + // + // note that we tell each processor about all constraints, but most + // of them will throw away this information since it is not for a + // DoF inside the locally relevant range + for (unsigned int p=0; p +#include +#include + +#include +#include + + + +void test() +{ + const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); + const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD); + + // create a vector that consists of elements indexed from 0 to n + TrilinosWrappers::MPI::Vector vec; + { + IndexSet is (100*n_processes); + is.add_range (100*myid, 100*myid+100); + vec.reinit (is, MPI_COMM_WORLD); + } + Assert (vec.local_size() == 100, ExcInternalError()); + Assert (vec.local_range().first == 100*myid, ExcInternalError()); + Assert (vec.local_range().second == 100*myid+100, ExcInternalError()); + for (unsigned int i=vec.local_range().first; i (100*myid-50, 0), + std::min (100*myid+150, vec.size())); + ConstraintMatrix cm (locally_relevant_range); + + // add constraints that constrain an element in the middle of the + // local range of each processor against an element outside, both in + // the ghost range before and after + // + // note that we tell each processor about all constraints, but most + // of them will throw away this information since it is not for a + // DoF inside the locally relevant range + for (unsigned int p=0; p +#include +#include + +#include +#include + + + +void test() +{ + const unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); + const unsigned int n_processes = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD); + + // create a vector that consists of elements indexed from 0 to n + PETScWrappers::MPI::Vector vec (MPI_COMM_WORLD, 100 * n_processes, 100); + Assert (vec.local_size() == 100, ExcInternalError()); + Assert (vec.local_range().first == 100*myid, ExcInternalError()); + Assert (vec.local_range().second == 100*myid+100, ExcInternalError()); + for (unsigned int i=vec.local_range().first; i 1) + Assert (exception_caught == true, ExcInternalError()); + + if (myid == 0) + deallog << "OK" << std::endl; +} + + +int main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD); + + + deallog.push(Utilities::int_to_string(myid)); + + if (myid == 0) + { + std::ofstream 
+        logfile(output_file_for_mpi("trilinos_distribute_02").c_str());
+      deallog.attach(logfile);
+      deallog.depth_console(0);
+      deallog.threshold_double(1.e-10);
+
+      test();
+    }
+  else
+    test();
+
+}
diff --git a/tests/mpi/trilinos_distribute_02/ncpu_1/cmp/generic b/tests/mpi/trilinos_distribute_02/ncpu_1/cmp/generic
new file mode 100644
index 0000000000..be8d055f86
--- /dev/null
+++ b/tests/mpi/trilinos_distribute_02/ncpu_1/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
diff --git a/tests/mpi/trilinos_distribute_02/ncpu_10/cmp/generic b/tests/mpi/trilinos_distribute_02/ncpu_10/cmp/generic
new file mode 100644
index 0000000000..be8d055f86
--- /dev/null
+++ b/tests/mpi/trilinos_distribute_02/ncpu_10/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
diff --git a/tests/mpi/trilinos_distribute_02/ncpu_2/cmp/generic b/tests/mpi/trilinos_distribute_02/ncpu_2/cmp/generic
new file mode 100644
index 0000000000..be8d055f86
--- /dev/null
+++ b/tests/mpi/trilinos_distribute_02/ncpu_2/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
diff --git a/tests/mpi/trilinos_distribute_02/ncpu_4/cmp/generic b/tests/mpi/trilinos_distribute_02/ncpu_4/cmp/generic
new file mode 100644
index 0000000000..be8d055f86
--- /dev/null
+++ b/tests/mpi/trilinos_distribute_02/ncpu_4/cmp/generic
@@ -0,0 +1,2 @@
+
+DEAL:0::OK
-- 
2.39.5
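The core of the unified distribute() in this patch is the "push" scheme described in its comments: zero each constrained entry x_i, let the owner of each x_j push c_ij*x_j into x_i, add the inhomogeneity b_i, and finally compress. The following self-contained serial sketch shows the same three steps; the ConstraintLine struct and the distribute() function below are simplified stand-ins written for this illustration, not deal.II code, and the serial compress() step is omitted because it would be a no-op:

  #include <cassert>
  #include <cstddef>
  #include <utility>
  #include <vector>

  // Simplified stand-in for ConstraintMatrix's internal ConstraintLine:
  // the constraint reads x_i = sum_j c_ij * x_j + b_i.
  struct ConstraintLine
  {
    std::size_t line;                                     // index i of the constrained DoF
    std::vector<std::pair<std::size_t, double> > entries; // pairs (j, c_ij)
    double inhomogeneity;                                 // b_i
  };

  // Serial analogue of the unified distribute(). In the parallel code each
  // loop only touches locally owned indices, and a final
  // compress(VectorOperation::add) ships the pushed contributions to the
  // owners of the constrained entries.
  void distribute (const std::vector<ConstraintLine> &lines,
                   std::vector<double> &x)
  {
    // step 1: set all constrained entries to zero (set_zero() in deal.II)
    for (std::size_t c = 0; c < lines.size(); ++c)
      x[lines[c].line] = 0.;

    // step 2: push the contributions c_ij * x_j; closed constraints only
    // refer to unconstrained DoFs j, so reading x[j] here is safe
    for (std::size_t c = 0; c < lines.size(); ++c)
      for (std::size_t i = 0; i < lines[c].entries.size(); ++i)
        x[lines[c].line] += lines[c].entries[i].second *
                            x[lines[c].entries[i].first];

    // step 3: add the inhomogeneity b_i
    for (std::size_t c = 0; c < lines.size(); ++c)
      x[lines[c].line] += lines[c].inhomogeneity;
  }

  int main ()
  {
    // one constraint: x_2 = 0.5*x_0 + 0.5*x_1 + 1.0
    std::vector<double> x(3);
    x[0] = 2.;
    x[1] = 4.;
    x[2] = -1.;                      // stale value, will be overwritten

    ConstraintLine cl;
    cl.line = 2;
    cl.entries.push_back (std::make_pair (0, 0.5));
    cl.entries.push_back (std::make_pair (1, 0.5));
    cl.inhomogeneity = 1.0;

    distribute (std::vector<ConstraintLine> (1, cl), x);
    assert (x[2] == 4.0);            // 0.5*2 + 0.5*4 + 1
  }

Pushing from the owner of x_j, rather than pulling from the owner of x_i, means no process ever needs read access to off-process vector entries; the price is the assumption, cross-checked by the DEBUG block in the patch, that every process knows all constraints whose right-hand sides touch its locally owned entries.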