From 2e77b89554fd8d8fc38e689da67af5449719706f Mon Sep 17 00:00:00 2001
From: Jean-Paul Pelteret
Date: Wed, 24 Jan 2018 21:55:59 +0100
Subject: [PATCH] Allow copy of Trilinos::MPI::Vector to dealii::Vector on
 subset of procs

This commit relaxes the conditions under which the constructor of
dealii::Vector that takes a Trilinos::MPI::Vector, and the corresponding
assignment operator, may be called. Previously, both had to be called on
all MPI processes (i.e. on MPI_COMM_WORLD). The localized copy is now
built on the communicator of the source vector, so these functions need
only be called by the processes (possibly a subset of MPI_COMM_WORLD)
that share this vector.
---
 include/deal.II/lac/vector.h                  | 25 +++++---
 include/deal.II/lac/vector.templates.h        |  9 +--
 tests/trilinos/vector_local_copy_01.cc        | 57 +++++++++++++++++++
 .../vector_local_copy_01.mpirun=1.output      |  2 +
 .../vector_local_copy_01.mpirun=2.output      |  2 +
 5 files changed, 82 insertions(+), 13 deletions(-)
 create mode 100644 tests/trilinos/vector_local_copy_01.cc
 create mode 100644 tests/trilinos/vector_local_copy_01.mpirun=1.output
 create mode 100644 tests/trilinos/vector_local_copy_01.mpirun=2.output

diff --git a/include/deal.II/lac/vector.h b/include/deal.II/lac/vector.h
index a8d9f746e1..d777b56c48 100644
--- a/include/deal.II/lac/vector.h
+++ b/include/deal.II/lac/vector.h
@@ -190,11 +190,14 @@ public:
    * This copy constructor is only available if Trilinos was detected during
    * configuration time.
    *
-   * Note that due to the communication model used in MPI, this operation can
-   * only succeed if all processes do it at the same time. This means that it
-   * is not possible for only one process to obtain a copy of a parallel
-   * vector while the other jobs do something else. This call will rather
-   * result in a copy of the vector on all processors.
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by v.get_mpi_communicator()) do it at
+   * the same time. This means that, unless you use a split MPI communicator,
+   * it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. This call will therefore result in a copy of the vector on all
+   * processors that share @p v.
    */
   explicit Vector (const TrilinosWrappers::MPI::Vector &v);
 #endif
@@ -377,10 +380,14 @@ public:
    * operator is only available if Trilinos was detected during configuration
    * time.
    *
-   * Note that due to the communication model used in MPI, this operation can
-   * only succeed if all processes do it at the same time. I.e., it is not
-   * possible for only one process to obtain a copy of a parallel vector while
-   * the other jobs do something else.
+   * @note Due to the communication model used in MPI, this operation can
+   * only succeed if all processes that have knowledge of @p v
+   * (i.e. those given by v.get_mpi_communicator()) do it at
+   * the same time. This means that, unless you use a split MPI communicator,
+   * it is not normally possible for only one or a subset of processes
+   * to obtain a copy of a parallel vector while the other jobs do something
+   * else. This call will therefore result in a copy of the vector on all
+   * processors that share @p v.
    */
   Vector & operator= (const TrilinosWrappers::MPI::Vector &v);
diff --git a/include/deal.II/lac/vector.templates.h b/include/deal.II/lac/vector.templates.h
index 04b03b18d9..eab1cf9b49 100644
--- a/include/deal.II/lac/vector.templates.h
+++ b/include/deal.II/lac/vector.templates.h
@@ -170,13 +170,14 @@ Vector<Number>::Vector (const TrilinosWrappers::MPI::Vector &v)
       allocate();

       // Copy the distributed vector to
-      // a local one at all
-      // processors. TODO: There could
+      // a local one at all processors
+      // that know about the original vector.
+      // TODO: There could
       // be a better solution than
       // this, but it has not yet been
       // found.
       TrilinosWrappers::MPI::Vector localized_vector;
-      localized_vector.reinit(complete_index_set(vec_size));
+      localized_vector.reinit(complete_index_set(vec_size), v.get_mpi_communicator());
       localized_vector.reinit (v, false, true);

       // get a representation of the vector
@@ -829,7 +830,7 @@ Vector<Number>::operator= (const TrilinosWrappers::MPI::Vector &v)
   // then call the other =
   // operator.
   TrilinosWrappers::MPI::Vector localized_vector;
-  localized_vector.reinit(complete_index_set(v.size()));
+  localized_vector.reinit(complete_index_set(v.size()), v.get_mpi_communicator());
   localized_vector.reinit(v, false, true);

   if (v.size() != vec_size)
diff --git a/tests/trilinos/vector_local_copy_01.cc b/tests/trilinos/vector_local_copy_01.cc
new file mode 100644
index 0000000000..bcd77103b3
--- /dev/null
+++ b/tests/trilinos/vector_local_copy_01.cc
@@ -0,0 +1,57 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test that the creation of a local copy of a vector does not lead
+// to a deadlock if not performed on all MPI processes
+
+#include "../tests.h"
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+
+int main (int argc, char **argv)
+{
+  initlog();
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+  MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+  const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator);
+  const unsigned int n_mpi_processes  = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+  MPI_Comm serial_communicator;
+  const unsigned int colour = this_mpi_process;
+  const unsigned int key    = this_mpi_process;
+  if (n_mpi_processes > 1)
+    MPI_Comm_split(mpi_communicator, colour, key, &serial_communicator);
+  else
+    serial_communicator = mpi_communicator;
+
+  if (this_mpi_process == 0)
+    {
+      // A vector that lives only on the processes in serial_communicator
+      const TrilinosWrappers::MPI::Vector tril_vec (complete_index_set(10), serial_communicator);
+
+      // Check copy constructor
+      const Vector<double> local_vector_1 (tril_vec);
+
+      // Check copy assignment operator
+      Vector<double> local_vector_2;
+      local_vector_2 = tril_vec;
+    }
+
+  deallog << "OK" << std::endl;
+}
diff --git a/tests/trilinos/vector_local_copy_01.mpirun=1.output b/tests/trilinos/vector_local_copy_01.mpirun=1.output
new file mode 100644
index 0000000000..0fd8fc12f0
--- /dev/null
+++ b/tests/trilinos/vector_local_copy_01.mpirun=1.output
@@ -0,0 +1,2 @@
+
+DEAL::OK
diff --git a/tests/trilinos/vector_local_copy_01.mpirun=2.output b/tests/trilinos/vector_local_copy_01.mpirun=2.output
new file mode 100644
index 0000000000..0fd8fc12f0
--- /dev/null
+++ b/tests/trilinos/vector_local_copy_01.mpirun=2.output
@@ -0,0 +1,2 @@
+
+DEAL::OK
-- 
2.39.5
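
For readers who want to see the change from the user's side, the sketch below
condenses the new test into a standalone program: each rank is given its own
single-process communicator, and only rank 0 localizes a Trilinos vector built
on that communicator. This sketch is an illustration written for this note,
not part of the patch; the vector size (10), the element type double, and all
variable names are arbitrary, and it assumes a deal.II build configured with
MPI and Trilinos.

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#include <mpi.h>
#include <iostream>

int main (int argc, char **argv)
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);

  // Give every rank its own single-process communicator by splitting
  // MPI_COMM_WORLD by rank; with a single rank there is nothing to split.
  MPI_Comm my_communicator;
  if (n_ranks > 1)
    MPI_Comm_split (MPI_COMM_WORLD, my_rank, my_rank, &my_communicator);
  else
    my_communicator = MPI_COMM_WORLD;

  if (my_rank == 0)
    {
      // A Trilinos vector known only to rank 0. With this patch, the
      // localizing copies below involve only the processes in
      // my_communicator; previously they implicitly used MPI_COMM_WORLD
      // and would deadlock if the other ranks did not make the same call.
      TrilinosWrappers::MPI::Vector distributed (complete_index_set (10),
                                                 my_communicator);

      const Vector<double> local_copy (distributed); // copy constructor
      Vector<double>       local_copy_2;
      local_copy_2 = distributed;                    // assignment operator

      std::cout << "Localized " << local_copy.size ()
                << " entries on rank 0." << std::endl;
    }

  if (n_ranks > 1)
    MPI_Comm_free (&my_communicator);

  return 0;
}

The copy constructor and assignment operator now build their localized copy on
the communicator returned by distributed.get_mpi_communicator(), so ranks
outside that communicator never need to participate; before this patch the
same calls would have required a matching call on every rank of
MPI_COMM_WORLD.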