* This copy constructor is only available if Trilinos was detected during
* configuration time.
*
- * Note that due to the communication model used in MPI, this operation can
- * only succeed if all processes do it at the same time. This means that it
- * is not possible for only one process to obtain a copy of a parallel
- * vector while the other jobs do something else. This call will rather
- * result in a copy of the vector on all processors.
+ * @note Due to the communication model used in MPI, this operation can
+ * only succeed if all processes that have knowledge of @p v
+ * (i.e., those given by <code>v.get_mpi_communicator()</code>) do it at
+ * the same time. This means that, unless you use a split MPI
+ * communicator, it is not normally possible for only one or a subset of
+ * processes to obtain a copy of a parallel vector while the other jobs
+ * do something else. This call will therefore result in a copy of the
+ * vector on all processors that share @p v.
*/
explicit Vector (const TrilinosWrappers::MPI::Vector &v);
#endif
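As a minimal sketch of the collective requirement described in the note above (variable names are illustrative; a `using namespace dealii;` and an initialized MPI run are assumed), every rank of the vector's communicator must execute the copy together:

    // All processes in MPI_COMM_WORLD build the same distributed vector
    // and then take part in the localizing copy; if any rank of the
    // communicator skipped the copy, the collective communication inside
    // it would deadlock.
    TrilinosWrappers::MPI::Vector v(complete_index_set(10), MPI_COMM_WORLD);
    Vector<double> serial_copy(v); // collective over v.get_mpi_communicator()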
* operator is only available if Trilinos was detected during configuration
* time.
*
- * Note that due to the communication model used in MPI, this operation can
- * only succeed if all processes do it at the same time. I.e., it is not
- * possible for only one process to obtain a copy of a parallel vector while
- * the other jobs do something else.
+ * @note Due to the communication model used in MPI, this operation can
+ * only succeed if all processes that have knowledge of @p v
+ * (i.e., those given by <code>v.get_mpi_communicator()</code>) do it at
+ * the same time. This means that, unless you use a split MPI
+ * communicator, it is not normally possible for only one or a subset of
+ * processes to obtain a copy of a parallel vector while the other jobs
+ * do something else. This call will therefore result in a copy of the
+ * vector on all processors that share @p v.
*/
Vector<Number> &
operator= (const TrilinosWrappers::MPI::Vector &v);
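And a sketch of the split-communicator escape hatch mentioned in the note (hypothetical names; assumes MPI has already been initialized):

    // Give each rank its own single-process communicator so that one
    // rank can localize a vector without the other ranks taking part.
    const int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    MPI_Comm self_comm;
    MPI_Comm_split(MPI_COMM_WORLD, rank, rank, &self_comm);
    TrilinosWrappers::MPI::Vector v(complete_index_set(10), self_comm);
    Vector<double> serial_copy;
    serial_copy = v;            // collective only over self_comm
    MPI_Comm_free(&self_comm);

This is exactly the pattern exercised by the new test added below.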
allocate();
// Copy the distributed vector to
- // a local one at all
- // processors. TODO: There could
+ // a local one at all processors
+ // that know about the original vector.
+ // TODO: There could
// be a better solution than
// this, but it has not yet been
// found.
TrilinosWrappers::MPI::Vector localized_vector;
- localized_vector.reinit(complete_index_set(vec_size));
+ localized_vector.reinit(complete_index_set(vec_size), v.get_mpi_communicator());
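+      // The follow-up call reinit(v, false, true) copies the values:
+      // the final 'true' allows the source and destination to use
+      // different parallel maps, so the distributed entries of v are
+      // imported into the locally-complete localized_vector.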
localized_vector.reinit (v, false, true);
// get a representation of the vector
// then call the other =
// operator.
TrilinosWrappers::MPI::Vector localized_vector;
- localized_vector.reinit(complete_index_set(v.size()));
+ localized_vector.reinit(complete_index_set(v.size()), v.get_mpi_communicator());
localized_vector.reinit(v, false, true);
if (v.size() != vec_size)
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test that the creation of a local copy of a vector does not lead
+// to a deadlock if not performed on all MPI processes
+
+#include "../tests.h"
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/vector.h>
+
+
+int main (int argc, char **argv)
+{
+ initlog();
+
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+ MPI_Comm mpi_communicator = MPI_COMM_WORLD;
+ const unsigned int this_mpi_process = Utilities::MPI::this_mpi_process(mpi_communicator);
+ const unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(mpi_communicator);
+
+ MPI_Comm serial_communicator;
+ const unsigned int colour = this_mpi_process;
+ const unsigned int key = this_mpi_process;
+ if (n_mpi_processes > 1)
+ MPI_Comm_split(mpi_communicator, colour, key, &serial_communicator);
+ else
+ serial_communicator = mpi_communicator;
+
+ if (this_mpi_process == 0)
+ {
+
+ const TrilinosWrappers::MPI::Vector tril_vec (complete_index_set(10), serial_communicator);
+
+ // Check copy constructor
+ const Vector<double> local_vector_1 (tril_vec);
+
+ // Check equality operator
+ Vector<double> local_vector_2;
+ local_vector_2 = tril_vec;
+ }
+
+  // Release the communicator created by MPI_Comm_split above.
+  if (n_mpi_processes > 1)
+    MPI_Comm_free(&serial_communicator);
+
+  deallog << "OK" << std::endl;
+}
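A note on the test design: the MPI_Comm_split call gives every rank colour == key == its own rank number, so each rank ends up in a single-process communicator. Rank 0 then builds a vector on its private communicator and localizes it alone; with the old code the localized vector was implicitly placed on MPI_COMM_WORLD, making the copy collective over all ranks, so the test would hang.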
--- /dev/null
+
+DEAL::OK
--- /dev/null
+
+DEAL::OK