*/
explicit Vector (const MPI_Comm &communicator,
const IndexSet &local,
- const IndexSet &ghost);
+ const IndexSet &ghost) DEAL_II_DEPRECATED;
+
+ /**
+ * Constructs a new parallel ghosted PETSc
+ * vector from an IndexSet. Note that
+ * @p local must be contiguous and
+ * the global size of the vector is
+ * determined by local.size(). The
+ * global indices in @p ghost are
+ * supplied as ghost indices that can
+ * also be read locally.
+ *
+ * Note that the @p ghost IndexSet
+ * may be empty and that any indices
+ * already contained in @p local are
+ * ignored during construction. That
+ * way, the ghost parameter can equal
+ * the set of locally relevant
+ * degrees of freedom, see step-32.
+ *
+ * @note This operation always creates a ghosted
+ * vector.
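+ *
+ * A minimal usage sketch (purely
+ * illustrative; @p my_rank stands for the
+ * rank of the current process, and two
+ * processes share eight elements):
+ * @code
+ * IndexSet owned (8);
+ * owned.add_range (4*my_rank, 4*(my_rank+1));
+ * IndexSet ghosts (8);
+ * ghosts.add_index (0); // element 0 is readable everywhere
+ * Vector v (owned, ghosts, MPI_COMM_WORLD);
+ * @endcode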
+ */
+ explicit Vector (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Constructs a new parallel PETSc
 * vector from an IndexSet. This creates a
 * non-ghosted vector.
*/
- explicit Vector (const MPI_Comm &communicator,
- const IndexSet &local);
+ explicit Vector (const MPI_Comm &communicator,
+ const IndexSet &local) DEAL_II_DEPRECATED;
+ /**
+ * Constructs a new parallel PETSc
+ * vector from an IndexSet. This creates a
+ * non-ghosted vector.
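+ *
+ * A minimal sketch, reusing the @p owned
+ * IndexSet from the ghosted constructor's
+ * example above:
+ * @code
+ * Vector v (owned, MPI_COMM_WORLD);
+ * @endcode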
+ */
+ explicit Vector (const IndexSet &local,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
 * Copy the given vector. Resize the
 * present vector if necessary.
*/
void reinit (const MPI_Comm &communicator,
const IndexSet &local,
- const IndexSet &ghost);
+ const IndexSet &ghost) DEAL_II_DEPRECATED;
+ /**
+ * Reinit as a vector with ghost elements. See
+ * the constructor with the same signature
+ * for more details.
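+ *
+ * A minimal sketch (assuming the index sets
+ * from the ghosted constructor's example):
+ * @code
+ * Vector v;
+ * v.reinit (owned, ghosts, MPI_COMM_WORLD);
+ * @endcode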
+ */
+ void reinit (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
 * Reinit as a vector without ghost elements.
 * See the constructor with the same signature
 * for more details.
*/
void reinit (const MPI_Comm &communicator,
- const IndexSet &local);
+ const IndexSet &local) DEAL_II_DEPRECATED;
+ /**
+ * Reinit as a vector without ghost elements. See
+ * the constructor with the same signature
+ * for more details.
+ */
+ void reinit (const IndexSet &local,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Return a reference to the MPI
Vector (const IndexSet &parallel_partitioning,
const MPI_Comm &communicator = MPI_COMM_WORLD);
- Vector (const MPI_Comm &communicator,
- const IndexSet &local,
- const IndexSet &ghost=IndexSet(0));
+ /**
+ * Creates a ghosted parallel vector.
+ */
+ Vector (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator = MPI_COMM_WORLD);
/**
* Copy constructor from the
Vector::create_vector(local.size(), local.n_elements(), ghost_set);
}
+ Vector::Vector (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator)
+ :
+ communicator (communicator)
+ {
+ Assert(local.is_contiguous(), ExcNotImplemented());
+
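+ // ignore any ghost indices we already own,
+ // as promised in the class documentation: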
+ IndexSet ghost_set = ghost;
+ ghost_set.subtract_set(local);
+
+ Vector::create_vector(local.size(), local.n_elements(), ghost_set);
+ }
+
+
+ Vector::Vector (const IndexSet &local,
+ const MPI_Comm &communicator)
+ :
+ communicator (communicator)
+ {
+ Assert(local.is_contiguous(), ExcNotImplemented());
+ Vector::create_vector(local.size(), local.n_elements());
+ }
+
+
Vector::Vector (const MPI_Comm &communicator,
const IndexSet &local)
:
Vector::reinit (const MPI_Comm &comm,
const IndexSet &local,
const IndexSet &ghost)
+ {
+ reinit(local, ghost, comm);
+ }
+
+ void
+ Vector::reinit (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &comm)
{
communicator = comm;
void
Vector::reinit (const MPI_Comm &comm,
- const IndexSet &local)
+ const IndexSet &local)
+ {
+ reinit(local, comm);
+ }
+
+ void
+ Vector::reinit (const IndexSet &local,
+ const MPI_Comm &comm)
{
communicator = comm;
reinit (v, false, true);
}
- Vector::Vector (const MPI_Comm &communicator,
- const IndexSet &local,
- const IndexSet &ghost)
+ Vector::Vector (const IndexSet &local,
+ const IndexSet &ghost,
+ const MPI_Comm &communicator)
:
VectorBase()
{
Vector(const IndexSet &local, const IndexSet &ghost, const MPI_Comm &comm=MPI_COMM_WORLD)
{}
-
- Vector(const MPI_Comm &comm, const IndexSet local)
+
+ void reinit(const IndexSet &local, const MPI_Comm &comm=MPI_COMM_WORLD)
{}
-
- Vector(const MPI_Comm &commm, const IndexSet &local, const IndexSet &ghost)
+
+ void reinit(const IndexSet &local, const IndexSet &ghost, const MPI_Comm &comm=MPI_COMM_WORLD)
{}
void compress(VectorOperation::values op)
--- /dev/null
+//---------------------------------------------------------------------------
+// $Id: ghost_01.cc 28440 2013-02-17 14:35:08Z heister $
+// Version: $Name$
+//
+// Copyright (C) 2004, 2005, 2010 by the deal.II authors
+//
+// This file is subject to QPL and may not be distributed
+// without copyright and license information. Please refer
+// to the file deal.II/doc/license.html for the text and
+// further information on this license.
+//
+//---------------------------------------------------------------------------
+
+
+// creation and size of LA::MPI::Vector
+
+#include "../tests.h"
+#include <deal.II/lac/abstract_linear_algebra.h>
+#include <deal.II/base/index_set.h>
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+
+#include "gla.h"
+
+template <class LA>
+void test ()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+
+ if (myid==0)
+ deallog << "numproc=" << numproc << std::endl;
+
+ // each processor owns 2 indices, and all
+ // of them ghost element 1 (the second one)
+
+ IndexSet local_active(numproc*2);
+ local_active.add_range(myid*2,myid*2+2);
+ IndexSet local_relevant(numproc*2);
+ local_relevant.add_range(1,2);
+
+
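+ // a second, different partition of 100
+ // elements, used below to reinit() into a
+ // new layout: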
+ IndexSet something(100);
+ something.add_range(myid,myid+1);
+ if (myid==numproc-1)
+ something.add_range(numproc,100);
+
+
+ {
+ //implicit communicator:
+ typename LA::MPI::Vector v1(local_active);
+ Assert(!v1.has_ghost_elements(), ExcInternalError());
+ Assert(v1.size()==numproc*2, ExcInternalError());
+
+ v1.reinit(something);
+ Assert(!v1.has_ghost_elements(), ExcInternalError());
+ Assert(v1.size()==100, ExcInternalError());
+
+ v1.reinit(local_active, local_relevant);
+ Assert(v1.has_ghost_elements(), ExcInternalError());
+ Assert(v1.size()==numproc*2, ExcInternalError());
+
+ v1.reinit(something, MPI_COMM_WORLD);
+ Assert(!v1.has_ghost_elements(), ExcInternalError());
+ Assert(v1.size()==100, ExcInternalError());
+
+ typename LA::MPI::Vector v2(local_active, local_relevant);
+ Assert(v2.has_ghost_elements(), ExcInternalError());
+ Assert(v2.size()==numproc*2, ExcInternalError());
+
+ v2.reinit(local_active, MPI_COMM_WORLD);
+ Assert(!v2.has_ghost_elements(), ExcInternalError());
+ Assert(v2.size()==numproc*2, ExcInternalError());
+
+ v2.reinit(local_active, local_relevant, MPI_COMM_WORLD);
+ Assert(v2.has_ghost_elements(), ExcInternalError());
+ Assert(v2.size()==numproc*2, ExcInternalError());
+
+ }
+
+ // done
+ if (myid==0)
+ deallog << "OK" << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+
+ deallog.push(Utilities::int_to_string(myid));
+
+ if (myid == 0)
+ {
+ std::ofstream logfile(output_file_for_mpi("vec_00").c_str());
+ deallog.attach(logfile);
+ deallog << std::setprecision(4);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ {
+ deallog.push("PETSc");
+ test<LA_PETSc>();
+ deallog.pop();
+ deallog.push("Trilinos");
+ test<LA_Trilinos>();
+ deallog.pop();
+ }
+
+ }
+ else
+ {
+ deallog.push("PETSc");
+ test<LA_PETSc>();
+ deallog.pop();
+ deallog.push("Trilinos");
+ test<LA_Trilinos>();
+ deallog.pop();
+ }
+
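+ // this guard is never true in practice; the
+ // call only ensures that LA_Dummy compiles: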
+ if (myid==9999)
+ test<LA_Dummy>();
+
+
+}
--- /dev/null
+
+DEAL:0:PETSc::numproc=10
+DEAL:0:PETSc::0:0.000
+DEAL:0:PETSc::1:2.000
+DEAL:0:PETSc::ghost: 2.000
+DEAL:0:PETSc::OK
+DEAL:0:Trilinos::numproc=10
+DEAL:0:Trilinos::0:0.000
+DEAL:0:Trilinos::1:2.000
+DEAL:0:Trilinos::ghost: 2.000
+DEAL:0:Trilinos::OK
--- /dev/null
+
+DEAL:0:PETSc::numproc=4
+DEAL:0:PETSc::0:0.000
+DEAL:0:PETSc::1:2.000
+DEAL:0:PETSc::ghost: 2.000
+DEAL:0:PETSc::OK
+DEAL:0:Trilinos::numproc=4
+DEAL:0:Trilinos::0:0.000
+DEAL:0:Trilinos::1:2.000
+DEAL:0:Trilinos::ghost: 2.000
+DEAL:0:Trilinos::OK
IndexSet local_relevant(numproc*2);
local_relevant.add_range(1,2);
- typename LA::MPI::Vector vb(MPI_COMM_WORLD, local_active);
- typename LA::MPI::Vector v(MPI_COMM_WORLD, local_active, local_relevant);
+ {
+ //implicit communicator:
+ typename LA::MPI::Vector v1(local_active);
+ typename LA::MPI::Vector v2(local_active, local_relevant);
+ Assert(!v1.has_ghost_elements(), ExcInternalError());
+ Assert(v2.has_ghost_elements(), ExcInternalError());
+ }
+
+ typename LA::MPI::Vector vb(local_active, MPI_COMM_WORLD);
+ typename LA::MPI::Vector v(local_active, local_relevant, MPI_COMM_WORLD);
// set local values
vb(myid*2)=myid*2.0;
Assert(v.has_ghost_elements(), ExcInternalError());
// check local values
- if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (myid==0)
{
deallog << myid*2 << ":" << v(myid*2) << std::endl;
deallog << myid*2+1 << ":" << v(myid*2+1) << std::endl;
// check ghost values
- if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
+ if (myid==0)
deallog << "ghost: " << v(1) << std::endl;
Assert(v(1) == 2.0, ExcInternalError());
IndexSet local_relevant(numproc*2);
local_relevant.add_range(1,2);
- typename LA::MPI::Vector vb(MPI_COMM_WORLD, local_active);
- typename LA::MPI::Vector v(MPI_COMM_WORLD, local_active, local_relevant);
- typename LA::MPI::Vector v2(MPI_COMM_WORLD, local_active, local_relevant);
+ typename LA::MPI::Vector vb(local_active);
+ typename LA::MPI::Vector v(local_active, local_relevant);
+ typename LA::MPI::Vector v2(local_active, local_relevant);
vb = 1.0;
v2 = vb;