last_action = Zero;
vector.reset (new Epetra_FEVector(*v.vector));
has_ghosts = v.has_ghosts;
+ owned_elements = v.locally_owned_elements();
}
// initialize a minimal, valid object and swap
last_action = Zero;
vector.reset(new Epetra_FEVector(Epetra_Map(0,0,0,Utilities::Trilinos::comm_self())));
+ owned_elements.clear();
swap(v);
}
:
VectorBase()
{
- IndexSet parallel_partitioning = local;
- parallel_partitioning.add_indices(ghost);
- reinit(parallel_partitioning, communicator);
+ reinit(local, ghost, communicator, false);
}
vector.reset (new Epetra_FEVector(input_map));
has_ghosts = vector->Map().UniqueGIDs()==false;
+
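+ // rebuild the set of owned indices from the Trilinos map: a single
+ // contiguous range if the map is linear, otherwise the explicitly
+ // stored global indices of this process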
+ {
+ owned_elements.clear();
+ owned_elements.set_size(size());
+
+ // easy case: local range is contiguous
+ if (vector->Map().LinearMap())
+ {
+ const std::pair<size_type, size_type> range = local_range();
+ owned_elements.add_range (range.first, range.second);
+ }
+ else if (vector->Map().NumMyElements() > 0)
+ {
+ const size_type n_indices = vector->Map().NumMyElements();
+#ifndef DEAL_II_WITH_64BIT_INDICES
+ unsigned int *vector_indices = (unsigned int *)vector->Map().MyGlobalElements();
+#else
+ size_type *vector_indices = (size_type *)vector->Map().MyGlobalElements64();
+#endif
+ owned_elements.add_indices(vector_indices, vector_indices+n_indices);
+ owned_elements.compress();
+ }
+ }
+
last_action = Zero;
}
void
Vector::reinit (const IndexSet &parallel_partitioner,
const MPI_Comm &communicator,
- const bool omit_zeroing_entries)
+ const bool /*omit_zeroing_entries*/)
{
nonlocal_vector.reset();
Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator,
true);
- reinit (map, omit_zeroing_entries);
+
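+ // build the vector directly from the map; the owned index set is taken
+ // from the given partitioner rather than reconstructed from the Epetra_Map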
+ vector.reset (new Epetra_FEVector(map));
+
+ has_ghosts = vector->Map().UniqueGIDs()==false;
+
+ owned_elements = parallel_partitioner;
+
+ last_action = Zero;
}
vector.reset (new Epetra_FEVector(v.vector->Map()));
has_ghosts = v.has_ghosts;
last_action = Zero;
+ owned_elements = v.locally_owned_elements();
}
else if (omit_zeroing_entries == false)
{
const bool import_data)
{
nonlocal_vector.reset();
+ owned_elements.clear();
// In case we do not allow to have different maps, this call means that
// we have to reset the vector. So clear the vector, initialize our map
for (size_type block=0; block<v.n_blocks(); ++block)
n_elements += v.block(block).local_size();
std::vector<TrilinosWrappers::types::int_type> global_ids (n_elements, -1);
+ size_type max_size = 0;
for (size_type block=0; block<v.n_blocks(); ++block)
{
TrilinosWrappers::types::int_type *glob_elements =
for (size_type i=0; i<v.block(block).local_size(); ++i)
global_ids[added_elements++] = glob_elements[i] + block_offset;
block_offset += v.block(block).size();
+ max_size = std::max(max_size, v.block(block).size());
}
+ owned_elements.set_size(max_size);
Assert (n_elements == added_elements, ExcInternalError());
Epetra_Map new_map (v.size(), n_elements, &global_ids[0], 0,
{
v.block(block).trilinos_vector().ExtractCopy (entries, 0);
entries += v.block(block).local_size();
+ owned_elements.add_indices(v.block(block).locally_owned_elements());
}
if (import_data == true)
last_action = Insert;
}
-
}
const bool vector_writable)
{
nonlocal_vector.reset();
+ owned_elements = locally_owned_entries;
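+ // in either branch below the ghost entries remain unowned, so
+ // owned_elements contains only the locally owned set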
if (vector_writable == false)
{
- IndexSet parallel_partitioning = locally_owned_entries;
- parallel_partitioning.add_indices(ghost_entries);
- reinit(parallel_partitioning, communicator);
+ IndexSet parallel_partitioner = locally_owned_entries;
+ parallel_partitioner.add_indices(ghost_entries);
+ Epetra_Map map = parallel_partitioner.make_trilinos_map (communicator,
+ true);
+ vector.reset (new Epetra_FEVector(map));
+ has_ghosts = vector->Map().UniqueGIDs()==false;
+
+ last_action = Zero;
}
else
{
vector.reset (new Epetra_FEVector(*v.vector));
last_action = Zero;
has_ghosts = v.has_ghosts;
+ owned_elements = v.locally_owned_elements();
}
if (v.nonlocal_vector.get() != 0)
Vector::Vector (const size_type n)
{
- last_action = Zero;
- Epetra_LocalMap map ((TrilinosWrappers::types::int_type)n, 0, Utilities::Trilinos::comm_self());
- vector.reset (new Epetra_FEVector (map));
+ reinit(n);
}
Vector::Vector (const Epetra_Map &input_map)
{
- last_action = Zero;
- Epetra_LocalMap map (n_global_elements(input_map),
- input_map.IndexBase(),
- input_map.Comm());
- vector.reset (new Epetra_FEVector(map));
+ reinit(input_map);
}
Vector::Vector (const IndexSet &partitioning,
const MPI_Comm &communicator)
{
- last_action = Zero;
- Epetra_LocalMap map (static_cast<TrilinosWrappers::types::int_type>(partitioning.size()),
- 0,
-#ifdef DEAL_II_WITH_MPI
- Epetra_MpiComm(communicator));
-#else
- Epetra_SerialComm());
- (void)communicator;
-#endif
- vector.reset (new Epetra_FEVector(map));
+ reinit (partitioning, communicator);
}
}
else
reinit (v, false, true);
-
}
Vector::reinit (const size_type n,
const bool /*omit_zeroing_entries*/)
{
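+ // a localized vector stores every element on the current process, so the
+ // owned range is [0, n)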
+ owned_elements.clear();
+ owned_elements.set_size(n);
+ owned_elements.add_range(0, n);
Epetra_LocalMap map ((TrilinosWrappers::types::int_type)n, 0,
Utilities::Trilinos::comm_self());
vector.reset (new Epetra_FEVector (map));
Epetra_LocalMap map (n_global_elements(input_map),
input_map.IndexBase(),
input_map.Comm());
- vector.reset (new Epetra_FEVector (map));
+ vector.reset (new Epetra_FEVector(map));
+ owned_elements.clear();
+ owned_elements.set_size(n_global_elements(input_map));
+ owned_elements.add_range(0, n_global_elements(input_map));
last_action = Zero;
}
vector.reset (new Epetra_FEVector(map));
last_action = Zero;
+ owned_elements = partitioning;
}
v.vector->Map().IndexBase(),
v.vector->Comm());
vector.reset (new Epetra_FEVector(map));
+ owned_elements = v.locally_owned_elements();
}
else if (omit_zeroing_entries)
{
v.vector->Map().IndexBase(),
v.vector->Comm());
vector.reset (new Epetra_FEVector(map));
+ owned_elements = v.locally_owned_elements();
}
const int ierr = vector->Update(1.0, *v.vector, 0.0);
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test TrilinosWrappers::MPI::Vector::locally_owned_elements()
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/trilinos_vector.h>
+
+#include <fstream>
+#include <sstream>
+
+
+void test ()
+{
+ const int n_proc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+ const int my_id = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+ // Each process should own 10 entries
+ const int entries_per_process = 10;
+
+ IndexSet locally_owned(entries_per_process*n_proc);
+ const int begin_index = my_id*entries_per_process;
+ const int end_index = (my_id+1)*entries_per_process;
+ locally_owned.add_range(begin_index, end_index);
+
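+ // the locally relevant set reaches half-way into the previous process's
+ // range and extends to the end of the vector, so it also contains
+ // ghost entries when more than one process is used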
+ IndexSet locally_relevant(entries_per_process*n_proc);
+ const int local_begin = std::max(0, begin_index-entries_per_process/2);
+ const int local_end = entries_per_process*n_proc;
+ locally_relevant.add_range (local_begin, local_end);
+
+ TrilinosWrappers::MPI::Vector ghosted, distributed;
+ distributed.reinit(locally_owned, MPI_COMM_WORLD);
+ ghosted.reinit (locally_owned, locally_relevant, MPI_COMM_WORLD);
+
+ IndexSet locally_owned_elements_distributed = distributed.locally_owned_elements();
+ IndexSet locally_owned_elements_ghosted = ghosted.locally_owned_elements();
+
+ const types::global_dof_index local_range_begin_ghosted = ghosted.local_range().first;
+ const types::global_dof_index local_range_end_ghosted = ghosted.local_range().second;
+
+ const types::global_dof_index local_range_begin_distributed = distributed.local_range().first;
+ const types::global_dof_index local_range_end_distributed = distributed.local_range().second;
+
+ deallog << "locally_owned_elements_distributed: ";
+ locally_owned_elements_distributed.print(deallog);
+ deallog << "locally_owned_elements_ghosted: ";
+ locally_owned_elements_ghosted.print(deallog);
+ deallog << "local_range_begin_ghosted: "
+ << local_range_begin_ghosted << std::endl;
+ deallog << "local_range_end_ghosted: "
+ << local_range_end_ghosted << std::endl;
+ deallog << "local_range_begin_distributed: "
+ << local_range_begin_distributed << std::endl;
+ deallog << "local_range_end_distributed: "
+ << local_range_end_distributed << std::endl;
+
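+ // both vectors were built from the same locally owned set, so their
+ // locally_owned_elements() must agree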
+ AssertThrow (locally_owned_elements_distributed == locally_owned_elements_ghosted,
+ ExcInternalError());
+
+ deallog << "OK" << std::endl;
+}
+
+
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+
+ MPILogInitAll log;
+
+ test();
+}