parallel::distributed::Vector<number> &output,
const internal::bool2type<false> /*is_block_vector*/)
{
+ // TODO: the input vector might already have all elements. need to find a
+ // way to efficiently avoid the copy then
const_cast<parallel::distributed::Vector<number>&>(vec).zero_out_ghosts();
output.reinit (locally_owned_elements, needed_elements, vec.get_mpi_communicator());
output = vec;
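// For orientation, a minimal sketch of the ghost-import pattern this helper
// builds on (a hypothetical standalone snippet, not part of the patch; the
// index sets and the non-ghosted source vector are assumptions):
//
//   IndexSet owned  = locally_owned_elements;       // rows this process owns
//   IndexSet needed = owned;
//   needed.add_index (some_remotely_owned_row);     // plus the required ghost rows
//
//   parallel::distributed::Vector<double> ghosted;
//   ghosted.reinit (owned, needed, MPI_COMM_WORLD);
//   ghosted = source_vector;         // copies the locally owned values
//   ghosted.update_ghost_values();   // fetches the ghost values over MPI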
void
ConstraintMatrix::distribute (VectorType &vec) const
{
- Assert (sorted==true, ExcMatrixIsClosed());
-
- // if the vector type supports parallel storage and if the
- // vector actually does store only part of the vector, distributing
- // is slightly more complicated. we can skip the complicated part
- // if the local processor stores the *entire* vector and pretend
- // that this is a sequential vector but we need to pay attention that
- // in that case the other processors don't actually do anything (in
- // particular that they do not call compress because the processor
- // that owns everything doesn't do so either); this is the first if
- // case here, the second is for the complicated case, the last else
- // is for the simple case (sequential vector or distributed vector
- // where the current processor stores everything)
- if ((vec.supports_distributed_data == true)
- &&
- (vec.locally_owned_elements().n_elements() == 0))
- {
- // do nothing, in particular don't call compress()
- }
- else if ((vec.supports_distributed_data == true)
- &&
- (vec.locally_owned_elements() != complete_index_set(vec.size())))
+ Assert (sorted==true, ExcMatrixNotClosed());
+
+ // if the vector type supports parallel storage and if the vector actually
+ // does store only part of the vector, distributing is slightly more
+ // complicated. we might be able to skip the complicated part if one
+ // processor owns everything and pretend that this is a sequential vector,
+ // but it is difficult for the other processors to know whether they should
+ // not do anything or if other processors will create a temporary vector,
+ // exchange data (requiring communication, maybe even with the processors
+ // that do not own anything because of that particular parallel model), and
+ // call compress() in the end. the if branch here handles the complicated
+ // case, the final else handles the simple case (sequential vector)
+ const IndexSet vec_owned_elements = vec.locally_owned_elements();
+ if (vec.supports_distributed_data == true)
{
// This processor owns only part of the vector: we need to get a vector
// that has all the *sources* of the constraints we own locally, possibly
// as ghost vector elements, then read from them, and finally throw away
// the ghosted vector. Implement this in the following.
- const IndexSet vec_owned_elements = vec.locally_owned_elements();
IndexSet needed_elements = vec_owned_elements;
typedef std::vector<ConstraintLine>::const_iterator constraint_iterator;
for (constraint_iterator it = lines.begin();
it != lines.end(); ++it)
if (vec_owned_elements.is_element(it->line))
- for (unsigned int i=0; i<it->entries.size(); ++i)
- {
- typename VectorType::value_type
+ {
+ typename VectorType::value_type
new_value = it->inhomogeneity;
- for (unsigned int i=0; i<it->entries.size(); ++i)
- new_value += (static_cast<typename VectorType::value_type>
- (ghosted_vector(it->entries[i].first)) *
- it->entries[i].second);
- Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
- vec(it->line) = new_value;
- }
+ for (unsigned int i=0; i<it->entries.size(); ++i)
+ new_value += (static_cast<typename VectorType::value_type>
+ (ghosted_vector(it->entries[i].first)) *
+ it->entries[i].second);
+ Assert(numbers::is_finite(new_value), ExcNumberNotFinite());
+ vec(it->line) = new_value;
+ }
// now compress to communicate any entries that we added but that are not
// owned by the local processor to their owners
else
// purely sequential vector (either because the type doesn't
// support anything else or because it's completely stored
- // locally
+ // locally)
{
std::vector<ConstraintLine>::const_iterator next_constraint = lines.begin();
for (; next_constraint != lines.end(); ++next_constraint)
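// From the caller's side, the new distribute() only needs write access to the
// locally owned entries; ghosted source values are fetched internally through
// the helper above. A hypothetical usage sketch (dof_handler, constraints and
// the index sets are assumptions, not part of this patch):
//
//   IndexSet owned = dof_handler.locally_owned_dofs();
//   IndexSet relevant;
//   DoFTools::extract_locally_relevant_dofs (dof_handler, relevant);
//
//   parallel::distributed::Vector<double> solution;
//   solution.reinit (owned, MPI_COMM_WORLD);
//   // ... solve into `solution` ...
//   constraints.distribute (solution);
//
//   // for output or evaluation, copy into a ghosted vector afterwards
//   parallel::distributed::Vector<double> ghosted_solution;
//   ghosted_solution.reinit (owned, relevant, MPI_COMM_WORLD);
//   ghosted_solution = solution;
//   ghosted_solution.update_ghost_values();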
#include <sstream>
-template<int dim>
-class FilteredDataOut : public DataOut<dim>
-{
- public:
- FilteredDataOut (const unsigned int subdomain_id)
- :
- subdomain_id (subdomain_id)
- {}
-
- virtual typename DoFHandler<dim>::cell_iterator
- first_cell ()
- {
- typename DoFHandler<dim>::active_cell_iterator
- cell = this->dofs->begin_active();
- while ((cell != this->dofs->end()) &&
- (cell->subdomain_id() != subdomain_id))
- ++cell;
-
- return cell;
- }
-
- virtual typename DoFHandler<dim>::cell_iterator
- next_cell (const typename DoFHandler<dim>::cell_iterator &old_cell)
- {
- if (old_cell != this->dofs->end())
- {
- const IteratorFilters::SubdomainEqualTo
- predicate(subdomain_id);
-
- return
- ++(FilteredIterator
- <typename DoFHandler<dim>::active_cell_iterator>
- (predicate,old_cell));
- }
- else
- return old_cell;
- }
-
- private:
- const unsigned int subdomain_id;
-};
-
-
-
template<int dim>
void test()
TrilinosWrappers::MPI::Vector x;
x.reinit(owned_set, MPI_COMM_WORLD);
x=2.0;
- x.compress();
TrilinosWrappers::MPI::Vector x_rel;
x_rel.reinit(relevant_set, MPI_COMM_WORLD);
- x_rel.compress();
ConstraintMatrix cm(relevant_set);
DoFTools::make_hanging_node_constraints (dofh, cm);
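// Presumably the test then closes the constraints and exercises distribute()
// on the purely distributed vector; a sketch of the natural continuation (not
// necessarily the file's actual code):
//
//   cm.close ();
//   cm.distribute (x);   // x stores only the locally owned elements
//   x_rel = x;           // import the result into the ghosted vector to check it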