// be cell2
Assert (cell2 == endc2, ExcInternalError());
- u2.compress();
- touch_count.compress();
+ u2.compress(VectorOperation::add);
+ touch_count.compress(VectorOperation::add);
// if we work on parallel distributed
// vectors, we have to ensure that we only
}
// finish the work on parallel vectors
- u2.compress();
+ u2.compress(VectorOperation::insert);
// Apply hanging node constraints.
constraints.distribute(u2);
-
- // and finally update ghost values
- u2.update_ghost_values();
}
}
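// A minimal illustration of the two compress() modes used above, assuming
// a PETSc- or Trilinos-style parallel vector v (all names are
// illustrative, not part of the patch):
//
//   v(i) += contribution;                // queue additions, possibly to
//                                        // entries owned by other processors
//   v.compress(VectorOperation::add);    // exchange and sum the queued
//                                        // contributions on their owners
//
//   v(i) = value;                        // overwrite entries instead
//   v.compress(VectorOperation::insert); // communicate the written values
//                                        // without summing anything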
// if we work on a parallel PETSc vector
- // we have to finish the work and
- // update ghost values
- u1_interpolated.compress();
- u1_interpolated.update_ghost_values();
+ // we have to finish the work
+ u1_interpolated.compress(VectorOperation::insert);
}
};
// if we work on a parallel PETSc vector
- // we have to finish the work and
- // update ghost values
- u1_interpolated.compress();
- u1_interpolated.update_ghost_values();
+ // we have to finish the work
+ u1_interpolated.compress(VectorOperation::insert);
}
// if we work on a parallel PETSc vector
- // we have to finish the work and
- // update ghost values
- u1_difference.compress();
- u1_difference.update_ghost_values();
+ // we have to finish the work
+ u1_difference.compress(VectorOperation::insert);
}
{
back_interpolate(dof1, constraints1, u1, dof2, constraints2, u1_difference);
u1_difference.sadd(-1, u1);
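// (v.sadd(s, w) computes v = s*v + w, so after this call u1_difference
// holds u1 minus its back-interpolation.)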
-
- // if we work on a parallel PETSc vector
- // we have to finish the work and
- // update ghost values
- u1_difference.compress();
- u1_difference.update_ghost_values();
}
}
Assert (local_range == solution.local_range(),
ExcInternalError());
-
- // we have to read and write from this
- // matrix (in this order). this will only
- // work if we compress the matrix first,
- // done here
- matrix.compress ();
-
// determine the first nonzero diagonal
// entry from within the part of the
// matrix that we can see. if we can't
// treated in the other functions.
matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
- // the next thing is to set right hand
- // side to the wanted value. there's one
- // drawback: if we write to individual
- // vector elements, then we have to do
- // that on all processors. however, some
- // processors may not need to set
- // anything because their chunk of
- // matrix/rhs do not contain any boundary
- // nodes. therefore, rather than using
- // individual calls, we use one call for
- // all elements, thereby making sure that
- // all processors call this function,
- // even if some only have an empty set of
- // elements to set
- right_hand_side.compress ();
- solution.compress ();
-
std::vector<unsigned int> indices;
std::vector<PetscScalar> solution_values;
for (std::map<unsigned int,double>::const_iterator
// clean up
matrix.compress ();
- solution.compress ();
- right_hand_side.compress ();
+ solution.compress (VectorOperation::insert);
+ right_hand_side.compress (VectorOperation::insert);
}
}
}
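// A sketch of the element-wise write and collective clean-up performed
// above (boundary_values, local_range, solution, right_hand_side and
// average_nonzero_diagonal_entry refer to the surrounding function; the
// exact loop body is illustrative only):
//
//   for (std::map<unsigned int,double>::const_iterator
//          bv = boundary_values.begin(); bv != boundary_values.end(); ++bv)
//     if (local_range.first <= bv->first && bv->first < local_range.second)
//       {
//         solution(bv->first)        = bv->second;
//         right_hand_side(bv->first) = bv->second * average_nonzero_diagonal_entry;
//       }
//
//   // every processor must take part in compress(), even if it owns none
//   // of the constrained rows
//   solution.compress(VectorOperation::insert);
//   right_hand_side.compress(VectorOperation::insert);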
// matrix classes in deal.II.
matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
- // the next thing is to set right
- // hand side to the wanted
- // value. there's one drawback:
- // if we write to individual
- // vector elements, then we have
- // to do that on all
- // processors. however, some
- // processors may not need to set
- // anything because their chunk
- // of matrix/rhs do not contain
- // any boundary nodes. therefore,
- // rather than using individual
- // calls, we use one call for all
- // elements, thereby making sure
- // that all processors call this
- // function, even if some only
- // have an empty set of elements
- // to set
- right_hand_side.compress ();
- solution.compress ();
-
std::vector<unsigned int> indices;
std::vector<TrilinosScalar> solution_values;
for (std::map<unsigned int,double>::const_iterator
// clean up
matrix.compress ();
- solution.compress ();
- right_hand_side.compress ();
+ solution.compress (VectorOperation::insert);
+ right_hand_side.compress (VectorOperation::insert);
}
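// The Trilinos code path mirrors the PETSc one above: after element-wise
// writes, every processor finishes with a collective compress. A minimal
// sketch, assuming a TrilinosWrappers::MPI::Vector v:
//
//   v(i) = boundary_value;               // element-wise write
//   v.compress(VectorOperation::insert); // collective completion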