</p>
<ol>
-<li> Changed: The PETScWrapper::VectorBase class tried to keep track of
+<li> Changed: The PETScWrappers::VectorBase and PETScWrappers::MatrixBase
+classes tried to keep track of
-whether the last operation done on a vector was to add to an element or to
+whether the last operation done on a vector or matrix was to add to an element or to
write into one. If the previous such operation was of a different kind
than the current one, we would flush buffers (see the description in
@ref GlossCompress). However, trying to do this automatically turned
out to be an endless source of hard-to-find bugs in %parallel programs.
-The scheme has therefore now been changed to the following: the class
-keeps track of the previous operation and if it differs from the
+The scheme has therefore now been changed to the following: the classes
+keep track of the previous operation and if it differs from the
-current one, reports an error stating that the user needs to call
+current one, report an error stating that the user needs to call
-PETScWrapper::VectorBase::compress() instead.
+PETScWrappers::VectorBase::compress() or
+PETScWrappers::MatrixBase::compress() instead.
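+For example, switching from setting elements to adding to them now requires
+an explicit intermediate call to compress(), along the lines of the following
+sketch (the indices and values are made up for illustration):
+@code
+  matrix.set (0, 0, 1.0);   // matrix is now in 'set' mode
+  matrix.compress ();       // flush buffers before switching modes
+  matrix.add (0, 0, 2.0);   // matrix is now in 'add' mode
+@endcode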
<br>
-(Wolfgang Bangerth, 2011/08/03)
+(Wolfgang Bangerth, 2011/08/03, 2011/08/30)
<li> Changed: The classes Tensor, SymmetricTensor and Point now have an
additional template argument for the number type. While a default template
* Exception
*/
DeclException0 (ExcSourceEqualsDestination);
-
+
+ /**
+ * Exception thrown when mixing add and set operations on
+ * matrix elements without an intervening call to compress().
+ */
+ DeclException2 (ExcWrongMode,
+ int, int,
+ << "You tried to do a "
+ << (arg1 == 1 ?
+ "'set'" :
+ (arg1 == 2 ?
+ "'add'" : "???"))
+ << " operation but the matrix is currently in "
+ << (arg2 == 1 ?
+ "'set'" :
+ (arg2 == 2 ?
+ "'add'" : "???"))
+ << " mode. You first have to call 'compress()'.");
+
protected:
/**
* A generic matrix object in
LastAction::Values last_action;
/**
- * Flush buffers on all CPUs when
- * switching between inserting and
- * adding to elements, no-op otherwise.
+ * Ensure that the add/set mode that
+ * is required for actions following
+ * this call is compatible with the
+ * current mode.
* Should be called from all internal
* functions accessing matrix elements.
*/
const unsigned int j,
const PetscScalar value)
{
-
Assert (numbers::is_finite(value), ExcNumberNotFinite());
set (i, 1, &j, &value, false);
void
MatrixBase::prepare_action(const LastAction::Values new_action)
{
- // flush PETSc buffers when switching
- // actions, otherwise just return.
- if (last_action == new_action) return;
-
- int ierr;
- ierr = MatAssemblyBegin(matrix, MAT_FLUSH_ASSEMBLY);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
-
- ierr = MatAssemblyEnd(matrix, MAT_FLUSH_ASSEMBLY);
- AssertThrow (ierr == 0, ExcPETScError(ierr));
-
- last_action = new_action;
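+ // keep the mode if it is unchanged, and adopt the new mode if the
+ // matrix was still in a neutral state; in all other cases the user
+ // has to call compress() first, so signal an error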
+ if (last_action == new_action)
+ ;
+ else if (last_action == LastAction::none)
+ last_action = new_action;
+ else
+ Assert (false, ExcWrongMode (last_action, new_action));
}
ierr = MatAssemblyEnd (matrix,MAT_FINAL_ASSEMBLY);
AssertThrow (ierr == 0, ExcPETScError(ierr));
+
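+  // reset the mode tracker now that the matrix has been fully assembled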
+  last_action = LastAction::none;
}
// $Id: simple_mpi_01.cc 23327 2011-02-11 03:19:07Z bangerth $
// Version: $Name$
//
-// Copyright (C) 2009 by the deal.II authors
+// Copyright (C) 2009, 2011 by the deal.II authors
//
// This file is subject to QPL and may not be distributed
// without copyright and license information. Please refer
#include "../tests.h"
#include <deal.II/lac/compressed_simple_sparsity_pattern.h>
-#include <deal.II/lac/petsc_sparse_matrix.h>
-#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
+#include <deal.II/lac/petsc_sparse_matrix.h>
+#include <deal.II/lac/petsc_parallel_sparse_matrix.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
CompressedSimpleSparsityPattern csp(2);
csp.add(0,0);
csp.add(1,1);
-
+
PETScWrappers::MPI::SparseMatrix mat;
std::vector< unsigned int > local_rows(numprocs, 0);
local_rows[0]=2;
-
+
mat.reinit(MPI_COMM_WORLD, csp, local_rows, local_rows, myid);
-#warning this here hangs, so it is uncommented until bug is fixed
- //if (myid == 0 )
- mat.add(0, 0, 1.0);
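+  // only one process adds to this entry; all processes then take part
+  // in the compress() call below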
+ if (myid == 0 )
+ mat.add(0, 0, 1.0);
+
-
mat.compress();
if (myid==0)
DEAL::Running on 1 CPU(s).
-DEAL::2nd try
DEAL::done
DEAL::Running on 10 CPU(s).
-DEAL::2nd try
DEAL::done
DEAL::Running on 4 CPU(s).
-DEAL::2nd try
DEAL::done