From: bangerth
Date: Thu, 15 Sep 2011 05:43:25 +0000 (+0000)
Subject: Also convert MatrixCreator, MatrixTools to namespaces.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c1c3b2d71a626d2463655a78a1687e5d16a75e19;p=dealii-svn.git
Also convert MatrixCreator, MatrixTools to namespaces.
git-svn-id: https://svn.dealii.org/trunk@24326 0785d39b-7218-0410-832d-ea1e28bc413d
---
diff --git a/deal.II/doc/news/changes.h b/deal.II/doc/news/changes.h
index 19a097f0f9..25fdd1c821 100644
--- a/deal.II/doc/news/changes.h
+++ b/deal.II/doc/news/changes.h
@@ -21,7 +21,8 @@ inconvenience this causes.
-- Changed: GridTools, DoFTools, MGTools and VectorTools are now namespaces. They have long
+
+- Changed: GridTools, DoFTools, MGTools, VectorTools, MatrixCreator
+and MatrixTools are now namespaces. They have long
been classes that had only public, static member functions, making
the end result semantically exactly equivalent to a namespace, which is
also how it was used. This is now also reflected in the actual code.
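Because these were classes with only public, static member functions, the conversion is source compatible: a qualified call compiles either way. A minimal standalone sketch of the idea, using placeholder declarations rather than the real deal.II signatures:

    // As a class with a static member function:
    struct Tools1 { static void f () {} };
    // As a namespace with a free function:
    namespace Tools2 { void f () {} }
    // Call sites are identical in both cases:
    //   Tools1::f ();
    //   Tools2::f ();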
diff --git a/deal.II/include/deal.II/numerics/matrices.h b/deal.II/include/deal.II/numerics/matrices.h
index bb3ae80fbd..e624416121 100644
--- a/deal.II/include/deal.II/numerics/matrices.h
+++ b/deal.II/include/deal.II/numerics/matrices.h
@@ -785,303 +785,311 @@ namespace MatrixCreator
* @ingroup numerics
* @author Wolfgang Bangerth, 1998, 2000, 2004, 2005
*/
-class MatrixTools
+namespace MatrixTools
{
-// using namespace MatrixCreator
- public:
- /**
- * Apply dirichlet boundary conditions
- * to the system matrix and vectors
- * as described in the general
- * documentation.
- */
 -    template <typename number>
 -    static void
 -    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
 -                           SparseMatrix<number>  &matrix,
 -                           Vector<number>        &solution,
 -                           Vector<number>        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * Import namespace MatrixCreator for
+ * backward compatibility with older
+ * versions of deal.II in which these
+ * namespaces were classes and class
+ * MatrixTools was publicly derived from
+ * class MatrixCreator.
+ */
+ using namespace MatrixCreator;
- /**
- * Apply dirichlet boundary
- * conditions to the system
- * matrix and vectors as
- * described in the general
- * documentation. This function
- * works for block sparse
- * matrices and block vectors
- */
 -    template <typename number>
 -    static void
 -    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
 -                           BlockSparseMatrix<number>  &matrix,
 -                           BlockVector<number>        &solution,
 -                           BlockVector<number>        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+   * Apply Dirichlet boundary conditions
+ * to the system matrix and vectors
+ * as described in the general
+ * documentation.
+ */
+  template <typename number>
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         SparseMatrix<number>  &matrix,
+                         Vector<number>        &solution,
+                         Vector<number>        &right_hand_side,
+ const bool eliminate_columns = true);
+
+ /**
+   * Apply Dirichlet boundary
+ * conditions to the system
+ * matrix and vectors as
+ * described in the general
+ * documentation. This function
+ * works for block sparse
+   * matrices and block vectors.
+ */
+  template <typename number>
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         BlockSparseMatrix<number>  &matrix,
+                         BlockVector<number>        &solution,
+                         BlockVector<number>        &right_hand_side,
+ const bool eliminate_columns = true);
#ifdef DEAL_II_USE_PETSC
- /**
- * Apply dirichlet boundary conditions to
- * the system matrix and vectors as
- * described in the general
- * documentation. This function works on
- * the classes that are used to wrap
- * PETSc objects.
- *
- * Note that this function is not very
- * efficient: it needs to alternatingly
- * read and write into the matrix, a
- * situation that PETSc does not handle
- * too well. In addition, we only get rid
- * of rows corresponding to boundary
- * nodes, but the corresponding case of
- * deleting the respective columns
- * (i.e. if @p eliminate_columns is @p
- * true) is not presently implemented,
- * and probably will never because it is
- * too expensive without direct access to
- * the PETSc data structures. (This leads
- * to the situation where the action
- * indicates by the default value of the
- * last argument is actually not
- * implemented; that argument has
-      * true as its default value
- * to stay consistent with the other
- * functions of same name in this class.)
- * A third reason against this function
- * is that it doesn't handle the case
- * where the matrix is distributed across
- * an MPI system.
- *
- * This function is used in
- * step-17 and
- * step-18.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           PETScWrappers::SparseMatrix  &matrix,
-                           PETScWrappers::Vector        &solution,
-                           PETScWrappers::Vector        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+   * Apply Dirichlet boundary conditions to
+ * the system matrix and vectors as
+ * described in the general
+ * documentation. This function works on
+ * the classes that are used to wrap
+ * PETSc objects.
+ *
+ * Note that this function is not very
+ * efficient: it needs to alternatingly
+ * read and write into the matrix, a
+ * situation that PETSc does not handle
+ * too well. In addition, we only get rid
+ * of rows corresponding to boundary
+ * nodes, but the corresponding case of
+ * deleting the respective columns
+ * (i.e. if @p eliminate_columns is @p
+ * true) is not presently implemented,
+   * and probably never will be, because it is
+   * too expensive without direct access to
+   * the PETSc data structures. (This leads
+   * to the situation where the action
+   * indicated by the default value of the
+   * last argument is actually not
+   * implemented; that argument has
+   * true as its default value
+   * to stay consistent with the other
+   * functions of the same name in this class.)
+ * A third reason against this function
+ * is that it doesn't handle the case
+ * where the matrix is distributed across
+ * an MPI system.
+ *
+ * This function is used in
+ * step-17 and
+ * step-18.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         PETScWrappers::SparseMatrix  &matrix,
+                         PETScWrappers::Vector        &solution,
+                         PETScWrappers::Vector        &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * Same function, but for parallel PETSc
- * matrices.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           PETScWrappers::MPI::SparseMatrix  &matrix,
-                           PETScWrappers::MPI::Vector        &solution,
-                           PETScWrappers::MPI::Vector        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * Same function, but for parallel PETSc
+ * matrices.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         PETScWrappers::MPI::SparseMatrix  &matrix,
+                         PETScWrappers::MPI::Vector        &solution,
+                         PETScWrappers::MPI::Vector        &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * Same function, but for
- * parallel PETSc matrices. Note
- * that this function only
- * operates on the local range of
- * the parallel matrix, i.e. it
- * only eliminates rows
- * corresponding to degrees of
- * freedom for which the row is
- * stored on the present
- * processor. All other boundary
- * nodes are ignored, and it
- * doesn't matter whether they
- * are present in the first
- * argument to this function or
- * not. A consequence of this,
- * however, is that this function
- * has to be called from all
- * processors that participate in
- * sharing the contents of the
- * given matrices and vectors. It
- * is also implied that the local
- * range for all objects passed
- * to this function is the same.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           PETScWrappers::MPI::SparseMatrix  &matrix,
-                           PETScWrappers::Vector             &solution,
-                           PETScWrappers::MPI::Vector        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * Same function, but for
+ * parallel PETSc matrices. Note
+ * that this function only
+ * operates on the local range of
+ * the parallel matrix, i.e. it
+ * only eliminates rows
+ * corresponding to degrees of
+ * freedom for which the row is
+ * stored on the present
+ * processor. All other boundary
+ * nodes are ignored, and it
+ * doesn't matter whether they
+ * are present in the first
+ * argument to this function or
+ * not. A consequence of this,
+ * however, is that this function
+ * has to be called from all
+ * processors that participate in
+ * sharing the contents of the
+ * given matrices and vectors. It
+ * is also implied that the local
+ * range for all objects passed
+ * to this function is the same.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         PETScWrappers::MPI::SparseMatrix  &matrix,
+                         PETScWrappers::Vector             &solution,
+                         PETScWrappers::MPI::Vector        &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * Same as above but for BlockSparseMatrix.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           PETScWrappers::MPI::BlockSparseMatrix &matrix,
-                           PETScWrappers::MPI::BlockVector       &solution,
-                           PETScWrappers::MPI::BlockVector       &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * Same as above but for BlockSparseMatrix.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         PETScWrappers::MPI::BlockSparseMatrix &matrix,
+                         PETScWrappers::MPI::BlockVector       &solution,
+                         PETScWrappers::MPI::BlockVector       &right_hand_side,
+ const bool eliminate_columns = true);
#endif
#ifdef DEAL_II_USE_TRILINOS
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
-     * has true as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) A third
- * reason against this function is
- * that it doesn't handle the case
- * where the matrix is distributed
- * across an MPI system.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           TrilinosWrappers::SparseMatrix  &matrix,
-                           TrilinosWrappers::Vector        &solution,
-                           TrilinosWrappers::Vector        &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+     * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+     * and probably never will be, because
+     * it is too expensive without
+     * direct access to the Trilinos
+     * data structures. (This leads to
+     * the situation where the action
+     * indicated by the default value
+     * of the last argument is actually
+     * not implemented; that argument
+     * has true as its
+     * default value to stay consistent
+     * with the other functions of the same
+     * name in this class.) A third
+ * reason against this function is
+ * that it doesn't handle the case
+ * where the matrix is distributed
+ * across an MPI system.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         TrilinosWrappers::SparseMatrix  &matrix,
+                         TrilinosWrappers::Vector        &solution,
+                         TrilinosWrappers::Vector        &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now
- * working on block structures.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           TrilinosWrappers::BlockSparseMatrix &matrix,
-                           TrilinosWrappers::BlockVector       &solution,
-                           TrilinosWrappers::BlockVector       &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * This function does the same as
+ * the one above, except now
+ * working on block structures.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         TrilinosWrappers::BlockSparseMatrix &matrix,
+                         TrilinosWrappers::BlockVector       &solution,
+                         TrilinosWrappers::BlockVector       &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * Apply dirichlet boundary
- * conditions to the system matrix
- * and vectors as described in the
- * general documentation. This
- * function works on the classes
- * that are used to wrap Trilinos
- * objects.
- *
- * Note that this function is not
- * very efficient: it needs to
- * alternatingly read and write
- * into the matrix, a situation
- * that Trilinos does not handle
- * too well. In addition, we only
- * get rid of rows corresponding to
- * boundary nodes, but the
- * corresponding case of deleting
- * the respective columns (i.e. if
- * @p eliminate_columns is @p true)
- * is not presently implemented,
- * and probably will never because
- * it is too expensive without
- * direct access to the Trilinos
- * data structures. (This leads to
- * the situation where the action
- * indicates by the default value
- * of the last argument is actually
- * not implemented; that argument
-     * has true as its
- * default value to stay consistent
- * with the other functions of same
- * name in this class.) This
- * function does work on MPI vector
- * types.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           TrilinosWrappers::SparseMatrix  &matrix,
-                           TrilinosWrappers::MPI::Vector   &solution,
-                           TrilinosWrappers::MPI::Vector   &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+     * Apply Dirichlet boundary
+ * conditions to the system matrix
+ * and vectors as described in the
+ * general documentation. This
+ * function works on the classes
+ * that are used to wrap Trilinos
+ * objects.
+ *
+ * Note that this function is not
+ * very efficient: it needs to
+ * alternatingly read and write
+ * into the matrix, a situation
+ * that Trilinos does not handle
+ * too well. In addition, we only
+ * get rid of rows corresponding to
+ * boundary nodes, but the
+ * corresponding case of deleting
+ * the respective columns (i.e. if
+ * @p eliminate_columns is @p true)
+ * is not presently implemented,
+     * and probably never will be, because
+     * it is too expensive without
+     * direct access to the Trilinos
+     * data structures. (This leads to
+     * the situation where the action
+     * indicated by the default value
+     * of the last argument is actually
+     * not implemented; that argument
+     * has true as its
+     * default value to stay consistent
+     * with the other functions of the same
+     * name in this class.) This
+ * function does work on MPI vector
+ * types.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         TrilinosWrappers::SparseMatrix  &matrix,
+                         TrilinosWrappers::MPI::Vector   &solution,
+                         TrilinosWrappers::MPI::Vector   &right_hand_side,
+ const bool eliminate_columns = true);
- /**
- * This function does the same as
- * the one above, except now working
- * on block structures.
- */
-    static void
-    apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                           TrilinosWrappers::BlockSparseMatrix &matrix,
-                           TrilinosWrappers::MPI::BlockVector  &solution,
-                           TrilinosWrappers::MPI::BlockVector  &right_hand_side,
- const bool eliminate_columns = true);
+ /**
+ * This function does the same as
+ * the one above, except now working
+ * on block structures.
+ */
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         TrilinosWrappers::BlockSparseMatrix &matrix,
+                         TrilinosWrappers::MPI::BlockVector  &solution,
+                         TrilinosWrappers::MPI::BlockVector  &right_hand_side,
+ const bool eliminate_columns = true);
#endif
- /**
- * Rather than applying boundary
- * values to the global matrix
- * and vector after creating the
- * global matrix, this function
- * does so during assembly, by
- * modifying the local matrix and
- * vector contributions. If you
- * call this function on all
- * local contributions, the
- * resulting matrix will have the
- * same entries, and the final
- * call to
- * apply_boundary_values() on the
- * global system will not be
- * necessary.
- *
- * Since this function does not
- * have to work on the
- * complicated data structures of
- * sparse matrices, it is
- * relatively cheap. It may
- * therefore be a win if you have
- * many fixed degrees of freedom
- * (e.g. boundary nodes), or if
- * access to the sparse matrix is
- * expensive (e.g. for block
- * sparse matrices, or for PETSc
- * or trilinos
- * matrices). However, it doesn't
- * work as expected if there are
- * also hanging nodes to be
- * considered. More caveats are
- * listed in the general
- * documentation of this class.
- */
-    static void
-    local_apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                 const std::vector<unsigned int> &local_dof_indices,
-                                 FullMatrix<double> &local_matrix,
-                                 Vector<double>     &local_rhs,
- const bool eliminate_columns);
+ /**
+ * Rather than applying boundary
+ * values to the global matrix
+ * and vector after creating the
+ * global matrix, this function
+ * does so during assembly, by
+ * modifying the local matrix and
+ * vector contributions. If you
+ * call this function on all
+ * local contributions, the
+ * resulting matrix will have the
+ * same entries, and the final
+ * call to
+ * apply_boundary_values() on the
+ * global system will not be
+ * necessary.
+ *
+ * Since this function does not
+ * have to work on the
+ * complicated data structures of
+ * sparse matrices, it is
+ * relatively cheap. It may
+ * therefore be a win if you have
+ * many fixed degrees of freedom
+ * (e.g. boundary nodes), or if
+ * access to the sparse matrix is
+ * expensive (e.g. for block
+ * sparse matrices, or for PETSc
+   * or Trilinos
+ * matrices). However, it doesn't
+ * work as expected if there are
+ * also hanging nodes to be
+ * considered. More caveats are
+ * listed in the general
+ * documentation of this class.
+ */
+  void
+  local_apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                               const std::vector<unsigned int> &local_dof_indices,
+                               FullMatrix<double> &local_matrix,
+                               Vector<double>     &local_rhs,
+ const bool eliminate_columns);
- /**
- * Exception
- */
- DeclException0 (ExcBlocksDontMatch);
-};
+ /**
+ * Exception
+ */
+ DeclException0 (ExcBlocksDontMatch);
+}
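The "using namespace MatrixCreator;" directive above is what keeps old code compiling: qualified lookup of a name in MatrixTools also considers namespaces nominated by using-directives, mirroring the old public inheritance of MatrixTools from MatrixCreator. A minimal standalone sketch of that lookup rule (A and B are hypothetical stand-ins for MatrixCreator and MatrixTools):

    namespace A { void create_something () {} }  // stands in for MatrixCreator

    namespace B                                  // stands in for MatrixTools
    {
      using namespace A;                         // re-export A's names
    }

    void user_code ()
    {
      B::create_something ();  // still resolves, via the using-directive,
                               // just as MatrixTools::create_mass_matrix()
                               // keeps working after this patch
    }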
diff --git a/deal.II/source/numerics/matrices.cc b/deal.II/source/numerics/matrices.cc
index 5e5ae0962f..8008c32051 100644
--- a/deal.II/source/numerics/matrices.cc
+++ b/deal.II/source/numerics/matrices.cc
@@ -1917,858 +1917,661 @@ namespace MatrixCreator
} // namespace MatrixCreator
-
-//TODO:[WB] I don't think that the optimized storage of diagonals is needed (GK)
-template <typename number>
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    SparseMatrix<number>  &matrix,
-                                    Vector<number>        &solution,
-                                    Vector<number>        &right_hand_side,
- const bool eliminate_columns)
+namespace MatrixTools
{
- // Require that diagonals are first
- // in each row
- Assert (matrix.get_sparsity_pattern().optimize_diagonal(),
- typename SparsityPattern::ExcDiagonalNotOptimized());
- Assert (matrix.n() == right_hand_side.size(),
- ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
- Assert (matrix.n() == solution.size(),
- ExcDimensionMismatch(matrix.n(), solution.size()));
- // if no boundary values are to be applied
- // simply return
- if (boundary_values.size() == 0)
- return;
-
-
- const unsigned int n_dofs = matrix.m();
-
- // if a diagonal entry is zero
- // later, then we use another
- // number instead. take it to be
- // the first nonzero diagonal
- // element of the matrix, or 1 if
- // there is no such thing
- number first_nonzero_diagonal_entry = 1;
-  for (unsigned int i=0; i<n_dofs; ++i)
-    if (matrix.diag_element(i) != 0)
-      {
-        first_nonzero_diagonal_entry = matrix.diag_element(i);
-        break;
-      }
+  template <typename number>
+ void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         SparseMatrix<number>  &matrix,
+                         Vector<number>        &solution,
+                         Vector<number>        &right_hand_side,
+ const bool eliminate_columns)
+ {
+ // Require that diagonals are first
+ // in each row
+ Assert (matrix.get_sparsity_pattern().optimize_diagonal(),
+ typename SparsityPattern::ExcDiagonalNotOptimized());
+ Assert (matrix.n() == right_hand_side.size(),
+ ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
+ Assert (matrix.n() == solution.size(),
+ ExcDimensionMismatch(matrix.n(), solution.size()));
+ // if no boundary values are to be applied
+ // simply return
+ if (boundary_values.size() == 0)
+ return;
-  std::map<unsigned int,double>::const_iterator dof  = boundary_values.begin(),
- endd = boundary_values.end();
- const SparsityPattern &sparsity = matrix.get_sparsity_pattern();
- const std::size_t *sparsity_rowstart = sparsity.get_rowstart_indices();
- const unsigned int *sparsity_colnums = sparsity.get_column_numbers();
- for (; dof != endd; ++dof)
- {
- Assert (dof->first < n_dofs, ExcInternalError());
-
- const unsigned int dof_number = dof->first;
- // for each boundary dof:
-
- // set entries of this line
- // to zero except for the diagonal
- // entry. Note that the diagonal
- // entry is always the first one
- // for square matrices, i.e.
- // we shall not set
- // matrix.global_entry(
- // sparsity_rowstart[dof.first])
- const unsigned int last = sparsity_rowstart[dof_number+1];
-      for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
-        matrix.global_entry(j) = 0.;
-
-      // set right hand side to wanted value: if main diagonal entry
-      // nonzero, don't touch it and scale rhs accordingly; if zero,
-      // take the first nonzero diagonal entry found above
-      number new_rhs;
-      if (matrix.diag_element(dof_number) != 0.0)
-        {
-          new_rhs = dof->second * matrix.diag_element(dof_number);
- right_hand_side(dof_number) = new_rhs;
- }
- else
- {
- matrix.set (dof_number, dof_number,
- first_nonzero_diagonal_entry);
- new_rhs = dof->second * first_nonzero_diagonal_entry;
- right_hand_side(dof_number) = new_rhs;
- }
+ const unsigned int n_dofs = matrix.m();
- // if the user wants to have
- // the symmetry of the matrix
- // preserved, and if the
- // sparsity pattern is
- // symmetric, then do a Gauss
- // elimination step with the
- // present row
- if (eliminate_columns)
+ // if a diagonal entry is zero
+ // later, then we use another
+ // number instead. take it to be
+ // the first nonzero diagonal
+ // element of the matrix, or 1 if
+ // there is no such thing
+ number first_nonzero_diagonal_entry = 1;
+    for (unsigned int i=0; i<n_dofs; ++i)
+      if (matrix.diag_element(i) != 0)
+        {
+          first_nonzero_diagonal_entry = matrix.diag_element(i);
+          break;
+        }
-        {
-          const number diagonal_entry = matrix.diag_element(dof_number);
-          for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
-            {
-              const unsigned int row = sparsity_colnums[j];
-              const unsigned int *
-                p = Utilities::lower_bound (&sparsity_colnums[sparsity_rowstart[row]+1],
-                                            &sparsity_colnums[sparsity_rowstart[row+1]],
-                                            dof_number);
-              const std::size_t global_entry
-                = (p - &sparsity_colnums[sparsity_rowstart[0]]);
-
-              // correct right hand side, then zero this entry
-              right_hand_side(row) -= matrix.global_entry(global_entry) /
-                                      diagonal_entry * new_rhs;
-              matrix.global_entry(global_entry) = 0.;
-            }
-        }
-
-      // preset solution vector
-      solution(dof_number) = dof->second;
- }
-}
-
-
-template <typename number>
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    BlockSparseMatrix<number>  &matrix,
-                                    BlockVector<number>        &solution,
-                                    BlockVector<number>        &right_hand_side,
- const bool eliminate_columns)
-{
- const unsigned int blocks = matrix.n_block_rows();
-
- Assert (matrix.n() == right_hand_side.size(),
- ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
- Assert (matrix.n() == solution.size(),
- ExcDimensionMismatch(matrix.n(), solution.size()));
- Assert (matrix.n_block_rows() == matrix.n_block_cols(),
- ExcNotQuadratic());
- Assert (matrix.get_sparsity_pattern().get_row_indices() ==
- matrix.get_sparsity_pattern().get_column_indices(),
- ExcNotQuadratic());
- Assert (matrix.get_sparsity_pattern().get_column_indices() ==
- solution.get_block_indices (),
- ExcBlocksDontMatch ());
- Assert (matrix.get_sparsity_pattern().get_row_indices() ==
- right_hand_side.get_block_indices (),
- ExcBlocksDontMatch ());
-
-  for (unsigned int i=0; i<blocks; ++i)
-    Assert (matrix.block(i,i).get_sparsity_pattern().optimize_diagonal(),
-            SparsityPattern::ExcDiagonalNotOptimized());
+    std::map<unsigned int,double>::const_iterator dof  = boundary_values.begin(),
+ endd = boundary_values.end();
+ const SparsityPattern &sparsity = matrix.get_sparsity_pattern();
+ const std::size_t *sparsity_rowstart = sparsity.get_rowstart_indices();
+ const unsigned int *sparsity_colnums = sparsity.get_column_numbers();
+ for (; dof != endd; ++dof)
+ {
+ Assert (dof->first < n_dofs, ExcInternalError());
+
+ const unsigned int dof_number = dof->first;
+ // for each boundary dof:
+
+ // set entries of this line
+ // to zero except for the diagonal
+ // entry. Note that the diagonal
+ // entry is always the first one
+ // for square matrices, i.e.
+ // we shall not set
+ // matrix.global_entry(
+ // sparsity_rowstart[dof.first])
+ const unsigned int last = sparsity_rowstart[dof_number+1];
+        for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
+          matrix.global_entry(j) = 0.;
+
+        // set right hand side to wanted value: if main diagonal entry
+        // nonzero, don't touch it and scale rhs accordingly; if zero,
+        // take the first nonzero diagonal entry found above
+        number new_rhs;
+        if (matrix.diag_element(dof_number) != 0.0)
+          {
+            new_rhs = dof->second * matrix.diag_element(dof_number);
+ right_hand_side(dof_number) = new_rhs;
+ }
+ else
+ {
+ matrix.set (dof_number, dof_number,
+ first_nonzero_diagonal_entry);
+ new_rhs = dof->second * first_nonzero_diagonal_entry;
+ right_hand_side(dof_number) = new_rhs;
}
- // check whether we have found
- // something in the present
- // block
- if (first_nonzero_diagonal_entry != 0)
- break;
- }
- // nothing found on all diagonal
- // blocks? if so, use 1.0 instead
- if (first_nonzero_diagonal_entry == 0)
- first_nonzero_diagonal_entry = 1;
-
-
-  std::map<unsigned int,double>::const_iterator dof  = boundary_values.begin(),
- endd = boundary_values.end();
- const BlockSparsityPattern &
- sparsity_pattern = matrix.get_sparsity_pattern();
-
- // pointer to the mapping between
- // global and block indices. since
- // the row and column mappings are
- // equal, store a pointer on only
- // one of them
- const BlockIndices &
- index_mapping = sparsity_pattern.get_column_indices();
-
- // now loop over all boundary dofs
- for (; dof != endd; ++dof)
- {
- Assert (dof->first < n_dofs, ExcInternalError());
-
- // get global index and index
- // in the block in which this
- // dof is located
- const unsigned int dof_number = dof->first;
-      const std::pair<unsigned int,unsigned int>
- block_index = index_mapping.global_to_local (dof_number);
-
- // for each boundary dof:
-
- // set entries of this line
- // to zero except for the diagonal
- // entry. Note that the diagonal
- // entry is always the first one
- // for square matrices, i.e.
- // we shall not set
- // matrix.global_entry(
- // sparsity_rowstart[dof.first])
- // of the diagonal block
-      for (unsigned int block_col=0; block_col<blocks; ++block_col)
-        {
-          const SparsityPattern &
-            local_sparsity = sparsity_pattern.block(block_index.first,
-                                                    block_col);
-          // exclude the diagonal element of the diagonal block, which
-          // is the first entry of its row
-          const unsigned int
-            last  = local_sparsity.get_rowstart_indices()[block_index.second+1],
-            first = (block_col == block_index.first ?
-                     local_sparsity.get_rowstart_indices()[block_index.second]+1 :
-                     local_sparsity.get_rowstart_indices()[block_index.second]);
-          for (unsigned int j=first; j<last; ++j)
-            matrix.block(block_index.first,block_col).global_entry(j) = 0.;
-        }
-
-      number new_rhs;
-      if (matrix.block(block_index.first, block_index.first)
-          .diag_element(block_index.second) != 0.0)
-        new_rhs = dof->second *
- matrix.block(block_index.first, block_index.first)
- .diag_element(block_index.second);
- else
- {
- matrix.block(block_index.first, block_index.first)
- .diag_element(block_index.second)
- = first_nonzero_diagonal_entry;
- new_rhs = dof->second * first_nonzero_diagonal_entry;
- }
- right_hand_side.block(block_index.first)(block_index.second)
- = new_rhs;
-
-
- // if the user wants to have
- // the symmetry of the matrix
- // preserved, and if the
- // sparsity pattern is
- // symmetric, then do a Gauss
- // elimination step with the
- // present row. this is a
- // little more complicated for
- // block matrices.
- if (eliminate_columns)
- {
- // store the only nonzero entry
- // of this line for the Gauss
- // elimination step
- const number diagonal_entry
- = matrix.block(block_index.first,block_index.first)
- .diag_element(block_index.second);
-
- // we have to loop over all
- // rows of the matrix which
- // have a nonzero entry in
- // the column which we work
- // in presently. if the
- // sparsity pattern is
- // symmetric, then we can
- // get the positions of
- // these rows cheaply by
- // looking at the nonzero
- // column numbers of the
- // present row.
- //
- // note that if we check
- // whether row @p{row} in
- // block (r,c) is non-zero,
- // then we have to check
- // for the existence of
- // column @p{row} in block
- // (c,r), i.e. of the
- // transpose block
-      for (unsigned int block_row=0; block_row<blocks; ++block_row)
-        {
-          const SparsityPattern
-            &transpose_sparsity = sparsity_pattern.block (block_index.first,
-                                                          block_row);
-          const unsigned int
-            first = (block_index.first == block_row ?
-                     transpose_sparsity.get_rowstart_indices()[block_index.second]+1 :
-                     transpose_sparsity.get_rowstart_indices()[block_index.second]),
-            last  = transpose_sparsity.get_rowstart_indices()[block_index.second+1];
-          for (unsigned int j=first; j<last; ++j)
-            {
-              const unsigned int row = transpose_sparsity.get_column_numbers()[j];
-              const unsigned int global_entry
-                = matrix.block(block_row, block_index.first)
-                  .get_sparsity_pattern()(row, block_index.second);
-
-              // correct right hand side
-              right_hand_side.block(block_row)(row)
-                -= matrix.block(block_row,block_index.first).global_entry(global_entry) /
-                   diagonal_entry * new_rhs;
-
-              // set matrix entry to zero
-              matrix.block(block_row,block_index.first).global_entry(global_entry) = 0.;
-            }
-        }
-    }
-
-      // preset solution vector
-      solution.block(block_index.first)(block_index.second) = dof->second;
- }
-}
+ // if the user wants to have
+ // the symmetry of the matrix
+ // preserved, and if the
+ // sparsity pattern is
+ // symmetric, then do a Gauss
+ // elimination step with the
+ // present row
+ if (eliminate_columns)
+ {
+ // store the only nonzero entry
+ // of this line for the Gauss
+ // elimination step
+ const number diagonal_entry = matrix.diag_element(dof_number);
+
+ // we have to loop over all
+ // rows of the matrix which
+ // have a nonzero entry in
+ // the column which we work
+ // in presently. if the
+ // sparsity pattern is
+ // symmetric, then we can
+ // get the positions of
+ // these rows cheaply by
+ // looking at the nonzero
+ // column numbers of the
+ // present row. we need not
+ // look at the first entry,
+ // since that is the
+ // diagonal element and
+ // thus the present row
+        for (unsigned int j=sparsity_rowstart[dof_number]+1; j<last; ++j)
+          {
+            const unsigned int row = sparsity_colnums[j];
+
+            // find the position of element (row,dof_number), correct
+            // the right hand side, and set the entry to zero
+            const unsigned int *
+              p = Utilities::lower_bound (&sparsity_colnums[sparsity_rowstart[row]+1],
+                                          &sparsity_colnums[sparsity_rowstart[row+1]],
+                                          dof_number);
+            const std::size_t global_entry
+              = (p - &sparsity_colnums[sparsity_rowstart[0]]);
+
+            right_hand_side(row) -= matrix.global_entry(global_entry) /
+                                    diagonal_entry * new_rhs;
+            matrix.global_entry(global_entry) = 0.;
+          }
+      }
+
+    // preset solution vector
+    solution(dof_number) = dof->second;
+ }
+ }
-#ifdef DEAL_II_USE_PETSC
-namespace PETScWrappers
-{
-  template <typename PETScMatrix, typename PETScVector>
+  template <typename number>
void
  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScMatrix &matrix,
- PETScVector &solution,
- PETScVector &right_hand_side,
- const bool eliminate_columns)
+                         BlockSparseMatrix<number>  &matrix,
+                         BlockVector<number>        &solution,
+                         BlockVector<number>        &right_hand_side,
+ const bool eliminate_columns)
{
- Assert (eliminate_columns == false, ExcNotImplemented());
+ const unsigned int blocks = matrix.n_block_rows();
Assert (matrix.n() == right_hand_side.size(),
- ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
+ ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
Assert (matrix.n() == solution.size(),
- ExcDimensionMismatch(matrix.n(), solution.size()));
+ ExcDimensionMismatch(matrix.n(), solution.size()));
+ Assert (matrix.n_block_rows() == matrix.n_block_cols(),
+ ExcNotQuadratic());
+ Assert (matrix.get_sparsity_pattern().get_row_indices() ==
+ matrix.get_sparsity_pattern().get_column_indices(),
+ ExcNotQuadratic());
+ Assert (matrix.get_sparsity_pattern().get_column_indices() ==
+ solution.get_block_indices (),
+ ExcBlocksDontMatch ());
+ Assert (matrix.get_sparsity_pattern().get_row_indices() ==
+ right_hand_side.get_block_indices (),
+ ExcBlocksDontMatch ());
+
+    for (unsigned int i=0; i<blocks; ++i)
+      Assert (matrix.block(i,i).get_sparsity_pattern().optimize_diagonal(),
+              SparsityPattern::ExcDiagonalNotOptimized());
-  const std::pair<unsigned int,unsigned int> local_range
- = matrix.local_range();
- Assert (local_range == right_hand_side.local_range(),
- ExcInternalError());
- Assert (local_range == solution.local_range(),
- ExcInternalError());
+ const unsigned int n_dofs = matrix.m();
- // we have to read and write from this
- // matrix (in this order). this will only
- // work if we compress the matrix first,
- // done here
- matrix.compress ();
+ // if a diagonal entry is zero
+ // later, then we use another
+ // number instead. take it to be
+ // the first nonzero diagonal
+ // element of the matrix, or 1 if
+ // there is no such thing
+ number first_nonzero_diagonal_entry = 0;
+    for (unsigned int diag_block=0; diag_block<blocks; ++diag_block)
+      {
+        for (unsigned int i=0; i<matrix.block(diag_block,diag_block).n(); ++i)
+          if (matrix.block(diag_block,diag_block).diag_element(i) != 0)
+            {
+              first_nonzero_diagonal_entry
+                = matrix.block(diag_block,diag_block).diag_element(i);
+              break;
+            }
+        // check whether we have found something in the present block
+        if (first_nonzero_diagonal_entry != 0)
+          break;
+      }
+    // nothing found on all diagonal blocks? if so, use 1.0 instead
+    if (first_nonzero_diagonal_entry == 0)
+      first_nonzero_diagonal_entry = 1;
+
+    std::map<unsigned int,double>::const_iterator dof  = boundary_values.begin(),
+ endd = boundary_values.end();
+ const BlockSparsityPattern &
+ sparsity_pattern = matrix.get_sparsity_pattern();
+
+ // pointer to the mapping between
+ // global and block indices. since
+ // the row and column mappings are
+ // equal, store a pointer on only
+ // one of them
+ const BlockIndices &
+ index_mapping = sparsity_pattern.get_column_indices();
+
+ // now loop over all boundary dofs
+ for (; dof != endd; ++dof)
+ {
+ Assert (dof->first < n_dofs, ExcInternalError());
+
+ // get global index and index
+ // in the block in which this
+ // dof is located
+ const unsigned int dof_number = dof->first;
+        const std::pair<unsigned int,unsigned int>
+ block_index = index_mapping.global_to_local (dof_number);
+
+ // for each boundary dof:
+
+ // set entries of this line
+ // to zero except for the diagonal
+ // entry. Note that the diagonal
+ // entry is always the first one
+ // for square matrices, i.e.
+ // we shall not set
+ // matrix.global_entry(
+ // sparsity_rowstart[dof.first])
+ // of the diagonal block
+        for (unsigned int block_col=0; block_col<blocks; ++block_col)
+          {
+            const SparsityPattern &
+              local_sparsity = sparsity_pattern.block(block_index.first,
+                                                      block_col);
+            // exclude the diagonal element of the diagonal block,
+            // which is the first entry of its row
+            const unsigned int
+              last  = local_sparsity.get_rowstart_indices()[block_index.second+1],
+              first = (block_col == block_index.first ?
+                       local_sparsity.get_rowstart_indices()[block_index.second]+1 :
+                       local_sparsity.get_rowstart_indices()[block_index.second]);
+            for (unsigned int j=first; j<last; ++j)
+              matrix.block(block_index.first,block_col).global_entry(j) = 0.;
+          }
-      std::vector<unsigned int> constrained_rows;
-      for (std::map<unsigned int,double>::const_iterator
- dof = boundary_values.begin();
- dof != boundary_values.end();
- ++dof)
- if ((dof->first >= local_range.first) &&
- (dof->first < local_range.second))
- constrained_rows.push_back (dof->first);
-
- // then eliminate these rows and set
- // their diagonal entry to what we have
- // determined above. note that for petsc
- // matrices interleaving read with write
- // operations is very expensive. thus, we
- // here always replace the diagonal
- // element, rather than first checking
- // whether it is nonzero and in that case
- // preserving it. this is different from
- // the case of deal.II sparse matrices
- // treated in the other functions.
- matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
-
- // the next thing is to set right hand
- // side to the wanted value. there's one
- // drawback: if we write to individual
- // vector elements, then we have to do
- // that on all processors. however, some
- // processors may not need to set
- // anything because their chunk of
- // matrix/rhs do not contain any boundary
- // nodes. therefore, rather than using
- // individual calls, we use one call for
- // all elements, thereby making sure that
- // all processors call this function,
- // even if some only have an empty set of
- // elements to set
- right_hand_side.compress ();
- solution.compress ();
-
-      std::vector<unsigned int> indices;
-      std::vector<PetscScalar>  solution_values;
-      for (std::map<unsigned int,double>::const_iterator
- dof = boundary_values.begin();
- dof != boundary_values.end();
- ++dof)
- if ((dof->first >= local_range.first) &&
- (dof->first < local_range.second))
- {
- indices.push_back (dof->first);
- solution_values.push_back (dof->second);
- }
- solution.set (indices, solution_values);
-
- // now also set appropriate values for
- // the rhs
-      for (unsigned int i=0; i<solution_values.size(); ++i)
-        solution_values[i] *= average_nonzero_diagonal_entry;
-
-      right_hand_side.set (indices, solution_values);
+        number new_rhs;
+        if (matrix.block(block_index.first, block_index.first)
+            .diag_element(block_index.second) != 0.0)
+          new_rhs = dof->second *
+ matrix.block(block_index.first, block_index.first)
+ .diag_element(block_index.second);
+ else
+ {
+ matrix.block(block_index.first, block_index.first)
+ .diag_element(block_index.second)
+ = first_nonzero_diagonal_entry;
+ new_rhs = dof->second * first_nonzero_diagonal_entry;
+ }
+ right_hand_side.block(block_index.first)(block_index.second)
+ = new_rhs;
+
+
+ // if the user wants to have
+ // the symmetry of the matrix
+ // preserved, and if the
+ // sparsity pattern is
+ // symmetric, then do a Gauss
+ // elimination step with the
+ // present row. this is a
+ // little more complicated for
+ // block matrices.
+ if (eliminate_columns)
+ {
+ // store the only nonzero entry
+ // of this line for the Gauss
+ // elimination step
+ const number diagonal_entry
+ = matrix.block(block_index.first,block_index.first)
+ .diag_element(block_index.second);
+
+ // we have to loop over all
+ // rows of the matrix which
+ // have a nonzero entry in
+ // the column which we work
+ // in presently. if the
+ // sparsity pattern is
+ // symmetric, then we can
+ // get the positions of
+ // these rows cheaply by
+ // looking at the nonzero
+ // column numbers of the
+ // present row.
+ //
+ // note that if we check
+ // whether row @p{row} in
+ // block (r,c) is non-zero,
+ // then we have to check
+ // for the existence of
+ // column @p{row} in block
+ // (c,r), i.e. of the
+ // transpose block
+        for (unsigned int block_row=0; block_row<blocks; ++block_row)
+          {
+            const SparsityPattern
+              &transpose_sparsity = sparsity_pattern.block (block_index.first,
+                                                            block_row);
+            // traverse the row of the transpose block to find the
+            // interesting rows in the present block, skipping the
+            // diagonal element of the diagonal block
+            const unsigned int
+              first = (block_index.first == block_row ?
+                       transpose_sparsity.get_rowstart_indices()[block_index.second]+1 :
+                       transpose_sparsity.get_rowstart_indices()[block_index.second]),
+              last  = transpose_sparsity.get_rowstart_indices()[block_index.second+1];
+            for (unsigned int j=first; j<last; ++j)
+              {
+                const unsigned int row = transpose_sparsity.get_column_numbers()[j];
+                const unsigned int global_entry
+                  = matrix.block(block_row, block_index.first)
+                    .get_sparsity_pattern()(row, block_index.second);
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::SparseMatrix &matrix,
- PETScWrappers::Vector &solution,
- PETScWrappers::Vector &right_hand_side,
- const bool eliminate_columns)
-{
- // simply redirect to the generic function
- // used for both petsc matrix types
- PETScWrappers::apply_boundary_values (boundary_values, matrix, solution,
- right_hand_side, eliminate_columns);
-}
+ // correct right hand side
+ right_hand_side.block(block_row)(row)
+ -= matrix.block(block_row,block_index.first).global_entry(global_entry) /
+ diagonal_entry * new_rhs;
+ // set matrix entry to zero
+ matrix.block(block_row,block_index.first).global_entry(global_entry) = 0.;
+ }
+ }
+ }
+ // preset solution vector
+ solution.block(block_index.first)(block_index.second) = dof->second;
+ }
+ }
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
- PETScWrappers::MPI::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
- const bool eliminate_columns)
-{
- // simply redirect to the generic function
- // used for both petsc matrix types
- PETScWrappers::apply_boundary_values (boundary_values, matrix, solution,
- right_hand_side, eliminate_columns);
- // compress the matrix once we're done
- matrix.compress ();
-}
+#ifdef DEAL_II_USE_PETSC
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- PETScWrappers::MPI::BlockSparseMatrix &matrix,
- PETScWrappers::MPI::BlockVector &solution,
- PETScWrappers::MPI::BlockVector &right_hand_side,
- const bool eliminate_columns)
-{
- Assert (matrix.n() == right_hand_side.size(),
- ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
- Assert (matrix.n() == solution.size(),
- ExcDimensionMismatch(matrix.n(), solution.size()));
- Assert (matrix.n_block_rows() == matrix.n_block_cols(),
- ExcNotQuadratic());
-
- const unsigned int n_blocks = matrix.n_block_rows();
-
- matrix.compress();
-
- // We need to find the subdivision
- // into blocks for the boundary values.
- // To this end, generate a vector of
- // maps with the respective indices.
-  std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
+ namespace internal
{
- int offset = 0, block=0;
-    for (std::map<unsigned int,double>::const_iterator
- dof = boundary_values.begin();
- dof != boundary_values.end();
- ++dof)
+ namespace PETScWrappers
+ {
+      template <typename PETScMatrix, typename PETScVector>
+ void
+      apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ PETScMatrix &matrix,
+ PETScVector &solution,
+ PETScVector &right_hand_side,
+ const bool eliminate_columns)
{
- if (dof->first >= matrix.block(block,0).m() + offset)
- {
- offset += matrix.block(block,0).m();
- block++;
- }
- const unsigned int index = dof->first - offset;
-      block_boundary_values[block].insert(std::pair<unsigned int, double> (index,dof->second));
- }
- }
+ Assert (eliminate_columns == false, ExcNotImplemented());
- // Now call the non-block variants on
- // the diagonal subblocks and the
- // solution/rhs.
-  for (unsigned int block=0; block<n_blocks; ++block)
-    apply_boundary_values(block_boundary_values[block],
-                          matrix.block(block,block),
-                          solution.block(block),
-                          right_hand_side.block(block),
-                          eliminate_columns);
-
-  // also clear the rows of the off-diagonal blocks
-  for (unsigned int block_m=0; block_m<n_blocks; ++block_m)
-    {
-      const std::pair<unsigned int, unsigned int> local_range
- = matrix.block(block_m,0).local_range();
+ Assert (matrix.n() == right_hand_side.size(),
+ ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
+ Assert (matrix.n() == solution.size(),
+ ExcDimensionMismatch(matrix.n(), solution.size()));
- std::vector constrained_rows;
-      for (std::map<unsigned int,double>::const_iterator
- dof = block_boundary_values[block_m].begin();
- dof != block_boundary_values[block_m].end();
- ++dof)
- if ((dof->first >= local_range.first) &&
- (dof->first < local_range.second))
- constrained_rows.push_back (dof->first);
-
-      for (unsigned int block_n=0; block_n<n_blocks; ++block_n)
-        matrix.block(block_m,block_n).clear_rows(constrained_rows);
-    }
-}
+        const std::pair<unsigned int,unsigned int> local_range
+ = matrix.local_range();
+ Assert (local_range == right_hand_side.local_range(),
+ ExcInternalError());
+ Assert (local_range == solution.local_range(),
+ ExcInternalError());
+
+
+ // we have to read and write from this
+ // matrix (in this order). this will only
+ // work if we compress the matrix first,
+ // done here
+ matrix.compress ();
+
+ // determine the first nonzero diagonal
+ // entry from within the part of the
+ // matrix that we can see. if we can't
+ // find such an entry, take one
+ PetscScalar average_nonzero_diagonal_entry = 1;
+        for (unsigned int i=local_range.first; i<local_range.second; ++i)
+          if (matrix.diag_element(i) != 0)
+            {
+              average_nonzero_diagonal_entry = std::fabs(matrix.diag_element(i));
+              break;
+            }
+
+        // figure out which rows of the matrix we have to eliminate on
+        // this processor
+        std::vector<unsigned int> constrained_rows;
+        for (std::map<unsigned int,double>::const_iterator
+ dof = boundary_values.begin();
+ dof != boundary_values.end();
+ ++dof)
+ if ((dof->first >= local_range.first) &&
+ (dof->first < local_range.second))
+ constrained_rows.push_back (dof->first);
+ // then eliminate these rows and set
+ // their diagonal entry to what we have
+ // determined above. note that for petsc
+ // matrices interleaving read with write
+ // operations is very expensive. thus, we
+ // here always replace the diagonal
+ // element, rather than first checking
+ // whether it is nonzero and in that case
+ // preserving it. this is different from
+ // the case of deal.II sparse matrices
+ // treated in the other functions.
+ matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
+
+ // the next thing is to set right hand
+ // side to the wanted value. there's one
+ // drawback: if we write to individual
+ // vector elements, then we have to do
+ // that on all processors. however, some
+ // processors may not need to set
+ // anything because their chunk of
+ // matrix/rhs do not contain any boundary
+ // nodes. therefore, rather than using
+ // individual calls, we use one call for
+ // all elements, thereby making sure that
+ // all processors call this function,
+ // even if some only have an empty set of
+ // elements to set
+ right_hand_side.compress ();
+ solution.compress ();
+
+        std::vector<unsigned int> indices;
+        std::vector<PetscScalar>  solution_values;
+        for (std::map<unsigned int,double>::const_iterator
+ dof = boundary_values.begin();
+ dof != boundary_values.end();
+ ++dof)
+ if ((dof->first >= local_range.first) &&
+ (dof->first < local_range.second))
+ {
+ indices.push_back (dof->first);
+ solution_values.push_back (dof->second);
+ }
+ solution.set (indices, solution_values);
+
+ // now also set appropriate values for
+ // the rhs
+        for (unsigned int i=0; i<solution_values.size(); ++i)
+          solution_values[i] *= average_nonzero_diagonal_entry;
+
+        right_hand_side.set (indices, solution_values);
+      }
+    }
+  }
+
-#ifdef DEAL_II_USE_TRILINOS
-namespace TrilinosWrappers
-{
-  template <typename TrilinosMatrix, typename TrilinosVector>
void
  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosMatrix &matrix,
- TrilinosVector &solution,
- TrilinosVector &right_hand_side,
- const bool eliminate_columns)
+ PETScWrappers::SparseMatrix &matrix,
+ PETScWrappers::Vector &solution,
+ PETScWrappers::Vector &right_hand_side,
+ const bool eliminate_columns)
{
- Assert (eliminate_columns == false, ExcNotImplemented());
+ // simply redirect to the generic function
+ // used for both petsc matrix types
+ internal::PETScWrappers::apply_boundary_values (boundary_values, matrix, solution,
+ right_hand_side, eliminate_columns);
+ }
- Assert (matrix.n() == right_hand_side.size(),
- ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
- Assert (matrix.n() == solution.size(),
- ExcDimensionMismatch(matrix.m(), solution.size()));
- // if no boundary values are to be applied
- // simply return
- if (boundary_values.size() == 0)
- return;
-      const std::pair<unsigned int,unsigned int> local_range
- = matrix.local_range();
- Assert (local_range == right_hand_side.local_range(),
- ExcInternalError());
- Assert (local_range == solution.local_range(),
- ExcInternalError());
-
- // we have to read and write from this
- // matrix (in this order). this will only
- // work if we compress the matrix first,
- // done here
- matrix.compress ();
+ void
- // determine the first nonzero diagonal
- // entry from within the part of the
- // matrix that we can see. if we can't
- // find such an entry, take one
- TrilinosScalar average_nonzero_diagonal_entry = 1;
-      for (unsigned int i=local_range.first; i<local_range.second; ++i)
-        if (matrix.diag_element(i) != 0)
-          {
-            average_nonzero_diagonal_entry = std::fabs(matrix.diag_element(i));
-            break;
-          }
-
-      // figure out which rows of the matrix we have to eliminate on
-      // this processor
-      std::vector<unsigned int> constrained_rows;
-      for (std::map<unsigned int,double>::const_iterator
- dof = boundary_values.begin();
- dof != boundary_values.end();
- ++dof)
- if ((dof->first >= local_range.first) &&
- (dof->first < local_range.second))
- constrained_rows.push_back (dof->first);
-
- // then eliminate these rows and
- // set their diagonal entry to
- // what we have determined
- // above. if the value already is
- // nonzero, it will be preserved,
- // in accordance with the basic
- // matrix classes in deal.II.
- matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
-
- // the next thing is to set right
- // hand side to the wanted
- // value. there's one drawback:
- // if we write to individual
- // vector elements, then we have
- // to do that on all
- // processors. however, some
- // processors may not need to set
- // anything because their chunk
- // of matrix/rhs do not contain
- // any boundary nodes. therefore,
- // rather than using individual
- // calls, we use one call for all
- // elements, thereby making sure
- // that all processors call this
- // function, even if some only
- // have an empty set of elements
- // to set
- right_hand_side.compress ();
- solution.compress ();
-
-      std::vector<unsigned int>    indices;
-      std::vector<TrilinosScalar>  solution_values;
-      for (std::map<unsigned int,double>::const_iterator
- dof = boundary_values.begin();
- dof != boundary_values.end();
- ++dof)
- if ((dof->first >= local_range.first) &&
- (dof->first < local_range.second))
- {
- indices.push_back (dof->first);
- solution_values.push_back (dof->second);
- }
- solution.set (indices, solution_values);
-
- // now also set appropriate
- // values for the rhs
-      for (unsigned int i=0; i<solution_values.size(); ++i)
-        solution_values[i] *= average_nonzero_diagonal_entry;
-
-      right_hand_side.set (indices, solution_values);
-
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::Vector &solution,
+ PETScWrappers::MPI::Vector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ // simply redirect to the generic function
+ // used for both petsc matrix types
+ internal::PETScWrappers::apply_boundary_values (boundary_values, matrix, solution,
+ right_hand_side, eliminate_columns);
+
+ // compress the matrix once we're done
matrix.compress ();
- solution.compress ();
- right_hand_side.compress ();
}
-
-  template <typename TrilinosMatrix, typename TrilinosBlockVector>
void
-  apply_block_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosMatrix &matrix,
- TrilinosBlockVector &solution,
- TrilinosBlockVector &right_hand_side,
- const bool eliminate_columns)
+ apply_boundary_values (const std::map &boundary_values,
+ PETScWrappers::MPI::BlockSparseMatrix &matrix,
+ PETScWrappers::MPI::BlockVector &solution,
+ PETScWrappers::MPI::BlockVector &right_hand_side,
+ const bool eliminate_columns)
{
- Assert (eliminate_columns == false, ExcNotImplemented());
-
Assert (matrix.n() == right_hand_side.size(),
ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
Assert (matrix.n() == solution.size(),
@@ -2780,10 +2583,10 @@ namespace TrilinosWrappers
matrix.compress();
- // We need to find the subdivision
- // into blocks for the boundary values.
- // To this end, generate a vector of
- // maps with the respective indices.
+ // We need to find the subdivision
+ // into blocks for the boundary values.
+ // To this end, generate a vector of
+ // maps with the respective indices.
 std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
{
int offset = 0, block=0;
@@ -2798,25 +2601,24 @@ namespace TrilinosWrappers
block++;
}
const unsigned int index = dof->first - offset;
-          block_boundary_values[block].insert(
-            std::pair<unsigned int, double> (index,dof->second));
+          block_boundary_values[block].insert(std::pair<unsigned int, double> (index,dof->second));
}
}
- // Now call the non-block variants on
- // the diagonal subblocks and the
- // solution/rhs.
+ // Now call the non-block variants on
+ // the diagonal subblocks and the
+ // solution/rhs.
 for (unsigned int block=0; block<n_blocks; ++block)
   apply_boundary_values(block_boundary_values[block],
                         matrix.block(block,block),
                         solution.block(block),
                         right_hand_side.block(block),
                         eliminate_columns);

 // also clear the rows of the off-diagonal blocks
 for (unsigned int block_m=0; block_m<n_blocks; ++block_m)
   {
     const std::pair<unsigned int, unsigned int> local_range
@@ -2837,186 +2639,386 @@ namespace TrilinosWrappers
}
}
-}
+#endif
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
- TrilinosWrappers::Vector &solution,
- TrilinosWrappers::Vector &right_hand_side,
- const bool eliminate_columns)
-{
- // simply redirect to the generic function
- // used for both trilinos matrix types
- TrilinosWrappers::apply_boundary_values (boundary_values, matrix, solution,
- right_hand_side, eliminate_columns);
-}
+#ifdef DEAL_II_USE_TRILINOS
+ namespace internal
+ {
+ namespace TrilinosWrappers
+ {
+      template <typename TrilinosMatrix, typename TrilinosVector>
+ void
+      apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosMatrix &matrix,
+ TrilinosVector &solution,
+ TrilinosVector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ Assert (eliminate_columns == false, ExcNotImplemented());
+ Assert (matrix.n() == right_hand_side.size(),
+ ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
+ Assert (matrix.n() == solution.size(),
+ ExcDimensionMismatch(matrix.m(), solution.size()));
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
- TrilinosWrappers::MPI::Vector &solution,
- TrilinosWrappers::MPI::Vector &right_hand_side,
- const bool eliminate_columns)
-{
- // simply redirect to the generic function
- // used for both trilinos matrix types
- TrilinosWrappers::apply_boundary_values (boundary_values, matrix, solution,
- right_hand_side, eliminate_columns);
-}
+ // if no boundary values are to be applied
+ // simply return
+ if (boundary_values.size() == 0)
+ return;
+        const std::pair<unsigned int,unsigned int> local_range
+ = matrix.local_range();
+ Assert (local_range == right_hand_side.local_range(),
+ ExcInternalError());
+ Assert (local_range == solution.local_range(),
+ ExcInternalError());
+
+ // we have to read and write from this
+ // matrix (in this order). this will only
+ // work if we compress the matrix first,
+ // done here
+ matrix.compress ();
+
+ // determine the first nonzero diagonal
+ // entry from within the part of the
+ // matrix that we can see. if we can't
+ // find such an entry, take one
+ TrilinosScalar average_nonzero_diagonal_entry = 1;
+        for (unsigned int i=local_range.first; i<local_range.second; ++i)
+          if (matrix.diag_element(i) != 0)
+            {
+              average_nonzero_diagonal_entry = std::fabs(matrix.diag_element(i));
+              break;
+            }
+
+        // figure out which rows of the matrix we have to eliminate on
+        // this processor
+        std::vector<unsigned int> constrained_rows;
+        for (std::map<unsigned int,double>::const_iterator
+ dof = boundary_values.begin();
+ dof != boundary_values.end();
+ ++dof)
+ if ((dof->first >= local_range.first) &&
+ (dof->first < local_range.second))
+ constrained_rows.push_back (dof->first);
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
- TrilinosWrappers::BlockVector &solution,
- TrilinosWrappers::BlockVector &right_hand_side,
- const bool eliminate_columns)
-{
- TrilinosWrappers::apply_block_boundary_values (boundary_values, matrix,
- solution, right_hand_side,
- eliminate_columns);
-}
+ // then eliminate these rows and
+ // set their diagonal entry to
+ // what we have determined
+ // above. if the value already is
+ // nonzero, it will be preserved,
+ // in accordance with the basic
+ // matrix classes in deal.II.
+ matrix.clear_rows (constrained_rows, average_nonzero_diagonal_entry);
+
+ // the next thing is to set right
+ // hand side to the wanted
+ // value. there's one drawback:
+ // if we write to individual
+ // vector elements, then we have
+ // to do that on all
+ // processors. however, some
+ // processors may not need to set
+ // anything because their chunk
+ // of matrix/rhs do not contain
+ // any boundary nodes. therefore,
+ // rather than using individual
+ // calls, we use one call for all
+ // elements, thereby making sure
+ // that all processors call this
+ // function, even if some only
+ // have an empty set of elements
+ // to set
+ right_hand_side.compress ();
+ solution.compress ();
+
+        std::vector<unsigned int>    indices;
+        std::vector<TrilinosScalar>  solution_values;
+        for (std::map<unsigned int,double>::const_iterator
+ dof = boundary_values.begin();
+ dof != boundary_values.end();
+ ++dof)
+ if ((dof->first >= local_range.first) &&
+ (dof->first < local_range.second))
+ {
+ indices.push_back (dof->first);
+ solution_values.push_back (dof->second);
+ }
+ solution.set (indices, solution_values);
+ // now also set appropriate
+ // values for the rhs
+        for (unsigned int i=0; i<solution_values.size(); ++i)
+          solution_values[i] *= average_nonzero_diagonal_entry;
+
+        right_hand_side.set (indices, solution_values);
-void
-MatrixTools::
-apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
- TrilinosWrappers::MPI::BlockVector &solution,
- TrilinosWrappers::MPI::BlockVector &right_hand_side,
- const bool eliminate_columns)
-{
- TrilinosWrappers::apply_block_boundary_values (boundary_values, matrix,
- solution, right_hand_side,
- eliminate_columns);
-}
+ // clean up
+ matrix.compress ();
+ solution.compress ();
+ right_hand_side.compress ();
+ }
-#endif
+      template <typename TrilinosMatrix, typename TrilinosBlockVector>
+ void
+      apply_block_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosMatrix &matrix,
+ TrilinosBlockVector &solution,
+ TrilinosBlockVector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ Assert (eliminate_columns == false, ExcNotImplemented());
-void
-MatrixTools::
-local_apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                             const std::vector<unsigned int> &local_dof_indices,
-                             FullMatrix<double> &local_matrix,
-                             Vector<double>     &local_rhs,
- const bool eliminate_columns)
-{
- Assert (local_dof_indices.size() == local_matrix.m(),
- ExcDimensionMismatch(local_dof_indices.size(),
- local_matrix.m()));
- Assert (local_dof_indices.size() == local_matrix.n(),
- ExcDimensionMismatch(local_dof_indices.size(),
- local_matrix.n()));
- Assert (local_dof_indices.size() == local_rhs.size(),
- ExcDimensionMismatch(local_dof_indices.size(),
- local_rhs.size()));
-
- // if there is nothing to do, then exit
- // right away
- if (boundary_values.size() == 0)
- return;
-
- // otherwise traverse all the dofs used in
- // the local matrices and vectors and see
- // what's there to do
-
- // if we need to treat an entry, then we
- // set the diagonal entry to its absolute
- // value. if it is zero, we used to set it
- // to one, which is a really terrible
- // choice that can lead to hours of
- // searching for bugs in programs (I
- // experienced this :-( ) if the matrix
- // entries are otherwise very large. this
- // is so since iterative solvers would
- // simply not correct boundary nodes for
- // their correct values since the residual
- // contributions of their rows of the
- // linear system is almost zero if the
- // diagonal entry is one. thus, set it to
- // the average absolute value of the
- // nonzero diagonal elements.
- //
- // we only compute this value lazily the
- // first time we need it.
- double average_diagonal = 0;
- const unsigned int n_local_dofs = local_dof_indices.size();
-  for (unsigned int i=0; i<n_local_dofs; ++i)
-    {
-      const std::map<unsigned int,double>::const_iterator
- boundary_value = boundary_values.find (local_dof_indices[i]);
- if (boundary_value != boundary_values.end())
- {
- // remove this row, except for the
- // diagonal element
-          for (unsigned j=0; j<n_local_dofs; ++j)
-            if (i != j)
-              local_matrix(i,j) = 0;
-
-          // replace the diagonal entry by its absolute value, or by
-          // the average absolute value of the nonzero diagonal
-          // elements if it is zero (computed lazily the first time
-          // it is needed)
-          if (local_matrix(i,i) == 0.)
-            {
-              if (average_diagonal == 0.)
-                {
-                  unsigned int nonzero_diagonals = 0;
-                  for (unsigned int k=0; k<n_local_dofs; ++k)
-                    if (local_matrix(k,k) != 0.)
-                      {
-                        average_diagonal += std::fabs(local_matrix(k,k));
-                        ++nonzero_diagonals;
-                      }
-                  if (nonzero_diagonals != 0)
-                    average_diagonal /= nonzero_diagonals;
-                  else
-                    average_diagonal = 1.;
-                }
-              local_matrix(i,i) = average_diagonal;
-            }
-          else
-            local_matrix(i,i) = std::fabs(local_matrix(i,i));
-
-          // and replace the rhs entry by the correct value
-          local_rhs(i) = local_matrix(i,i) * boundary_value->second;
-
- // finally do the elimination step
- // if requested
- if (eliminate_columns == true)
- {
-              for (unsigned int row=0; row<n_local_dofs; ++row)
-                if (row != i)
-                  {
-                    local_rhs(row) -= local_matrix(row,i) * boundary_value->second;
- local_matrix(row,i) = 0;
- }
- }
- }
+ Assert (matrix.n() == right_hand_side.size(),
+ ExcDimensionMismatch(matrix.n(), right_hand_side.size()));
+ Assert (matrix.n() == solution.size(),
+ ExcDimensionMismatch(matrix.n(), solution.size()));
+ Assert (matrix.n_block_rows() == matrix.n_block_cols(),
+ ExcNotQuadratic());
+
+ const unsigned int n_blocks = matrix.n_block_rows();
+
+ matrix.compress();
+
+ // We need to find the subdivision
+ // into blocks for the boundary values.
+ // To this end, generate a vector of
+ // maps with the respective indices.
+        std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
+ {
+ int offset = 0, block=0;
+          for (std::map<unsigned int,double>::const_iterator
+ dof = boundary_values.begin();
+ dof != boundary_values.end();
+ ++dof)
+ {
+ if (dof->first >= matrix.block(block,0).m() + offset)
+ {
+ offset += matrix.block(block,0).m();
+ block++;
+ }
+ const unsigned int index = dof->first - offset;
+              block_boundary_values[block].insert(
+                std::pair<unsigned int, double> (index,dof->second));
+ }
+ }
+
+ // Now call the non-block variants on
+ // the diagonal subblocks and the
+ // solution/rhs.
+        for (unsigned int block=0; block<n_blocks; ++block)
+          apply_boundary_values(block_boundary_values[block],
+                                matrix.block(block,block),
+                                solution.block(block),
+                                right_hand_side.block(block),
+                                eliminate_columns);
+
+        // also clear the rows of the off-diagonal blocks
+        for (unsigned int block_m=0; block_m<n_blocks; ++block_m)
+          {
+            const std::pair<unsigned int, unsigned int> local_range
+ = matrix.block(block_m,0).local_range();
+
+            std::vector<unsigned int> constrained_rows;
+            for (std::map<unsigned int,double>::const_iterator
+ dof = block_boundary_values[block_m].begin();
+ dof != block_boundary_values[block_m].end();
+ ++dof)
+ if ((dof->first >= local_range.first) &&
+ (dof->first < local_range.second))
+ constrained_rows.push_back (dof->first);
+
+            for (unsigned int block_n=0; block_n<n_blocks; ++block_n)
+              matrix.block(block_m,block_n).clear_rows(constrained_rows);
+          }
+      }
+    }
+  }
+
+
+
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::Vector &solution,
+ TrilinosWrappers::Vector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ // simply redirect to the generic function
+ // used for both trilinos matrix types
+ internal::TrilinosWrappers::apply_boundary_values (boundary_values, matrix, solution,
+ right_hand_side, eliminate_columns);
+ }
+
+
+
+ void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::MPI::Vector &solution,
+ TrilinosWrappers::MPI::Vector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ // simply redirect to the generic function
+ // used for both trilinos matrix types
+ internal::TrilinosWrappers::apply_boundary_values (boundary_values, matrix, solution,
+ right_hand_side, eliminate_columns);
+ }
+
+
+
+ void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockVector &solution,
+ TrilinosWrappers::BlockVector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ internal::TrilinosWrappers::apply_block_boundary_values (boundary_values, matrix,
+ solution, right_hand_side,
+ eliminate_columns);
+ }
+
+
+
+ void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::MPI::BlockVector &solution,
+ TrilinosWrappers::MPI::BlockVector &right_hand_side,
+ const bool eliminate_columns)
+ {
+ internal::TrilinosWrappers::apply_block_boundary_values (boundary_values, matrix,
+ solution, right_hand_side,
+ eliminate_columns);
+ }
+
+#endif
+
+
+
+ void
+  local_apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                               const std::vector<unsigned int> &local_dof_indices,
+                               FullMatrix<double> &local_matrix,
+                               Vector<double>     &local_rhs,
+ const bool eliminate_columns)
+ {
+ Assert (local_dof_indices.size() == local_matrix.m(),
+ ExcDimensionMismatch(local_dof_indices.size(),
+ local_matrix.m()));
+ Assert (local_dof_indices.size() == local_matrix.n(),
+ ExcDimensionMismatch(local_dof_indices.size(),
+ local_matrix.n()));
+ Assert (local_dof_indices.size() == local_rhs.size(),
+ ExcDimensionMismatch(local_dof_indices.size(),
+ local_rhs.size()));
+
+ // if there is nothing to do, then exit
+ // right away
+ if (boundary_values.size() == 0)
+ return;
+
+ // otherwise traverse all the dofs used in
+ // the local matrices and vectors and see
+ // what's there to do
+
+ // if we need to treat an entry, then we
+ // set the diagonal entry to its absolute
+ // value. if it is zero, we used to set it
+ // to one, which is a really terrible
+ // choice that can lead to hours of
+ // searching for bugs in programs (I
+ // experienced this :-( ) if the matrix
+ // entries are otherwise very large. this
+ // is so since iterative solvers would
+ // simply not correct boundary nodes for
+ // their correct values since the residual
+ // contributions of their rows of the
+ // linear system is almost zero if the
+ // diagonal entry is one. thus, set it to
+ // the average absolute value of the
+ // nonzero diagonal elements.
+ //
+ // we only compute this value lazily the
+ // first time we need it.
+ double average_diagonal = 0;
+ const unsigned int n_local_dofs = local_dof_indices.size();
+    for (unsigned int i=0; i<n_local_dofs; ++i)
+      {
+        const std::map<unsigned int,double>::const_iterator
+ boundary_value = boundary_values.find (local_dof_indices[i]);
+ if (boundary_value != boundary_values.end())
+ {
+ // remove this row, except for the
+ // diagonal element
+            for (unsigned j=0; j<n_local_dofs; ++j)
+              if (i != j)
+                local_matrix(i,j) = 0;
+
+            // replace the diagonal entry by its absolute value, or by
+            // the average absolute value of the nonzero diagonal
+            // elements if it is zero (computed lazily the first time
+            // it is needed)
+            if (local_matrix(i,i) == 0.)
+              {
+                if (average_diagonal == 0.)
+                  {
+                    unsigned int nonzero_diagonals = 0;
+                    for (unsigned int k=0; k<n_local_dofs; ++k)
+                      if (local_matrix(k,k) != 0.)
+                        {
+                          average_diagonal += std::fabs(local_matrix(k,k));
+                          ++nonzero_diagonals;
+                        }
+                    if (nonzero_diagonals != 0)
+                      average_diagonal /= nonzero_diagonals;
+                    else
+                      average_diagonal = 1.;
+                  }
+                local_matrix(i,i) = average_diagonal;
+              }
+            else
+              local_matrix(i,i) = std::fabs(local_matrix(i,i));
+
+            // and replace the rhs entry by the correct value
+            local_rhs(i) = local_matrix(i,i) * boundary_value->second;
+
+ // finally do the elimination step
+ // if requested
+ if (eliminate_columns == true)
+ {
+                for (unsigned int row=0; row<n_local_dofs; ++row)
+                  if (row != i)
+                    {
+                      local_rhs(row) -= local_matrix(row,i) * boundary_value->second;
+ local_matrix(row,i) = 0;
+ }
+ }
+ }
+ }
+ }
}
@@ -3024,35 +3026,37 @@ local_apply_boundary_values (const std::map<unsigned int,double> &boundary_value
// explicit instantiations
#include "matrices.inst"
-template
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    SparseMatrix<double>  &matrix,
-                                    Vector<double>        &solution,
-                                    Vector<double>        &right_hand_side,
-                                    const bool             eliminate_columns);
-template
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    SparseMatrix<float>   &matrix,
-                                    Vector<float>         &solution,
-                                    Vector<float>         &right_hand_side,
-                                    const bool             eliminate_columns);
-
-template
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    BlockSparseMatrix<double>  &matrix,
-                                    BlockVector<double>        &solution,
-                                    BlockVector<double>        &right_hand_side,
-                                    const bool                  eliminate_columns);
-template
-void
-MatrixTools::apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
-                                    BlockSparseMatrix<float>   &matrix,
-                                    BlockVector<float>         &solution,
-                                    BlockVector<float>         &right_hand_side,
-                                    const bool                  eliminate_columns);
+namespace MatrixTools
+{
+  template
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         SparseMatrix<double>  &matrix,
+                         Vector<double>        &solution,
+                         Vector<double>        &right_hand_side,
+                         const bool             eliminate_columns);
+  template
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         SparseMatrix<float>   &matrix,
+                         Vector<float>         &solution,
+                         Vector<float>         &right_hand_side,
+                         const bool             eliminate_columns);
+  template
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         BlockSparseMatrix<double>  &matrix,
+                         BlockVector<double>        &solution,
+                         BlockVector<double>        &right_hand_side,
+                         const bool                  eliminate_columns);
+  template
+  void
+  apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+                         BlockSparseMatrix<float>   &matrix,
+                         BlockVector<float>         &solution,
+                         BlockVector<float>         &right_hand_side,
+                         const bool                  eliminate_columns);
+}
DEAL_II_NAMESPACE_CLOSE
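For reference, nothing changes at typical call sites of the functions moved here. A minimal usage sketch, where dof_handler, system_matrix, solution, and system_rhs are placeholders for objects set up by the surrounding program and dim is its space dimension:

    // collect the Dirichlet values on boundary indicator 0 ...
    std::map<unsigned int,double> boundary_values;
    VectorTools::interpolate_boundary_values (dof_handler,
                                              0,
                                              ZeroFunction<dim>(),
                                              boundary_values);
    // ... and eliminate them from the assembled linear system,
    // exactly as before this patch:
    MatrixTools::apply_boundary_values (boundary_values,
                                        system_matrix,
                                        solution,
                                        system_rhs);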