From 320c51ca56b1513d87720abff64153db68ab89a4 Mon Sep 17 00:00:00 2001 From: kronbichler Date: Sat, 22 Nov 2008 15:20:55 +0000 Subject: [PATCH] Inline the functions that add elements to Trilinos sparse matrices and vectors. These functions are called very frequently, and it is hence advantageous to let the compiler know the number of added elements, so it can optimize some loops away. git-svn-id: https://svn.dealii.org/trunk@17684 0785d39b-7218-0410-832d-ea1e28bc413d --- .../lac/trilinos_block_sparse_matrix.h | 260 +++++++++++++++++ .../lac/include/lac/trilinos_sparse_matrix.h | 276 +++++++++++++++++- .../lac/include/lac/trilinos_vector_base.h | 120 ++++++++ .../source/trilinos_block_sparse_matrix.cc | 252 ---------------- deal.II/lac/source/trilinos_sparse_matrix.cc | 265 ----------------- deal.II/lac/source/trilinos_vector_base.cc | 111 ------- 6 files changed, 655 insertions(+), 629 deletions(-) diff --git a/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h b/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h index 0032a433dc..2b3fb37901 100644 --- a/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h +++ b/deal.II/lac/include/lac/trilinos_block_sparse_matrix.h @@ -747,6 +747,266 @@ namespace TrilinosWrappers + inline + void + BlockSparseMatrix::set (const unsigned int i, + const unsigned int j, + const TrilinosScalar value) + { + BaseClass::set (i, j, value); + } + + + + inline + void + BlockSparseMatrix::set (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &values) + { + Assert (row_indices.size() == values.m(), + ExcDimensionMismatch(row_indices.size(), values.m())); + Assert (col_indices.size() == values.n(), + ExcDimensionMismatch(col_indices.size(), values.n())); + + set (row_indices.size(), &row_indices[0], + col_indices.size(), &col_indices[0], &values(0,0)); + } + + + + inline + void + BlockSparseMatrix::set (const unsigned int row, + const std::vector &col_indices, + const std::vector &values) + { + Assert (col_indices.size() == values.size(), + ExcDimensionMismatch(col_indices.size(), values.size())); + + set (1, &row, col_indices.size(), &col_indices[0], &values[0]); + } + + + + inline + void + BlockSparseMatrix::set (const unsigned int n_rows, + const unsigned int *row_indices, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values) + { + // Resize scratch arrays + block_col_indices.resize (this->n_block_cols()); + local_row_length.resize (this->n_block_cols()); + local_col_indices.resize (n_cols); + + // Clear the content in local_row_length + for (unsigned int i=0; in_block_cols(); ++i) + local_row_length[i] = 0; + + // Go through the column indices to find + // out which portions of the values + // should be written into which block + // matrix. This can be done before + // starting the loop over all the rows, + // since we assume a rectangular set of + // matrix data. 
+ { + unsigned int current_block = 0, row_length = 0; + block_col_indices[0] = 0; + for (unsigned int j=0; j + col_index = this->column_block_indices.global_to_local(col_indices[j]); + local_col_indices[j] = col_index.second; + if (col_index.first > current_block) + { + local_row_length[current_block] = row_length; + row_length = 0; + while (col_index.first > current_block) + current_block++; + block_col_indices[current_block] = j; + } + + Assert (col_index.first == current_block, + ExcInternalError()); + } + local_row_length[current_block] = row_length; + Assert (current_block < this->n_block_cols(), + ExcInternalError()); + +#ifdef DEBUG + // If in debug mode, do a check whether + // the right length has been obtained. + unsigned int length = 0; + for (unsigned int i=0; in_block_cols(); ++i) + length += local_row_length[i]; + Assert (length == n_cols, + ExcDimensionMismatch(length, n_cols)); +#endif + } + + // Now we found out about where the + // individual columns should start and + // where we should start reading out + // data. Now let's write the data into + // the individual blocks! + for (unsigned int i=0; i + row_index = this->row_block_indices.global_to_local (row_indices[i]); + for (unsigned int block_col=0; block_col &row_indices, + const std::vector &col_indices, + const FullMatrix &values) + { + Assert (row_indices.size() == values.m(), + ExcDimensionMismatch(row_indices.size(), values.m())); + Assert (col_indices.size() == values.n(), + ExcDimensionMismatch(col_indices.size(), values.n())); + + add (row_indices.size(), &row_indices[0], + col_indices.size(), &col_indices[0], &values(0,0)); + } + + + + inline + void + BlockSparseMatrix::add (const unsigned int row, + const std::vector &col_indices, + const std::vector &values) + { + Assert (col_indices.size() == values.size(), + ExcDimensionMismatch(col_indices.size(), values.size())); + + add (1, &row, col_indices.size(), &col_indices[0], &values[0]); + } + + + + inline + void + BlockSparseMatrix::add (const unsigned int n_rows, + const unsigned int *row_indices, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values) + { + // TODO: Look over this to find out + // whether we can do that more + // efficiently. + + // Resize scratch arrays + block_col_indices.resize (this->n_block_cols()); + local_row_length.resize (this->n_block_cols()); + local_col_indices.resize (n_cols); + + // Clear the content in local_row_length + for (unsigned int i=0; in_block_cols(); ++i) + local_row_length[i] = 0; + + // Go through the column indices to find + // out which portions of the values + // should be written into which block + // matrix. This can be done before + // starting the loop over all the rows, + // since we assume a rectangular set of + // matrix data. + { + unsigned int current_block = 0, row_length = 0; + block_col_indices[0] = 0; + for (unsigned int j=0; j + col_index = this->column_block_indices.global_to_local(col_indices[j]); + local_col_indices[j] = col_index.second; + if (col_index.first > current_block) + { + local_row_length[current_block] = row_length; + row_length = 0; + while (col_index.first > current_block) + current_block++; + block_col_indices[current_block] = j; + } + + Assert (col_index.first == current_block, + ExcInternalError()); + } + local_row_length[current_block] = row_length; + Assert (current_block < this->n_block_cols(), + ExcInternalError()); + +#ifdef DEBUG + // If in debug mode, do a check whether + // the right length has been obtained. 
+ unsigned int length = 0; + for (unsigned int i=0; in_block_cols(); ++i) + length += local_row_length[i]; + Assert (length == n_cols, + ExcDimensionMismatch(length, n_cols)); +#endif + } + + // Now we found out about where the + // individual columns should start and + // where we should start reading out + // data. Now let's write the data into + // the individual blocks! + for (unsigned int i=0; i + row_index = this->row_block_indices.global_to_local (row_indices[i]); + for (unsigned int block_col=0; block_col &row_indices, + const std::vector &col_indices, + const FullMatrix &values) + { + Assert (row_indices.size() == values.m(), + ExcDimensionMismatch(row_indices.size(), values.m())); + Assert (col_indices.size() == values.n(), + ExcDimensionMismatch(col_indices.size(), values.n())); + + set (row_indices.size(), &row_indices[0], + col_indices.size(), &col_indices[0], &values(0,0)); + } + + + + inline + void + SparseMatrix::set (const unsigned int row, + const std::vector &col_indices, + const std::vector &values) + { + Assert (col_indices.size() == values.size(), + ExcDimensionMismatch(col_indices.size(), values.size())); + + set (1, &row, col_indices.size(), &col_indices[0], &values[0]); + } + + + + inline + void + SparseMatrix::set (const unsigned int n_rows, + const unsigned int *row_indices, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values) + { + int ierr; + if (last_action == Add) + { + ierr = matrix->GlobalAssemble(col_map, row_map, false); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + + if (last_action != Insert) + last_action = Insert; + + // Now go through all rows that are + // present in the input data. + for (unsigned int i=0; iFilled() == false) + { + ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues( + (int)row_indices[i], (int)n_cols, + const_cast(&values[i*n_cols]), + (int*)&col_indices[0]); + + // When adding up elements, we do + // not want to create exceptions in + // the case when adding elements. + if (ierr > 0) + ierr = 0; + } + else + ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues( + (int)row_indices[i], (int)n_cols, + const_cast(&values[i*n_cols]), + (int*)&col_indices[0]); + } + else + { + // When we're at off-processor data, we + // have to stick with the standard + // SumIntoGlobalValues + // function. Nevertheless, the way we + // call it is the fastest one (any other + // will lead to repeated allocation and + // deallocation of memory in order to + // call the function we already use, + // which is very unefficient if writing + // one element at a time). 
+ compressed = false; + + const TrilinosScalar* value_ptr = &values[i*n_cols]; + + if (matrix->Filled() == false) + { + ierr = matrix->InsertGlobalValues (1, (int*)&i, + (int)n_cols, (int*)&col_indices[0], + &value_ptr, + Epetra_FECrsMatrix::ROW_MAJOR); + if (ierr > 0) + ierr = 0; + } + else + ierr = matrix->ReplaceGlobalValues (1, (int*)&i, + (int)n_cols, (int*)&col_indices[0], + &value_ptr, + Epetra_FECrsMatrix::ROW_MAJOR); + } + + Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i], + col_indices[0])); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + } + + + + inline + void + SparseMatrix::add (const unsigned int i, + const unsigned int j, + const TrilinosScalar value) + { + + Assert (numbers::is_finite(value), + ExcMessage("The given value is not finite but either " + "infinite or Not A Number (NaN)")); + + if (value == 0) + { + // we have to do above actions in any case + // to be consistent with the MPI + // communication model (see the comments + // in the documentation of + // TrilinosWrappers::Vector), but we can + // save some work if the addend is + // zero. However, these actions are done + // in case we pass on to the other + // function. + if (last_action == Insert) + { + int ierr; + ierr = matrix->GlobalAssemble(col_map, row_map, false); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + + if (last_action != Add) + last_action = Add; + + return; + } + else + add (1, &i, 1, &j, &value); + } + + + + inline + void + SparseMatrix::add (const std::vector &row_indices, + const std::vector &col_indices, + const FullMatrix &values) + { + Assert (row_indices.size() == values.m(), + ExcDimensionMismatch(row_indices.size(), values.m())); + Assert (col_indices.size() == values.n(), + ExcDimensionMismatch(col_indices.size(), values.n())); + + add (row_indices.size(), &row_indices[0], + col_indices.size(), &col_indices[0], &values(0,0)); + } + + + + inline + void + SparseMatrix::add (const unsigned int row, + const std::vector &col_indices, + const std::vector &values) + { + Assert (col_indices.size() == values.size(), + ExcDimensionMismatch(col_indices.size(), values.size())); + + add (1, &row, col_indices.size(), &col_indices[0], &values[0]); + } + + + + inline + void + SparseMatrix::add (const unsigned int n_rows, + const unsigned int *row_indices, + const unsigned int n_cols, + const unsigned int *col_indices, + const TrilinosScalar *values) + { + int ierr; + if (last_action == Insert) + { + ierr = matrix->GlobalAssemble(col_map, row_map, false); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + + if (last_action != Add) + last_action = Add; + + // Now go through all rows that are + // present in the input data. + for (unsigned int i=0; iEpetra_CrsMatrix::SumIntoGlobalValues( + (int)row_indices[i], (int)n_cols, + const_cast(&values[i*n_cols]), + (int*)&col_indices[0]); + } + else + { + // When we're at off-processor data, we + // have to stick with the standard + // SumIntoGlobalValues + // function. Nevertheless, the way we + // call it is the fastest one (any other + // will lead to repeated allocation and + // deallocation of memory in order to + // call the function we already use, + // which is very unefficient if writing + // one element at a time). 
+ compressed = false; + + const TrilinosScalar* value_ptr = &values[i*n_cols]; + + ierr = matrix->SumIntoGlobalValues (1, (int*)&i, + (int)n_cols, (int*)&col_indices[0], + &value_ptr, + Epetra_FECrsMatrix::ROW_MAJOR); + } + + Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i], + col_indices[0])); + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + } + + #endif // DOXYGEN } diff --git a/deal.II/lac/include/lac/trilinos_vector_base.h b/deal.II/lac/include/lac/trilinos_vector_base.h index 5a22e76ef1..d36cc7ff8d 100644 --- a/deal.II/lac/include/lac/trilinos_vector_base.h +++ b/deal.II/lac/include/lac/trilinos_vector_base.h @@ -1066,6 +1066,126 @@ namespace TrilinosWrappers } + + inline + void + VectorBase::set (const std::vector &indices, + const std::vector &values) + { + Assert (indices.size() == values.size(), + ExcDimensionMismatch(indices.size(),values.size())); + + set (indices.size(), &indices[0], &values[0]); + } + + + + inline + void + VectorBase::set (const std::vector &indices, + const ::dealii::Vector &values) + { + Assert (indices.size() == values.size(), + ExcDimensionMismatch(indices.size(),values.size())); + + set (indices.size(), &indices[0], values.begin()); + } + + + + inline + void + VectorBase::set (const unsigned int n_elements, + const unsigned int *indices, + const TrilinosScalar *values) + { + if (last_action == Add) + vector->GlobalAssemble(Add); + + if (last_action != Insert) + last_action = Insert; + + int ierr; + for (unsigned int i=0; iMap().LID(indices[i]); + if (local_row == -1) + { + ierr = vector->ReplaceGlobalValues (1, + (const int*)(&row), + &values[i]); + compressed = false; + } + else + ierr = vector->ReplaceMyValue (local_row, 0, values[i]); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + } + + + + inline + void + VectorBase::add (const std::vector &indices, + const std::vector &values) + { + Assert (indices.size() == values.size(), + ExcDimensionMismatch(indices.size(),values.size())); + + add (indices.size(), &indices[0], &values[0]); + } + + + + inline + void + VectorBase::add (const std::vector &indices, + const ::dealii::Vector &values) + { + Assert (indices.size() == values.size(), + ExcDimensionMismatch(indices.size(),values.size())); + + add (indices.size(), &indices[0], values.begin()); + } + + + + inline + void + VectorBase::add (const unsigned int n_elements, + const unsigned int *indices, + const TrilinosScalar *values) + { + if (last_action == Insert) + vector->GlobalAssemble(Insert); + + if (last_action != Add) + last_action = Add; + + int ierr; + for (unsigned int i=0; iMap().LID(indices[i]); + if (local_row == -1) + { + ierr = vector->SumIntoGlobalValues (1, + (const int*)(&row), + &values[i]); + compressed = false; + } + else + ierr = vector->SumIntoMyValue (local_row, 0, values[i]); + + AssertThrow (ierr == 0, ExcTrilinosError(ierr)); + } + } + + + + #endif // DOXYGEN } diff --git a/deal.II/lac/source/trilinos_block_sparse_matrix.cc b/deal.II/lac/source/trilinos_block_sparse_matrix.cc index 55169ba569..8fc6101880 100644 --- a/deal.II/lac/source/trilinos_block_sparse_matrix.cc +++ b/deal.II/lac/source/trilinos_block_sparse_matrix.cc @@ -262,258 +262,6 @@ namespace TrilinosWrappers - void - BlockSparseMatrix::set (const unsigned int i, - const unsigned int j, - const TrilinosScalar value) - { - BaseClass::set (i, j, value); - } - - - - void - BlockSparseMatrix::set (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &values) - { - Assert (row_indices.size() == values.m(), - 
ExcDimensionMismatch(row_indices.size(), values.m())); - Assert (col_indices.size() == values.n(), - ExcDimensionMismatch(col_indices.size(), values.n())); - - set (row_indices.size(), &row_indices[0], - col_indices.size(), &col_indices[0], &values(0,0)); - } - - - - void - BlockSparseMatrix::set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values) - { - Assert (col_indices.size() == values.size(), - ExcDimensionMismatch(col_indices.size(), values.size())); - - set (1, &row, col_indices.size(), &col_indices[0], &values[0]); - } - - - - void - BlockSparseMatrix::set (const unsigned int n_rows, - const unsigned int *row_indices, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values) - { - // Resize scratch arrays - block_col_indices.resize (this->n_block_cols()); - local_row_length.resize (this->n_block_cols()); - local_col_indices.resize (n_cols); - - // Clear the content in local_row_length - for (unsigned int i=0; in_block_cols(); ++i) - local_row_length[i] = 0; - - // Go through the column indices to find - // out which portions of the values - // should be written into which block - // matrix. This can be done before - // starting the loop over all the rows, - // since we assume a rectangular set of - // matrix data. - { - unsigned int current_block = 0, row_length = 0; - block_col_indices[0] = 0; - for (unsigned int j=0; j - col_index = this->column_block_indices.global_to_local(col_indices[j]); - local_col_indices[j] = col_index.second; - if (col_index.first > current_block) - { - local_row_length[current_block] = row_length; - row_length = 0; - while (col_index.first > current_block) - current_block++; - block_col_indices[current_block] = j; - } - - Assert (col_index.first == current_block, - ExcInternalError()); - } - local_row_length[current_block] = row_length; - Assert (current_block < this->n_block_cols(), - ExcInternalError()); - -#ifdef DEBUG - // If in debug mode, do a check whether - // the right length has been obtained. - unsigned int length = 0; - for (unsigned int i=0; in_block_cols(); ++i) - length += local_row_length[i]; - Assert (length == n_cols, - ExcDimensionMismatch(length, n_cols)); -#endif - } - - // Now we found out about where the - // individual columns should start and - // where we should start reading out - // data. Now let's write the data into - // the individual blocks! - for (unsigned int i=0; i - row_index = this->row_block_indices.global_to_local (row_indices[i]); - for (unsigned int block_col=0; block_col &row_indices, - const std::vector &col_indices, - const FullMatrix &values) - { - Assert (row_indices.size() == values.m(), - ExcDimensionMismatch(row_indices.size(), values.m())); - Assert (col_indices.size() == values.n(), - ExcDimensionMismatch(col_indices.size(), values.n())); - - add (row_indices.size(), &row_indices[0], - col_indices.size(), &col_indices[0], &values(0,0)); - } - - - - void - BlockSparseMatrix::add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values) - { - Assert (col_indices.size() == values.size(), - ExcDimensionMismatch(col_indices.size(), values.size())); - - add (1, &row, col_indices.size(), &col_indices[0], &values[0]); - } - - - - void - BlockSparseMatrix::add (const unsigned int n_rows, - const unsigned int *row_indices, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values) - { - // TODO: Look over this to find out - // whether we can do that more - // efficiently. 
- - // Resize scratch arrays - block_col_indices.resize (this->n_block_cols()); - local_row_length.resize (this->n_block_cols()); - local_col_indices.resize (n_cols); - - // Clear the content in local_row_length - for (unsigned int i=0; in_block_cols(); ++i) - local_row_length[i] = 0; - - // Go through the column indices to find - // out which portions of the values - // should be written into which block - // matrix. This can be done before - // starting the loop over all the rows, - // since we assume a rectangular set of - // matrix data. - { - unsigned int current_block = 0, row_length = 0; - block_col_indices[0] = 0; - for (unsigned int j=0; j - col_index = this->column_block_indices.global_to_local(col_indices[j]); - local_col_indices[j] = col_index.second; - if (col_index.first > current_block) - { - local_row_length[current_block] = row_length; - row_length = 0; - while (col_index.first > current_block) - current_block++; - block_col_indices[current_block] = j; - } - - Assert (col_index.first == current_block, - ExcInternalError()); - } - local_row_length[current_block] = row_length; - Assert (current_block < this->n_block_cols(), - ExcInternalError()); - -#ifdef DEBUG - // If in debug mode, do a check whether - // the right length has been obtained. - unsigned int length = 0; - for (unsigned int i=0; in_block_cols(); ++i) - length += local_row_length[i]; - Assert (length == n_cols, - ExcDimensionMismatch(length, n_cols)); -#endif - } - - // Now we found out about where the - // individual columns should start and - // where we should start reading out - // data. Now let's write the data into - // the individual blocks! - for (unsigned int i=0; i - row_index = this->row_block_indices.global_to_local (row_indices[i]); - for (unsigned int block_col=0; block_col &row_indices, - const std::vector &col_indices, - const FullMatrix &values) - { - Assert (row_indices.size() == values.m(), - ExcDimensionMismatch(row_indices.size(), values.m())); - Assert (col_indices.size() == values.n(), - ExcDimensionMismatch(col_indices.size(), values.n())); - - set (row_indices.size(), &row_indices[0], - col_indices.size(), &col_indices[0], &values(0,0)); - } - - - - void - SparseMatrix::set (const unsigned int row, - const std::vector &col_indices, - const std::vector &values) - { - Assert (col_indices.size() == values.size(), - ExcDimensionMismatch(col_indices.size(), values.size())); - - set (1, &row, col_indices.size(), &col_indices[0], &values[0]); - } - - - - void - SparseMatrix::set (const unsigned int n_rows, - const unsigned int *row_indices, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values) - { - int ierr; - if (last_action == Add) - { - ierr = matrix->GlobalAssemble(col_map, row_map, false); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - - if (last_action != Insert) - last_action = Insert; - - // Now go through all rows that are - // present in the input data. - for (unsigned int i=0; iFilled() == false) - { - ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues( - (int)row_indices[i], (int)n_cols, - const_cast(&values[i*n_cols]), - (int*)&col_indices[0]); - - // When adding up elements, we do - // not want to create exceptions in - // the case when adding elements. 
- if (ierr > 0) - ierr = 0; - } - else - ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues( - (int)row_indices[i], (int)n_cols, - const_cast(&values[i*n_cols]), - (int*)&col_indices[0]); - } - else - { - // When we're at off-processor data, we - // have to stick with the standard - // SumIntoGlobalValues - // function. Nevertheless, the way we - // call it is the fastest one (any other - // will lead to repeated allocation and - // deallocation of memory in order to - // call the function we already use, - // which is very unefficient if writing - // one element at a time). - compressed = false; - - const TrilinosScalar* value_ptr = &values[i*n_cols]; - - if (matrix->Filled() == false) - { - ierr = matrix->InsertGlobalValues (1, (int*)&i, - (int)n_cols, (int*)&col_indices[0], - &value_ptr, - Epetra_FECrsMatrix::ROW_MAJOR); - if (ierr > 0) - ierr = 0; - } - else - ierr = matrix->ReplaceGlobalValues (1, (int*)&i, - (int)n_cols, (int*)&col_indices[0], - &value_ptr, - Epetra_FECrsMatrix::ROW_MAJOR); - } - - Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i], - col_indices[0])); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - } - - - - void - SparseMatrix::add (const unsigned int i, - const unsigned int j, - const TrilinosScalar value) - { - - Assert (numbers::is_finite(value), - ExcMessage("The given value is not finite but either " - "infinite or Not A Number (NaN)")); - - if (value == 0) - { - // we have to do above actions in any case - // to be consistent with the MPI - // communication model (see the comments - // in the documentation of - // TrilinosWrappers::Vector), but we can - // save some work if the addend is - // zero. However, these actions are done - // in case we pass on to the other - // function. - if (last_action == Insert) - { - int ierr; - ierr = matrix->GlobalAssemble(col_map, row_map, false); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - - if (last_action != Add) - last_action = Add; - - return; - } - else - add (1, &i, 1, &j, &value); - } - - - - void - SparseMatrix::add (const std::vector &row_indices, - const std::vector &col_indices, - const FullMatrix &values) - { - Assert (row_indices.size() == values.m(), - ExcDimensionMismatch(row_indices.size(), values.m())); - Assert (col_indices.size() == values.n(), - ExcDimensionMismatch(col_indices.size(), values.n())); - - add (row_indices.size(), &row_indices[0], - col_indices.size(), &col_indices[0], &values(0,0)); - } - - - - void - SparseMatrix::add (const unsigned int row, - const std::vector &col_indices, - const std::vector &values) - { - Assert (col_indices.size() == values.size(), - ExcDimensionMismatch(col_indices.size(), values.size())); - - add (1, &row, col_indices.size(), &col_indices[0], &values[0]); - } - - - - void - SparseMatrix::add (const unsigned int n_rows, - const unsigned int *row_indices, - const unsigned int n_cols, - const unsigned int *col_indices, - const TrilinosScalar *values) - { - int ierr; - if (last_action == Insert) - { - ierr = matrix->GlobalAssemble(col_map, row_map, false); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - - if (last_action != Add) - last_action = Add; - - // Now go through all rows that are - // present in the input data. - for (unsigned int i=0; iEpetra_CrsMatrix::SumIntoGlobalValues( - (int)row_indices[i], (int)n_cols, - const_cast(&values[i*n_cols]), - (int*)&col_indices[0]); - } - else - { - // When we're at off-processor data, we - // have to stick with the standard - // SumIntoGlobalValues - // function. 
Nevertheless, the way we - // call it is the fastest one (any other - // will lead to repeated allocation and - // deallocation of memory in order to - // call the function we already use, - // which is very unefficient if writing - // one element at a time). - compressed = false; - - const TrilinosScalar* value_ptr = &values[i*n_cols]; - - ierr = matrix->SumIntoGlobalValues (1, (int*)&i, - (int)n_cols, (int*)&col_indices[0], - &value_ptr, - Epetra_FECrsMatrix::ROW_MAJOR); - } - - Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i], - col_indices[0])); - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - } - - - void SparseMatrix::clear_row (const unsigned int row, const TrilinosScalar new_diag_value) diff --git a/deal.II/lac/source/trilinos_vector_base.cc b/deal.II/lac/source/trilinos_vector_base.cc index 7b8c832739..3fa69f8bc6 100644 --- a/deal.II/lac/source/trilinos_vector_base.cc +++ b/deal.II/lac/source/trilinos_vector_base.cc @@ -318,117 +318,6 @@ namespace TrilinosWrappers } - void - VectorBase::set (const std::vector &indices, - const std::vector &values) - { - Assert (indices.size() == values.size(), - ExcDimensionMismatch(indices.size(),values.size())); - - set (indices.size(), &indices[0], &values[0]); - } - - - - void - VectorBase::set (const std::vector &indices, - const ::dealii::Vector &values) - { - Assert (indices.size() == values.size(), - ExcDimensionMismatch(indices.size(),values.size())); - - set (indices.size(), &indices[0], values.begin()); - } - - - - void - VectorBase::set (const unsigned int n_elements, - const unsigned int *indices, - const TrilinosScalar *values) - { - if (last_action == Add) - vector->GlobalAssemble(Add); - - if (last_action != Insert) - last_action = Insert; - - int ierr; - for (unsigned int i=0; iMap().LID(indices[i]); - if (local_row == -1) - { - ierr = vector->ReplaceGlobalValues (1, - (const int*)(&row), - &values[i]); - compressed = false; - } - else - ierr = vector->ReplaceMyValue (local_row, 0, values[i]); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - } - - - - void - VectorBase::add (const std::vector &indices, - const std::vector &values) - { - Assert (indices.size() == values.size(), - ExcDimensionMismatch(indices.size(),values.size())); - - add (indices.size(), &indices[0], &values[0]); - } - - - - void - VectorBase::add (const std::vector &indices, - const ::dealii::Vector &values) - { - Assert (indices.size() == values.size(), - ExcDimensionMismatch(indices.size(),values.size())); - - add (indices.size(), &indices[0], values.begin()); - } - - - - void - VectorBase::add (const unsigned int n_elements, - const unsigned int *indices, - const TrilinosScalar *values) - { - if (last_action == Insert) - vector->GlobalAssemble(Insert); - - if (last_action != Add) - last_action = Add; - - int ierr; - for (unsigned int i=0; iMap().LID(indices[i]); - if (local_row == -1) - { - ierr = vector->SumIntoGlobalValues (1, - (const int*)(&row), - &values[i]); - compressed = false; - } - else - ierr = vector->SumIntoMyValue (local_row, 0, values[i]); - - AssertThrow (ierr == 0, ExcTrilinosError(ierr)); - } - } - - TrilinosScalar VectorBase::operator * (const VectorBase &vec) const -- 2.39.5
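
For context, the set/add overloads inlined by this patch are typically called once per cell during finite element assembly. The following plain C++ sketch is not part of the patch; it only illustrates that calling pattern, assuming deal.II was configured with Trilinos support and that system_matrix and system_rhs were initialized elsewhere with a suitable sparsity pattern and Epetra map. The function name copy_local_to_global and the include paths are illustrative.

    // Sketch of the assembly-time calling pattern these inlined overloads
    // serve. Assumes a Trilinos-enabled deal.II build; matrix and vector
    // initialization is not shown here.
    #include <lac/full_matrix.h>
    #include <lac/trilinos_sparse_matrix.h>
    #include <lac/trilinos_vector.h>

    #include <vector>

    void copy_local_to_global (const dealii::FullMatrix<double>       &cell_matrix,
                               const std::vector<double>              &cell_rhs,
                               const std::vector<unsigned int>        &local_dof_indices,
                               dealii::TrilinosWrappers::SparseMatrix &system_matrix,
                               dealii::TrilinosWrappers::Vector       &system_rhs)
    {
      // One call per cell writes the whole dofs_per_cell x dofs_per_cell block.
      // Since add() is now defined inline in the header, the compiler sees the
      // number of rows and columns at the call site and can optimize the loops
      // over the local data.
      system_matrix.add (local_dof_indices,   // global row indices of this cell
                         local_dof_indices,   // global column indices of this cell
                         cell_matrix);        // local values, row-major as in FullMatrix

      // The same applies to the vector overload added in trilinos_vector_base.h.
      system_rhs.add (local_dof_indices, cell_rhs);

      // After the loop over all cells, off-processor contributions still have
      // to be exchanged, e.g. via system_matrix.compress() and
      // system_rhs.compress().
    }

Moving these definitions from the .cc files into the headers is what makes such inlining across translation units possible; that is where the commit message's point about optimizing loops over a known number of added elements takes effect.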