+ inline
+ void
+ BlockSparseMatrix::set (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value)
+ {
+ BaseClass::set (i, j, value);
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &values)
+ {
+ Assert (row_indices.size() == values.m(),
+ ExcDimensionMismatch(row_indices.size(), values.m()));
+ Assert (col_indices.size() == values.n(),
+ ExcDimensionMismatch(col_indices.size(), values.n()));
+
+ set (row_indices.size(), &row_indices[0],
+ col_indices.size(), &col_indices[0], &values(0,0));
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (col_indices.size() == values.size(),
+ ExcDimensionMismatch(col_indices.size(), values.size()));
+
+ set (1, &row, col_indices.size(), &col_indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::set (const unsigned int n_rows,
+ const unsigned int *row_indices,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values)
+ {
+ // Resize scratch arrays
+ block_col_indices.resize (this->n_block_cols());
+ local_row_length.resize (this->n_block_cols());
+ local_col_indices.resize (n_cols);
+
+ // Clear the content in local_row_length
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ local_row_length[i] = 0;
+
+ // Go through the column indices to find
+ // out which portions of the values
+ // should be written into which block
+ // matrix. This can be done before
+ // starting the loop over all the rows,
+ // since we assume a rectangular set of
+ // matrix data.
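+ // As a concrete (hypothetical)
+ // example: for two column blocks of
+ // size 3 each and col_indices =
+ // {1, 2, 4, 5}, the loop below yields
+ // local_col_indices = {1, 2, 1, 2},
+ // block_col_indices = {0, 2} and
+ // local_row_length = {2, 2}.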
+ {
+ unsigned int current_block = 0, row_length = 0;
+ block_col_indices[0] = 0;
+ for (unsigned int j=0; j<n_cols; ++j, ++row_length)
+ {
+ const std::pair<unsigned int, unsigned int>
+ col_index = this->column_block_indices.global_to_local(col_indices[j]);
+ local_col_indices[j] = col_index.second;
+ if (col_index.first > current_block)
+ {
+ local_row_length[current_block] = row_length;
+ row_length = 0;
+ while (col_index.first > current_block)
+ current_block++;
+ block_col_indices[current_block] = j;
+ }
+
+ Assert (col_index.first == current_block,
+ ExcInternalError());
+ }
+ local_row_length[current_block] = row_length;
+ Assert (current_block < this->n_block_cols(),
+ ExcInternalError());
+
+#ifdef DEBUG
+ // In debug mode, check that the
+ // lengths of the individual blocks
+ // add up to the total number of
+ // columns.
+ unsigned int length = 0;
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ length += local_row_length[i];
+ Assert (length == n_cols,
+ ExcDimensionMismatch(length, n_cols));
+#endif
+ }
+
+ // Now that we know where the
+ // individual column blocks start and
+ // where to read the data from, we can
+ // write the data into the individual
+ // blocks.
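+ // Note that the values array is
+ // assumed to be stored row by row
+ // with n_cols entries per row, so the
+ // data for block column block_col of
+ // row i starts at
+ // values[n_cols*i + block_col_indices[block_col]].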
+ for (unsigned int i=0; i<n_rows; ++i)
+ {
+ const std::pair<unsigned int,unsigned int>
+ row_index = this->row_block_indices.global_to_local (row_indices[i]);
+ for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ {
+ if (local_row_length[block_col] == 0)
+ continue;
+
+ block(row_index.first, block_col).set
+ (1, &row_index.second,
+ local_row_length[block_col],
+ &local_col_indices[block_col_indices[block_col]],
+ &values[n_cols*i + block_col_indices[block_col]]);
+ }
+ }
+ }
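+
+ // A usage sketch (hypothetical
+ // sizes): for a 2x2 block matrix
+ // whose blocks are 3x3,
+ //
+ //   const unsigned int rows[1] = {4};
+ //   const unsigned int cols[2] = {1, 4};
+ //   const TrilinosScalar vals[2] = {1., 2.};
+ //   matrix.set (1, rows, 2, cols, vals);
+ //
+ // writes 1. into block(1,0) at local
+ // position (1,1) and 2. into
+ // block(1,1) at local position (1,1).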
+
+
+
+ inline
+ void
+ BlockSparseMatrix::add (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value)
+ {
+ // For adding a single element, it is
+ // faster to rely on the BaseClass add
+ // function than to go through the
+ // block decomposition performed in
+ // the overload below.
+ BaseClass::add (i, j, value);
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &values)
+ {
+ Assert (row_indices.size() == values.m(),
+ ExcDimensionMismatch(row_indices.size(), values.m()));
+ Assert (col_indices.size() == values.n(),
+ ExcDimensionMismatch(col_indices.size(), values.n()));
+
+ add (row_indices.size(), &row_indices[0],
+ col_indices.size(), &col_indices[0], &values(0,0));
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (col_indices.size() == values.size(),
+ ExcDimensionMismatch(col_indices.size(), values.size()));
+
+ add (1, &row, col_indices.size(), &col_indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ BlockSparseMatrix::add (const unsigned int n_rows,
+ const unsigned int *row_indices,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values)
+ {
+ // TODO: Look over this to find out
+ // whether we can do that more
+ // efficiently.
+
+ // Resize scratch arrays
+ block_col_indices.resize (this->n_block_cols());
+ local_row_length.resize (this->n_block_cols());
+ local_col_indices.resize (n_cols);
+
+ // Clear the content in local_row_length
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ local_row_length[i] = 0;
+
+ // Go through the column indices to find
+ // out which portions of the values
+ // should be written into which block
+ // matrix. This can be done before
+ // starting the loop over all the rows,
+ // since we assume a rectangular set of
+ // matrix data.
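+ // (The decomposition below is
+ // identical to the one in set()
+ // above; see the worked example
+ // there.)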
+ {
+ unsigned int current_block = 0, row_length = 0;
+ block_col_indices[0] = 0;
+ for (unsigned int j=0; j<n_cols; ++j, ++row_length)
+ {
+ const std::pair<unsigned int, unsigned int>
+ col_index = this->column_block_indices.global_to_local(col_indices[j]);
+ local_col_indices[j] = col_index.second;
+ if (col_index.first > current_block)
+ {
+ local_row_length[current_block] = row_length;
+ row_length = 0;
+ while (col_index.first > current_block)
+ current_block++;
+ block_col_indices[current_block] = j;
+ }
+
+ Assert (col_index.first == current_block,
+ ExcInternalError());
+ }
+ local_row_length[current_block] = row_length;
+ Assert (current_block < this->n_block_cols(),
+ ExcInternalError());
+
+#ifdef DEBUG
+ // In debug mode, check that the
+ // lengths of the individual blocks
+ // add up to the total number of
+ // columns.
+ unsigned int length = 0;
+ for (unsigned int i=0; i<this->n_block_cols(); ++i)
+ length += local_row_length[i];
+ Assert (length == n_cols,
+ ExcDimensionMismatch(length, n_cols));
+#endif
+ }
+
+ // Now that we know where the
+ // individual column blocks start and
+ // where to read the data from, we can
+ // write the data into the individual
+ // blocks.
+ for (unsigned int i=0; i<n_rows; ++i)
+ {
+ const std::pair<unsigned int,unsigned int>
+ row_index = this->row_block_indices.global_to_local (row_indices[i]);
+ for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+ {
+ if (local_row_length[block_col] == 0)
+ continue;
+
+ block(row_index.first, block_col).add
+ (1, &row_index.second,
+ local_row_length[block_col],
+ &local_col_indices[block_col_indices[block_col]],
+ &values[n_cols*i + block_col_indices[block_col]]);
+ }
+ }
+ }
+
+
+
inline
void
BlockSparseMatrix::vmult (MPI::BlockVector &dst,
}
-
+
inline
bool
SparseMatrix::in_local_range (const unsigned int index) const
return compressed;
}
+
+
+ inline
+ void
+ SparseMatrix::set (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value)
+ {
+
+ Assert (numbers::is_finite(value),
+ ExcMessage("The given value is not finite but either "
+ "infinite or Not A Number (NaN)"));
+
+ set (1, &i, 1, &j, &value);
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::set (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &values)
+ {
+ Assert (row_indices.size() == values.m(),
+ ExcDimensionMismatch(row_indices.size(), values.m()));
+ Assert (col_indices.size() == values.n(),
+ ExcDimensionMismatch(col_indices.size(), values.n()));
+
+ set (row_indices.size(), &row_indices[0],
+ col_indices.size(), &col_indices[0], &values(0,0));
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::set (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (col_indices.size() == values.size(),
+ ExcDimensionMismatch(col_indices.size(), values.size()));
+
+ set (1, &row, col_indices.size(), &col_indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::set (const unsigned int n_rows,
+ const unsigned int *row_indices,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values)
+ {
+ int ierr;
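+
+ // Epetra_FECrsMatrix must not mix
+ // insertion and addition phases:
+ // before switching from adding to
+ // inserting values, flush all pending
+ // (possibly off-processor) additions.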
+ if (last_action == Add)
+ {
+ ierr = matrix->GlobalAssemble(col_map, row_map, false);
+
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+
+ if (last_action != Insert)
+ last_action = Insert;
+
+ // Now go through all rows that are
+ // present in the input data.
+ for (unsigned int i=0; i<n_rows; ++i)
+ {
+ // If the calling process owns the row
+ // into which we want to write values,
+ // we can directly call the
+ // Epetra_CrsMatrix input function,
+ // which is much faster than the
+ // Epetra_FECrsMatrix function.
+ if (row_map.MyGID((int)row_indices[i]) == true)
+ {
+ if (matrix->Filled() == false)
+ {
+ ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
+ (int)row_indices[i], (int)n_cols,
+ const_cast<TrilinosScalar*>(&values[i*n_cols]),
+ (int*)&col_indices[0]);
+
+ // A positive return value from
+ // InsertGlobalValues is only a
+ // warning from Epetra (e.g. when new
+ // memory had to be allocated), so do
+ // not treat it as an error.
+ if (ierr > 0)
+ ierr = 0;
+ }
+ else
+ ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(
+ (int)row_indices[i], (int)n_cols,
+ const_cast<TrilinosScalar*>(&values[i*n_cols]),
+ (int*)&col_indices[0]);
+ }
+ else
+ {
+ // For off-processor data, we have to
+ // stick with the standard
+ // Insert/ReplaceGlobalValues
+ // functions of Epetra_FECrsMatrix.
+ // Nevertheless, the way we call them
+ // is the fastest one (any other will
+ // lead to repeated allocation and
+ // deallocation of memory in order to
+ // call the function we already use,
+ // which is very inefficient if
+ // writing one element at a time).
+ compressed = false;
+
+ const TrilinosScalar* value_ptr = &values[i*n_cols];
+
+ if (matrix->Filled() == false)
+ {
+ ierr = matrix->InsertGlobalValues (1, (int*)&row_indices[i],
+ (int)n_cols, (int*)&col_indices[0],
+ &value_ptr,
+ Epetra_FECrsMatrix::ROW_MAJOR);
+ if (ierr > 0)
+ ierr = 0;
+ }
+ else
+ ierr = matrix->ReplaceGlobalValues (1, (int*)&row_indices[i],
+ (int)n_cols, (int*)&col_indices[0],
+ &value_ptr,
+ Epetra_FECrsMatrix::ROW_MAJOR);
+ }
+
+ Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i],
+ col_indices[0]));
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+ }
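+
+ // A usage sketch (assuming all index
+ // pairs are present in the sparsity
+ // pattern):
+ //
+ //   const unsigned int rows[2] = {0, 1};
+ //   const unsigned int cols[2] = {0, 2};
+ //   const TrilinosScalar vals[4] = {1., 2., 3., 4.}; // row-major
+ //   matrix.set (2, rows, 2, cols, vals);
+ //
+ // sets the entries (0,0)=1, (0,2)=2,
+ // (1,0)=3 and (1,2)=4.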
+
+
+
+ inline
+ void
+ SparseMatrix::add (const unsigned int i,
+ const unsigned int j,
+ const TrilinosScalar value)
+ {
+
+ Assert (numbers::is_finite(value),
+ ExcMessage("The given value is not finite but either "
+ "infinite or Not A Number (NaN)"));
+
+ if (value == 0)
+ {
+ // we have to flush the state and
+ // switch last_action in any case to
+ // be consistent with the MPI
+ // communication model (see the
+ // comments in the documentation of
+ // TrilinosWrappers::Vector), but we
+ // can save the actual addition if the
+ // addend is zero. If it is not, the
+ // function we forward to below
+ // performs these actions itself.
+ if (last_action == Insert)
+ {
+ int ierr;
+ ierr = matrix->GlobalAssemble(col_map, row_map, false);
+
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+
+ if (last_action != Add)
+ last_action = Add;
+
+ return;
+ }
+ else
+ add (1, &i, 1, &j, &value);
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::add (const std::vector<unsigned int> &row_indices,
+ const std::vector<unsigned int> &col_indices,
+ const FullMatrix<TrilinosScalar> &values)
+ {
+ Assert (row_indices.size() == values.m(),
+ ExcDimensionMismatch(row_indices.size(), values.m()));
+ Assert (col_indices.size() == values.n(),
+ ExcDimensionMismatch(col_indices.size(), values.n()));
+
+ add (row_indices.size(), &row_indices[0],
+ col_indices.size(), &col_indices[0], &values(0,0));
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::add (const unsigned int row,
+ const std::vector<unsigned int> &col_indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (col_indices.size() == values.size(),
+ ExcDimensionMismatch(col_indices.size(), values.size()));
+
+ add (1, &row, col_indices.size(), &col_indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ SparseMatrix::add (const unsigned int n_rows,
+ const unsigned int *row_indices,
+ const unsigned int n_cols,
+ const unsigned int *col_indices,
+ const TrilinosScalar *values)
+ {
+ int ierr;
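+
+ // Conversely to set() above: before
+ // switching from inserting to adding
+ // values, flush all pending
+ // insertions.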
+ if (last_action == Insert)
+ {
+ ierr = matrix->GlobalAssemble(col_map, row_map, false);
+
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+
+ if (last_action != Add)
+ last_action = Add;
+
+ // Now go through all rows that are
+ // present in the input data.
+ for (unsigned int i=0; i<n_rows; ++i)
+ {
+ // If the calling process owns the row
+ // to which we want to add values, we
+ // can directly call the
+ // Epetra_CrsMatrix input function,
+ // which is much faster than the
+ // Epetra_FECrsMatrix function.
+ if (row_map.MyGID((int)row_indices[i]) == true)
+ {
+ ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(
+ (int)row_indices[i], (int)n_cols,
+ const_cast<TrilinosScalar*>(&values[i*n_cols]),
+ (int*)&col_indices[0]);
+ }
+ else
+ {
+ // For off-processor data, we have to
+ // stick with the standard
+ // SumIntoGlobalValues function.
+ // Nevertheless, the way we call it is
+ // the fastest one (any other will
+ // lead to repeated allocation and
+ // deallocation of memory in order to
+ // call the function we already use,
+ // which is very inefficient if
+ // writing one element at a time).
+ compressed = false;
+
+ const TrilinosScalar* value_ptr = &values[i*n_cols];
+
+ ierr = matrix->SumIntoGlobalValues (1, (int*)&row_indices[i],
+ (int)n_cols, (int*)&col_indices[0],
+ &value_ptr,
+ Epetra_FECrsMatrix::ROW_MAJOR);
+ }
+
+ Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i],
+ col_indices[0]));
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+ }
+
+
#endif // DOXYGEN
}
}
+
+ inline
+ void
+ VectorBase::set (const std::vector<unsigned int> &indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (indices.size() == values.size(),
+ ExcDimensionMismatch(indices.size(),values.size()));
+
+ set (indices.size(), &indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ VectorBase::set (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values)
+ {
+ Assert (indices.size() == values.size(),
+ ExcDimensionMismatch(indices.size(),values.size()));
+
+ set (indices.size(), &indices[0], values.begin());
+ }
+
+
+
+ inline
+ void
+ VectorBase::set (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values)
+ {
+ if (last_action == Add)
+ vector->GlobalAssemble(Add);
+
+ if (last_action != Insert)
+ last_action = Insert;
+
+ int ierr;
+ for (unsigned int i=0; i<n_elements; ++i)
+ {
+ const unsigned int row = indices[i];
+ const int local_row = vector->Map().LID(indices[i]);
+ if (local_row == -1)
+ {
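+ // LID() has returned -1, i.e. the
+ // index is not owned by the local
+ // processor; write through the global
+ // interface and remember that the
+ // vector has to be compressed before
+ // it can be read again.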
+ ierr = vector->ReplaceGlobalValues (1,
+ (const int*)(&row),
+ &values[i]);
+ compressed = false;
+ }
+ else
+ ierr = vector->ReplaceMyValue (local_row, 0, values[i]);
+
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+ }
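+
+ // A usage sketch: overwrite two
+ // (possibly off-processor) entries
+ // and then exchange the data between
+ // processors:
+ //
+ //   const unsigned int idx[2] = {0, 5};
+ //   const TrilinosScalar vals[2] = {1., 2.};
+ //   v.set (2, idx, vals);
+ //   v.compress ();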
+
+
+
+ inline
+ void
+ VectorBase::add (const std::vector<unsigned int> &indices,
+ const std::vector<TrilinosScalar> &values)
+ {
+ Assert (indices.size() == values.size(),
+ ExcDimensionMismatch(indices.size(),values.size()));
+
+ add (indices.size(), &indices[0], &values[0]);
+ }
+
+
+
+ inline
+ void
+ VectorBase::add (const std::vector<unsigned int> &indices,
+ const ::dealii::Vector<TrilinosScalar> &values)
+ {
+ Assert (indices.size() == values.size(),
+ ExcDimensionMismatch(indices.size(),values.size()));
+
+ add (indices.size(), &indices[0], values.begin());
+ }
+
+
+
+ inline
+ void
+ VectorBase::add (const unsigned int n_elements,
+ const unsigned int *indices,
+ const TrilinosScalar *values)
+ {
+ if (last_action == Insert)
+ vector->GlobalAssemble(Insert);
+
+ if (last_action != Add)
+ last_action = Add;
+
+ int ierr;
+ for (unsigned int i=0; i<n_elements; ++i)
+ {
+ const unsigned int row = indices[i];
+ const int local_row = vector->Map().LID(indices[i]);
+ if (local_row == -1)
+ {
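+ // As in set() above: the index is
+ // off-processor, so go through the
+ // global interface and mark the
+ // vector as uncompressed.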
+ ierr = vector->SumIntoGlobalValues (1,
+ (const int*)(&row),
+ &values[i]);
+ compressed = false;
+ }
+ else
+ ierr = vector->SumIntoMyValue (local_row, 0, values[i]);
+
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
+ }
+
+
+
+
#endif // DOXYGEN
}
- void
- BlockSparseMatrix::set (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value)
- {
- BaseClass::set (i, j, value);
- }
-
-
-
- void
- BlockSparseMatrix::set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &values)
- {
- Assert (row_indices.size() == values.m(),
- ExcDimensionMismatch(row_indices.size(), values.m()));
- Assert (col_indices.size() == values.n(),
- ExcDimensionMismatch(col_indices.size(), values.n()));
-
- set (row_indices.size(), &row_indices[0],
- col_indices.size(), &col_indices[0], &values(0,0));
- }
-
-
-
- void
- BlockSparseMatrix::set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (col_indices.size() == values.size(),
- ExcDimensionMismatch(col_indices.size(), values.size()));
-
- set (1, &row, col_indices.size(), &col_indices[0], &values[0]);
- }
-
-
-
- void
- BlockSparseMatrix::set (const unsigned int n_rows,
- const unsigned int *row_indices,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values)
- {
- // Resize scratch arrays
- block_col_indices.resize (this->n_block_cols());
- local_row_length.resize (this->n_block_cols());
- local_col_indices.resize (n_cols);
-
- // Clear the content in local_row_length
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- local_row_length[i] = 0;
-
- // Go through the column indices to find
- // out which portions of the values
- // should be written into which block
- // matrix. This can be done before
- // starting the loop over all the rows,
- // since we assume a rectangular set of
- // matrix data.
- {
- unsigned int current_block = 0, row_length = 0;
- block_col_indices[0] = 0;
- for (unsigned int j=0; j<n_cols; ++j, ++row_length)
- {
- const std::pair<unsigned int, unsigned int>
- col_index = this->column_block_indices.global_to_local(col_indices[j]);
- local_col_indices[j] = col_index.second;
- if (col_index.first > current_block)
- {
- local_row_length[current_block] = row_length;
- row_length = 0;
- while (col_index.first > current_block)
- current_block++;
- block_col_indices[current_block] = j;
- }
-
- Assert (col_index.first == current_block,
- ExcInternalError());
- }
- local_row_length[current_block] = row_length;
- Assert (current_block < this->n_block_cols(),
- ExcInternalError());
-
-#ifdef DEBUG
- // If in debug mode, do a check whether
- // the right length has been obtained.
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += local_row_length[i];
- Assert (length == n_cols,
- ExcDimensionMismatch(length, n_cols));
-#endif
- }
-
- // Now we found out about where the
- // individual columns should start and
- // where we should start reading out
- // data. Now let's write the data into
- // the individual blocks!
- for (unsigned int i=0; i<n_rows; ++i)
- {
- const std::pair<unsigned int,unsigned int>
- row_index = this->row_block_indices.global_to_local (row_indices[i]);
- for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
- {
- if (local_row_length[block_col] == 0)
- continue;
-
- block(row_index.first, block_col).set
- (1, &row_index.second,
- local_row_length[block_col],
- &local_col_indices[block_col_indices[block_col]],
- &values[n_cols*i + block_col_indices[block_col]]);
- }
- }
- }
-
-
-
- void
- BlockSparseMatrix::add (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value)
- {
- // For adding one single element, it is
- // faster to rely on the BaseClass add
- // function, than doing all the strange
- // operations below.
- BaseClass::add (i, j, value);
- }
-
-
-
- void
- BlockSparseMatrix::add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &values)
- {
- Assert (row_indices.size() == values.m(),
- ExcDimensionMismatch(row_indices.size(), values.m()));
- Assert (col_indices.size() == values.n(),
- ExcDimensionMismatch(col_indices.size(), values.n()));
-
- add (row_indices.size(), &row_indices[0],
- col_indices.size(), &col_indices[0], &values(0,0));
- }
-
-
-
- void
- BlockSparseMatrix::add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (col_indices.size() == values.size(),
- ExcDimensionMismatch(col_indices.size(), values.size()));
-
- add (1, &row, col_indices.size(), &col_indices[0], &values[0]);
- }
-
-
-
- void
- BlockSparseMatrix::add (const unsigned int n_rows,
- const unsigned int *row_indices,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values)
- {
- // TODO: Look over this to find out
- // whether we can do that more
- // efficiently.
-
- // Resize scratch arrays
- block_col_indices.resize (this->n_block_cols());
- local_row_length.resize (this->n_block_cols());
- local_col_indices.resize (n_cols);
-
- // Clear the content in local_row_length
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- local_row_length[i] = 0;
-
- // Go through the column indices to find
- // out which portions of the values
- // should be written into which block
- // matrix. This can be done before
- // starting the loop over all the rows,
- // since we assume a rectangular set of
- // matrix data.
- {
- unsigned int current_block = 0, row_length = 0;
- block_col_indices[0] = 0;
- for (unsigned int j=0; j<n_cols; ++j, ++row_length)
- {
- const std::pair<unsigned int, unsigned int>
- col_index = this->column_block_indices.global_to_local(col_indices[j]);
- local_col_indices[j] = col_index.second;
- if (col_index.first > current_block)
- {
- local_row_length[current_block] = row_length;
- row_length = 0;
- while (col_index.first > current_block)
- current_block++;
- block_col_indices[current_block] = j;
- }
-
- Assert (col_index.first == current_block,
- ExcInternalError());
- }
- local_row_length[current_block] = row_length;
- Assert (current_block < this->n_block_cols(),
- ExcInternalError());
-
-#ifdef DEBUG
- // If in debug mode, do a check whether
- // the right length has been obtained.
- unsigned int length = 0;
- for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += local_row_length[i];
- Assert (length == n_cols,
- ExcDimensionMismatch(length, n_cols));
-#endif
- }
-
- // Now we found out about where the
- // individual columns should start and
- // where we should start reading out
- // data. Now let's write the data into
- // the individual blocks!
- for (unsigned int i=0; i<n_rows; ++i)
- {
- const std::pair<unsigned int,unsigned int>
- row_index = this->row_block_indices.global_to_local (row_indices[i]);
- for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
- {
- if (local_row_length[block_col] == 0)
- continue;
-
- block(row_index.first, block_col).add
- (1, &row_index.second,
- local_row_length[block_col],
- &local_col_indices[block_col_indices[block_col]],
- &values[n_cols*i + block_col_indices[block_col]]);
- }
- }
- }
-
-
-
TrilinosScalar
BlockSparseMatrix::residual (MPI::BlockVector &dst,
const MPI::BlockVector &x,
- void
- SparseMatrix::set (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value)
- {
-
- Assert (numbers::is_finite(value),
- ExcMessage("The given value is not finite but either "
- "infinite or Not A Number (NaN)"));
-
- set (1, &i, 1, &j, &value);
- }
-
-
-
- void
- SparseMatrix::set (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &values)
- {
- Assert (row_indices.size() == values.m(),
- ExcDimensionMismatch(row_indices.size(), values.m()));
- Assert (col_indices.size() == values.n(),
- ExcDimensionMismatch(col_indices.size(), values.n()));
-
- set (row_indices.size(), &row_indices[0],
- col_indices.size(), &col_indices[0], &values(0,0));
- }
-
-
-
- void
- SparseMatrix::set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (col_indices.size() == values.size(),
- ExcDimensionMismatch(col_indices.size(), values.size()));
-
- set (1, &row, col_indices.size(), &col_indices[0], &values[0]);
- }
-
-
-
- void
- SparseMatrix::set (const unsigned int n_rows,
- const unsigned int *row_indices,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values)
- {
- int ierr;
- if (last_action == Add)
- {
- ierr = matrix->GlobalAssemble(col_map, row_map, false);
-
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
-
- if (last_action != Insert)
- last_action = Insert;
-
- // Now go through all rows that are
- // present in the input data.
- for (unsigned int i=0; i<n_rows; ++i)
- {
- // If the calling matrix owns the row to
- // which we want to add values, we
- // can directly call the Epetra_CrsMatrix
- // input function, which is much faster
- // than the Epetra_FECrsMatrix function.
- if (row_map.MyGID(i) == true)
- {
- if (matrix->Filled() == false)
- {
- ierr = matrix->Epetra_CrsMatrix::InsertGlobalValues(
- (int)row_indices[i], (int)n_cols,
- const_cast<TrilinosScalar*>(&values[i*n_cols]),
- (int*)&col_indices[0]);
-
- // When adding up elements, we do
- // not want to create exceptions in
- // the case when adding elements.
- if (ierr > 0)
- ierr = 0;
- }
- else
- ierr = matrix->Epetra_CrsMatrix::ReplaceGlobalValues(
- (int)row_indices[i], (int)n_cols,
- const_cast<TrilinosScalar*>(&values[i*n_cols]),
- (int*)&col_indices[0]);
- }
- else
- {
- // When we're at off-processor data, we
- // have to stick with the standard
- // SumIntoGlobalValues
- // function. Nevertheless, the way we
- // call it is the fastest one (any other
- // will lead to repeated allocation and
- // deallocation of memory in order to
- // call the function we already use,
- // which is very unefficient if writing
- // one element at a time).
- compressed = false;
-
- const TrilinosScalar* value_ptr = &values[i*n_cols];
-
- if (matrix->Filled() == false)
- {
- ierr = matrix->InsertGlobalValues (1, (int*)&i,
- (int)n_cols, (int*)&col_indices[0],
- &value_ptr,
- Epetra_FECrsMatrix::ROW_MAJOR);
- if (ierr > 0)
- ierr = 0;
- }
- else
- ierr = matrix->ReplaceGlobalValues (1, (int*)&i,
- (int)n_cols, (int*)&col_indices[0],
- &value_ptr,
- Epetra_FECrsMatrix::ROW_MAJOR);
- }
-
- Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i],
- col_indices[0]));
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
- }
-
-
-
- void
- SparseMatrix::add (const unsigned int i,
- const unsigned int j,
- const TrilinosScalar value)
- {
-
- Assert (numbers::is_finite(value),
- ExcMessage("The given value is not finite but either "
- "infinite or Not A Number (NaN)"));
-
- if (value == 0)
- {
- // we have to do above actions in any case
- // to be consistent with the MPI
- // communication model (see the comments
- // in the documentation of
- // TrilinosWrappers::Vector), but we can
- // save some work if the addend is
- // zero. However, these actions are done
- // in case we pass on to the other
- // function.
- if (last_action == Insert)
- {
- int ierr;
- ierr = matrix->GlobalAssemble(col_map, row_map, false);
-
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
-
- if (last_action != Add)
- last_action = Add;
-
- return;
- }
- else
- add (1, &i, 1, &j, &value);
- }
-
-
-
- void
- SparseMatrix::add (const std::vector<unsigned int> &row_indices,
- const std::vector<unsigned int> &col_indices,
- const FullMatrix<TrilinosScalar> &values)
- {
- Assert (row_indices.size() == values.m(),
- ExcDimensionMismatch(row_indices.size(), values.m()));
- Assert (col_indices.size() == values.n(),
- ExcDimensionMismatch(col_indices.size(), values.n()));
-
- add (row_indices.size(), &row_indices[0],
- col_indices.size(), &col_indices[0], &values(0,0));
- }
-
-
-
- void
- SparseMatrix::add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (col_indices.size() == values.size(),
- ExcDimensionMismatch(col_indices.size(), values.size()));
-
- add (1, &row, col_indices.size(), &col_indices[0], &values[0]);
- }
-
-
-
- void
- SparseMatrix::add (const unsigned int n_rows,
- const unsigned int *row_indices,
- const unsigned int n_cols,
- const unsigned int *col_indices,
- const TrilinosScalar *values)
- {
- int ierr;
- if (last_action == Insert)
- {
- ierr = matrix->GlobalAssemble(col_map, row_map, false);
-
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
-
- if (last_action != Add)
- last_action = Add;
-
- // Now go through all rows that are
- // present in the input data.
- for (unsigned int i=0; i<n_rows; ++i)
- {
- // If the calling matrix owns the row to
- // which we want to add values, we
- // can directly call the Epetra_CrsMatrix
- // input function, which is much faster
- // than the Epetra_FECrsMatrix function.
- if (row_map.MyGID(i) == true)
- {
- ierr = matrix->Epetra_CrsMatrix::SumIntoGlobalValues(
- (int)row_indices[i], (int)n_cols,
- const_cast<TrilinosScalar*>(&values[i*n_cols]),
- (int*)&col_indices[0]);
- }
- else
- {
- // When we're at off-processor data, we
- // have to stick with the standard
- // SumIntoGlobalValues
- // function. Nevertheless, the way we
- // call it is the fastest one (any other
- // will lead to repeated allocation and
- // deallocation of memory in order to
- // call the function we already use,
- // which is very unefficient if writing
- // one element at a time).
- compressed = false;
-
- const TrilinosScalar* value_ptr = &values[i*n_cols];
-
- ierr = matrix->SumIntoGlobalValues (1, (int*)&i,
- (int)n_cols, (int*)&col_indices[0],
- &value_ptr,
- Epetra_FECrsMatrix::ROW_MAJOR);
- }
-
- Assert (ierr <= 0, ExcAccessToNonPresentElement(row_indices[i],
- col_indices[0]));
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
- }
-
-
-
void
SparseMatrix::clear_row (const unsigned int row,
const TrilinosScalar new_diag_value)
}
- void
- VectorBase::set (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (indices.size() == values.size(),
- ExcDimensionMismatch(indices.size(),values.size()));
-
- set (indices.size(), &indices[0], &values[0]);
- }
-
-
-
- void
- VectorBase::set (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<TrilinosScalar> &values)
- {
- Assert (indices.size() == values.size(),
- ExcDimensionMismatch(indices.size(),values.size()));
-
- set (indices.size(), &indices[0], values.begin());
- }
-
-
-
- void
- VectorBase::set (const unsigned int n_elements,
- const unsigned int *indices,
- const TrilinosScalar *values)
- {
- if (last_action == Add)
- vector->GlobalAssemble(Add);
-
- if (last_action != Insert)
- last_action = Insert;
-
- int ierr;
- for (unsigned int i=0; i<n_elements; ++i)
- {
- const unsigned int row = indices[i];
- const int local_row = vector->Map().LID(indices[i]);
- if (local_row == -1)
- {
- ierr = vector->ReplaceGlobalValues (1,
- (const int*)(&row),
- &values[i]);
- compressed = false;
- }
- else
- ierr = vector->ReplaceMyValue (local_row, 0, values[i]);
-
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
- }
-
-
-
- void
- VectorBase::add (const std::vector<unsigned int> &indices,
- const std::vector<TrilinosScalar> &values)
- {
- Assert (indices.size() == values.size(),
- ExcDimensionMismatch(indices.size(),values.size()));
-
- add (indices.size(), &indices[0], &values[0]);
- }
-
-
-
- void
- VectorBase::add (const std::vector<unsigned int> &indices,
- const ::dealii::Vector<TrilinosScalar> &values)
- {
- Assert (indices.size() == values.size(),
- ExcDimensionMismatch(indices.size(),values.size()));
-
- add (indices.size(), &indices[0], values.begin());
- }
-
-
-
- void
- VectorBase::add (const unsigned int n_elements,
- const unsigned int *indices,
- const TrilinosScalar *values)
- {
- if (last_action == Insert)
- vector->GlobalAssemble(Insert);
-
- if (last_action != Add)
- last_action = Add;
-
- int ierr;
- for (unsigned int i=0; i<n_elements; ++i)
- {
- const unsigned int row = indices[i];
- const int local_row = vector->Map().LID(indices[i]);
- if (local_row == -1)
- {
- ierr = vector->SumIntoGlobalValues (1,
- (const int*)(&row),
- &values[i]);
- compressed = false;
- }
- else
- ierr = vector->SumIntoMyValue (local_row, 0, values[i]);
-
- AssertThrow (ierr == 0, ExcTrilinosError(ierr));
- }
- }
-
-
TrilinosScalar
VectorBase::operator * (const VectorBase &vec) const