* performance when creating the sparsity pattern.
*/
SparsityPattern (const Epetra_Map &parallel_partitioning,
- const size_type n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0) DEAL_II_DEPRECATED;
/**
* Same as before, but now use the exact number of nonzeros in each of the m rows.
* designed to describe.
*/
SparsityPattern (const Epetra_Map &parallel_partitioning,
- const std::vector<size_type> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row) DEAL_II_DEPRECATED;
/**
* This constructor is similar to the one above, but it now takes two
*/
SparsityPattern (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const size_type n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0) DEAL_II_DEPRECATED;
/**
* This constructor is similar to the one above, but it now takes two
*/
SparsityPattern (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const std::vector<size_type> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row) DEAL_II_DEPRECATED;
/**
* Reinitialization function for generating a square sparsity pattern
*/
void
reinit (const Epetra_Map &parallel_partitioning,
- const size_type n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0) DEAL_II_DEPRECATED;
/**
* Same as before, but now use the exact number of nonzeros in each of the m rows.
*/
void
reinit (const Epetra_Map &parallel_partitioning,
- const std::vector<size_type> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row) DEAL_II_DEPRECATED;
/**
* This reinit function is similar to the one above, but it now takes two
void
reinit (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const size_type n_entries_per_row = 0);
+ const size_type n_entries_per_row = 0) DEAL_II_DEPRECATED;
/**
* This reinit function is similar to the one above, but it now takes two
void
reinit (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
- const std::vector<size_type> &n_entries_per_row);
+ const std::vector<size_type> &n_entries_per_row) DEAL_II_DEPRECATED;
/**
* Reinit function. Takes one of the deal.II sparsity patterns and a
reinit (const Epetra_Map &row_parallel_partitioning,
const Epetra_Map &col_parallel_partitioning,
const SparsityType &nontrilinos_sparsity_pattern,
- const bool exchange_data = false);
+ const bool exchange_data = false) DEAL_II_DEPRECATED;
/**
* Reinit function. Takes one of the deal.II sparsity patterns and a
void
reinit (const Epetra_Map &parallel_partitioning,
const SparsityType &nontrilinos_sparsity_pattern,
- const bool exchange_data = false);
+ const bool exchange_data = false) DEAL_II_DEPRECATED;
//@}
/**
* @name Constructors and initialization using an IndexSet description
* pattern, i.e., the partitioning of the vectors that matrices based on this
* sparsity pattern are multiplied with.
*/
- const Epetra_Map &domain_partitioner () const;
+ const Epetra_Map &domain_partitioner () const DEAL_II_DEPRECATED;
/**
* Return a const reference to the underlying Trilinos Epetra_Map that
* i.e., the partitioning of the vectors that are the result of matrix-
* vector products.
*/
- const Epetra_Map &range_partitioner () const;
+ const Epetra_Map &range_partitioner () const DEAL_II_DEPRECATED;
/**
* Return a const reference to the underlying Trilinos Epetra_Map that
* sets the partitioning of the sparsity pattern rows. Equal to the
* partitioning of the range.
*/
- const Epetra_Map &row_partitioner () const;
+ const Epetra_Map &row_partitioner () const DEAL_II_DEPRECATED;
/**
* Return a const reference to the underlying Trilinos Epetra_Map that
* general not equal to the partitioner Epetra_Map for the domain because
* of overlap in the matrix.
*/
- const Epetra_Map &col_partitioner () const;
+ const Epetra_Map &col_partitioner () const DEAL_II_DEPRECATED;
/**
* Return a const reference to the communicator used for this object.
*/
- const Epetra_Comm &trilinos_communicator () const;
+ const Epetra_Comm &trilinos_communicator () const DEAL_II_DEPRECATED;
+
+ /**
+ * Return the MPI communicator object in use with this sparsity pattern.
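+ *
+ * For example (a sketch; assumes an MPI build of deal.II and an
+ * initialized object of this class named @p pattern):
+ * @code
+ * const MPI_Comm comm = pattern.get_mpi_communicator();
+ * const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
+ * @endcode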
+ */
+ MPI_Comm get_mpi_communicator () const;
+//@}
+
+ /**
+ * @name Partitioners
+ */
+//@{
+
+ /**
+ * Return the partitioning of the domain space of this pattern, i.e., the
+ * partitioning of the vectors a matrix based on this sparsity pattern
+ * has to be multiplied with.
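+ *
+ * For example (a sketch; @p pattern names a fully initialized and
+ * compressed object of this class), a vector that a matrix based on
+ * this pattern can be multiplied with is set up as
+ * @code
+ * TrilinosWrappers::MPI::Vector x (pattern.locally_owned_domain_indices(),
+ * pattern.get_mpi_communicator());
+ * @endcode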
+ */
+ IndexSet locally_owned_domain_indices() const;
+
+ /**
+ * Return the partitioning of the range space of this pattern, i.e., the
+ * partitioning of the vectors that are the result of matrix-vector
+ * products.
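+ *
+ * For example (a sketch; @p pattern names a fully initialized and
+ * compressed object of this class), the result vector of a
+ * matrix-vector product is set up as
+ * @code
+ * TrilinosWrappers::MPI::Vector y (pattern.locally_owned_range_indices(),
+ * pattern.get_mpi_communicator());
+ * @endcode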
+ */
+ IndexSet locally_owned_range_indices() const;
+
//@}
+
/**
* @name Iterators
*/
inline
- const Epetra_Map &
- SparsityPattern::domain_partitioner () const
- {
- return static_cast<const Epetra_Map &>(graph->DomainMap());
- }
-
-
-
- inline
- const Epetra_Map &
- SparsityPattern::range_partitioner () const
- {
- return static_cast<const Epetra_Map &>(graph->RangeMap());
- }
-
-
-
- inline
- const Epetra_Map &
- SparsityPattern::row_partitioner () const
- {
- return static_cast<const Epetra_Map &>(graph->RowMap());
- }
-
-
-
- inline
- const Epetra_Map &
- SparsityPattern::col_partitioner () const
+ IndexSet
+ SparsityPattern::locally_owned_domain_indices () const
{
- return static_cast<const Epetra_Map &>(graph->ColMap());
+ return IndexSet(static_cast<const Epetra_Map &>(graph->DomainMap()));
}
inline
- const Epetra_Comm &
- SparsityPattern::trilinos_communicator () const
+ IndexSet
+ SparsityPattern::locally_owned_range_indices () const
{
- return graph->RangeMap().Comm();
+ return IndexSet(static_cast<const Epetra_Map &>(graph->RangeMap()));
}
#endif // DOXYGEN
const MPI_Comm &communicator,
const size_type n_entries_per_row)
{
- Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
- false);
- reinit (map, map, n_entries_per_row);
+ reinit (parallel_partitioning, parallel_partitioning, communicator,
+ n_entries_per_row);
}
const MPI_Comm &communicator,
const std::vector<size_type> &n_entries_per_row)
{
- Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
- false);
- reinit (map, map, n_entries_per_row);
+ reinit (parallel_partitioning, parallel_partitioning, communicator,
+ n_entries_per_row);
}
const MPI_Comm &communicator,
const size_type n_entries_per_row)
{
- Epetra_Map row_map =
- row_parallel_partitioning.make_trilinos_map (communicator, false);
- Epetra_Map col_map =
- col_parallel_partitioning.make_trilinos_map (communicator, false);
- reinit (row_map, col_map, n_entries_per_row);
+ reinit (row_parallel_partitioning, col_parallel_partitioning,
+ communicator, n_entries_per_row);
}
const MPI_Comm &communicator,
const std::vector<size_type> &n_entries_per_row)
{
- Epetra_Map row_map =
- row_parallel_partitioning.make_trilinos_map (communicator, false);
- Epetra_Map col_map =
- col_parallel_partitioning.make_trilinos_map (communicator, false);
- reinit (row_map, col_map, n_entries_per_row);
+ reinit (row_parallel_partitioning, col_parallel_partitioning,
+ communicator, n_entries_per_row);
}
void
- SparsityPattern::reinit (const Epetra_Map &input_map,
- const size_type n_entries_per_row)
+ SparsityPattern::reinit (const size_type m,
+ const size_type n,
+ const size_type n_entries_per_row)
{
- reinit (input_map, input_map, n_entries_per_row);
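+ // complete_index_set(m) describes ownership of all m indices on this
+ // process, so together with MPI_COMM_SELF the call below reproduces
+ // the serial behavior of the old Epetra_Map-based code path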
+ reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
+ n_entries_per_row);
}
+
void
SparsityPattern::reinit (const size_type m,
const size_type n,
- const size_type n_entries_per_row)
+ const std::vector<size_type> &n_entries_per_row)
{
- const Epetra_Map rows (TrilinosWrappers::types::int_type(m), 0,
- Utilities::Trilinos::comm_self());
- const Epetra_Map columns (TrilinosWrappers::types::int_type(n), 0,
- Utilities::Trilinos::comm_self());
+ reinit (complete_index_set(m), complete_index_set(n), MPI_COMM_SELF,
+ n_entries_per_row);
+ }
+
+
+
+ namespace
+ {
+ typedef SparsityPattern::size_type size_type;
+
+ void
+ reinit_sp (const Epetra_Map &row_map,
+ const Epetra_Map &col_map,
+ const size_type n_entries_per_row,
+ std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
+ std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
+ std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ {
+ Assert(row_map.IsOneToOne(),
+ ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+ Assert(col_map.IsOneToOne(),
+ ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+ nonlocal_graph.reset();
+ graph.reset ();
+ column_space_map.reset (new Epetra_Map (col_map));
+
+ // for more than one processor, need to specify only row map first and
+ // let the matrix entries decide about the column map (which says which
+ // columns are present in the matrix, not to be confused with the
+ // col_map that tells how the domain dofs of the matrix will be
+ // distributed). for only one processor, we can directly assign the
+ // columns as well. If we use a recent Trilinos version, we can also
+ // require building a non-local graph which gives us thread-safe
+ // initialization.
+ if (row_map.Comm().NumProc() > 1)
+ graph.reset (new Epetra_FECrsGraph(Copy, row_map,
+ n_entries_per_row, false
+ // TODO: Check which new Trilinos
+ // version supports this... Remember
+ // to change tests/trilinos/assemble_matrix_parallel_07
+ // too.
+ //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+ //, true
+ //#endif
+ ));
+ else
+ graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
+ n_entries_per_row, false));
+ }
+
+
+
+ void
+ reinit_sp (const Epetra_Map &row_map,
+ const Epetra_Map &col_map,
+ const std::vector<size_type> &n_entries_per_row,
+ std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
+ std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
+ std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ {
+ Assert(row_map.IsOneToOne(),
+ ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+ Assert(col_map.IsOneToOne(),
+ ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
+ "the maps of different processors."));
+
+ // release memory before reallocation
+ nonlocal_graph.reset();
+ graph.reset ();
+ AssertDimension (n_entries_per_row.size(),
+ static_cast<size_type>(n_global_elements(row_map)));
+
+ column_space_map.reset (new Epetra_Map (col_map));
+ // the locally owned rows of row_map are [min_my_gid, max_my_gid],
+ // inclusive on both ends, so we need max-min+1 local entries
+ std::vector<int> local_entries_per_row(max_my_gid(row_map)-
+ min_my_gid(row_map)+1);
+ for (unsigned int i=0; i<local_entries_per_row.size(); ++i)
+ local_entries_per_row[i] = n_entries_per_row[min_my_gid(row_map)+i];
+
+ if (row_map.Comm().NumProc() > 1)
+ graph.reset(new Epetra_FECrsGraph(Copy, row_map,
+ &local_entries_per_row[0],
+ false
+ // TODO: Check which new Trilinos
+ // version supports this... Remember
+ // to change tests/trilinos/assemble_matrix_parallel_07
+ // too.
+ //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
+ //, true
+ //#endif
+ ));
+ else
+ graph.reset(new Epetra_FECrsGraph(Copy, row_map, col_map,
+ &local_entries_per_row[0],
+ false));
+ }
+
+
+
+ template <typename SparsityType>
+ void
+ reinit_sp (const Epetra_Map &row_map,
+ const Epetra_Map &col_map,
+ const SparsityType &sp,
+ const bool exchange_data,
+ std_cxx11::shared_ptr<Epetra_Map> &column_space_map,
+ std_cxx11::shared_ptr<Epetra_FECrsGraph> &graph,
+ std_cxx11::shared_ptr<Epetra_CrsGraph> &nonlocal_graph)
+ {
+ graph.reset ();
+
+ AssertDimension (sp.n_rows(),
+ static_cast<size_type>(n_global_elements(row_map)));
+ AssertDimension (sp.n_cols(),
+ static_cast<size_type>(n_global_elements(col_map)));
+
+ column_space_map.reset (new Epetra_Map (col_map));
+
+ Assert (row_map.LinearMap() == true,
+ ExcMessage ("This function only works if the row map is contiguous."));
+
+ const size_type first_row = min_my_gid(row_map),
+ last_row = max_my_gid(row_map)+1;
+ std::vector<int> n_entries_per_row(last_row - first_row);
- reinit (rows, columns, n_entries_per_row);
+ // Trilinos wants the row length as an int; this is hopefully never
+ // going to be a problem.
+ for (size_type row=first_row; row<last_row; ++row)
+ n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
+
+ if (row_map.Comm().NumProc() > 1)
+ graph.reset(new Epetra_FECrsGraph(Copy, row_map,
+ &n_entries_per_row[0],
+ false));
+ else
+ graph.reset (new Epetra_FECrsGraph(Copy, row_map, col_map,
+ &n_entries_per_row[0],
+ false));
+
+ AssertDimension (sp.n_rows(),
+ static_cast<size_type>(n_global_rows(*graph)));
+
+ std::vector<TrilinosWrappers::types::int_type> row_indices;
+
+ // Include possibility to exchange data since DynamicSparsityPattern is
+ // able to do so
+ if (exchange_data==false)
+ for (size_type row=first_row; row<last_row; ++row)
+ {
+ const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
+ if (row_length == 0)
+ continue;
+
+ row_indices.resize (row_length, -1);
+ {
+ typename SparsityType::iterator p = sp.begin(row);
+ for (size_type col=0; p != sp.end(row); ++p, ++col)
+ row_indices[col] = p->column();
+ }
+ graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
+ &row_indices[0]);
+ }
+ else
+ for (size_type row=0; row<sp.n_rows(); ++row)
+ {
+ const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
+ if (row_length == 0)
+ continue;
+
+ row_indices.resize (row_length, -1);
+ {
+ typename SparsityType::iterator p = sp.begin(row);
+ for (size_type col=0; p != sp.end(row); ++p, ++col)
+ row_indices[col] = p->column();
+ }
+ graph->InsertGlobalIndices (1,
+ reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),
+ row_length, &row_indices[0]);
+ }
+
+ int ierr =
+ graph->GlobalAssemble (*column_space_map,
+ static_cast<const Epetra_Map &>(graph->RangeMap()),
+ true);
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+
+ ierr = graph->OptimizeStorage ();
+ AssertThrow (ierr == 0, ExcTrilinosError(ierr));
+ }
}
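+
+ // The public reinit() functions below forward to the reinit_sp() helpers
+ // above. A typical caller-side flow for the overload copying a deal.II
+ // sparsity pattern is (a sketch; dof_handler and locally_owned are
+ // hypothetical names for a DoFHandler and the set of locally owned DoF
+ // indices):
+ //
+ // DynamicSparsityPattern dsp (locally_owned.size());
+ // DoFTools::make_sparsity_pattern (dof_handler, dsp);
+ // TrilinosWrappers::SparsityPattern sp;
+ // sp.reinit (locally_owned, locally_owned, dsp, MPI_COMM_WORLD, true);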
void
- SparsityPattern::reinit (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
+ SparsityPattern::reinit (const Epetra_Map &input_map,
const size_type n_entries_per_row)
{
- Assert(input_row_map.IsOneToOne(),
- ExcMessage("Row map must be 1-to-1, i.e., no overlap between "
- "the maps of different processors."));
- Assert(input_col_map.IsOneToOne(),
- ExcMessage("Column map must be 1-to-1, i.e., no overlap between "
- "the maps of different processors."));
- graph.reset ();
- column_space_map.reset (new Epetra_Map (input_col_map));
-
- // for more than one processor, need to specify only row map first and let
- // the matrix entries decide about the column map (which says which
- // columns are present in the matrix, not to be confused with the col_map
- // that tells how the domain dofs of the matrix will be distributed). for
- // only one processor, we can directly assign the columns as well. If we
- // use a recent Trilinos version, we can also require building a non-local
- // graph which gives us thread-safe initialization.
- if (input_row_map.Comm().NumProc() > 1)
- graph.reset (new Epetra_FECrsGraph(Copy, input_row_map,
- n_entries_per_row, false
- // TODO: Check which new Trilinos
- // version supports this... Remember
- // to change tests/trilinos/assemble_matrix_parallel_07
- // too.
- //#if DEAL_II_TRILINOS_VERSION_GTE(11,14,0)
- //, true
- //#endif
- ));
- else
- graph.reset (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
- n_entries_per_row, false));
+ reinit_sp (input_map, input_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
void
- SparsityPattern::reinit (const Epetra_Map &input_map,
- const std::vector<size_type> &n_entries_per_row)
+ SparsityPattern::reinit (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
+ const size_type n_entries_per_row)
{
- reinit (input_map, input_map, n_entries_per_row);
+ reinit_sp (input_row_map, input_col_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
void
- SparsityPattern::reinit (const size_type m,
- const size_type n,
+ SparsityPattern::reinit (const Epetra_Map &input_map,
const std::vector<size_type> &n_entries_per_row)
{
- const Epetra_Map rows (TrilinosWrappers::types::int_type(m), 0,
- Utilities::Trilinos::comm_self());
- const Epetra_Map columns (TrilinosWrappers::types::int_type(n), 0,
- Utilities::Trilinos::comm_self());
-
- reinit (rows, columns, n_entries_per_row);
+ reinit_sp (input_map, input_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
const Epetra_Map &input_col_map,
const std::vector<size_type> &n_entries_per_row)
{
- // release memory before reallocation
- graph.reset ();
- AssertDimension (n_entries_per_row.size(),
- static_cast<size_type>(n_global_elements(input_row_map)));
-
- column_space_map.reset (new Epetra_Map (input_col_map));
-
- if (input_row_map.Comm().NumProc() > 1)
- graph.reset(new Epetra_FECrsGraph(Copy, input_row_map,
- n_entries_per_row[min_my_gid(input_row_map)],
- false
-#if DEAL_II_TRILINOS_VERSION_GTE(11,9,0)
- , true
-#endif
- ));
- else
- graph.reset(new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
- n_entries_per_row[max_my_gid(input_row_map)],
- false));
+ reinit_sp (input_row_map, input_col_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
{
Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
false);
- reinit (map, map, n_entries_per_row);
+ reinit_sp (map, map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
{
Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
false);
- reinit (map, map, n_entries_per_row);
+ reinit_sp (map, map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
row_parallel_partitioning.make_trilinos_map (communicator, false);
Epetra_Map col_map =
col_parallel_partitioning.make_trilinos_map (communicator, false);
- reinit (row_map, col_map, n_entries_per_row);
+ reinit_sp (row_map, col_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
row_parallel_partitioning.make_trilinos_map (communicator, false);
Epetra_Map col_map =
col_parallel_partitioning.make_trilinos_map (communicator, false);
- reinit (row_map, col_map, n_entries_per_row);
- }
-
-
-
- template<typename SparsityType>
- void
- SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
- const IndexSet &col_parallel_partitioning,
- const SparsityType &nontrilinos_sparsity_pattern,
- const MPI_Comm &communicator,
- const bool exchange_data)
- {
- Epetra_Map row_map =
- row_parallel_partitioning.make_trilinos_map (communicator, false);
- Epetra_Map col_map =
- col_parallel_partitioning.make_trilinos_map (communicator, false);
- reinit (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data);
- }
-
-
-
- template<typename SparsityType>
- void
- SparsityPattern::reinit (const IndexSet &parallel_partitioning,
- const SparsityType &nontrilinos_sparsity_pattern,
- const MPI_Comm &communicator,
- const bool exchange_data)
- {
- Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
- false);
- reinit (map, map, nontrilinos_sparsity_pattern, exchange_data);
+ reinit_sp (row_map, col_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
}
const MPI_Comm &communicator,
const size_type n_entries_per_row)
{
- reinit(row_parallel_partitioning, col_parallel_partitioning,
- communicator,n_entries_per_row);
+ Epetra_Map row_map =
+ row_parallel_partitioning.make_trilinos_map (communicator, false);
+ Epetra_Map col_map =
+ col_parallel_partitioning.make_trilinos_map (communicator, false);
+ reinit_sp (row_map, col_map, n_entries_per_row,
+ column_space_map, graph, nonlocal_graph);
IndexSet nonlocal_partitioner = writable_rows;
AssertDimension(nonlocal_partitioner.size(), row_parallel_partitioning.size());
+ template<typename SparsityType>
+ void
+ SparsityPattern::reinit (const IndexSet &row_parallel_partitioning,
+ const IndexSet &col_parallel_partitioning,
+ const SparsityType &nontrilinos_sparsity_pattern,
+ const MPI_Comm &communicator,
+ const bool exchange_data)
+ {
+ Epetra_Map row_map =
+ row_parallel_partitioning.make_trilinos_map (communicator, false);
+ Epetra_Map col_map =
+ col_parallel_partitioning.make_trilinos_map (communicator, false);
+ reinit_sp (row_map, col_map, nontrilinos_sparsity_pattern, exchange_data,
+ column_space_map, graph, nonlocal_graph);
+ }
+
+
+
+ template<typename SparsityType>
+ void
+ SparsityPattern::reinit (const IndexSet &parallel_partitioning,
+ const SparsityType &nontrilinos_sparsity_pattern,
+ const MPI_Comm &communicator,
+ const bool exchange_data)
+ {
+ Epetra_Map map = parallel_partitioning.make_trilinos_map (communicator,
+ false);
+ reinit_sp (map, map, nontrilinos_sparsity_pattern, exchange_data,
+ column_space_map, graph, nonlocal_graph);
+ }
+
+
+
template <typename SparsityType>
void
SparsityPattern::reinit (const Epetra_Map &input_map,
const SparsityType &sp,
const bool exchange_data)
{
- reinit (input_map, input_map, sp, exchange_data);
+ reinit_sp (input_map, input_map, sp, exchange_data,
+ column_space_map, graph, nonlocal_graph);
}
const SparsityType &sp,
const bool exchange_data)
{
- graph.reset ();
-
- AssertDimension (sp.n_rows(),
- static_cast<size_type>(n_global_elements(input_row_map)));
- AssertDimension (sp.n_cols(),
- static_cast<size_type>(n_global_elements(input_col_map)));
-
- column_space_map.reset (new Epetra_Map (input_col_map));
-
- Assert (input_row_map.LinearMap() == true,
- ExcMessage ("This function is not efficient if the map is not contiguous."));
-
- const size_type first_row = min_my_gid(input_row_map),
- last_row = max_my_gid(input_row_map)+1;
- std::vector<int> n_entries_per_row(last_row - first_row);
-
- // Trilinos wants the row length as an int
- // this is hopefully never going to be a problem.
- for (size_type row=first_row; row<last_row; ++row)
- n_entries_per_row[row-first_row] = static_cast<int>(sp.row_length(row));
-
- if (input_row_map.Comm().NumProc() > 1)
- graph.reset(new Epetra_FECrsGraph(Copy, input_row_map,
- &n_entries_per_row[0],
- false));
- else
- graph.reset (new Epetra_FECrsGraph(Copy, input_row_map, input_col_map,
- &n_entries_per_row[0],
- false));
-
- AssertDimension (sp.n_rows(),
- static_cast<size_type>(n_global_rows(*graph)));
-
- std::vector<TrilinosWrappers::types::int_type> row_indices;
-
- // Include possibility to exchange data
- // since DynamicSparsityPattern is
- // able to do so
- if (exchange_data==false)
- for (size_type row=first_row; row<last_row; ++row)
- {
- const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
- if (row_length == 0)
- continue;
-
- row_indices.resize (row_length, -1);
- {
- typename SparsityType::iterator p = sp.begin(row);
- for (size_type col=0; p != sp.end(row); ++p, ++col)
- row_indices[col] = p->column();
- }
- graph->Epetra_CrsGraph::InsertGlobalIndices (row, row_length,
- &row_indices[0]);
- }
- else
- for (size_type row=0; row<sp.n_rows(); ++row)
- {
- const TrilinosWrappers::types::int_type row_length = sp.row_length(row);
- if (row_length == 0)
- continue;
-
- row_indices.resize (row_length, -1);
- {
- typename SparsityType::iterator p = sp.begin(row);
- for (size_type col=0; p != sp.end(row); ++p, ++col)
- row_indices[col] = p->column();
- }
- graph->InsertGlobalIndices (1,
- reinterpret_cast<TrilinosWrappers::types::int_type *>(&row),
- row_length, &row_indices[0]);
- }
+ reinit_sp (input_row_map, input_col_map, sp, exchange_data,
+ column_space_map, graph, nonlocal_graph);
compress();
}
const Epetra_Map columns (TrilinosWrappers::types::int_type(sp.n_cols()), 0,
Utilities::Trilinos::comm_self());
- reinit (rows, columns, sp);
+ reinit_sp (rows, columns, sp, false,
+ column_space_map, graph, nonlocal_graph);
}
+ const Epetra_Map &
+ SparsityPattern::domain_partitioner () const
+ {
+ return static_cast<const Epetra_Map &>(graph->DomainMap());
+ }
+
+
+
+ const Epetra_Map &
+ SparsityPattern::range_partitioner () const
+ {
+ return static_cast<const Epetra_Map &>(graph->RangeMap());
+ }
+
+
+
+ const Epetra_Map &
+ SparsityPattern::row_partitioner () const
+ {
+ return static_cast<const Epetra_Map &>(graph->RowMap());
+ }
+
+
+
+ const Epetra_Map &
+ SparsityPattern::col_partitioner () const
+ {
+ return static_cast<const Epetra_Map &>(graph->ColMap());
+ }
+
+
+
+ const Epetra_Comm &
+ SparsityPattern::trilinos_communicator () const
+ {
+ return graph->RangeMap().Comm();
+ }
+
+
+
+ MPI_Comm
+ SparsityPattern::get_mpi_communicator () const
+ {
+#ifdef DEAL_II_WITH_MPI
+ // in an MPI build the underlying communicator is an Epetra_MpiComm;
+ // verify the cast in debug mode before dereferencing
+ const Epetra_MpiComm *mpi_comm
+ = dynamic_cast<const Epetra_MpiComm *>(&graph->RangeMap().Comm());
+ Assert (mpi_comm != 0, ExcInternalError());
+ return mpi_comm->Comm();
+#else
+ return MPI_COMM_SELF;
+#endif
+ }
+
+
+
void
SparsityPattern::write_ascii ()
{