*/
template<int dim, int spacedim>
void compute_block_renumbering (
- const FiniteElement<dim,spacedim> &fe,
+ const FiniteElement<dim,spacedim> &fe,
- std::vector<unsigned int> &renumbering,
- std::vector<unsigned int> &block_data,
+ std::vector<types::global_dof_index> &renumbering,
+ std::vector<types::global_dof_index> &block_data,
bool return_start_indices = true);
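For orientation, a minimal caller of the new declaration might look as follows; a sketch only, assuming this function lives in FETools (as in deal.II) and using an arbitrary vector-valued element for illustration:

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_tools.h>
#include <vector>

using namespace dealii;

void renumbering_sketch ()
{
  // A two-component element, chosen only for illustration.
  FESystem<2> fe (FE_Q<2>(1), 2);

  // Both output vectors now hold types::global_dof_index
  // instead of unsigned int.
  std::vector<types::global_dof_index> renumbering (fe.dofs_per_cell);
  std::vector<types::global_dof_index> block_data (fe.n_blocks());
  FETools::compute_block_renumbering (fe, renumbering, block_data);
}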
/**
* @param R The radius of the circle, which forms the middle line of the torus containing the loop of cells. Must be greater than @p r.
* @param r The radius of the cylinder bent together as a loop.
*/
- static void moebius (Triangulation<3,3> &tria,
+ static void moebius (Triangulation<3,3> &tria,
- const unsigned int n_cells,
+ const size_type n_cells,
const unsigned int n_rotations,
const double R,
const double r);
inline
-std::pair<unsigned int,unsigned int>
-BlockIndices::global_to_local (const unsigned int i) const
+std::pair<unsigned int,BlockIndices::size_type>
+BlockIndices::global_to_local (const size_type i) const
{
Assert (i<total_size(), ExcIndexRange(i, 0, total_size()));
+ Assert (n_blocks > 0, ExcLowerRange(n_blocks, 1));
- int block = n_blocks-1;
+ unsigned int block = n_blocks-1;
while (i < start_indices[block])
--block;
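The loop above is the entire lookup. A library-free sketch of the same logic with hypothetical names, where start_indices[b] is the first global index of block b and the caller guarantees at least one block:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Map a global index i to the pair (block number, index within block)
// by scanning backwards from the last block, as global_to_local does.
std::pair<unsigned int, std::size_t>
global_to_local_sketch (const std::vector<std::size_t> &start_indices,
                        const std::size_t i)
{
  assert (!start_indices.empty());
  unsigned int block = static_cast<unsigned int>(start_indices.size()) - 1;
  while (i < start_indices[block])
    --block;
  return std::make_pair (block, i - start_indices[block]);
}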
template <class BlockMatrix>
inline
Accessor<BlockMatrix, true>::Accessor (
- const BlockMatrix *matrix,
+ const BlockMatrix *matrix,
- const unsigned int row,
- const unsigned int col)
+ const size_type row,
+ const size_type col)
:
matrix(matrix),
base_iterator(matrix->block(0,0).begin())
template <class BlockMatrix>
inline
Accessor<BlockMatrix, false>::Accessor (
- BlockMatrix *matrix,
+ BlockMatrix *matrix,
- const unsigned int row,
- const unsigned int col)
+ const size_type row,
+ const size_type col)
:
matrix(matrix),
base_iterator(matrix->block(0,0).begin())
* row indices.
*/
void
- make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
+ make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const;
+ internals::GlobalRowsFromLocal &global_rows) const;
/**
* Internal helper function for
// are related to it.
void
ConstraintMatrix::
-make_sorted_row_list (const std::vector<unsigned int> &local_dof_indices,
+make_sorted_row_list (const std::vector<size_type> &local_dof_indices,
- internals::GlobalRowsFromLocal &global_rows) const
+ internals::GlobalRowsFromLocal &global_rows) const
{
- const unsigned int n_local_dofs = local_dof_indices.size();
+ const size_type n_local_dofs = local_dof_indices.size();
AssertDimension (n_local_dofs, global_rows.size());
// when distributing the local data to
* indices.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
- const std::vector<OtherNumber> &values);
+ const std::vector<OtherNumber> &values);
/**
* This is a second collective
* functions above.
*/
template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
- const OtherNumber *values);
+ const OtherNumber *values);
/**
* Addition of @p s to all
template <typename OtherNumber>
inline
void
- Vector<Number>::add (const std::vector<unsigned int> &indices,
+ Vector<Number>::add (const std::vector<size_type> &indices,
- const std::vector<OtherNumber> &values)
+ const std::vector<OtherNumber> &values)
{
AssertDimension (indices.size(), values.size());
add (indices.size(), &indices[0], &values[0]);
template <typename OtherNumber>
inline
void
- Vector<Number>::add (const unsigned int n_indices,
- const unsigned int *indices,
+ Vector<Number>::add (const size_type n_indices,
+ const size_type *indices,
- const OtherNumber *values)
+ const OtherNumber *values)
{
- for (unsigned int i=0; i<n_indices; ++i)
+ for (size_type i=0; i<n_indices; ++i)
{
Assert (numbers::is_finite(values[i]),
ExcMessage("The given value is not finite but either infinite or Not A Number (NaN)"));
void
Vector<Number>::clear_mpi_requests ()
{
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
- for (unsigned int j=0; j<compress_requests.size(); j++)
+ for (size_type j=0; j<compress_requests.size(); j++)
MPI_Request_free(&compress_requests[j]);
compress_requests.clear();
- for (unsigned int j=0; j<update_ghost_values_requests.size(); j++)
+ for (size_type j=0; j<update_ghost_values_requests.size(); j++)
MPI_Request_free(&update_ghost_values_requests[j]);
update_ghost_values_requests.clear();
#endif
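The body follows the standard cleanup pattern for persistent MPI requests; a standalone sketch with a hypothetical helper name:

#include <mpi.h>
#include <cstddef>
#include <vector>

// Release persistent requests (e.g. created by MPI_Send_init) and
// empty the container, mirroring clear_mpi_requests() above.
void clear_requests_sketch (std::vector<MPI_Request> &requests)
{
  for (std::size_t j = 0; j < requests.size(); ++j)
    MPI_Request_free (&requests[j]);
  requests.clear ();
}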
<< std::endl;
out << std::endl << std::flush;
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
MPI_Barrier (partitioner->get_communicator());
- for (unsigned int i=partitioner->this_mpi_process()+1;
+ for (size_type i=partitioner->this_mpi_process()+1;
i<partitioner->n_mpi_processes(); i++)
MPI_Barrier (partitioner->get_communicator());
#endif
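The barrier loop is the trailing half of the usual "print in rank order" idiom. A self-contained sketch of the whole pattern; every rank passes size-1 barriers in total, so the collectives match up:

#include <mpi.h>
#include <iostream>
#include <string>

void print_in_rank_order (MPI_Comm comm, const std::string &text)
{
  int rank, size;
  MPI_Comm_rank (comm, &rank);
  MPI_Comm_size (comm, &size);

  for (int i = 0; i < rank; ++i)        // wait for all lower ranks
    MPI_Barrier (comm);
  std::cout << "[rank " << rank << "] " << text << std::endl;
  for (int i = rank + 1; i < size; ++i) // let the higher ranks proceed
    MPI_Barrier (comm);
}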
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
- void set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ void set (const size_type row,
+ const std::vector<size_type> &col_indices,
- const std::vector<PetscScalar> &values,
+ const std::vector<PetscScalar> &values,
- const bool elide_zero_values = false);
+ const bool elide_zero_values = false);
/**
* Set several elements to values
* <tt>false</tt>, i.e., even zero
* values are inserted/replaced.
*/
- void set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ void set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
- const PetscScalar *values,
+ const PetscScalar *values,
- const bool elide_zero_values = false);
+ const bool elide_zero_values = false);
/**
* Add @p value to the element
* i.e., zero values won't be added
* into the matrix.
*/
- void add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ void add (const size_type row,
+ const std::vector<size_type> &col_indices,
- const std::vector<PetscScalar> &values,
+ const std::vector<PetscScalar> &values,
- const bool elide_zero_values = true);
+ const bool elide_zero_values = true);
/**
* Add an array of values given by
* i.e., zero values won't be added
* into the matrix.
*/
- void add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ void add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
- const PetscScalar *values,
+ const PetscScalar *values,
- const bool elide_zero_values = true,
- const bool col_indices_are_sorted = false);
+ const bool elide_zero_values = true,
+ const bool col_indices_are_sorted = false);
/**
* Remove all elements from
inline
void
- MatrixBase::set (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ MatrixBase::set (const size_type row,
+ const std::vector<size_type> &col_indices,
- const std::vector<PetscScalar> &values,
+ const std::vector<PetscScalar> &values,
- const bool elide_zero_values)
+ const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
inline
void
- MatrixBase::set (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ MatrixBase::set (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
- const PetscScalar *values,
+ const PetscScalar *values,
- const bool elide_zero_values)
+ const bool elide_zero_values)
{
prepare_action(LastAction::insert);
inline
void
- MatrixBase::add (const unsigned int row,
- const std::vector<unsigned int> &col_indices,
+ MatrixBase::add (const size_type row,
+ const std::vector<size_type> &col_indices,
- const std::vector<PetscScalar> &values,
+ const std::vector<PetscScalar> &values,
- const bool elide_zero_values)
+ const bool elide_zero_values)
{
Assert (col_indices.size() == values.size(),
ExcDimensionMismatch(col_indices.size(), values.size()));
inline
void
- MatrixBase::add (const unsigned int row,
- const unsigned int n_cols,
- const unsigned int *col_indices,
+ MatrixBase::add (const size_type row,
+ const size_type n_cols,
+ const size_type *col_indices,
- const PetscScalar *values,
+ const PetscScalar *values,
- const bool elide_zero_values,
+ const bool elide_zero_values,
const bool /*col_indices_are_sorted*/)
{
prepare_action(LastAction::add);
* as to only allow the actual vector
* class to create it.
*/
- VectorReference (const VectorBase &vector,
+ VectorReference (const VectorBase &vector,
- const unsigned int index);
+ const size_type index);
- public:
+
+ public:
+
/**
* This looks like a copy operator,
* but does something different than
* the corresponding values in the
* second.
*/
- void set (const std::vector<unsigned int> &indices,
+ void set (const std::vector<size_type> &indices,
- const std::vector<PetscScalar> &values);
+ const std::vector<PetscScalar> &values);
/**
* A collective add operation: This
* stored in @p values to the vector
* components specified by @p indices.
*/
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
- const std::vector<PetscScalar> &values);
+ const std::vector<PetscScalar> &values);
/**
* This is a second collective
* other two <tt>add()</tt>
* functions above.
*/
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
- const PetscScalar *values);
+ const PetscScalar *values);
/**
* Return the scalar product of two
* @p add_values flag set to the
* corresponding value.
*/
- void do_set_add_operation (const unsigned int n_elements,
- const unsigned int *indices,
+ void do_set_add_operation (const size_type n_elements,
+ const size_type *indices,
- const PetscScalar *values,
+ const PetscScalar *values,
- const bool add_values);
+ const bool add_values);
};
namespace internal
{
inline
- VectorReference::VectorReference (const VectorBase &vector,
+ VectorReference::VectorReference (const VectorBase &vector,
- const unsigned int index)
+ const size_type index)
:
vector (vector),
index (index)
{
private:
- #ifdef DEAL_II_USE_MUMPS
+ #ifdef DEAL_II_WITH_MUMPS
DMUMPS_STRUC_C id;
- #endif // DEAL_II_USE_MUMPS
+ #endif // DEAL_II_WITH_MUMPS
- double *a;
- double *rhs;
- int *irn;
- int *jcn;
- unsigned int n;
- unsigned int nz;
+ double *a;
+ double *rhs;
+ int *irn;
+ int *jcn;
+ types::global_dof_index n;
+ types::global_dof_index nz;
/**
* This function initializes a MUMPS instance
template <typename number,
typename InVector,
typename OutVector>
- void vmult_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const number *values,
+ void vmult_on_subrange (const size_type begin_row,
+ const size_type end_row,
+ const number *values,
- const std::size_t *rowstart,
+ const std::size_t *rowstart,
- const unsigned int *colnums,
- const InVector &src,
- OutVector &dst,
- const bool add)
+ const size_type *colnums,
+ const InVector &src,
+ OutVector &dst,
+ const bool add)
{
- const number *val_ptr = &values[rowstart[begin_row]];
- const unsigned int *colnum_ptr = &colnums[rowstart[begin_row]];
+ const number *val_ptr = &values[rowstart[begin_row]];
+ const size_type *colnum_ptr = &colnums[rowstart[begin_row]];
typename OutVector::iterator dst_ptr = dst.begin() + begin_row;
if (add == false)
*/
template <typename number,
typename InVector>
- number matrix_norm_sqr_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const number *values,
+ number matrix_norm_sqr_on_subrange (const size_type begin_row,
+ const size_type end_row,
+ const number *values,
- const std::size_t *rowstart,
+ const std::size_t *rowstart,
- const unsigned int *colnums,
- const InVector &v)
+ const size_type *colnums,
+ const InVector &v)
{
number norm_sqr=0.;
*/
template <typename number,
typename InVector>
- number matrix_scalar_product_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const number *values,
+ number matrix_scalar_product_on_subrange (const size_type begin_row,
+ const size_type end_row,
+ const number *values,
- const std::size_t *rowstart,
+ const std::size_t *rowstart,
- const unsigned int *colnums,
- const InVector &u,
- const InVector &v)
+ const size_type *colnums,
+ const InVector &u,
+ const InVector &v)
{
number norm_sqr=0.;
template <typename number,
typename InVector,
typename OutVector>
- number residual_sqr_on_subrange (const unsigned int begin_row,
- const unsigned int end_row,
- const number *values,
+ number residual_sqr_on_subrange (const size_type begin_row,
+ const size_type end_row,
+ const number *values,
- const std::size_t *rowstart,
+ const std::size_t *rowstart,
- const unsigned int *colnums,
- const InVector &u,
- const InVector &b,
- OutVector &dst)
+ const size_type *colnums,
+ const InVector &u,
+ const InVector &b,
+ OutVector &dst)
{
number norm_sqr=0.;
AssertDimension (dst.size(), n());
AssertDimension (src.size(), n());
- const unsigned int n = src.size();
- somenumber *dst_ptr = dst.begin();
- const somenumber *src_ptr = src.begin();
+ const size_type n = src.size();
+ somenumber *dst_ptr = dst.begin();
+ const somenumber *src_ptr = src.begin();
- const std::size_t *rowstart_ptr = &cols->rowstart[0];
+ const std::size_t *rowstart_ptr = &cols->rowstart[0];
// optimize the following loop for
// the case that the relaxation
void
SparseVanka<number>::compute_inverses ()
{
- if (!DEAL_II_USE_MT)
-   compute_inverses (0, matrix->m());
- else
-   {
-     const unsigned int n_inverses = std::count (selected.begin(),
-                                                 selected.end(),
-                                                 true);
-     const unsigned int n_inverses_per_thread = std::max(n_inverses / n_threads,
-                                                         1U);
+ #ifndef DEAL_II_WITH_THREADS
+ compute_inverses (0, matrix->m());
+ #else
+ const size_type n_inverses = std::count (selected.begin(),
+                                          selected.end(),
+                                          true);
+ const size_type n_inverses_per_thread = std::max(n_inverses / n_threads,
+                                                  static_cast<size_type> (1U));
  // set up start and end index
  // for each of the
  // threads. note that we have
  // to work somewhat to get this
  // appropriate, since the
  // indices for which inverses
  // have to be computed may not
  // be evenly distributed in the
  // vector. as an extreme
  // example consider numbering
  // of DoFs by component, then
  // all indices for which we
  // have to do work will be
  // consecutive, with other
  // consecutive regions where we
  // do not have to do something
- std::vector<std::pair<unsigned int, unsigned int> > blocking (n_threads);
+ std::vector<std::pair<size_type, size_type> > blocking (n_threads);
  unsigned int c = 0;
  unsigned int thread = 0;
  blocking[0].first = 0;
- for (unsigned int i=0; (i<matrix->m()) && (thread+1<n_threads); ++i)
+ for (size_type i=0; (i<matrix->m()) && (thread+1<n_threads); ++i)
    {
      if (selected[i] == true)
        ++c;
      if (c == n_inverses_per_thread)
        {
          blocking[thread].second = i;
          blocking[thread+1].first = i;
          ++thread;
          c = 0;
        };
    };
  blocking[n_threads-1].second = matrix->m();

- typedef void (SparseVanka<number>::*FunPtr)(const unsigned int,
-                                             const unsigned int);
+ typedef void (SparseVanka<number>::*FunPtr)(const size_type,
+                                             const size_type);
  const FunPtr fun_ptr = &SparseVanka<number>::compute_inverses;

  // Now spawn the threads
  Threads::ThreadGroup<> threads;
  for (unsigned int i=0; i<n_threads; ++i)
    threads += Threads::new_thread (fun_ptr, *this,
                                    blocking[i].first,
                                    blocking[i].second);
  threads.join_all ();
+ #endif
}
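The blocking computation is self-contained enough to check in isolation. A standalone sketch with hypothetical names, std::size_t standing in for size_type, which splits the selected rows into n_threads contiguous half-open ranges:

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

// Returns n_threads ranges [first,second) covering [0,selected.size()),
// each containing roughly the same number of selected rows. Requires
// n_threads >= 1.
std::vector<std::pair<std::size_t, std::size_t> >
partition_selected (const std::vector<bool> &selected,
                    const unsigned int n_threads)
{
  const std::size_t n_inverses =
    std::count (selected.begin(), selected.end(), true);
  const std::size_t per_thread =
    std::max<std::size_t> (n_inverses / n_threads, 1);

  std::vector<std::pair<std::size_t, std::size_t> > blocking (n_threads);
  std::size_t c = 0;
  unsigned int thread = 0;
  blocking[0].first = 0;

  for (std::size_t i = 0; i < selected.size() && thread+1 < n_threads; ++i)
    {
      if (selected[i])
        ++c;
      if (c == per_thread)
        {
          blocking[thread].second  = i;
          blocking[thread+1].first = i;
          ++thread;
          c = 0;
        }
    }
  blocking[n_threads-1].second = selected.size();
  return blocking;
}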
* matrix structure. It must be compressed. The matrix structure is not
* compressed after this function finishes.
*/
- SparsityPattern (const SparsityPattern &original,
+ SparsityPattern (const SparsityPattern &original,
- const unsigned int max_per_row,
- const unsigned int extra_off_diagonals);
+ const size_type max_per_row,
+ const size_type extra_off_diagonals);
/**
* Destructor.
*/
void
reorder_Cuthill_McKee (const SparsityPattern &sparsity,
- std::vector<unsigned int> &new_indices,
- const std::vector<unsigned int> &starting_indices = std::vector<unsigned int>());
+ std::vector<size_type> &new_indices,
+ const std::vector<size_type> &starting_indices = std::vector<size_type>());
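A hypothetical caller for the updated signature, assuming (as in deal.II) that this declaration lives in namespace SparsityTools and that size_type is SparsityPattern::size_type:

#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>
#include <vector>

using namespace dealii;

void cuthill_mckee_sketch (const SparsityPattern &sparsity)
{
  // new_indices must be sized to the number of rows before the call;
  // afterwards, new_indices[i] holds the new number of row i.
  std::vector<SparsityPattern::size_type> new_indices (sparsity.n_rows());
  SparsityTools::reorder_Cuthill_McKee (sparsity, new_indices);
}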
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
/**
* Communicate rows in a compressed
* sparsity pattern over MPI.
* argument, the corresponding
* values in the second.
*/
- void set (const std::vector<unsigned int> &indices,
+ void set (const std::vector<size_type> &indices,
- const std::vector<TrilinosScalar> &values);
+ const std::vector<TrilinosScalar> &values);
/**
* This is a second collective
inline
void
- VectorBase::set (const std::vector<unsigned int> &indices,
+ VectorBase::set (const std::vector<size_type> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
// writing to this vector at all.
inline
void
- VectorBase::add (const std::vector<unsigned int> &indices,
+ VectorBase::add (const std::vector<size_type> &indices,
- const std::vector<TrilinosScalar> &values)
+ const std::vector<TrilinosScalar> &values)
{
// if we have ghost values, do not allow
// writing to this vector at all.
* indices.
*/
template <typename OtherNumber>
- void add (const std::vector<unsigned int> &indices,
+ void add (const std::vector<size_type> &indices,
- const std::vector<OtherNumber> &values);
+ const std::vector<OtherNumber> &values);
/**
* This is a second collective
* functions above.
*/
template <typename OtherNumber>
- void add (const unsigned int n_elements,
- const unsigned int *indices,
+ void add (const size_type n_elements,
+ const size_type *indices,
- const OtherNumber *values);
+ const OtherNumber *values);
/**
* Addition of @p s to all
template <typename OtherNumber>
inline
void
-Vector<Number>::add (const std::vector<unsigned int> &indices,
+Vector<Number>::add (const std::vector<size_type> &indices,
- const std::vector<OtherNumber> &values)
+ const std::vector<OtherNumber> &values)
{
Assert (indices.size() == values.size(),
ExcDimensionMismatch(indices.size(), values.size()));
template <typename OtherNumber>
inline
void
-Vector<Number>::add (const unsigned int n_indices,
- const unsigned int *indices,
+Vector<Number>::add (const size_type n_indices,
+ const size_type *indices,
- const OtherNumber *values)
+ const OtherNumber *values)
{
- for (unsigned int i=0; i<n_indices; ++i)
+ for (size_type i=0; i<n_indices; ++i)
{
Assert (indices[i] < vec_size, ExcIndexRange(indices[i],0,vec_size));
Assert (numbers::is_finite(values[i]),
* The DoF indices of the
* current cell
*/
- std::vector<unsigned int> indices;
+ std::vector<types::global_dof_index> indices;
/**
- * The DoF indices on the
- * current cell, organized by
- * local blocks
+ * The DoF indices on the current cell, organized by local blocks.
+ * The size of this vector is zero, unless local blocks are used.
*/
- std::vector<std::vector<unsigned int> > indices_by_block;
+ std::vector<std::vector<types::global_dof_index> > indices_by_block;
/**
* Constructor setting the
template <class DHCellIterator, class DHFaceIterator>
void reinit(const DHCellIterator &c,
const DHFaceIterator &f,
- const unsigned int n);
+ const unsigned int face_no);
/**
- * Set the current subface
- * and fill @p indices if the
- * #cell changed.
+ * Set the current subface and fill @p indices if the #cell
+ * changed.
*/
template <class DHCellIterator, class DHFaceIterator>
void reinit(const DHCellIterator &c,
const DHFaceIterator &f,
- const unsigned int n,
- const unsigned int s);
+ const unsigned int face_no,
+ const unsigned int subface_no);
/**
- * Switch to a new face of the
- * same cell. Does not change
- * @p indices and does not reset
- * data in LocalResults.
+ * Switch to a new face of the same cell. Does not change @p
+ * indices and does not reset data in LocalResults.
*/
template <class DHFaceIterator>
void set_face (const DHFaceIterator &f,
- const unsigned int n);
+ const unsigned int face_no);
+
/**
- * Switch to a new subface of the
- * same cell. Does not change
- * @p indices and does not reset
- * data in LocalResults.
+ * Switch to a new subface of the same cell. Does not change @p
+ * indices and does not reset data in LocalResults.
*/
template <class DHFaceIterator>
void set_subface (const DHFaceIterator &f,
template <class DHCellIterator>
void get_indices(const DHCellIterator &c);
- /// Fill index vector with level indices
- //void get_indices(const typename MGDoFHandler<dim, spacedim>::cell_iterator& c);
-
/// Auxiliary vector
- std::vector<unsigned int> indices_org;
+ std::vector<types::global_dof_index> indices_org;
/**
* An auxiliary local
*/
template <typename number>
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- SparseMatrix<number> &matrix,
+ SparseMatrix<number> &matrix,
Vector<number> &solution,
Vector<number> &right_hand_side,
const bool eliminate_columns = true);
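A minimal usage sketch for this serial variant, assuming the deal.II 8.x header layout; the only change for callers is that the map is now keyed on types::global_dof_index:

#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/matrix_tools.h>
#include <map>

using namespace dealii;

void boundary_values_sketch (SparseMatrix<double> &A,
                             Vector<double> &x,
                             Vector<double> &b)
{
  // Pin DoF 0 to zero; the index is purely illustrative.
  std::map<types::global_dof_index, double> boundary_values;
  boundary_values[0] = 0.;
  MatrixTools::apply_boundary_values (boundary_values, A, x, b);
}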
* step-18.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- PETScWrappers::SparseMatrix &matrix,
- PETScWrappers::Vector &solution,
- PETScWrappers::Vector &right_hand_side,
+ PETScWrappers::SparseMatrix &matrix,
+ PETScWrappers::Vector &solution,
+ PETScWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
/**
* matrices.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
- PETScWrappers::MPI::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::Vector &solution,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
/**
* to this function is the same.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- PETScWrappers::MPI::SparseMatrix &matrix,
+ PETScWrappers::MPI::SparseMatrix &matrix,
PETScWrappers::Vector &solution,
- PETScWrappers::MPI::Vector &right_hand_side,
+ PETScWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
/**
* across an MPI system.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::Vector &solution,
TrilinosWrappers::Vector &right_hand_side,
const bool eliminate_columns = true);
* working on block structures.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::BlockVector &solution,
TrilinosWrappers::BlockVector &right_hand_side,
const bool eliminate_columns = true);
* types.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- TrilinosWrappers::SparseMatrix &matrix,
+ TrilinosWrappers::SparseMatrix &matrix,
TrilinosWrappers::MPI::Vector &solution,
TrilinosWrappers::MPI::Vector &right_hand_side,
const bool eliminate_columns = true);
* on block structures.
*/
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- TrilinosWrappers::BlockSparseMatrix &matrix,
+ TrilinosWrappers::BlockSparseMatrix &matrix,
TrilinosWrappers::MPI::BlockVector &solution,
TrilinosWrappers::MPI::BlockVector &right_hand_side,
const bool eliminate_columns = true);
= (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
- #ifdef DEAL_II_USE_P4EST
+ #ifdef DEAL_II_WITH_P4EST
- std::vector<unsigned int> local_dof_count(n_buckets);
+ std::vector<types::global_dof_index> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
local_dof_count[c] = component_to_dof_map[c].size();
= (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&start->get_dof_handler().get_tria())))
{
- #ifdef DEAL_II_USE_P4EST
+ #ifdef DEAL_II_WITH_P4EST
- std::vector<unsigned int> local_dof_count(n_buckets);
+ std::vector<types::global_dof_index> local_dof_count(n_buckets);
for (unsigned int c=0; c<n_buckets; ++c)
local_dof_count[c] = block_to_dof_map[c].size();
template <class DH>
void
count_dofs_per_block (const DH &dof_handler,
- std::vector<unsigned int> &dofs_per_block,
+ std::vector<types::global_dof_index> &dofs_per_block,
- const std::vector<unsigned int> &target_block_)
+ const std::vector<unsigned int> &target_block_)
{
std::vector<unsigned int> target_block = target_block_;
template <class DH>
void
map_dofs_to_support_points(const hp::MappingCollection<DH::dimension, DH::space_dimension> &mapping,
- const DH &dof_handler,
- std::vector<Point<DH::space_dimension> > &support_points)
+ const DH &dof_handler,
+ std::vector<Point<DH::space_dimension> > &support_points)
{
// get the data in the form of the map as above
- std::map<unsigned int,Point<DH::space_dimension> > x_support_points;
+ std::map<types::global_dof_index,Point<DH::space_dimension> > x_support_points;
map_dofs_to_support_points(mapping, dof_handler, x_support_points);
// now convert from the map to the linear vector. make sure every
void
GridGenerator::moebius (
- Triangulation<3> &tria,
+ Triangulation<3> &tria,
- const unsigned int n_cells,
+ const size_type n_cells,
const unsigned int n_rotations,
const double R,
const double r)
--- /dev/null
+ for (S : REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+ {
+   template void ConstraintMatrix::condense<T<S> >(const T<S> &, T<S> &) const;
+   template void ConstraintMatrix::condense<T<S> >(T<S> &vec) const;
+   template void ConstraintMatrix::distribute_local_to_global<T<S> > (
+     const Vector<double>&, const std::vector<types::global_dof_index> &, T<S> &, const FullMatrix<double>&) const;
+   template void ConstraintMatrix::distribute<T<S> >(const T<S> &, T<S> &) const;
+   template void ConstraintMatrix::distribute<T<S> >(T<S> &) const;
+   template void ConstraintMatrix::set_zero<T<S> >(T<S> &) const;
+ }
+
+
+ for (S : REAL_SCALARS; T : DEAL_II_VEC_TEMPLATES)
+ {
+   template void ConstraintMatrix::condense<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
+   template void ConstraintMatrix::condense<parallel::distributed::T<S> >(parallel::distributed::T<S> &vec) const;
+   template void ConstraintMatrix::distribute_local_to_global<parallel::distributed::T<S> > (
+     const Vector<double>&, const std::vector<types::global_dof_index> &, parallel::distributed::T<S> &, const FullMatrix<double>&) const;
+   template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(const parallel::distributed::T<S> &, parallel::distributed::T<S> &) const;
+   template void ConstraintMatrix::distribute<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
+   template void ConstraintMatrix::set_zero<parallel::distributed::T<S> >(parallel::distributed::T<S> &) const;
+ }
+
+
+ for (V : EXTERNAL_SEQUENTIAL_VECTORS)
+ {
+   template void ConstraintMatrix::condense<V >(const V&, V&) const;
+   template void ConstraintMatrix::condense<V >(V&vec) const;
+   template void ConstraintMatrix::distribute_local_to_global<V > (
+     const Vector<double>&, const std::vector<types::global_dof_index> &, V&, const FullMatrix<double>&) const;
+   template void ConstraintMatrix::distribute<V >(const V&, V&) const;
+   template void ConstraintMatrix::distribute<V >(V&) const;
+   template void ConstraintMatrix::set_zero<V >(V&) const;
+ }
+
+
+ for (V : EXTERNAL_PARALLEL_VECTORS)
+ {
+   template void ConstraintMatrix::set_zero<V >(V&) const;
+ }
+
+
+ for (S : REAL_SCALARS)
+ {
+   template void ConstraintMatrix::condense<S>(const SparseMatrix<S>&, SparseMatrix<S> &) const;
+   template void ConstraintMatrix::condense<S>(SparseMatrix<S>&) const;
+   template void ConstraintMatrix::condense<S>(BlockSparseMatrix<S>&) const;
+ }
+
+
+ for (S1 : REAL_SCALARS; S2 : REAL_SCALARS)
+ {
+   template void ConstraintMatrix::condense<S1,Vector<S2> >(SparseMatrix<S1>&, Vector<S2>&) const;
+   template void ConstraintMatrix::condense<S1,BlockVector<S2> >(BlockSparseMatrix<S1>&, BlockVector<S2>&) const;
+   template void ConstraintMatrix::condense<S1,Vector<S2> >(
+     const SparseMatrix<S1>&, const Vector<S2>&, SparseMatrix<S1> &, Vector<S2>&) const;
+   template void ConstraintMatrix::condense<S1,BlockVector<S2> >(
+     const SparseMatrix<S1>&, const BlockVector<S2>&, SparseMatrix<S1> &, BlockVector<S2>&) const;
+ }
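For readers unfamiliar with these .inst.in files: the build system expands each for (...) block over the listed scalar and vector template names. With S = double and T = Vector, for instance, the first condense line above would expand to something like

template void ConstraintMatrix::condense<Vector<double> >(const Vector<double> &,
                                                          Vector<double> &) const;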
- Vector::Vector (const MPI_Comm &communicator,
+ Vector::Vector (const MPI_Comm &communicator,
- const VectorBase &v,
+ const VectorBase &v,
- const unsigned int local_size)
+ const size_type local_size)
:
communicator (communicator)
{
void
- VectorBase::set (const std::vector<unsigned int> &indices,
+ VectorBase::set (const std::vector<size_type> &indices,
- const std::vector<PetscScalar> &values)
+ const std::vector<PetscScalar> &values)
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
void
- VectorBase::add (const std::vector<unsigned int> &indices,
+ VectorBase::add (const std::vector<size_type> &indices,
- const std::vector<PetscScalar> &values)
+ const std::vector<PetscScalar> &values)
{
Assert (indices.size() == values.size(),
ExcMessage ("Function called with arguments of different sizes"));
ExcInternalError());
}
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
template <class CSP_t>
void distribute_sparsity_pattern(CSP_t &csp,
- const std::vector<unsigned int> &rows_per_cpu,
+ const std::vector<size_type> &rows_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange)
{
- SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_map,
- const unsigned int n_max_entries_per_row)
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map (input_map)),
matrix (new Epetra_FECrsMatrix(Copy, *column_space_map,
- SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
+ SparseMatrix::SparseMatrix (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
- const unsigned int n_max_entries_per_row)
+ const size_type n_max_entries_per_row)
:
column_space_map (new Epetra_Map (input_col_map)),
matrix (new Epetra_FECrsMatrix(Copy, input_row_map,
}
- SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
+ SparsityPattern::SparsityPattern (const Epetra_Map &input_map,
- const unsigned int n_entries_per_row)
+ const size_type n_entries_per_row)
{
reinit (input_map, input_map, n_entries_per_row);
}
- SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
- const Epetra_Map &input_col_map,
+ SparsityPattern::SparsityPattern (const Epetra_Map &input_row_map,
+ const Epetra_Map &input_col_map,
- const unsigned int n_entries_per_row)
+ const size_type n_entries_per_row)
{
reinit (input_row_map, input_col_map, n_entries_per_row);
}
const MPI_Comm &communicator)
{
last_action = Zero;
- Epetra_LocalMap map (static_cast<int>(partitioning.size()),
+ Epetra_LocalMap map (static_cast<TrilinosWrappers::types::int_type>(partitioning.size()),
0,
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
const bool fast)
{
if (vector->Map().NumGlobalElements() !=
- static_cast<int>(partitioning.size()))
+ static_cast<TrilinosWrappers::types::int_type>(partitioning.size()))
{
- Epetra_LocalMap map (static_cast<int>(partitioning.size()),
+ Epetra_LocalMap map (static_cast<TrilinosWrappers::types::int_type>(partitioning.size()),
0,
- #ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+ #ifdef DEAL_II_WITH_MPI
Epetra_MpiComm(communicator));
#else
Epetra_SerialComm());
template <int dim, int spacedim>
void
- create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
+ create_boundary_mass_matrix (const Mapping<dim, spacedim> &mapping,
const DoFHandler<dim,spacedim> &dof,
const Quadrature<dim-1> &q,
- SparseMatrix<double> &matrix,
- const typename FunctionMap<spacedim>::type &boundary_functions,
+ SparseMatrix<double> &matrix,
+ const typename FunctionMap<spacedim>::type &boundary_functions,
Vector<double> &rhs_vector,
- std::vector<unsigned int> &dof_to_boundary_mapping,
+ std::vector<types::global_dof_index> &dof_to_boundary_mapping,
const Function<spacedim> *const coefficient,
std::vector<unsigned int> component_mapping)
{
//TODO:[WB] I don't think that the optimized storage of diagonals is needed (GK)
template <typename number>
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- SparseMatrix<number> &matrix,
+ SparseMatrix<number> &matrix,
Vector<number> &solution,
Vector<number> &right_hand_side,
const bool eliminate_columns)
template <typename number>
void
- apply_boundary_values (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values (const std::map<types::global_dof_index,double> &boundary_values,
- BlockSparseMatrix<number> &matrix,
+ BlockSparseMatrix<number> &matrix,
BlockVector<number> &solution,
BlockVector<number> &right_hand_side,
const bool eliminate_columns)
{
template
void
- apply_boundary_values<double> (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values<double> (const std::map<types::global_dof_index,double> &boundary_values,
- SparseMatrix<double> &matrix,
+ SparseMatrix<double> &matrix,
Vector<double> &solution,
Vector<double> &right_hand_side,
const bool eliminate_columns);
template
void
- apply_boundary_values<float> (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values<float> (const std::map<types::global_dof_index,double> &boundary_values,
- SparseMatrix<float> &matrix,
+ SparseMatrix<float> &matrix,
Vector<float> &solution,
Vector<float> &right_hand_side,
const bool eliminate_columns);
template
void
- apply_boundary_values<double> (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values<double> (const std::map<types::global_dof_index,double> &boundary_values,
- BlockSparseMatrix<double> &matrix,
+ BlockSparseMatrix<double> &matrix,
BlockVector<double> &solution,
BlockVector<double> &right_hand_side,
const bool eliminate_columns);
template
void
- apply_boundary_values<float> (const std::map<unsigned int,double> &boundary_values,
+ apply_boundary_values<float> (const std::map<types::global_dof_index,double> &boundary_values,
- BlockSparseMatrix<float> &matrix,
+ BlockSparseMatrix<float> &matrix,
BlockVector<float> &solution,
BlockVector<float> &right_hand_side,
const bool eliminate_columns);