}
+ inline MPI_Datatype mpi_type_id (const unsigned long long int *)
+ {
+ return MPI_UNSIGNED_LONG_LONG;
+ }
+
+
inline MPI_Datatype mpi_type_id (const float *)
{
return MPI_FLOAT;
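// Not part of the patch: a minimal sketch of how the mpi_type_id overloads
// above are meant to be used. A template hands a pointer to its element type
// to mpi_type_id, and ordinary overload resolution picks the matching
// MPI_Datatype; no explicit switch on the type is needed. The function name
// sum_over_communicator is an illustrative assumption.
template <typename T>
T sum_over_communicator (const T local_value, const MPI_Comm comm)
{
  T global_value;
  MPI_Allreduce (const_cast<T *>(&local_value), &global_value, 1,
                 mpi_type_id (&local_value), MPI_SUM, comm);
  return global_value;
}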
* the individual processor on the ghost
* elements present (second entry).
*/
- const std::vector<std::pair<unsigned int,unsigned int> > &
+ const std::vector<std::pair<unsigned int, types::global_dof_index> > &
ghost_targets() const;
/**
* but tailored to be iterated over, and some
* indices may be duplicates.
*/
- const std::vector<std::pair<unsigned int, unsigned int> > &
+ const std::vector<std::pair<unsigned int, types::global_dof_index> > &
import_indices() const;
/**
* indices that are ghosts on other
* processors.
*/
- const std::vector<std::pair<unsigned int,unsigned int> > &
+ const std::vector<std::pair<unsigned int, types::global_dof_index> > &
import_targets() const;
/**
* ghost indices belong to and how many of
* those indices there are.
*/
- std::vector<std::pair<unsigned int,unsigned int> > ghost_targets_data;
+ std::vector<std::pair<unsigned int, types::global_dof_index> > ghost_targets_data;
/**
* The set of (local) indices that we are
* but tailored to be iterated over, and some
* indices may be duplicates.
*/
- std::vector<std::pair<unsigned int, unsigned int> > import_indices_data;
+ std::vector<std::pair<unsigned int, types::global_dof_index> > import_indices_data;
/**
* Caches the number of ghost indices. It
* The set of processors that send us their ghost
* data, together with the length of each data field.
*/
- std::vector<std::pair<unsigned int,unsigned int> > import_targets_data;
+ std::vector<std::pair<unsigned int,types::global_dof_index> > import_targets_data;
/**
* The ID of the current processor in the MPI
inline
- const std::vector<std::pair<unsigned int,unsigned int> > &
+ const std::vector<std::pair<unsigned int, types::global_dof_index> > &
Partitioner::ghost_targets() const
{
return ghost_targets_data;
inline
- const std::vector<std::pair<unsigned int, unsigned int> > &
+ const std::vector<std::pair<unsigned int, types::global_dof_index> > &
Partitioner::import_indices() const
{
return import_indices_data;
inline
- const std::vector<std::pair<unsigned int,unsigned int> > &
+ const std::vector<std::pair<unsigned int,types::global_dof_index> > &
Partitioner::import_targets() const
{
return import_targets_data;
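// Not part of the patch: a hedged sketch of how the (rank, length) pairs
// returned by ghost_targets() are typically consumed when updating ghost
// values -- one receive per owning processor, with .second entries arriving
// from rank .first. The partitioner object part, the buffer ghost_buffer,
// the Number type and the communicator are illustrative assumptions.
const std::vector<std::pair<unsigned int, types::global_dof_index> >
  &ghosts = part.ghost_targets();
std::vector<MPI_Request> requests (ghosts.size());
Number *read_position = ghost_buffer;
for (unsigned int i = 0; i < ghosts.size(); ++i)
  {
    MPI_Irecv (read_position, ghosts[i].second * sizeof(Number), MPI_BYTE,
               ghosts[i].first, /*tag*/ 0, communicator, &requests[i]);
    read_position += ghosts[i].second;
  }
MPI_Waitall (requests.size(), &requests[0], MPI_STATUSES_IGNORE);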
* in hierarchical ordering is the i-th deal.II cell starting
* from begin(0).
*/
- const std::vector<unsigned int> &
+ const std::vector<types::global_dof_index> &
get_p4est_tree_to_coarse_cell_permutation() const;
private:
* by p4est is located on geometrically
* close coarse grid cells.
*/
- std::vector<unsigned int> coarse_cell_to_p4est_tree_permutation;
- std::vector<unsigned int> p4est_tree_to_coarse_cell_permutation;
+ std::vector<types::global_dof_index> coarse_cell_to_p4est_tree_permutation;
+ std::vector<types::global_dof_index> p4est_tree_to_coarse_cell_permutation;
/**
* Return a pointer to the p4est
* these variables at a
* couple places anyway.
*/
- std::vector<unsigned int> coarse_cell_to_p4est_tree_permutation;
- std::vector<unsigned int> p4est_tree_to_coarse_cell_permutation;
+ std::vector<types::global_dof_index> coarse_cell_to_p4est_tree_permutation;
+ std::vector<types::global_dof_index> p4est_tree_to_coarse_cell_permutation;
/**
* dummy settings
+++ /dev/null
-// vector update of the form y += alpha*x with a scalar, x,y vectors
-void daxpy_ (const int* n, const double* alpha, const double* x,
- const int* incx, double* y, const int* incy);
-
-// General Matrix
-// Matrix vector product
-void dgemv_ (const char* trans, const int* m, const int* n,
- const double* alpha, const double* A, const int* lda,
- const double* x, const int* incx,
- const double* b, double* y, const int* incy);
-
-// Matrix matrix product
-void dgemm_ (const char* transa, const char* transb,
- const int* m, const int* n, const int* k,
- const double* alpha, const double* A, const int* lda,
- const double* B, const int* ldb,
- const double* beta, double* C, const int* ldc);
-
-// Compute LU factorization
-void dgetrf_ (const int* m, const int* n, double* A,
- const int* lda, int* ipiv, int* info);
-// Apply forward/backward substitution to LU factorization
-void dgetrs_ (const char* trans, const int* n, const int* nrhs,
- const double* A, const int* lda, const int* ipiv,
- double* b, const int* ldb, int* info);
-// Invert matrix from LU factorization
-void dgetri_ (const int* n, double* A, const int* lda,
- int* ipiv, double* inv_work, const int* lwork, int* info);
-
-// Compute QR factorization (Householder)
-void dgeqrf_ (const int* m, const int* n, double* A,
- const int* lda, double* tau, double* work,
- const int* lwork, int* info);
-// Compute vector Q^T B, where Q is the result from dgeqrf_
-void dormqr_ (const char* side, const char* trans, const int* m,
- const int* n, const int* k, const double* A, const int* lda,
- const double* tau, double* B, const int* ldb,
- double* work, const int* lwork, int* info);
-// Compute matrix Q from the result of dgeqrf_
-void dorgqr_ (const int* m, const int* n, const int* k, const double* A,
- const int* lda, const double* tau, double* work, const int* lwork,
- int* info);
-// Compute Rx = b
-void dtrtrs_ (const char* uplo, const char* trans,
- const char* diag, const int* n, const int* n_rhs,
- const double* A, const int* lda, double* B, const int* ldb,
- int* info);
-
-// Compute eigenvalues and vectors
-void dgeev_ (const char* jobvl, const char* jobvr,
- const int* n, double* A, const int* lda,
- double* lambda_re, double* lambda_im,
- double* vl, const int* ldvl,
- double* vr, const int* ldva,
- double* work, const int* lwork,
- int* info);
-// Compute eigenvalues and vectors (expert)
-void dgeevx_ (const char* balanc, const char* jobvl, const char* jobvr,
- const char* sense,
- const int* n, double* A, const int* lda,
- double* lambda_re, double* lambda_im,
- double* vl, const int* ldvl,
- double* vr, const int* ldvr,
- int* ilo, int* ihi,
- double* scale, double* abnrm,
- double* rconde, double* rcondv,
- double* work, const int* lwork,
- int* iwork, int* info);
-// Eigenvalues for a symmetric matrix
-void dsyev_ (const char *jobz, const char *uplo, const int *n,
- double *A, const int *lda, double *w,
- double *work, const int *lwork, int *info);
-// Same functionality as dsyev_ but with more options: E.g.
-// Compute only eigenvalues in a specific interval,
-// Compute only eigenvalues with a specific index,
-// Set tolerance for eigenvalue computation
-void dsyevx_ (const char* jobz, const char* range,
- const char* uplo, const int* n, double* A, const int* lda,
- const double* vl, const double* vu,
- const int* il, const int* iu, const double* abstol,
- int* m, double* w, double* z,
- const int* ldz, double* work, const int* lwork, int* iwork,
- int* ifail, int* info);
-// Generalized eigenvalues and eigenvectors of
-// 1: A*x = lambda*B*x; 2: A*B*x = lambda*x; 3: B*A*x = lambda*x
-// A and B are symmetric and B is definite
-void dsygv_ (const int* itype, const char* jobz, const char* uplo,
- const int* n, double* A, const int* lda, double* B,
- const int* ldb, double* w, double* work,
- const int* lwork, int* info);
-// Same functionality as dsygv_ but with more options: E.g.
-// Compute only eigenvalues in a specific interval,
-// Compute only eigenvalues with a specific index,
-// Set tolerance for eigenvalue computation
-void dsygvx_ (const int* itype, const char* jobz, const char* range,
- const char* uplo, const int* n, double* A, const int* lda,
- double* B, const int* ldb, const double* vl, const double* vu,
- const int* il, const int* iu, const double* abstol,
- int* m, double* w, double* z,
- const int* ldz, double* work, const int* lwork, int* iwork,
- int* ifail, int* info);
-
-// Compute singular value decomposition using divide and conquer
-void dgesdd_ (const char* jobz,
- const int* m, const int* n, double* A, const int* lda,
- double* s,
- double* u, const int* ldu,
- double* vt, const int* ldvt,
- double* work, const int* lwork,
- int* iwork,
- int* info);
-
-// Compute singular value decomposition
-void dgesvd_ (int* jobu, int* jobvt,
- const int* n, const int* m, double* A, const int* lda,
- double* s,
- double* u, const int* ldu,
- double* vt, const int* ldvt,
- double* work, const int* lwork,
- int* info);
-
-// Solve a least squares problem using SVD
-void dgelsd_ (const int* m, const int* n, const int* nrhs,
- const double* A, const int* lda,
- double* B, const int* ldb,
- double* s, const double* rcond,
- int* rank,
- double* work, const int* lwork, int* iwork,
- int* info);
-
-// Symmetric tridiagonal matrix
-void dstev_ (const char* jobz, const int* n,
- double* d, double* e, double* z,
- const int* ldz, double* work,
- int* info);
-
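// Illustrative only, not part of the patch: the declarations removed above
// bind the raw Fortran LAPACK interface, so every argument is passed by
// pointer and matrices are stored column-major. A minimal sketch of solving
// A x = b through dgetrf_ (LU factorization) and dgetrs_ (substitution);
// the matrix entries are made up.
const int n = 2, nrhs = 1, lda = 2, ldb = 2;
double A[4] = { 4., 2., 1., 3. };   // column-major: A = [[4,1],[2,3]]
double b[2] = { 1., 2. };
int    ipiv[2];
int    info;
dgetrf_ (&n, &n, A, &lda, ipiv, &info);                   // A <- P L U
dgetrs_ ("N", &n, &nrhs, A, &lda, ipiv, b, &ldb, &info);  // b <- A^{-1} b = (0.1, 0.6)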
Assert (ierr == MPI_SUCCESS, ExcInternalError());
Number *read_position = import_data;
- std::vector<std::pair<size_type, size_type> >::const_iterator
+ std::vector<std::pair<unsigned int, size_type> >::const_iterator
my_imports = part.import_indices().begin();
// If add_ghost_data is set, add the imported
{
Assert (import_data != 0, ExcInternalError());
Number *write_position = import_data;
- std::vector<std::pair<size_type, size_type> >::const_iterator
+ std::vector<std::pair<unsigned int, size_type> >::const_iterator
my_imports = part.import_indices().begin();
for ( ; my_imports!=part.import_indices().end(); ++my_imports)
for (size_type j=my_imports->first; j<my_imports->second; j++)
* initialize each block with
* <tt>n[i]</tt> zero elements.
*/
- BlockVector (const std::vector<unsigned int> &n);
+ BlockVector (const std::vector<size_type> &n);
/**
* Constructor. Set the number of
* different blocks.
*/
template <typename InputIterator>
- BlockVector (const std::vector<unsigned int> &n,
- const InputIterator first,
- const InputIterator end);
+ BlockVector (const std::vector<size_type> &n,
+ const InputIterator first,
+ const InputIterator end);
/**
* Destructor. Clears memory
inline
- BlockVector::BlockVector (const size_type n_blocks,
- const MPI_Comm &communicator,
- const size_type block_size,
- const size_type local_size)
+ BlockVector::BlockVector (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const size_type block_size,
+ const size_type local_size)
{
reinit (n_blocks, communicator, block_size, local_size);
}
inline
void
- BlockVector::reinit (const size_type n_blocks,
- const MPI_Comm &communicator,
- const size_type block_size,
- const size_type local_size,
+ BlockVector::reinit (const unsigned int n_blocks,
+ const MPI_Comm &communicator,
+ const size_type block_size,
+ const size_type local_size,
const bool fast)
{
reinit(std::vector<size_type>(n_blocks, block_size),
BlockVector::reinit (const std::vector<IndexSet> &parallel_partitioning,
const MPI_Comm &communicator)
{
- std::vector<unsigned int> sizes(parallel_partitioning.size());
+ std::vector<size_type> sizes(parallel_partitioning.size());
for (unsigned int i=0; i<parallel_partitioning.size(); ++i)
sizes[i] = parallel_partitioning[i].size();
const SparsityType &sparsity_pattern,
const std::vector<size_type> &local_rows_per_process,
const std::vector<size_type> &local_columns_per_process,
- const size_type this_process,
+ const unsigned int this_process,
const bool preset_nonzero_locations = true);
/**
unsigned int current_index = expanded_ghost_indices[0];
while (current_index >= first_index[current_proc+1])
current_proc++;
- std::vector<std::pair<unsigned int,unsigned int> > ghost_targets_temp
- (1, std::pair<unsigned int, unsigned int>(current_proc, 0));
+ std::vector<std::pair<unsigned int,types::global_dof_index> > ghost_targets_temp
+ (1, std::pair<unsigned int, types::global_dof_index>(current_proc, 0));
n_ghost_targets++;
for (unsigned int iterator=1; iterator<n_ghost_indices_data; ++iterator)
ghost_targets_temp[n_ghost_targets-1].second =
iterator - ghost_targets_temp[n_ghost_targets-1].second;
ghost_targets_temp.push_back(std::pair<unsigned int,
- unsigned int>(current_proc,iterator));
+ types::global_dof_index>(current_proc,iterator));
n_ghost_targets++;
}
}
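// Not part of the patch: a condensed, self-contained version of the
// compression performed above. Given sorted global ghost indices and the
// half-open ownership ranges [first_index[p], first_index[p+1]), it produces
// one (owner rank, number of ghost entries) pair per owning processor.
// All names other than the types are illustrative assumptions.
std::vector<std::pair<unsigned int, types::global_dof_index> > targets;
unsigned int proc = 0;
for (types::global_dof_index k = 0; k < n_ghost_indices; ++k)
  {
    while (ghost_index[k] >= first_index[proc + 1])
      ++proc;                                  // advance to the owning rank
    if (targets.empty() || targets.back().first != proc)
      targets.push_back (std::pair<unsigned int, types::global_dof_index> (proc, 0));
    ++targets.back().second;                   // one more ghost entry from proc
  }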
MPI_INT, communicator);
// allocate memory for import data
- std::vector<std::pair<unsigned int,unsigned int> > import_targets_temp;
+ std::vector<std::pair<unsigned int,types::global_dof_index> > import_targets_temp;
n_import_indices_data = 0;
for (unsigned int i=0; i<n_procs; i++)
if (receive_buffer[i] > 0)
{
n_import_indices_data += receive_buffer[i];
import_targets_temp.push_back(std::pair<unsigned int,
- unsigned int> (i, receive_buffer[i]));
+ types::global_dof_index> (i, receive_buffer[i]));
}
import_targets_data = import_targets_temp;
}
// form of ranges
{
unsigned int last_index = numbers::invalid_unsigned_int-1;
- std::vector<std::pair<unsigned int,unsigned int> > compressed_import_indices;
+ std::vector<std::pair<unsigned int,types::global_dof_index> > compressed_import_indices;
for (unsigned int i=0; i<n_import_indices_data; i++)
{
Assert (expanded_import_indices[i] >= local_range_data.first &&
else
{
compressed_import_indices.push_back
- (std::pair<unsigned int,unsigned int>(new_index,new_index+1));
+ (std::pair<unsigned int,types::global_dof_index>(new_index,new_index+1));
}
last_index = new_index;
}
// sanity check
#ifdef DEBUG
- const unsigned int n_local_dofs = local_range_data.second-local_range_data.first;
+ const types::global_dof_index n_local_dofs = local_range_data.second-local_range_data.first;
for (unsigned int i=0; i<import_indices_data.size(); ++i)
{
AssertIndexRange (import_indices_data[i].first, n_local_dofs);
const std::vector<std::list<
std::pair<typename Triangulation<dim,spacedim>::active_cell_iterator,unsigned int> > >
& vertex_to_cell,
- const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
+ const std::vector<types::global_dof_index> &coarse_cell_to_p4est_tree_permutation,
const bool set_vertex_info,
typename internal::p4est::types<dim>::connectivity *connectivity)
{
{
public:
RefineAndCoarsenList (const Triangulation<dim,spacedim> &triangulation,
- const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
+ const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
const types::subdomain_id my_subdomain,
typename internal::p4est::types<dim>::forest &forest);
template <int dim, int spacedim>
RefineAndCoarsenList<dim,spacedim>::
RefineAndCoarsenList (const Triangulation<dim,spacedim> &triangulation,
- const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
+ const std::vector<types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
const types::subdomain_id my_subdomain,
typename internal::p4est::types<dim>::forest &forest)
:
template <int dim, int spacedim>
- const std::vector<unsigned int> &
+ const std::vector<types::global_dof_index> &
Triangulation<dim, spacedim>::get_p4est_tree_to_coarse_cell_permutation() const
{
return p4est_tree_to_coarse_cell_permutation;
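// Not part of the patch: illustrative use of the permutation returned above.
// The i-th p4est tree corresponds to the coarse (level 0) deal.II cell whose
// index is p4est_tree_to_coarse_cell_permutation[i]; tree_index and tria are
// assumed to be in scope inside templated code.
const std::vector<types::global_dof_index> &perm =
  tria.get_p4est_tree_to_coarse_cell_permutation();
typename Triangulation<dim,spacedim>::cell_iterator
  coarse_cell (&tria, 0, perm[tree_index]);    // the deal.II cell for this tree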
template <int spacedim>
static
void
- renumber_mg_dofs (const std::vector<unsigned int> &new_numbers,
+ renumber_mg_dofs (const std::vector<dealii::types::global_dof_index> &new_numbers,
const IndexSet &indices,
DoFHandler<1,spacedim> &dof_handler,
const unsigned int level,
}
- for (std::vector<unsigned int>::iterator i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
+ for (std::vector<types::global_dof_index>::iterator i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
i!=dof_handler.mg_levels[level]->dof_object.dofs.end(); ++i)
{
if (*i != DoFHandler<1>::invalid_dof_index)
template <int spacedim>
static
void
- renumber_mg_dofs (const std::vector<unsigned int> &new_numbers,
+ renumber_mg_dofs (const std::vector<dealii::types::global_dof_index> &new_numbers,
const IndexSet &indices,
DoFHandler<2,spacedim> &dof_handler,
const unsigned int level,
const_cast<dealii::Triangulation<2,spacedim> &>(dof_handler.get_tria()).load_user_flags (user_flags);
}
- for (std::vector<unsigned int>::iterator i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
+ for (std::vector<types::global_dof_index>::iterator i=dof_handler.mg_levels[level]->dof_object.dofs.begin();
i!=dof_handler.mg_levels[level]->dof_object.dofs.end(); ++i)
{
if (*i != DoFHandler<2>::invalid_dof_index)
template <int spacedim>
static
void
- renumber_mg_dofs (const std::vector<unsigned int> &,
+ renumber_mg_dofs (const std::vector<dealii::types::global_dof_index> &,
const IndexSet &,
DoFHandler<3,spacedim> &,
const unsigned int ,
// this cell's dof_indices
// need to be sent to
// someone
- std::vector<unsigned int>
+ std::vector<dealii::types::global_dof_index>
local_dof_indices (dealii_cell->get_fe().dofs_per_cell);
dealii_cell->get_mg_dof_indices (local_dof_indices);
Assert(dealii_cell->level()==(int)level, ExcInternalError());
// update dof indices of cell
- std::vector<unsigned int>
+ std::vector<dealii::types::global_dof_index>
dof_indices (dealii_cell->get_fe().dofs_per_cell);
dealii_cell->get_mg_dof_indices(dof_indices);
communicate_dof_indices_on_marked_cells
(const DoFHandler<1,spacedim> &,
const std::map<unsigned int, std::set<dealii::types::subdomain_id> > &,
- const std::vector<unsigned int> &,
- const std::vector<unsigned int> &)
+ const std::vector<dealii::types::global_dof_index> &,
+ const std::vector<dealii::types::global_dof_index> &)
{
Assert (false, ExcNotImplemented());
}
communicate_dof_indices_on_marked_cells
(const DoFHandler<dim,spacedim> &dof_handler,
const std::map<unsigned int, std::set<dealii::types::subdomain_id> > &vertices_with_ghost_neighbors,
- const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
- const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation)
+ const std::vector<dealii::types::global_dof_index> &coarse_cell_to_p4est_tree_permutation,
+ const std::vector<dealii::types::global_dof_index> &p4est_tree_to_coarse_cell_permutation)
{
#ifndef DEAL_II_WITH_P4EST
(void)vertices_with_ghost_neighbors;
communicate_mg_dof_indices_on_marked_cells
(const DoFHandler<1,spacedim> &,
const std::map<unsigned int, std::set<dealii::types::subdomain_id> > &,
- const std::vector<unsigned int> &,
- const std::vector<unsigned int> &,
+ const std::vector<dealii::types::global_dof_index> &,
+ const std::vector<dealii::types::global_dof_index> &,
const unsigned int)
{
Assert (false, ExcNotImplemented());
communicate_mg_dof_indices_on_marked_cells
(const DoFHandler<dim,spacedim> &dof_handler,
const std::map<unsigned int, std::set<dealii::types::subdomain_id> > &vertices_with_ghost_neighbors,
- const std::vector<unsigned int> &coarse_cell_to_p4est_tree_permutation,
- const std::vector<unsigned int> &p4est_tree_to_coarse_cell_permutation,
+ const std::vector<dealii::types::global_dof_index> &coarse_cell_to_p4est_tree_permutation,
+ const std::vector<dealii::types::global_dof_index> &p4est_tree_to_coarse_cell_permutation,
const unsigned int level)
{
#ifndef DEAL_II_WITH_P4EST
std::set<dealii::types::subdomain_id> senders;
if (level < tr->n_levels())
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::level_cell_iterator
cell, endc = dof_handler.end(level);
//* 2. iterate over ghostcells and
//kill dofs that are not owned
//by us
- std::vector<unsigned int> renumbering(n_initial_local_dofs);
- for (unsigned int i=0; i<renumbering.size(); ++i)
+ std::vector<dealii::types::global_dof_index> renumbering(n_initial_local_dofs);
+ for (dealii::types::global_dof_index i=0; i<renumbering.size(); ++i)
renumbering[i] = i;
if (level<tr->n_levels())
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::level_cell_iterator
cell = dof_handler.begin(level),
// make indices consecutive
number_cache.n_locally_owned_dofs = 0;
- for (std::vector<unsigned int>::iterator it=renumbering.begin();
+ for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
it!=renumbering.end(); ++it)
if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
*it = number_cache.n_locally_owned_dofs++;
.n_locally_owned_dofs_per_processor.begin()
+ tr->locally_owned_subdomain(),
0);
- for (std::vector<unsigned int>::iterator it=renumbering.begin();
+ for (std::vector<dealii::types::global_dof_index>::iterator it=renumbering.begin();
it!=renumbering.end(); ++it)
if (*it != DoFHandler<dim,spacedim>::invalid_dof_index)
(*it) += shift;
//check that we are really done
if (level < tr->n_levels())
{
- std::vector<unsigned int> local_dof_indices;
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
typename DoFHandler<dim,spacedim>::level_cell_iterator
cell, endc = dof_handler.end(level);
= (dynamic_cast<const parallel::distributed::Triangulation<dim,spacedim>*>
(&dof_handler.get_tria())))
{
- std::vector<unsigned int> local_dof_count = dofs_per_component;
+ std::vector<types::global_dof_index> local_dof_count = dofs_per_component;
MPI_Allreduce ( &local_dof_count[0], &dofs_per_component[0], n_target_components,
MPI_UNSIGNED, MPI_SUM, tria->get_communicator());
= (dynamic_cast<const parallel::distributed::Triangulation<DH::dimension,DH::space_dimension>*>
(&dof_handler.get_tria())))
{
- std::vector<unsigned int> local_dof_count = dofs_per_block;
+ std::vector<types::global_dof_index> local_dof_count = dofs_per_block;
MPI_Allreduce ( &local_dof_count[0], &dofs_per_block[0], n_target_blocks,
MPI_UNSIGNED, MPI_SUM, tria->get_communicator());
}
#define SPARSITY_FUNCTIONS(SparsityType) \
template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & csp, \
- const std::vector<unsigned int> & rows_per_cpu,\
+ const std::vector<size_type> & rows_per_cpu,\
const MPI_Comm & mpi_comm,\
const IndexSet & myrange)
if (boundary_values.size() == 0)
return;
- const std::pair<unsigned int, unsigned int> local_range
+ const std::pair<types::global_dof_index, types::global_dof_index> local_range
= matrix.local_range();
Assert (local_range == right_hand_side.local_range(),
ExcInternalError());
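// Not part of the patch: a hedged sketch of the filtering step that follows.
// Only boundary rows inside this processor's locally owned row range
// [local_range.first, local_range.second) are collected for elimination;
// the variable names mirror the surrounding code.
std::vector<types::global_dof_index> constrained_rows;
for (std::map<types::global_dof_index, double>::const_iterator
       dof = boundary_values.begin(); dof != boundary_values.end(); ++dof)
  if (dof->first >= local_range.first && dof->first < local_range.second)
    constrained_rows.push_back (dof->first);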
// figure out which rows of the matrix we
// have to eliminate on this processor
- std::vector<unsigned int> constrained_rows;
+ std::vector<types::global_dof_index> constrained_rows;
for (std::map<types::global_dof_index,double>::const_iterator
dof = boundary_values.begin();
dof != boundary_values.end();
// into blocks for the boundary values.
// To this end, generate a vector of
// maps with the respective indices.
- std::vector<std::map<unsigned int,double> > block_boundary_values(n_blocks);
+ std::vector<std::map<dealii::types::global_dof_index,double> > block_boundary_values(n_blocks);
{
int offset = 0, block=0;
for (std::map<types::global_dof_index,double>::const_iterator
const std::pair<types::global_dof_index, types::global_dof_index> local_range
= matrix.block(block_m,0).local_range();
- std::vector<unsigned int> constrained_rows;
+ std::vector<types::global_dof_index> constrained_rows;
for (std::map<types::global_dof_index,double>::const_iterator
dof = block_boundary_values[block_m].begin();
dof != block_boundary_values[block_m].end();
// figure out which rows of the matrix we
// have to eliminate on this processor
- std::vector<unsigned int> constrained_rows;
+ std::vector<types::global_dof_index> constrained_rows;
for (std::map<types::global_dof_index,double>::const_iterator
dof = boundary_values.begin();
dof != boundary_values.end();
const std::pair<types::global_dof_index, types::global_dof_index> local_range
= matrix.block(block_m,0).local_range();
- std::vector<unsigned int> constrained_rows;
+ std::vector<types::global_dof_index> constrained_rows;
for (std::map<types::global_dof_index,double>::const_iterator
dof = block_boundary_values[block_m].begin();
dof != block_boundary_values[block_m].end();