*
* The actual type of the sparsity pattern may be SparsityPattern,
* DynamicSparsityPattern, BlockSparsityPattern,
- * BlockDynamicSparsityPattern, BlockDynamicSetSparsityPattern,
- * BlockDynamicSimpleSparsityPattern, or any other class that satisfies
+ * BlockDynamicSparsityPattern, or any other class that satisfies
* similar requirements. It is assumed that the size of the sparsity pattern
* matches the number of degrees of freedom and that enough unused nonzero
* entries are left to fill the sparsity pattern. The nonzero entries
* contents of each of the subobjects. Previous content of this object is
* lost.
*/
- void copy_from (const BlockDynamicSparsityPattern &csp);
+ void copy_from (const BlockDynamicSparsityPattern &dsp);
};
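// Editor's note: a minimal usage sketch (not part of the patch) showing how the
// renamed argument is typically used: fill a BlockDynamicSparsityPattern, then
// copy it into the static BlockSparsityPattern. The names `dof_handler`,
// `constraints`, and `dofs_per_block` are assumed to exist in the surrounding
// program.
BlockDynamicSparsityPattern dsp (dofs_per_block, dofs_per_block);
DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints,
                                 /*keep_constrained_dofs=*/false);
BlockSparsityPattern block_sparsity;
block_sparsity.copy_from (dsp);   // previous content of block_sparsity is lost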
const size_type chunk_size);
/**
- * Copy data from an object of type DynamicSparsityPattern,
- * DynamicSetSparsityPattern or DynamicSimpleSparsityPattern. Previous
+ * Copy data from an object of type DynamicSparsityPattern. Previous
* content of this object is lost, and the sparsity pattern is in compressed
* mode afterwards.
*/
template <typename SparsityType>
- void copy_from (const SparsityType &csp,
+ void copy_from (const SparsityType &dsp,
const size_type chunk_size);
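// Editor's note: hedged usage sketch (not part of the patch) for the overload
// declared above. `dof_handler` is assumed to come from the surrounding program;
// the chunk size of 4 is only an illustrative choice.
DynamicSparsityPattern dsp (dof_handler.n_dofs(), dof_handler.n_dofs());
DoFTools::make_sparsity_pattern (dof_handler, dsp);
ChunkSparsityPattern chunk_sparsity;
chunk_sparsity.copy_from (dsp, /*chunk_size=*/4);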
/**
// a lot of entries to each row, which is best handled by the
// DynamicSparsityPattern class.
{
- DynamicSparsityPattern csp (m(), B.n());
- for (size_type i = 0; i < csp.n_rows(); ++i)
+ DynamicSparsityPattern dsp (m(), B.n());
+ for (size_type i = 0; i < dsp.n_rows(); ++i)
{
const size_type *rows = &sp_A.colnums[sp_A.rowstart[i]];
const size_type *const end_rows =
if (sp_B.n_rows() == sp_B.n_cols())
{
++new_cols;
- csp.add(i, col);
+ dsp.add(i, col);
}
- csp.add_entries (i, new_cols, end_new_cols, true);
+ dsp.add_entries (i, new_cols, end_new_cols, true);
}
}
- sp_C.copy_from (csp);
+ sp_C.copy_from (dsp);
}
// reinit matrix C from that information
// a lot of entries to each row, which is best handled by the
// DynamicSparsityPattern class.
{
- DynamicSparsityPattern csp (n(), B.n());
+ DynamicSparsityPattern dsp (n(), B.n());
for (size_type i = 0; i < sp_A.n_rows(); ++i)
{
const size_type *rows =
&sp_A.colnums[sp_A.rowstart[i]];
const size_type *const end_rows =
&sp_A.colnums[sp_A.rowstart[i+1]];
- // cast away constness to conform with csp.add_entries interface
+ // cast away constness to conform with dsp.add_entries interface
size_type *new_cols = const_cast<size_type *>
(&sp_B.colnums[sp_B.rowstart[i]]);
size_type *end_new_cols = const_cast<size_type *>
// if B has a diagonal, need to add that manually. this way,
// we maintain sortedness.
if (sp_B.n_rows() == sp_B.n_cols())
- csp.add(row, i);
+ dsp.add(row, i);
- csp.add_entries (row, new_cols, end_new_cols, true);
+ dsp.add_entries (row, new_cols, end_new_cols, true);
}
}
- sp_C.copy_from (csp);
+ sp_C.copy_from (dsp);
}
// reinit matrix C from that information
* compressed mode afterwards.
*/
template <typename CompressedSparsityType>
- void copy_from (const CompressedSparsityType &csp);
+ void copy_from (const CompressedSparsityType &dsp);
/**
/**
* Communicate rows in a compressed sparsity pattern over MPI.
*
- * @param csp is the sparsity pattern that has been built locally and for
+ * @param dsp is the sparsity pattern that has been built locally and for
* which we need to exchange entries with other processors to make sure that
* each processor knows all the elements of the rows of a matrix it stores
* and that may eventually be written to. This sparsity pattern will be
* @param myrange indicates the range of elements stored locally and should
* be the one used in the constructor of the
* DynamicSparsityPattern. This should be the locally relevant
- * set. Only rows contained in myrange are checked in csp for transfer.
+ * set. Only rows contained in myrange are checked in dsp for transfer.
* This function needs to be used with PETScWrappers::MPI::SparseMatrix for
* it to work correctly in a parallel computation.
*/
- template <class CSP_t>
- void distribute_sparsity_pattern(CSP_t &csp,
- const std::vector<typename CSP_t::size_type> &rows_per_cpu,
+ template <class DSP_t>
+ void distribute_sparsity_pattern(DSP_t &dsp,
+ const std::vector<typename DSP_t::size_type> &rows_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange);
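// Editor's note: sketch of a typical call sequence (not part of the patch).
// `dof_handler`, `locally_relevant_dofs`, `constraints`, and `mpi_communicator`
// are assumed to be set up as in the usual parallel deal.II workflow.
DynamicSparsityPattern dsp (locally_relevant_dofs);
DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints, false);
SparsityTools::distribute_sparsity_pattern (dsp,
                                            dof_handler.n_locally_owned_dofs_per_processor(),
                                            mpi_communicator,
                                            locally_relevant_dofs);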
* DoFHandler::locally_owned_dofs_per_processor and @p myrange are
* locally_relevant_dofs.
*/
- template <class CSP_t>
- void distribute_sparsity_pattern(CSP_t &csp,
+ template <class DSP_t>
+ void distribute_sparsity_pattern(DSP_t &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange);
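// Editor's note: sketch of the IndexSet-based overload (assumed names, not part
// of the patch). Per the explicit instantiation further below it is typically
// called with a block pattern; `block_dsp` is assumed to have been built and
// filled beforehand.
SparsityTools::distribute_sparsity_pattern (block_dsp,
                                            dof_handler.locally_owned_dofs_per_processor(),
                                            mpi_communicator,
                                            locally_relevant_dofs);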
typedef ::dealii::SparsityPattern Sparsity;
typedef ::dealii::SparseMatrix<typename VECTOR::value_type> Matrix;
- template <class CSP, class DH>
- static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, const DH &)
+ template <class DSP, class DH>
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const DSP &dsp, const DH &)
{
- sparsity.copy_from (csp);
+ sparsity.copy_from (dsp);
matrix.reinit (sparsity);
}
};
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
- template <class CSP, class DH>
- static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, DH &dh)
+ template <class DSP, class DH>
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const DSP &dsp, DH &dh)
{
matrix.reinit(dh.locally_owned_mg_dofs(level+1),
dh.locally_owned_mg_dofs(level),
- csp, MPI_COMM_WORLD, true);
+ dsp, MPI_COMM_WORLD, true);
}
};
typedef ::dealii::TrilinosWrappers::SparsityPattern Sparsity;
typedef ::dealii::TrilinosWrappers::SparseMatrix Matrix;
- template <class CSP, class DH>
- static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const CSP &csp, DH &dh)
+ template <class DSP, class DH>
+ static void reinit(Matrix &matrix, Sparsity &sparsity, int level, const DSP &dsp, DH &dh)
{
}
};
for (unsigned int level=sparsity.min_level();
level<=sparsity.max_level(); ++level)
{
- DynamicSparsityPattern c_sparsity(dof_handler.n_dofs(level));
- MGTools::make_flux_sparsity_pattern(dof_handler, c_sparsity, level);
- sparsity[level].copy_from(c_sparsity);
+ DynamicSparsityPattern dsp(dof_handler.n_dofs(level));
+ MGTools::make_flux_sparsity_pattern(dof_handler, dsp, level);
+ sparsity[level].copy_from(dsp);
matrix[level].reinit(sparsity[level]);
matrix_in[level].reinit(sparsity[level]);
matrix_out[level].reinit(sparsity[level]);
dealii::Vector<double> vec (dof.n_dofs());
SparsityPattern sparsity;
{
- DynamicSparsityPattern csp (dof.n_dofs(), dof.n_dofs());
- DoFTools::make_sparsity_pattern (dof, csp, constraints,
+ DynamicSparsityPattern dsp (dof.n_dofs(), dof.n_dofs());
+ DoFTools::make_sparsity_pattern (dof, dsp, constraints,
!constraints_are_compatible);
- sparsity.copy_from (csp);
+ sparsity.copy_from (dsp);
}
SparseMatrix<double> mass_matrix (sparsity);
dealii::Vector<double> tmp (mass_matrix.n());
return;
// set up sparsity structure
- DynamicSparsityPattern c_sparsity(dof.n_boundary_dofs (boundary_functions),
- dof.n_boundary_dofs (boundary_functions));
+ DynamicSparsityPattern dsp(dof.n_boundary_dofs (boundary_functions),
+ dof.n_boundary_dofs (boundary_functions));
DoFTools::make_boundary_sparsity_pattern (dof,
boundary_functions,
dof_to_boundary_mapping,
- c_sparsity);
+ dsp);
SparsityPattern sparsity;
- sparsity.copy_from(c_sparsity);
+ sparsity.copy_from(dsp);
if (use_constraints)
DoFTools::make_hanging_node_constraints (dof_handler, constraints);
constraints.close ();
- DynamicSparsityPattern csp (dof_handler.n_dofs(),
- dof_handler.n_dofs());
- DoFTools::make_sparsity_pattern (dof_handler, csp, constraints);
+ DynamicSparsityPattern dsp (dof_handler.n_dofs(),
+ dof_handler.n_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints);
// submit the entries to the boost graph
- for (unsigned int row=0; row<csp.n_rows(); ++row)
- for (unsigned int col=0; col < csp.row_length(row); ++col)
- add_edge (row, csp.column_number (row, col), graph);
+ for (unsigned int row=0; row<dsp.n_rows(); ++row)
+ for (unsigned int col=0; col < dsp.row_length(row); ++col)
+ add_edge (row, dsp.column_number (row, col), graph);
}
boosttypes::graph_traits<boosttypes::Graph>::vertex_iterator ui, ui_end;
}
else
{
- DynamicSparsityPattern csp (dof_handler.n_dofs(),
- dof_handler.n_dofs(),
- dof_handler.locally_owned_dofs());
- DoFTools::make_sparsity_pattern (dof_handler, csp, constraints);
+ DynamicSparsityPattern dsp (dof_handler.n_dofs(),
+ dof_handler.n_dofs(),
+ dof_handler.locally_owned_dofs());
+ DoFTools::make_sparsity_pattern (dof_handler, dsp, constraints);
// If the index set is not complete, need to get indices in local
// index space.
if (dof_handler.locally_owned_dofs().n_elements() !=
dof_handler.locally_owned_dofs().size())
{
- // Create sparsity pattern from csp by transferring its indices to
+ // Create sparsity pattern from dsp by transferring its indices to
// processor-local index space and doing Cuthill-McKee there
std::vector<unsigned int> row_lengths(locally_owned.n_elements());
for (unsigned int i=0; i<locally_owned.n_elements(); ++i)
- row_lengths[i] = csp.row_length(locally_owned.nth_index_in_set(i));
+ row_lengths[i] = dsp.row_length(locally_owned.nth_index_in_set(i));
sparsity.reinit(locally_owned.n_elements(), locally_owned.n_elements(),
row_lengths);
std::vector<types::global_dof_index> row_entries;
const types::global_dof_index row = locally_owned.nth_index_in_set(i);
row_entries.resize(0);
for (DynamicSparsityPattern::row_iterator it =
- csp.row_begin(row); it != csp.row_end(row); ++it)
+ dsp.row_begin(row); it != dsp.row_end(row); ++it)
if (*it != row && locally_owned.is_element(*it))
row_entries.push_back(locally_owned.index_within_set(*it));
sparsity.add_entries(i, row_entries.begin(), row_entries.end(),
sparsity.compress();
}
else
- sparsity.copy_from(csp);
+ sparsity.copy_from(dsp);
}
// constraints are not needed anymore
}
else
{
- DynamicSparsityPattern csp (dof_handler.n_dofs(level),
- dof_handler.n_dofs(level));
- MGTools::make_sparsity_pattern (dof_handler, csp, level);
- sparsity.copy_from (csp);
+ DynamicSparsityPattern dsp (dof_handler.n_dofs(level),
+ dof_handler.n_dofs(level));
+ MGTools::make_sparsity_pattern (dof_handler, dsp, level);
+ sparsity.copy_from (dsp);
}
std::vector<types::global_dof_index> new_indices(sparsity.n_rows());
DoFHandler<dim> dof_handler(triangulation);
dof_handler.distribute_dofs(q1);
- DynamicSparsityPattern c_sparsity_pattern (dof_handler.n_dofs (),
- dof_handler.n_dofs ());
- DoFTools::make_sparsity_pattern (dof_handler, c_sparsity_pattern);
- c_sparsity_pattern.compress ();
+ DynamicSparsityPattern dsp (dof_handler.n_dofs (),
+ dof_handler.n_dofs ());
+ DoFTools::make_sparsity_pattern (dof_handler, dsp);
+ dsp.compress ();
SparsityPattern sparsity_pattern;
- sparsity_pattern.copy_from (c_sparsity_pattern);
+ sparsity_pattern.copy_from (dsp);
sparsity_pattern.compress ();
SparseMatrix<double> S(sparsity_pattern);
void
-BlockSparsityPattern::copy_from (const BlockDynamicSparsityPattern &csp)
+BlockSparsityPattern::copy_from (const BlockDynamicSparsityPattern &dsp)
{
// delete old content, set block
// sizes anew
- reinit (csp.n_block_rows(), csp.n_block_cols());
+ reinit (dsp.n_block_rows(), dsp.n_block_cols());
// copy over blocks
for (size_type i=0; i<n_block_rows(); ++i)
for (size_type j=0; j<n_block_cols(); ++j)
- block(i,j).copy_from (csp.block(i,j));
+ block(i,j).copy_from (dsp.block(i,j));
// and finally enquire their new
// sizes
template <typename SparsityType>
void
-ChunkSparsityPattern::copy_from (const SparsityType &csp,
+ChunkSparsityPattern::copy_from (const SparsityType &dsp,
const size_type chunk_size)
{
Assert (chunk_size > 0, ExcInvalidNumber (chunk_size));
this->chunk_size = chunk_size;
- rows = csp.n_rows();
- cols = csp.n_cols();
+ rows = dsp.n_rows();
+ cols = dsp.n_cols();
// simple case: just use the given sparsity pattern
if (chunk_size == 1)
{
- sparsity_pattern.copy_from (csp);
+ sparsity_pattern.copy_from (dsp);
return;
}
// create a temporary compressed sparsity pattern that collects all entries
// from the input sparsity pattern and then initialize the underlying small
// sparsity pattern
- const size_type m_chunks = (csp.n_rows()+chunk_size-1) / chunk_size,
- n_chunks = (csp.n_cols()+chunk_size-1) / chunk_size;
+ const size_type m_chunks = (dsp.n_rows()+chunk_size-1) / chunk_size,
+ n_chunks = (dsp.n_cols()+chunk_size-1) / chunk_size;
DynamicSparsityPattern temporary_sp(m_chunks, n_chunks);
- for (size_type row = 0; row<csp.n_rows(); ++row)
+ for (size_type row = 0; row<dsp.n_rows(); ++row)
{
- typename SparsityType::row_iterator col_num = csp.row_begin (row);
+ typename SparsityType::row_iterator col_num = dsp.row_begin (row);
const size_type reduced_row = row/chunk_size;
- for (; col_num != csp.row_end (row); ++col_num)
+ for (; col_num != dsp.row_end (row); ++col_num)
temporary_sp.add (reduced_row, *col_num/chunk_size);
}
-template <typename CSP>
+template <typename DSP>
void
-SparsityPattern::copy_from (const CSP &csp)
+SparsityPattern::copy_from (const DSP &dsp)
{
// first determine row lengths for each row. if the matrix is quadratic,
// then we might have to add an additional entry for the diagonal, if that
// is not yet present. as we have to call compress anyway later on, don't
// bother to check whether that diagonal entry is in a certain row or not
- const bool do_diag_optimize = (csp.n_rows() == csp.n_cols());
- std::vector<unsigned int> row_lengths (csp.n_rows());
- for (size_type i=0; i<csp.n_rows(); ++i)
+ const bool do_diag_optimize = (dsp.n_rows() == dsp.n_cols());
+ std::vector<unsigned int> row_lengths (dsp.n_rows());
+ for (size_type i=0; i<dsp.n_rows(); ++i)
{
- row_lengths[i] = csp.row_length(i);
- if (do_diag_optimize && !csp.exists(i,i))
+ row_lengths[i] = dsp.row_length(i);
+ if (do_diag_optimize && !dsp.exists(i,i))
++row_lengths[i];
}
- reinit (csp.n_rows(), csp.n_cols(), row_lengths);
+ reinit (dsp.n_rows(), dsp.n_cols(), row_lengths);
// now enter all the elements into the matrix, if there are any. note that
// if the matrix is quadratic, then we already have the diagonal element
// preallocated
if (n_rows() != 0 && n_cols() != 0)
- for (size_type row = 0; row<csp.n_rows(); ++row)
+ for (size_type row = 0; row<dsp.n_rows(); ++row)
{
size_type *cols = &colnums[rowstart[row]] + (do_diag_optimize ? 1 : 0);
- typename CSP::row_iterator col_num = csp.row_begin (row),
- end_row = csp.row_end (row);
+ typename DSP::row_iterator col_num = dsp.row_begin (row),
+ end_row = dsp.row_end (row);
for (; col_num != end_row; ++col_num)
{
}
// do not need to compress the sparsity pattern since we already have
- // allocated the right amount of data, and the CSP data is sorted, too.
+ // allocated the right amount of data, and the DSP data is sorted, too.
compressed = true;
}
}
#ifdef DEAL_II_WITH_MPI
- template <class CSP_t>
- void distribute_sparsity_pattern(CSP_t &csp,
- const std::vector<typename CSP_t::size_type> &rows_per_cpu,
+ template <class DSP_t>
+ void distribute_sparsity_pattern(DSP_t &dsp,
+ const std::vector<typename DSP_t::size_type> &rows_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
- std::vector<typename CSP_t::size_type> start_index(rows_per_cpu.size()+1);
+ std::vector<typename DSP_t::size_type> start_index(rows_per_cpu.size()+1);
start_index[0]=0;
- for (typename CSP_t::size_type i=0; i<rows_per_cpu.size(); ++i)
+ for (typename DSP_t::size_type i=0; i<rows_per_cpu.size(); ++i)
start_index[i+1]=start_index[i]+rows_per_cpu[i];
- typedef std::map<typename CSP_t::size_type, std::vector<typename CSP_t::size_type> > map_vec_t;
+ typedef std::map<typename DSP_t::size_type, std::vector<typename DSP_t::size_type> > map_vec_t;
map_vec_t send_data;
{
unsigned int dest_cpu=0;
- typename CSP_t::size_type n_local_rel_rows = myrange.n_elements();
- for (typename CSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+ typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
+ for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
{
- typename CSP_t::size_type row=myrange.nth_index_in_set(row_idx);
+ typename DSP_t::size_type row=myrange.nth_index_in_set(row_idx);
//calculate destination CPU
while (row>=start_index[dest_cpu+1])
continue;
}
- typename CSP_t::size_type rlen = csp.row_length(row);
+ typename DSP_t::size_type rlen = dsp.row_length(row);
//skip empty lines
if (!rlen)
continue;
//save entries
- std::vector<typename CSP_t::size_type> &dst = send_data[dest_cpu];
+ std::vector<typename DSP_t::size_type> &dst = send_data[dest_cpu];
dst.push_back(rlen); // number of entries
dst.push_back(row); // row index
- for (typename CSP_t::size_type c=0; c<rlen; ++c)
+ for (typename DSP_t::size_type c=0; c<rlen; ++c)
{
//columns
- typename CSP_t::size_type column = csp.column_number(row, c);
+ typename DSP_t::size_type column = dsp.column_number(row, c);
dst.push_back(column);
}
}
}
//TODO: In the following, we read individual bytes and then reinterpret them
-// as typename CSP_t::size_type objects. this is error prone. use properly typed reads that
+// as typename DSP_t::size_type objects. this is error prone. use properly typed reads that
// match the write above
{
//receive
- std::vector<typename CSP_t::size_type> recv_buf;
+ std::vector<typename DSP_t::size_type> recv_buf;
for (unsigned int index=0; index<num_receive; ++index)
{
MPI_Status status;
Assert (status.MPI_TAG==124, ExcInternalError());
MPI_Get_count(&status, MPI_BYTE, &len);
- Assert( len%sizeof(typename CSP_t::size_type)==0, ExcInternalError());
+ Assert( len%sizeof(typename DSP_t::size_type)==0, ExcInternalError());
- recv_buf.resize(len/sizeof(typename CSP_t::size_type));
+ recv_buf.resize(len/sizeof(typename DSP_t::size_type));
MPI_Recv(&recv_buf[0], len, MPI_BYTE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
- typename std::vector<typename CSP_t::size_type>::const_iterator ptr = recv_buf.begin();
- typename std::vector<typename CSP_t::size_type>::const_iterator end = recv_buf.end();
+ typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
+ typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
while (ptr+1<end)
{
- typename CSP_t::size_type num=*(ptr++);
- typename CSP_t::size_type row=*(ptr++);
+ typename DSP_t::size_type num=*(ptr++);
+ typename DSP_t::size_type row=*(ptr++);
for (unsigned int c=0; c<num; ++c)
{
- csp.add(row, *ptr);
+ dsp.add(row, *ptr);
ptr++;
}
}
}
- template <class CSP_t>
- void distribute_sparsity_pattern(CSP_t &csp,
+ template <class DSP_t>
+ void distribute_sparsity_pattern(DSP_t &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
- typedef std::map<typename CSP_t::size_type, std::vector<typename CSP_t::size_type> > map_vec_t;
+ typedef std::map<typename DSP_t::size_type, std::vector<typename DSP_t::size_type> > map_vec_t;
map_vec_t send_data;
{
unsigned int dest_cpu=0;
- typename CSP_t::size_type n_local_rel_rows = myrange.n_elements();
- for (typename CSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+ typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
+ for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
{
- typename CSP_t::size_type row=myrange.nth_index_in_set(row_idx);
+ typename DSP_t::size_type row=myrange.nth_index_in_set(row_idx);
// calculate destination CPU, note that we start the search
// at last destination cpu, because even if the owned ranges
if (dest_cpu==myid)
continue;
- typename CSP_t::size_type rlen = csp.row_length(row);
+ typename DSP_t::size_type rlen = dsp.row_length(row);
//skip empty lines
if (!rlen)
continue;
//save entries
- std::vector<typename CSP_t::size_type> &dst = send_data[dest_cpu];
+ std::vector<typename DSP_t::size_type> &dst = send_data[dest_cpu];
dst.push_back(rlen); // number of entries
dst.push_back(row); // row index
- for (typename CSP_t::size_type c=0; c<rlen; ++c)
+ for (typename DSP_t::size_type c=0; c<rlen; ++c)
{
//columns
- typename CSP_t::size_type column = csp.column_number(row, c);
+ typename DSP_t::size_type column = dsp.column_number(row, c);
dst.push_back(column);
}
}
}
//TODO: In the following, we read individual bytes and then reinterpret them
-// as typename CSP_t::size_type objects. this is error prone. use properly typed reads that
+// as typename DSP_t::size_type objects. this is error prone. use properly typed reads that
// match the write above
{
//receive
- std::vector<typename CSP_t::size_type> recv_buf;
+ std::vector<typename DSP_t::size_type> recv_buf;
for (unsigned int index=0; index<num_receive; ++index)
{
MPI_Status status;
Assert (status.MPI_TAG==124, ExcInternalError());
MPI_Get_count(&status, MPI_BYTE, &len);
- Assert( len%sizeof(typename CSP_t::size_type)==0, ExcInternalError());
+ Assert( len%sizeof(typename DSP_t::size_type)==0, ExcInternalError());
- recv_buf.resize(len/sizeof(typename CSP_t::size_type));
+ recv_buf.resize(len/sizeof(typename DSP_t::size_type));
MPI_Recv(&recv_buf[0], len, MPI_BYTE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
- typename std::vector<typename CSP_t::size_type>::const_iterator ptr = recv_buf.begin();
- typename std::vector<typename CSP_t::size_type>::const_iterator end = recv_buf.end();
+ typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
+ typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
while (ptr+1<end)
{
- typename CSP_t::size_type num=*(ptr++);
- typename CSP_t::size_type row=*(ptr++);
+ typename DSP_t::size_type num=*(ptr++);
+ typename DSP_t::size_type row=*(ptr++);
for (unsigned int c=0; c<num; ++c)
{
- csp.add(row, *ptr);
+ dsp.add(row, *ptr);
ptr++;
}
}
//explicit instantiations
#define SPARSITY_FUNCTIONS(SparsityType) \
- template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & csp, \
+ template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & dsp, \
const std::vector<SparsityType::size_type> & rows_per_cpu,\
const MPI_Comm & mpi_comm,\
const IndexSet & myrange)
template void SparsityTools::distribute_sparsity_pattern
<BlockDynamicSparsityPattern>
-(BlockDynamicSparsityPattern &csp,
+(BlockDynamicSparsityPattern &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
const MPI_Comm &mpi_comm,
const IndexSet &myrange);
// distinguish between compressed sparsity types that define row_begin()
// and SparsityPattern that uses begin() as iterator type
template <typename Sparsity>
- void copy_row (const Sparsity &csp,
+ void copy_row (const Sparsity &dsp,
const size_type row,
std::vector<TrilinosWrappers::types::int_type> &row_indices)
{
- typename Sparsity::row_iterator col_num = csp.row_begin (row);
- for (size_type col=0; col_num != csp.row_end (row); ++col_num, ++col)
+ typename Sparsity::row_iterator col_num = dsp.row_begin (row);
+ for (size_type col=0; col_num != dsp.row_end (row); ++col_num, ++col)
row_indices[col] = *col_num;
}
- void copy_row (const dealii::SparsityPattern &csp,
+ void copy_row (const dealii::SparsityPattern &dsp,
const size_type row,
std::vector<TrilinosWrappers::types::int_type> &row_indices)
{
- dealii::SparsityPattern::iterator col_num = csp.begin (row);
- for (size_type col=0; col_num != csp.end (row); ++col_num, ++col)
+ dealii::SparsityPattern::iterator col_num = dsp.begin (row);
+ for (size_type col=0; col_num != dsp.end (row); ++col_num, ++col)
row_indices[col] = col_num->column();
}
}
// distinguish between compressed sparsity types that define row_begin()
// and SparsityPattern that uses begin() as iterator type
template <typename Sparsity>
- void copy_row (const Sparsity &csp,
+ void copy_row (const Sparsity &dsp,
const size_type row,
std::vector<TrilinosWrappers::types::int_type> &row_indices)
{
- typename Sparsity::row_iterator col_num = csp.row_begin (row);
- for (size_type col=0; col_num != csp.row_end (row); ++col_num, ++col)
+ typename Sparsity::row_iterator col_num = dsp.row_begin (row);
+ for (size_type col=0; col_num != dsp.row_end (row); ++col_num, ++col)
row_indices[col] = *col_num;
}
- void copy_row (const dealii::SparsityPattern &csp,
+ void copy_row (const dealii::SparsityPattern &dsp,
const size_type row,
std::vector<TrilinosWrappers::types::int_type> &row_indices)
{
- dealii::SparsityPattern::iterator col_num = csp.begin (row);
- for (size_type col=0; col_num != csp.end (row); ++col_num, ++col)
+ dealii::SparsityPattern::iterator col_num = dsp.begin (row);
+ for (size_type col=0; col_num != dsp.end (row); ++col_num, ++col)
row_indices[col] = col_num->column();
}
}
// increment dofs_per_cell
// since a useless diagonal
// element will be stored
- DynamicSparsityPattern csp (sizes[level+1],
- sizes[level]);
+ DynamicSparsityPattern dsp (sizes[level+1],
+ sizes[level]);
std::vector<types::global_dof_index> entries (dofs_per_cell);
for (typename DoFHandler<dim,spacedim>::cell_iterator cell=mg_dof.begin(level);
cell != mg_dof.end(level); ++cell)
for (unsigned int j=0; j<dofs_per_cell; ++j)
if (prolongation(i,j) != 0)
entries.push_back (dof_indices_parent[j]);
- csp.add_entries (dof_indices_child[i],
+ dsp.add_entries (dof_indices_child[i],
entries.begin(), entries.end());
}
}
internal::MatrixSelector<VECTOR>::reinit(*prolongation_matrices[level],
*prolongation_sparsities[level],
level,
- csp,
+ dsp,
mg_dof);
- csp.reinit(0,0);
+ dsp.reinit(0,0);
FullMatrix<double> prolongation;