#include <deal.II/base/config.h>
#include <deal.II/base/exceptions.h>
-#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_pattern.h>
#include <vector>
#ifdef DEAL_II_WITH_MPI
/**
- * Communicate rows in a compressed sparsity pattern over MPI.
+ * Communicate rows in a dynamic sparsity pattern over MPI.
*
- * @param dsp is the sparsity pattern that has been built locally and for
- * which we need to exchange entries with other processors to make sure that
- * each processor knows all the elements of the rows of a matrix it stores
- * and that may eventually be written to. This sparsity pattern will be
- * changed as a result of this function: All entries in rows that belong to
- * a different processor are sent to them and added there.
+ * @param dsp is a dynamic sparsity pattern that has been built locally and
+ * for which we need to exchange entries with other processors to make sure
+ * that each processor knows all the elements of the rows of a matrix it
+ * stores and that may eventually be written to. This sparsity pattern will
+ * be changed as a result of this function: all entries in rows that belong
+ * to a different processor are sent to that processor and added there.
*
 * @param rows_per_cpu contains, for each processor, the number of rows it
 * owns; row ownership is therefore assumed to be contiguous.
 *
 * @param myrange indicates the rows that are stored locally, typically the
 * set of locally relevant degrees of freedom. This function needs to be used
 * with PETScWrappers::MPI::SparseMatrix for it to work correctly in a
 * parallel computation.
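 *
 * A minimal usage sketch (names such as dof_handler, mpi_comm, and
 * locally_relevant_dofs are placeholders for illustration, not part of this
 * interface):
 * @code
 * DynamicSparsityPattern dsp (dof_handler.n_dofs(), dof_handler.n_dofs(),
 *                             locally_relevant_dofs);
 * DoFTools::make_sparsity_pattern (dof_handler, dsp);
 * SparsityTools::distribute_sparsity_pattern
 *   (dsp,
 *    dof_handler.n_locally_owned_dofs_per_processor(),
 *    mpi_comm,
 *    locally_relevant_dofs);
 * @endcode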
*/
- template <class DSP_t>
- void distribute_sparsity_pattern(DSP_t &dsp,
- const std::vector<typename DSP_t::size_type> &rows_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange);
+ void distribute_sparsity_pattern
+ (DynamicSparsityPattern &dsp,
+ const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
+ const MPI_Comm &mpi_comm,
+ const IndexSet &myrange);
/**
- * similar to the function above, but includes support for
- * BlockDynamicSparsityPattern. @p owned_set_per_cpu is typically
- * DoFHandler::locally_owned_dofs_per_processor and @p myrange are
+ * Similar to the function above, but for a BlockDynamicSparsityPattern
+ * instead. @p owned_set_per_cpu is typically given by
+ * DoFHandler::locally_owned_dofs_per_processor() and @p myrange is
* locally_relevant_dofs.
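 *
 * A minimal usage sketch (block_dsp, dof_handler, mpi_comm, and
 * locally_relevant_dofs are again placeholder names):
 * @code
 * SparsityTools::distribute_sparsity_pattern
 *   (block_dsp,
 *    dof_handler.locally_owned_dofs_per_processor(),
 *    mpi_comm,
 *    locally_relevant_dofs);
 * @endcode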
*/
- template <class DSP_t>
- void distribute_sparsity_pattern(DSP_t &dsp,
- const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange);
+ void distribute_sparsity_pattern
+ (BlockDynamicSparsityPattern &dsp,
+ const std::vector<IndexSet> &owned_set_per_cpu,
+ const MPI_Comm &mpi_comm,
+ const IndexSet &myrange);
#endif
#ifdef DEAL_II_WITH_MPI
- template <class DSP_t>
- void distribute_sparsity_pattern(DSP_t &dsp,
- const std::vector<typename DSP_t::size_type> &rows_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange)
+ void distribute_sparsity_pattern
+ (DynamicSparsityPattern &dsp,
+ const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
+ const MPI_Comm &mpi_comm,
+ const IndexSet &myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
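//compute the first row owned by each process as prefix sums of rows_per_cpu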
- std::vector<typename DSP_t::size_type> start_index(rows_per_cpu.size()+1);
+ std::vector<DynamicSparsityPattern::size_type> start_index(rows_per_cpu.size()+1);
start_index[0]=0;
- for (typename DSP_t::size_type i=0; i<rows_per_cpu.size(); ++i)
+ for (DynamicSparsityPattern::size_type i=0; i<rows_per_cpu.size(); ++i)
start_index[i+1]=start_index[i]+rows_per_cpu[i];
- typedef std::map<typename DSP_t::size_type, std::vector<typename DSP_t::size_type> > map_vec_t;
+ typedef std::map<DynamicSparsityPattern::size_type,
+ std::vector<DynamicSparsityPattern::size_type> >
+ map_vec_t;
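//for each destination process, collect the packed data
//(row length, row index, column indices) that we will send to it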
map_vec_t send_data;
{
unsigned int dest_cpu=0;
- typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
- for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+ DynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
+ for (DynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
{
- typename DSP_t::size_type row=myrange.nth_index_in_set(row_idx);
+ DynamicSparsityPattern::size_type row=myrange.nth_index_in_set(row_idx);
//calculate destination CPU that owns this row
while (row>=start_index[dest_cpu+1])
++dest_cpu;
//skip rows that this process owns itself
if (dest_cpu==myid)
continue;
- typename DSP_t::size_type rlen = dsp.row_length(row);
+ DynamicSparsityPattern::size_type rlen = dsp.row_length(row);
//skip empty lines
if (!rlen)
continue;
//save entries
- std::vector<typename DSP_t::size_type> &dst = send_data[dest_cpu];
+ std::vector<DynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];
dst.push_back(rlen); // number of entries
dst.push_back(row); // row index
- for (typename DSP_t::size_type c=0; c<rlen; ++c)
+ for (DynamicSparsityPattern::size_type c=0; c<rlen; ++c)
{
//columns
- typename DSP_t::size_type column = dsp.column_number(row, c);
+ DynamicSparsityPattern::size_type column = dsp.column_number(row, c);
dst.push_back(column);
}
}
{
std::vector<unsigned int> send_to;
send_to.reserve(send_data.size());
- for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
+ for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
send_to.push_back(it->first);
//find out how many processes will send data to us
num_receive = Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
// send data
{
unsigned int idx=0;
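//post one non-blocking send of the packed data to each destination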
- for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
+ for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
MPI_Isend(&(it->second[0]),
it->second.size(),
DEAL_II_DOF_INDEX_MPI_TYPE,
{
//receive
- std::vector<typename DSP_t::size_type> recv_buf;
+ std::vector<DynamicSparsityPattern::size_type> recv_buf;
for (unsigned int index=0; index<num_receive; ++index)
{
MPI_Status status;
int len;
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
recv_buf.resize(len);
MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
- typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
- typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
+ std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
+ std::vector<DynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
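//each message is a sequence of records: number of entries, row index,
//followed by that many column indices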
while (ptr!=end)
{
- typename DSP_t::size_type num=*(ptr++);
+ DynamicSparsityPattern::size_type num=*(ptr++);
Assert(ptr!=end, ExcInternalError());
- typename DSP_t::size_type row=*(ptr++);
+ DynamicSparsityPattern::size_type row=*(ptr++);
for (unsigned int c=0; c<num; ++c)
{
Assert(ptr!=end, ExcInternalError());
}
- template <class DSP_t>
- void distribute_sparsity_pattern(DSP_t &dsp,
+ void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange)
+ const MPI_Comm &mpi_comm,
+ const IndexSet &myrange)
{
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
- typedef std::map<typename DSP_t::size_type, std::vector<typename DSP_t::size_type> > map_vec_t;
+ typedef std::map<BlockDynamicSparsityPattern::size_type,
+ std::vector<BlockDynamicSparsityPattern::size_type> >
+ map_vec_t;
map_vec_t send_data;
{
unsigned int dest_cpu=0;
- typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
- for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+ BlockDynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
+ for (BlockDynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
{
- typename DSP_t::size_type row=myrange.nth_index_in_set(row_idx);
+ BlockDynamicSparsityPattern::size_type row=myrange.nth_index_in_set(row_idx);
// calculate destination CPU; note that we start the search at the
// last destination cpu because the owned ranges are most likely sorted
while (!owned_set_per_cpu[dest_cpu].is_element(row))
dest_cpu = (dest_cpu+1) % owned_set_per_cpu.size();
//skip rows that this process owns itself
if (dest_cpu==myid)
continue;
- typename DSP_t::size_type rlen = dsp.row_length(row);
+ BlockDynamicSparsityPattern::size_type rlen = dsp.row_length(row);
//skip empty lines
if (!rlen)
continue;
//save entries
- std::vector<typename DSP_t::size_type> &dst = send_data[dest_cpu];
+ std::vector<BlockDynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];
dst.push_back(rlen); // number of entries
dst.push_back(row); // row index
- for (typename DSP_t::size_type c=0; c<rlen; ++c)
+ for (BlockDynamicSparsityPattern::size_type c=0; c<rlen; ++c)
{
//columns
- typename DSP_t::size_type column = dsp.column_number(row, c);
+ BlockDynamicSparsityPattern::size_type column = dsp.column_number(row, c);
dst.push_back(column);
}
}
{
std::vector<unsigned int> send_to;
send_to.reserve(send_data.size());
- for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
+ for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
send_to.push_back(it->first);
//find out how many processes will send data to us
num_receive = Utilities::MPI::compute_point_to_point_communication_pattern(mpi_comm, send_to).size();
// send data
{
unsigned int idx=0;
- for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
+ for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
MPI_Isend(&(it->second[0]),
it->second.size(),
DEAL_II_DOF_INDEX_MPI_TYPE,
{
//receive
- std::vector<typename DSP_t::size_type> recv_buf;
+ std::vector<BlockDynamicSparsityPattern::size_type> recv_buf;
for (unsigned int index=0; index<num_receive; ++index)
{
MPI_Status status;
int len;
MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, mpi_comm, &status);
MPI_Get_count(&status, DEAL_II_DOF_INDEX_MPI_TYPE, &len);
recv_buf.resize(len);
MPI_Recv(&recv_buf[0], len, DEAL_II_DOF_INDEX_MPI_TYPE, status.MPI_SOURCE,
status.MPI_TAG, mpi_comm, &status);
- typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
- typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
+ std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
+ std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
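//unpack records of (number of entries, row index, column indices)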
while (ptr!=end)
{
- typename DSP_t::size_type num=*(ptr++);
+ BlockDynamicSparsityPattern::size_type num=*(ptr++);
Assert(ptr!=end, ExcInternalError());
- typename DSP_t::size_type row=*(ptr++);
+ BlockDynamicSparsityPattern::size_type row=*(ptr++);
for (unsigned int c=0; c<num; ++c)
{
Assert(ptr!=end, ExcInternalError());
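//wait for all non-blocking sends to finish so the send buffers can safely go away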
if (requests.size())
MPI_Waitall(requests.size(), &requests[0], MPI_STATUSES_IGNORE);
}
-
-
#endif
}
-
-//explicit instantiations
-
-#define SPARSITY_FUNCTIONS(SparsityType) \
- template void SparsityTools::distribute_sparsity_pattern<SparsityType> (SparsityType & dsp, \
- const std::vector<SparsityType::size_type> & rows_per_cpu,\
- const MPI_Comm & mpi_comm,\
- const IndexSet & myrange)
-
-#ifdef DEAL_II_WITH_MPI
-SPARSITY_FUNCTIONS(DynamicSparsityPattern);
-
-template void SparsityTools::distribute_sparsity_pattern
-<BlockDynamicSparsityPattern>
-(BlockDynamicSparsityPattern &dsp,
- const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange);
-
-#endif
-
-#undef SPARSITY_FUNCTIONS
-
DEAL_II_NAMESPACE_CLOSE