From 1ecf30df7035617723d832f666938c0e64e7ddf5 Mon Sep 17 00:00:00 2001
From: David Wells
Date: Sun, 6 Dec 2015 18:58:35 -0500
Subject: [PATCH] De-template distribute_sparsity_pattern.

Formerly, when deal.II supported several dynamic sparsity patterns,
these functions had a template argument and were instantiated for each
distinct sparsity pattern type. Each function is currently only
instantiated once (for DynamicSparsityPattern and
BlockDynamicSparsityPattern, respectively), so the template is no
longer necessary.
---
Notes: [...] marks diff content elided below; a usage sketch of the two
de-templated functions follows the patch.

 doc/news/changes.h                   |   7 ++
 include/deal.II/lac/sparsity_tools.h |  43 ++++++-----
 source/lac/sparsity_tools.cc         | 110 +++++++++++----------------
 3 files changed, 73 insertions(+), 87 deletions(-)

diff --git a/doc/news/changes.h b/doc/news/changes.h
index c07f2f0250..6f91530328 100644
--- a/doc/news/changes.h
+++ b/doc/news/changes.h
@@ -469,6 +469,13 @@ inconvenience this causes.

 <ol>

+ <li> Improved: both versions of distribute_sparsity_pattern are now plain, not
+ template, functions. This is not a breaking change because each function was
+ instantiated for exactly one template argument.
+
+ (David Wells, 2015/12/06)
+ </li>
+
 <li> Improved: The method
 parallel::distributed::Triangulation::fill_vertices_with_ghost_neighbors()
 that is used for distributing DoFs on parallel triangulations previously

diff --git a/include/deal.II/lac/sparsity_tools.h b/include/deal.II/lac/sparsity_tools.h
index 7e69289a2f..9a0a1d1938 100644
--- a/include/deal.II/lac/sparsity_tools.h
+++ b/include/deal.II/lac/sparsity_tools.h
@@ -19,8 +19,9 @@

 #include <deal.II/base/config.h>
 #include <deal.II/base/exceptions.h>
-#include <deal.II/lac/sparsity_pattern.h>
+#include <deal.II/lac/block_dynamic_sparsity_pattern.h>
 #include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/lac/sparsity_pattern.h>

 #include <vector>
@@ -171,14 +172,14 @@

 #ifdef DEAL_II_WITH_MPI
   /**
-   * Communicate rows in a compressed sparsity pattern over MPI.
+   * Communicate rows in a dynamic sparsity pattern over MPI.
    *
-   * @param dsp is the sparsity pattern that has been built locally and for
-   * which we need to exchange entries with other processors to make sure that
-   * each processor knows all the elements of the rows of a matrix it stores
-   * and that may eventually be written to. This sparsity pattern will be
-   * changed as a result of this function: All entries in rows that belong to
-   * a different processor are sent to them and added there.
+   * @param dsp is a dynamic sparsity pattern that has been built locally and
+   * for which we need to exchange entries with other processors to make sure
+   * that each processor knows all the elements of the rows of a matrix it
+   * stores and that may eventually be written to. This sparsity pattern will
+   * be changed as a result of this function: All entries in rows that belong
+   * to a different processor are sent to them and added there.
    *
    * @param rows_per_cpu determines ownership of rows.
    *
@@ -192,23 +193,23 @@
    * PETScWrappers::MPI::SparseMatrix for it to work correctly in a parallel
    * computation.
    */
-  template <class DSP_t>
-  void distribute_sparsity_pattern(DSP_t &dsp,
-                                   const std::vector<typename DSP_t::size_type> &rows_per_cpu,
-                                   const MPI_Comm &mpi_comm,
-                                   const IndexSet &myrange);
+  void distribute_sparsity_pattern
+  (DynamicSparsityPattern &dsp,
+   const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
+   const MPI_Comm &mpi_comm,
+   const IndexSet &myrange);

   /**
-   * similar to the function above, but includes support for
-   * BlockDynamicSparsityPattern. @p owned_set_per_cpu is typically
-   * DoFHandler::locally_owned_dofs_per_processor and @p myrange are
+   * similar to the function above, but for BlockDynamicSparsityPattern
+   * instead. @p owned_set_per_cpu is typically
+   * DoFHandler::locally_owned_dofs_per_processor and @p myrange is
    * locally_relevant_dofs.
    */
-  template <class DSP_t>
-  void distribute_sparsity_pattern(DSP_t &dsp,
-                                   const std::vector<IndexSet> &owned_set_per_cpu,
-                                   const MPI_Comm &mpi_comm,
-                                   const IndexSet &myrange);
+  void distribute_sparsity_pattern
+  (BlockDynamicSparsityPattern &dsp,
+   const std::vector<IndexSet> &owned_set_per_cpu,
+   const MPI_Comm &mpi_comm,
+   const IndexSet &myrange);

 #endif

diff --git a/source/lac/sparsity_tools.cc b/source/lac/sparsity_tools.cc
index 952856975e..a3837ac34b 100644
--- a/source/lac/sparsity_tools.cc
+++ b/source/lac/sparsity_tools.cc
@@ -522,29 +522,31 @@

 #ifdef DEAL_II_WITH_MPI
-  template <class DSP_t>
-  void distribute_sparsity_pattern(DSP_t &dsp,
-                                   const std::vector<typename DSP_t::size_type> &rows_per_cpu,
-                                   const MPI_Comm &mpi_comm,
-                                   const IndexSet &myrange)
+  void distribute_sparsity_pattern
+  (DynamicSparsityPattern &dsp,
+   const std::vector<DynamicSparsityPattern::size_type> &rows_per_cpu,
+   const MPI_Comm &mpi_comm,
+   const IndexSet &myrange)
   {
     const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
-    std::vector<typename DSP_t::size_type> start_index(rows_per_cpu.size()+1);
+    std::vector<DynamicSparsityPattern::size_type> start_index(rows_per_cpu.size()+1);
     start_index[0]=0;
-    for (typename DSP_t::size_type i=0; i<rows_per_cpu.size(); ++i)
+    for (DynamicSparsityPattern::size_type i=0; i<rows_per_cpu.size(); ++i)
       start_index[i+1]=start_index[i]+rows_per_cpu[i];

-    typedef std::map<unsigned int, std::vector<typename DSP_t::size_type> > map_vec_t;
+    typedef std::map<unsigned int, std::vector<DynamicSparsityPattern::size_type> >
+    map_vec_t;

     map_vec_t send_data;

     {
       unsigned int dest_cpu=0;

-      typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
-      for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+      DynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
+      for (DynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
         {
-          typename DSP_t::size_type row=myrange.nth_index_in_set(row_idx);
+          DynamicSparsityPattern::size_type row=myrange.nth_index_in_set(row_idx);

           //calculate destination CPU
           while (row>=start_index[dest_cpu+1])
@@ -557,21 +559,21 @@
               continue;
             }

-          typename DSP_t::size_type rlen = dsp.row_length(row);
+          DynamicSparsityPattern::size_type rlen = dsp.row_length(row);

           //skip empty lines
           if (!rlen)
             continue;

           //save entries
-          std::vector<typename DSP_t::size_type> &dst = send_data[dest_cpu];
+          std::vector<DynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];

           dst.push_back(rlen); // number of entries
           dst.push_back(row); // row index
-          for (typename DSP_t::size_type c=0; c<rlen; ++c)
+          for (DynamicSparsityPattern::size_type c=0; c<rlen; ++c)
             {
               //columns
-              typename DSP_t::size_type column = dsp.column_number(row, c);
+              DynamicSparsityPattern::size_type column = dsp.column_number(row, c);
               dst.push_back(column);
             }
         }
[...]
       std::vector<unsigned int> send_to;
       send_to.reserve(send_data.size());
-      for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
+      for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
         send_to.push_back(it->first);

       num_receive =
@@ -596,7 +598,7 @@
     // send data
     {
       unsigned int idx=0;
-      for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
+      for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
         MPI_Isend(&(it->second[0]),
                   it->second.size(),
                   DEAL_II_DOF_INDEX_MPI_TYPE,
@@ -608,7 +610,7 @@

     {
       //receive
-      std::vector<typename DSP_t::size_type> recv_buf;
+      std::vector<DynamicSparsityPattern::size_type> recv_buf;
       for (unsigned int index=0; index<num_receive; ++index)
[...]
-          typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
-          typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
+          std::vector<DynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
+          std::vector<DynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
           while (ptr!=end)
             {
-              typename DSP_t::size_type num=*(ptr++);
+              DynamicSparsityPattern::size_type num=*(ptr++);
               Assert(ptr!=end, ExcInternalError());
-              typename DSP_t::size_type row=*(ptr++);
+              DynamicSparsityPattern::size_type row=*(ptr++);
               for (unsigned int c=0; c<num; ++c)
[...]
-  template <class DSP_t>
-  void distribute_sparsity_pattern(DSP_t &dsp,
+  void distribute_sparsity_pattern(BlockDynamicSparsityPattern &dsp,
                                    const std::vector<IndexSet> &owned_set_per_cpu,
-                                   const MPI_Comm &mpi_comm,
-                                   const IndexSet &myrange)
+                                   const MPI_Comm &mpi_comm,
+                                   const IndexSet &myrange)
   {
     const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);

-    typedef std::map<unsigned int, std::vector<typename DSP_t::size_type> > map_vec_t;
+    typedef std::map<unsigned int, std::vector<BlockDynamicSparsityPattern::size_type> >
+    map_vec_t;
     map_vec_t send_data;

     {
       unsigned int dest_cpu=0;

-      typename DSP_t::size_type n_local_rel_rows = myrange.n_elements();
-      for (typename DSP_t::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
+      BlockDynamicSparsityPattern::size_type n_local_rel_rows = myrange.n_elements();
+      for (BlockDynamicSparsityPattern::size_type row_idx=0; row_idx<n_local_rel_rows; ++row_idx)
[...]
-          std::vector<typename DSP_t::size_type> &dst =
-            send_data[dest_cpu];
+          std::vector<BlockDynamicSparsityPattern::size_type> &dst = send_data[dest_cpu];

           dst.push_back(rlen); // number of entries
           dst.push_back(row); // row index
-          for (typename DSP_t::size_type c=0; c<rlen; ++c)
+          for (BlockDynamicSparsityPattern::size_type c=0; c<rlen; ++c)
             {
               //columns
-              typename DSP_t::size_type column = dsp.column_number(row, c);
+              BlockDynamicSparsityPattern::size_type column = dsp.column_number(row, c);
               dst.push_back(column);
             }
         }
[...]
       std::vector<unsigned int> send_to;
       send_to.reserve(send_data.size());
-      for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
+      for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it)
         send_to.push_back(it->first);

       num_receive =
@@ -717,7 +720,7 @@
     // send data
     {
       unsigned int idx=0;
-      for (typename map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
+      for (map_vec_t::iterator it=send_data.begin(); it!=send_data.end(); ++it, ++idx)
         MPI_Isend(&(it->second[0]),
                   it->second.size(),
                   DEAL_II_DOF_INDEX_MPI_TYPE,
@@ -729,7 +732,7 @@

     {
       //receive
-      std::vector<typename DSP_t::size_type> recv_buf;
+      std::vector<BlockDynamicSparsityPattern::size_type> recv_buf;
       for (unsigned int index=0; index<num_receive; ++index)
[...]
-          typename std::vector<typename DSP_t::size_type>::const_iterator ptr = recv_buf.begin();
-          typename std::vector<typename DSP_t::size_type>::const_iterator end = recv_buf.end();
+          std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator ptr = recv_buf.begin();
+          std::vector<BlockDynamicSparsityPattern::size_type>::const_iterator end = recv_buf.end();
           while (ptr!=end)
             {
-              typename DSP_t::size_type num=*(ptr++);
+              BlockDynamicSparsityPattern::size_type num=*(ptr++);
               Assert(ptr!=end, ExcInternalError());
-              typename DSP_t::size_type row=*(ptr++);
+              BlockDynamicSparsityPattern::size_type row=*(ptr++);
               for (unsigned int c=0; c<num; ++c)
[...]
-#define SPARSITY_FUNCTIONS(SparsityType) \
-  template void SparsityTools::distribute_sparsity_pattern<SparsityType> \
-  (SparsityType & dsp, \
-   const std::vector<SparsityType::size_type> & rows_per_cpu,\
-   const MPI_Comm & mpi_comm,\
-   const IndexSet & myrange)
-
-#ifdef DEAL_II_WITH_MPI
-SPARSITY_FUNCTIONS(DynamicSparsityPattern);
-
-template void SparsityTools::distribute_sparsity_pattern
-<BlockDynamicSparsityPattern>
-(BlockDynamicSparsityPattern &dsp,
- const std::vector<IndexSet> &owned_set_per_cpu,
- const MPI_Comm &mpi_comm,
- const IndexSet &myrange);
-
-#endif
-
-#undef SPARSITY_FUNCTIONS
-
 DEAL_II_NAMESPACE_CLOSE
-- 
2.39.5
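
For reference, a sketch of how the first de-templated function is called
in a step-40-style parallel setup. The helper build_distributed_pattern
and its surrounding setup are illustrative assumptions, not part of this
patch; the deal.II calls themselves (DoFTools::make_sparsity_pattern,
DoFTools::extract_locally_relevant_dofs,
DoFHandler::n_locally_owned_dofs_per_processor, and the new plain
SparsityTools::distribute_sparsity_pattern) are the real API.

  #include <deal.II/base/index_set.h>
  #include <deal.II/dofs/dof_handler.h>
  #include <deal.II/dofs/dof_tools.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/sparsity_tools.h>

  using namespace dealii;

  // Hypothetical helper: build the locally known coupling pattern for an
  // already-distributed DoFHandler, then exchange rows owned by other
  // processors.
  template <int dim>
  void build_distributed_pattern(const DoFHandler<dim>  &dof_handler,
                                 const MPI_Comm         &mpi_communicator,
                                 DynamicSparsityPattern &dsp)
  {
    // Rows this process may write to: locally owned plus ghost rows.
    IndexSet locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

    // Build the locally known part of the pattern ...
    dsp.reinit(dof_handler.n_dofs(), dof_handler.n_dofs(), locally_relevant_dofs);
    DoFTools::make_sparsity_pattern(dof_handler, dsp);

    // ... then send every row that belongs to another processor to its
    // owner. After this patch the first argument must be a
    // DynamicSparsityPattern; previously any DSP_t was accepted.
    SparsityTools::distribute_sparsity_pattern
    (dsp,
     dof_handler.n_locally_owned_dofs_per_processor(), // rows_per_cpu
     mpi_communicator,
     locally_relevant_dofs);                           // myrange
  }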
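
The BlockDynamicSparsityPattern overload is called analogously, except
that ownership is described by one IndexSet per processor rather than by
row counts. Again a sketch: relevant_partitioning (one IndexSet per
block), dof_handler, mpi_communicator, and locally_relevant_dofs are
assumed to exist in the caller and are not defined by this patch.

  #include <deal.II/dofs/dof_tools.h>
  #include <deal.II/lac/block_dynamic_sparsity_pattern.h>
  #include <deal.II/lac/sparsity_tools.h>

  // Inside some setup function:
  BlockDynamicSparsityPattern bdsp(relevant_partitioning);
  DoFTools::make_sparsity_pattern(dof_handler, bdsp);

  // owned_set_per_cpu is a std::vector<IndexSet>; as the header
  // documentation notes, DoFHandler::locally_owned_dofs_per_processor()
  // is the typical choice.
  SparsityTools::distribute_sparsity_pattern
  (bdsp,
   dof_handler.locally_owned_dofs_per_processor(), // owned_set_per_cpu
   mpi_communicator,
   locally_relevant_dofs);                         // myrange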