https://gitweb.dealii.org/ - dealii-svn.git/commitdiff
Introduce collective add_entries() functions to sparsity patterns.
author     kronbichler <kronbichler@0785d39b-7218-0410-832d-ea1e28bc413d>
Wed, 4 Mar 2009 19:29:52 +0000 (19:29 +0000)
committer  kronbichler <kronbichler@0785d39b-7218-0410-832d-ea1e28bc413d>
Wed, 4 Mar 2009 19:29:52 +0000 (19:29 +0000)
git-svn-id: https://svn.dealii.org/trunk@18447 0785d39b-7218-0410-832d-ea1e28bc413d

deal.II/deal.II/include/dofs/dof_constraints.templates.h
deal.II/deal.II/source/dofs/dof_tools.cc
deal.II/lac/include/lac/block_matrix_base.h
deal.II/lac/include/lac/block_sparsity_pattern.h
deal.II/lac/include/lac/compressed_set_sparsity_pattern.h
deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h
deal.II/lac/include/lac/compressed_sparsity_pattern.h
deal.II/lac/include/lac/sparsity_pattern.h
deal.II/lac/include/lac/trilinos_sparsity_pattern.h
deal.II/lac/source/sparsity_pattern.cc
deal.II/lac/source/trilinos_sparsity_pattern.cc
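
The common theme of the changes below is to replace doubly nested loops of single sparsity_pattern.add(i,j) calls with one collective add_entries(row, begin, end) call per row, so the target row only has to be located once per batch of column indices. A minimal standalone sketch of that interface shape (a toy pattern class written for illustration only, not deal.II code; the class name, sizes and indices are made up):

#include <cassert>
#include <set>
#include <vector>

// Toy stand-in for a compressed sparsity pattern: one sorted set per row.
struct ToySparsityPattern
{
  std::vector<std::set<unsigned int> > rows;

  explicit ToySparsityPattern (const unsigned int n_rows) : rows (n_rows) {}

  // element-wise interface, as used before this commit
  void add (const unsigned int i, const unsigned int j)
  {
    rows[i].insert (j);
  }

  // collective interface introduced by this commit: one call per row
  template <typename ForwardIterator>
  void add_entries (const unsigned int row,
                    ForwardIterator    begin,
                    ForwardIterator    end)
  {
    rows[row].insert (begin, end);     // the row is looked up only once
  }
};

int main ()
{
  // indices of the degrees of freedom on one hypothetical cell
  const std::vector<unsigned int> local_dof_indices = {2, 5, 7, 11};

  ToySparsityPattern sparsity (16);

  // old pattern: a doubly nested loop of single add() calls;
  // new pattern: one add_entries() per local row
  for (unsigned int i = 0; i < local_dof_indices.size (); ++i)
    sparsity.add_entries (local_dof_indices[i],
                          local_dof_indices.begin (),
                          local_dof_indices.end ());

  assert (sparsity.rows[2].size () == 4);
}
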

deal.II/deal.II/include/dofs/dof_constraints.templates.h
index b54a167a98bf68975204678e41959685a40c8991..db04ee81362c30b1d6a5424ebdc6de6df78d636d 100644 (file)
@@ -908,10 +908,9 @@ distribute_local_to_global (const FullMatrix<double>        &local_matrix,
                                       // for the scratch array.
       n_max_entries_per_row += n_local_dofs;
       if (column_indices.size() < n_max_entries_per_row)
-        {
-         column_indices.resize(n_max_entries_per_row);
-         column_values.resize(n_max_entries_per_row);
-       }
+       column_indices.resize(n_max_entries_per_row);
+      if (column_values.size() < n_max_entries_per_row)
+       column_values.resize(n_max_entries_per_row);
 
                                        // now distribute entries row by row
       for (unsigned int i=0; i<n_local_dofs; ++i)
@@ -1070,13 +1069,13 @@ distribute_local_to_global (const FullMatrix<double>        &local_matrix,
 
                                   // Check whether we did remain within the
                                   // arrays when adding elements into the
-                                  // scratch arrays. Moreover, there should
-                                  // be at least one element in the scratch
-                                  // array (the element diagonal).
+                                  // scratch arrays.
          Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
 
                                   // Finally, write the scratch array into
-                                  // the sparse matrix.
+                                  // the sparse matrix. Of course, do it
+                                  // only in case there is anything to
+                                  // write...
          if (col_counter > 0)
            global_matrix.add(local_dof_indices[i], col_counter, 
                              &column_indices[0], &column_values[0], 
@@ -1117,35 +1116,32 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
              ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs));
     }
 
+                                  // A lock that allows only one thread at
+                                  // time to go on in this function.
+  Threads::ThreadMutex::ScopedLock lock(mutex);
   if (lines.size() == 0)
     {
-      bool add_these_indices;
-      for (unsigned int i=0; i<n_local_dofs; ++i)
-        for (unsigned int j=0; j<n_local_dofs; ++j)
+      if (column_indices.size() < local_dof_indices.size())
+       column_indices.resize(local_dof_indices.size());
+
+                                  // if the dof_mask is not active, we can
+                                  // just submit the local_dof_indices
+                                  // array for one row after the other.
+      if (dof_mask_is_active == false)
+       for (unsigned int i=0; i<n_local_dofs; ++i)
+         sparsity_pattern.add_entries (local_dof_indices[i],
+                                       local_dof_indices.begin(),
+                                       local_dof_indices.end());
+      else
+       for (unsigned int i=0; i<n_local_dofs; ++i)
          {
-                                       // There is one complication when 
-                                       // we call this function with
-                                       // dof_mask argument: When the
-                                       // mask is empty, we cannot
-                                       // access the respective
-                                       // position, and it would even
-                                       // be inefficient to create
-                                       // such a mask. Hence, we need
-                                       // to first check whether
-                                       // there is some information
-                                       // in the mask at all.
-           if (dof_mask_is_active)
-             {
-               if (dof_mask[i][j] == true)
-                 add_these_indices = true;
-               else
-                 add_these_indices = false;
-             }
-           else
-             add_these_indices = true;
-           if (add_these_indices == true)
-             sparsity_pattern.add(local_dof_indices[i],
-                                  local_dof_indices[j]);
+           unsigned int col_counter = 0;
+           for (unsigned int j=0; j<n_local_dofs; ++j)
+             if (dof_mask[i][j] == true)
+               column_indices[col_counter++] = local_dof_indices[j];
+           sparsity_pattern.add_entries (local_dof_indices[i],
+                                         column_indices.begin(),
+                                         column_indices.begin()+col_counter);
          }
     }
   else
@@ -1158,6 +1154,7 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                                       // constraints, as above)
       Assert (sorted == true, ExcMatrixNotClosed());
 
+      unsigned int n_max_entries_per_row = 0;
       std::vector<const ConstraintLine *>
         constraint_lines (n_local_dofs,
                           static_cast<const ConstraintLine *>(0));
@@ -1173,9 +1170,15 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
 
           if ((position != lines.end()) &&
               (position->line == local_dof_indices[i]))
-            constraint_lines[i] = &*position;
+           {
+             constraint_lines[i] = &*position;
+             n_max_entries_per_row += position->entries.size();
+           }
         }
 
+      n_max_entries_per_row += n_local_dofs;
+      if (column_indices.size() < n_max_entries_per_row)
+       column_indices.resize(n_max_entries_per_row);
 
       bool add_these_indices;
       for (unsigned int i=0; i<n_local_dofs; ++i)
@@ -1183,6 +1186,8 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
           const ConstraintLine *position_i = constraint_lines[i];
           const bool is_constrained_i = (position_i != 0);
 
+         unsigned int col_counter = 0;
+
           for (unsigned int j=0; j<n_local_dofs; ++j)
            {
                                        // Again, first check whether
@@ -1206,21 +1211,22 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                const bool is_constrained_j = (position_j != 0);
 
                                               // if so requested, add the
-                                              // entry unconditionally, even
-                                              // if it is going to be
-                                              // constrained away
+                                              // entry unconditionally,
+                                              // even if it is going to be
+                                              // constrained away. note
+                                              // that we can use the array
+                                              // of column indices only for
+                                              // those elements that are
+                                              // going to get into the same
+                                              // row.
                if (keep_constrained_entries == true)
-                 sparsity_pattern.add (local_dof_indices[i],
-                                       local_dof_indices[j]);
+                 column_indices[col_counter++] = local_dof_indices[j];
 
 
                if ((is_constrained_i == false) &&
                    (is_constrained_j == false) &&
                    (keep_constrained_entries == false))
-                 {
-                   sparsity_pattern.add (local_dof_indices[i],
-                                         local_dof_indices[j]);
-                 }
+                 column_indices[col_counter++] = local_dof_indices[j];
                else if ((is_constrained_i == true) &&
                         (is_constrained_j == false))
                  {
@@ -1232,8 +1238,7 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                         (is_constrained_j == true))
                  {
                    for (unsigned int q=0; q<position_j->entries.size(); ++q)
-                     sparsity_pattern.add (local_dof_indices[i],
-                                           position_j->entries[q].first);
+                     column_indices[col_counter++] = position_j->entries[q].first;
                  }
                else if ((is_constrained_i == true) &&
                         (is_constrained_j == true))
@@ -1243,9 +1248,8 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                        sparsity_pattern.add (position_i->entries[p].first,
                                              position_j->entries[q].first);
 
-                   if (i == j)
-                     sparsity_pattern.add (local_dof_indices[i],
-                                           local_dof_indices[i]);
+                   if (i == j && keep_constrained_entries == false)
+                     column_indices[col_counter++] = local_dof_indices[i];
                  }
                else
                                                 // the only case that can
@@ -1257,6 +1261,20 @@ add_entries_local_to_global (const std::vector<unsigned int> &local_dof_indices,
                          ExcInternalError());
              }
            }
+
+                                  // Check whether we did remain within the
+                                  // arrays when adding elements into the
+                                  // scratch arrays.
+         Assert (col_counter <= n_max_entries_per_row, ExcInternalError());
+
+                                  // Finally, write the scratch array into
+                                  // the sparsity pattern. Of course, do it
+                                  // only in case there is anything to
+                                  // write...
+         if (col_counter > 0)
+           sparsity_pattern.add_entries (local_dof_indices[i],
+                                         column_indices.begin(),
+                                         column_indices.begin()+col_counter);
         }
     }
 }
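
The rewritten add_entries_local_to_global above first gathers the column indices belonging to one row into the column_indices scratch array, counting with col_counter, and only then hands the whole batch to the pattern in a single add_entries call. A self-contained sketch of this gather-then-flush idiom under made-up data (the mask values, sizes and indices are illustrative; the commented-out call mirrors the interface from the diff):

#include <cassert>
#include <vector>

int main ()
{
  // hypothetical local dof indices of one cell and a made-up coupling mask;
  // dof_mask[i][j] == true means entry (i,j) is wanted
  const std::vector<unsigned int> local_dof_indices = {3, 4, 8};
  const bool dof_mask[3][3] = {{true, false, true},
                               {false, true, true},
                               {true,  true, true}};

  // scratch array, sized once outside the row loop
  std::vector<unsigned int> column_indices (local_dof_indices.size ());

  for (unsigned int i = 0; i < local_dof_indices.size (); ++i)
    {
      // gather the admitted columns of row i into the scratch array ...
      unsigned int col_counter = 0;
      for (unsigned int j = 0; j < local_dof_indices.size (); ++j)
        if (dof_mask[i][j])
          column_indices[col_counter++] = local_dof_indices[j];

      assert (col_counter <= column_indices.size ());

      // ... here a single call per row would follow, e.g.
      // sparsity_pattern.add_entries (local_dof_indices[i],
      //                               column_indices.begin (),
      //                               column_indices.begin () + col_counter);
    }
}
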
deal.II/deal.II/source/dofs/dof_tools.cc
index 023e7dd28807109a4b8611ee1d9230fc5bdaecaf..077b493fc4af2e445d97ad972b04f51f23d9018f 100644 (file)
@@ -77,13 +77,18 @@ DoFTools::make_sparsity_pattern (const DH               &dof,
                                   // only have to do the work if the
                                   // current cell is owned by the calling
                                   // processor. Otherwise, just continue.
+                                  // 
+                                  // TODO: here, we should actually check
+                                  // the communicator of the sparsity
+                                  // pattern, not MPI_COMM_WORLD as the
+                                  // Utilities function does!
   for (; cell!=endc; ++cell) 
 #ifdef DEAL_II_USE_TRILINOS
     if ((types_are_equal<SparsityPattern,TrilinosWrappers::SparsityPattern>::value
         ||
         types_are_equal<SparsityPattern,TrilinosWrappers::BlockSparsityPattern>::value)
        &&
-       cell->subdomain_id() != 
+       cell->subdomain_id() !=
        Utilities::Trilinos::get_this_mpi_process(Utilities::Trilinos::comm_world()))
       continue;
     else
@@ -265,9 +270,9 @@ DoFTools::make_sparsity_pattern (
           cell_row->get_dof_indices (local_dof_indices_row);
           cell_col->get_dof_indices (local_dof_indices_col);
           for (unsigned int i=0; i<dofs_per_cell_row; ++i)
-            for (unsigned int j=0; j<dofs_per_cell_col; ++j)
-              sparsity.add (local_dof_indices_row[i],
-                            local_dof_indices_col[j]);
+           sparsity.add_entries (local_dof_indices_row[i],
+                                 local_dof_indices_col.begin(),
+                                 local_dof_indices_col.end());
         }
       else if (cell_row->has_children())
         {
@@ -288,9 +293,9 @@ DoFTools::make_sparsity_pattern (
               cell_row_child->get_dof_indices (local_dof_indices_row);
               cell_col->get_dof_indices (local_dof_indices_col);
               for (unsigned int i=0; i<dofs_per_cell_row; ++i)
-                for (unsigned int j=0; j<dofs_per_cell_col; ++j)
-                  sparsity.add (local_dof_indices_row[i],
-                                local_dof_indices_col[j]);
+               sparsity.add_entries (local_dof_indices_row[i],
+                                     local_dof_indices_col.begin(),
+                                     local_dof_indices_col.end());
             }
         }
       else
@@ -312,9 +317,9 @@ DoFTools::make_sparsity_pattern (
               cell_row->get_dof_indices (local_dof_indices_row);
               cell_col_child->get_dof_indices (local_dof_indices_col);
               for (unsigned int i=0; i<dofs_per_cell_row; ++i)
-                for (unsigned int j=0; j<dofs_per_cell_col; ++j)
-                  sparsity.add (local_dof_indices_row[i],
-                                local_dof_indices_col[j]);
+               sparsity.add_entries (local_dof_indices_row[i],
+                                     local_dof_indices_col.begin(),
+                                     local_dof_indices_col.end());
             }
         }
     }
@@ -361,9 +366,9 @@ DoFTools::make_boundary_sparsity_pattern (
          = dof_to_boundary_mapping[cell->vertex_dof_index(direction,i)];
 
       for (unsigned int i=0; i<dofs_per_vertex; ++i)
-       for (unsigned int j=0; j<dofs_per_vertex; ++j)
-         sparsity.add (boundary_dof_boundary_indices[i],
-                       boundary_dof_boundary_indices[j]);
+       sparsity.add_entries (boundary_dof_boundary_indices[i],
+                             boundary_dof_boundary_indices.begin(),
+                             boundary_dof_boundary_indices.end());
     };
 }
 
@@ -582,13 +587,14 @@ DoFTools::make_flux_sparsity_pattern (
                      sub_neighbor->get_dof_indices (dofs_on_other_cell);
 
                       for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
-                        for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
-                          {
-                            sparsity.add (dofs_on_this_cell[i],
-                                          dofs_on_other_cell[j]);
-                            sparsity.add (dofs_on_other_cell[j],
-                                          dofs_on_this_cell[i]);
-                          }
+                       sparsity.add_entries (dofs_on_this_cell[i],
+                                             dofs_on_other_cell.begin(),
+                                             dofs_on_other_cell.end());
+                     for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
+                       sparsity.add_entries (dofs_on_other_cell[j],
+                                             dofs_on_this_cell.begin(),
+                                             dofs_on_this_cell.end());
+
                      sub_neighbor->face(neighbor_face)->set_user_flag ();
                    }
                }
@@ -605,14 +611,15 @@ DoFTools::make_flux_sparsity_pattern (
                   dofs_on_other_cell.resize (n_dofs_on_neighbor);
 
                   neighbor->get_dof_indices (dofs_on_other_cell);
+
                  for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
-                    for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
-                      {
-                        sparsity.add (dofs_on_this_cell[i],
-                                      dofs_on_other_cell[j]);
-                        sparsity.add (dofs_on_other_cell[j],
-                                      dofs_on_this_cell[i]);
-                      }
+                   sparsity.add_entries (dofs_on_this_cell[i],
+                                         dofs_on_other_cell.begin(),
+                                         dofs_on_other_cell.end());
+                 for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
+                   sparsity.add_entries (dofs_on_other_cell[j],
+                                         dofs_on_this_cell.begin(),
+                                         dofs_on_this_cell.end());
                  neighbor->face(neighbor_face)->set_user_flag (); 
                }
            } 
@@ -650,8 +657,9 @@ DoFTools::make_flux_sparsity_pattern (
       local_dof_indices.resize (n_dofs_on_this_cell);
       cell->get_dof_indices (local_dof_indices);
       for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
-       for (unsigned int j=0; j<n_dofs_on_this_cell; ++j)
-         sparsity.add (local_dof_indices[i], local_dof_indices[j]);
+       sparsity.add_entries (local_dof_indices[i],
+                             local_dof_indices.begin(),
+                             local_dof_indices.end());
 
                                       // then do the same for the up
                                       // to 2 neighbors
@@ -671,8 +679,9 @@ DoFTools::make_flux_sparsity_pattern (
 
                                             // compute couplings
            for (unsigned int i=0; i<n_dofs_on_this_cell; ++i)
-             for (unsigned int j=0; j<n_dofs_on_neighbor; ++j)
-               sparsity.add (local_dof_indices[i], neighbor_dof_indices[j]);
+             sparsity.add_entries (local_dof_indices[i],
+                                   neighbor_dof_indices.begin(),
+                                   neighbor_dof_indices.end());
          };
     };
 }
@@ -733,6 +742,8 @@ DoFTools::dof_couplings_from_component_couplings
 
 
 
+// TODO: look whether one can employ collective add operations in sparsity
+// pattern in this function.
 template <class DH, class SparsityPattern>
 void
 DoFTools::make_flux_sparsity_pattern (
deal.II/lac/include/lac/block_matrix_base.h
index 748bd12c3e61828177a337bf6f65b7d429a129e6..9e28ce92185e593749eb632b32a7f19e71e66a59 100644 (file)
@@ -1150,27 +1150,6 @@ class BlockMatrixBase : public Subscriptor
     void Tvmult_nonblock_nonblock (VectorType       &dst,
                                   const VectorType &src) const;
     
-    
-                                    /**
-                                     * Make the iterator class a
-                                     * friend. We have to work around
-                                     * a compiler bug here again.
-                                     */
-#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG
-    template <typename, bool>
-    friend class BlockMatrixIterators::Accessor;
-
-    template <typename>
-    friend class MatrixIterator;
-#else
-    typedef BlockMatrixIterators::Accessor<BlockMatrixBase, true> ConstAccessor;
-    typedef BlockMatrixIterators::Accessor<BlockMatrixBase, false> Accessor;
-    friend class ConstAccessor;
-    
-    friend class const_iterator;
-#endif
-
-
   private:
                                        /**
                                        * Temporary vector for counting the
@@ -1196,6 +1175,26 @@ class BlockMatrixBase : public Subscriptor
                                        */
     std::vector<std::vector<double> > column_values;
 
+    
+                                    /**
+                                     * Make the iterator class a
+                                     * friend. We have to work around
+                                     * a compiler bug here again.
+                                     */
+#ifndef DEAL_II_NAMESP_TEMPL_FRIEND_BUG
+    template <typename, bool>
+    friend class BlockMatrixIterators::Accessor;
+
+    template <typename>
+    friend class MatrixIterator;
+#else
+    typedef BlockMatrixIterators::Accessor<BlockMatrixBase, true> ConstAccessor;
+    typedef BlockMatrixIterators::Accessor<BlockMatrixBase, false> Accessor;
+    friend class ConstAccessor;
+    
+    friend class const_iterator;
+#endif
+
 };
 
 
deal.II/lac/include/lac/block_sparsity_pattern.h
index 45b914cba8e3af9b50b209fb28b04dde75914a71..eb0df50d7b4b0826f447ff9b2132bd7e79d0968b 100644 (file)
@@ -280,7 +280,27 @@ class BlockSparsityPatternBase : public Subscriptor
                                      * and then relays to that block.
                                      */
     void add (const unsigned int i, const unsigned int j);
-    
+
+                                    /**
+                                     * Add several nonzero entries to the
+                                     * specified matrix row.  This function
+                                     * may only be called for
+                                     * non-compressed sparsity patterns.
+                                     *
+                                     * If some of the entries already
+                                     * exist, nothing bad happens.
+                                     *
+                                     * This function simply finds out to
+                                     * which blocks <tt>(row,col)</tt> for
+                                     * <tt>col</tt> in the iterator range
+                                     * belong and then relays to those
+                                     * blocks.
+                                     */
+    template <typename ForwardIterator>
+    void add_entries (const unsigned int row, 
+                     ForwardIterator    begin,
+                     ForwardIterator    end);
+
                                     /**
                                      * Return number of rows of this
                                      * matrix, which equals the
@@ -421,6 +441,23 @@ class BlockSparsityPatternBase : public Subscriptor
                                      */
     BlockIndices    column_indices;
 
+  private:
+                                       /**
+                                       * Temporary vector for counting the
+                                       * elements written into the
+                                       * individual blocks when doing a
+                                       * collective add or set.
+                                       */
+    std::vector<unsigned int> counter_within_block; 
+
+                                       /**
+                                       * Temporary vector for column
+                                       * indices on each block when writing
+                                       * local to global data on each
+                                       * sparse matrix.
+                                       */
+    std::vector<std::vector<unsigned int> > block_column_indices; 
+
                                     /**
                                      * Make the block sparse matrix a
                                      * friend, so that it can use our
@@ -1046,6 +1083,91 @@ BlockSparsityPatternBase<SparsityPatternBase>::add (const unsigned int i,
 
 
 
+template <class SparsityPatternBase>
+template <typename ForwardIterator>
+void
+BlockSparsityPatternBase<SparsityPatternBase>::add_entries (const unsigned int row,
+                                                           ForwardIterator    begin,
+                                                           ForwardIterator    end)
+{
+                                  // Resize scratch arrays
+  if (block_column_indices.size() < this->n_block_cols())
+    {
+      block_column_indices.resize (this->n_block_cols());
+      counter_within_block.resize (this->n_block_cols());
+    }
+
+  const unsigned int n_cols = static_cast<unsigned int>(end - begin);
+
+                                  // Resize sub-arrays to n_cols. This
+                                  // is a bit wasteful, but we resize
+                                  // only a few times (then the maximum
+                                  // row length won't increase that
+                                  // much any more). At least we know
+                                  // that all arrays are going to be of
+                                  // the same size, so we can check
+                                  // whether the size of one is large
+                                  // enough before actually going
+                                  // through all of them.
+  if (block_column_indices[0].size() < n_cols)
+    for (unsigned int i=0; i<this->n_block_cols(); ++i)
+      block_column_indices[i].resize(n_cols);
+
+                                  // Reset the number of added elements
+                                  // in each block to zero.
+  for (unsigned int i=0; i<this->n_block_cols(); ++i)
+    counter_within_block[i] = 0;
+
+                                  // Go through the column indices to
+                                  // find out which portions of the
+                                  // values should be set in which
+                                  // block of the matrix. We need to
+                                  // touch all the data, since we can't
+                                  // be sure that the data of one block
+                                  // is stored contiguously (in fact,
+                                  // indices will be intermixed when it
+                                  // comes from an element matrix).
+  for (ForwardIterator it = begin; it != end; ++it)
+    {
+      const unsigned int col = *it;
+
+      const std::pair<unsigned int, unsigned int>
+       col_index = this->column_indices.global_to_local(col);
+
+      const unsigned int local_index = counter_within_block[col_index.first]++;
+
+      block_column_indices[col_index.first][local_index] = col_index.second;
+    }
+
+#ifdef DEBUG
+                                  // If in debug mode, do a check whether
+                                  // the right length has been obtained.
+  unsigned int length = 0;
+  for (unsigned int i=0; i<this->n_block_cols(); ++i)
+    length += counter_within_block[i];
+  Assert (length == n_cols, ExcInternalError());
+#endif
+
+                                  // Now we found out about where the
+                                  // individual columns should start and
+                                  // where we should start reading out
+                                  // data. Now let's write the data into
+                                  // the individual blocks!
+  const std::pair<unsigned int,unsigned int> 
+    row_index = this->row_indices.global_to_local (row);
+  for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
+    {
+      if (counter_within_block[block_col] == 0)
+       continue;
+      sub_objects[row_index.first][block_col]->
+       add_entries (row_index.second,
+                    block_column_indices[block_col].begin(),
+                    block_column_indices[block_col].begin()+counter_within_block[block_col]);
+    }
+}
+
+
+
 template <class SparsityPatternBase>
 inline
 bool
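
The new BlockSparsityPatternBase::add_entries above sorts each incoming global column index into the block it belongs to (via column_indices.global_to_local) and then forwards one contiguous range per block to the corresponding sub-pattern. A standalone sketch of that dispatch step, with a hard-coded two-block layout standing in for BlockIndices (block sizes 4 and 6, and the column indices, are made up):

#include <cassert>
#include <utility>
#include <vector>

// Toy version of BlockIndices::global_to_local: two blocks of sizes 4 and 6.
std::pair<unsigned int, unsigned int>
global_to_local (const unsigned int col)
{
  return (col < 4) ? std::make_pair (0u, col)
                   : std::make_pair (1u, col - 4);
}

int main ()
{
  const unsigned int n_blocks = 2;
  const std::vector<unsigned int> cols = {1, 5, 3, 9};   // mixed between blocks

  // scratch arrays, as in BlockSparsityPatternBase::add_entries
  std::vector<std::vector<unsigned int> > block_column_indices
    (n_blocks, std::vector<unsigned int> (cols.size ()));
  std::vector<unsigned int> counter_within_block (n_blocks, 0);

  // sort each incoming global column index into its block
  for (unsigned int k = 0; k < cols.size (); ++k)
    {
      const std::pair<unsigned int, unsigned int> ci = global_to_local (cols[k]);
      block_column_indices[ci.first][counter_within_block[ci.first]++] = ci.second;
    }

  // each non-empty per-block range would now be forwarded with one
  // add_entries() call to the corresponding sub-pattern
  assert (counter_within_block[0] == 2 && counter_within_block[1] == 2);
  assert (block_column_indices[0][0] == 1 && block_column_indices[0][1] == 3);
  assert (block_column_indices[1][0] == 1 && block_column_indices[1][1] == 5);
}
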
deal.II/lac/include/lac/compressed_set_sparsity_pattern.h
index 584f6580a4b596b215e6b93336610c9385585309..059b8049f150289b19de88594591107cc38f4063 100644 (file)
@@ -216,7 +216,18 @@ class CompressedSetSparsityPattern : public Subscriptor
                                      */
     void add (const unsigned int i, 
              const unsigned int j);
-    
+
+                                    /**
+                                     * Add several nonzero entries to the
+                                     * specified row of the matrix. If the
+                                     * entries already exist, nothing bad
+                                     * happens.
+                                     */
+    template <typename ForwardIterator>
+    void add_entries (const unsigned int row, 
+                     ForwardIterator    begin,
+                     ForwardIterator    end);
+
                                     /**
                                      * Check if a value at a certain
                                      * position may be non-zero.
@@ -375,6 +386,14 @@ class CompressedSetSparsityPattern : public Subscriptor
                                           * this line.
                                           */
         void add (const unsigned int col_num);
+
+                                         /**
+                                          * Add the columns specified by the
+                                          * iterator range to this line.
+                                          */
+        template <typename ForwardIterator>
+       void add_entries (ForwardIterator begin,
+                         ForwardIterator end);
     };
     
         
@@ -405,6 +424,19 @@ CompressedSetSparsityPattern::Line::add (const unsigned int j)
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSetSparsityPattern::Line::add_entries (ForwardIterator begin,
+                                                ForwardIterator end)
+{
+  // right now, just forward to the individual function.
+  for (ForwardIterator it = begin; it != end; ++it)
+    add (*it);
+}
+
+
+
 inline
 unsigned int
 CompressedSetSparsityPattern::n_rows () const
@@ -436,6 +468,20 @@ CompressedSetSparsityPattern::add (const unsigned int i,
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSetSparsityPattern::add_entries (const unsigned int row,
+                                          ForwardIterator begin,
+                                          ForwardIterator end)
+{
+  Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+  lines[row].add_entries (begin, end);
+}
+
+
+
 inline
 unsigned int
 CompressedSetSparsityPattern::row_length (const unsigned int row) const
deal.II/lac/include/lac/compressed_simple_sparsity_pattern.h
index 69b99aa2b8fb709b56a9de5f4aa5d989c379048b..57033f93d65eaa70b18cc010073a8a7369b56bb2 100644 (file)
@@ -186,6 +186,17 @@ class CompressedSimpleSparsityPattern : public Subscriptor
     void add (const unsigned int i,
              const unsigned int j);
 
+                                    /**
+                                     * Add several nonzero entries to the
+                                     * specified row of the matrix. If the
+                                     * entries already exist, nothing bad
+                                     * happens.
+                                     */
+    template <typename ForwardIterator>
+    void add_entries (const unsigned int row, 
+                     ForwardIterator    begin,
+                     ForwardIterator    end);
+
                                     /**
                                      * Check if a value at a certain
                                      * position may be non-zero.
@@ -347,6 +358,14 @@ class CompressedSimpleSparsityPattern : public Subscriptor
                                           * this line.
                                           */
         void add (const unsigned int col_num);
+
+                                         /**
+                                          * Add the columns specified by the
+                                          * iterator range to this line.
+                                          */
+        template <typename ForwardIterator>
+       void add_entries (ForwardIterator begin,
+                         ForwardIterator end);
     };
 
 
@@ -393,6 +412,77 @@ CompressedSimpleSparsityPattern::Line::add (const unsigned int j)
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSimpleSparsityPattern::Line::add_entries (ForwardIterator begin,
+                                                   ForwardIterator end)
+{
+  if (end - begin <= 0)
+    return;
+
+  ForwardIterator my_it = begin;
+  const unsigned int n_cols = static_cast<unsigned int>(end - begin);
+
+  // If necessary, increase the size of the array. In order to avoid
+  // allocating just a few entries every time, use five elements at a time
+  // at least.
+  const unsigned int stop_size = entries.size() + (n_cols > 5 ? n_cols : 5);
+  if (stop_size > entries.capacity())
+    entries.reserve (stop_size);
+
+  unsigned int col = *my_it;
+  std::vector<unsigned int>::iterator it, it2;
+  // insert the first element as for one entry only first check the last
+  // element (or if line is still empty)
+  if ( (entries.size()==0) || ( entries.back() < col) ) {
+    entries.push_back(col);
+    it = entries.end()-1;
+  }
+  else { 
+    // do a binary search to find the place where to insert:
+    it2 = std::lower_bound(entries.begin(), entries.end(), col); 
+
+    // If this entry is a duplicate, continue immediately Insert at the
+    // right place in the vector. Vector grows automatically to fit
+    // elements. Always doubles its size.
+    if (*it2 != col)
+      it = entries.insert(it2, col);
+    else
+      it = it2;
+  }
+
+  ++my_it;
+  // Now try to be smart and insert with bias in the direction we are
+  // walking. This has the advantage that for sorted lists, we always search
+  // in the right direction, what should decrease the work needed in here.
+  for ( ; my_it != end; ++my_it)
+    {
+      col = *my_it;
+      // need a special insertion command when we're at the end of the list
+      if (col > entries.back()) {
+       entries.push_back(col);
+       it = entries.end()-1;
+      }
+      // search to the right (preferred search direction)
+      else if (col > *it) {
+       it2 = std::lower_bound(it++, entries.end(), col);
+       if (*it2 != col)
+         it = entries.insert(it2, col);
+      }
+      // search to the left
+      else if (col < *it) {
+       it2 = std::lower_bound(entries.begin(), it, col);
+       if (*it2 != col)
+         it = entries.insert(it2, col);
+      }
+      // if we're neither larger nor smaller, then this was a duplicate and
+      // we can just continue.
+    }
+}
+
+
+
 inline
 unsigned int
 CompressedSimpleSparsityPattern::n_rows () const
@@ -424,6 +514,20 @@ CompressedSimpleSparsityPattern::add (const unsigned int i,
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSimpleSparsityPattern::add_entries (const unsigned int row,
+                                             ForwardIterator begin,
+                                             ForwardIterator end)
+{
+  Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+  lines[row].add_entries (begin, end);
+}
+
+
+
 inline
 CompressedSimpleSparsityPattern::Line::Line ()
 {}
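
CompressedSimpleSparsityPattern::Line::add_entries above keeps each row as a sorted, duplicate-free std::vector and inserts new columns via std::lower_bound, biasing the search towards the direction of the previous insertion. A simplified standalone sketch of the same insertion, without the direction bias and with made-up test data:

#include <algorithm>
#include <cassert>
#include <vector>

// Insert a range of column indices into a sorted, duplicate-free vector.
// This is the core operation of Line::add_entries above, written without the
// search-direction bias for clarity.
template <typename ForwardIterator>
void add_entries_sorted (std::vector<unsigned int> &entries,
                         ForwardIterator            begin,
                         ForwardIterator            end)
{
  for (ForwardIterator it = begin; it != end; ++it)
    {
      const unsigned int col = *it;

      // fast path: strictly larger than everything stored so far
      if (entries.empty () || entries.back () < col)
        {
          entries.push_back (col);
          continue;
        }

      // otherwise find the insertion point and skip duplicates
      const std::vector<unsigned int>::iterator pos
        = std::lower_bound (entries.begin (), entries.end (), col);
      if (*pos != col)
        entries.insert (pos, col);
    }
}

int main ()
{
  std::vector<unsigned int> entries;
  const unsigned int cols[] = {4, 1, 7, 4, 2};
  add_entries_sorted (entries, cols, cols + 5);

  const unsigned int expected[] = {1, 2, 4, 7};
  assert (entries == std::vector<unsigned int> (expected, expected + 4));
}
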
deal.II/lac/include/lac/compressed_sparsity_pattern.h
index 0a329f3a521e2e534c8be94ae1360b9b82f84d18..2ad866c797bea6c37dc0311a2e8da4d9f0411643 100644 (file)
@@ -189,7 +189,18 @@ class CompressedSparsityPattern : public Subscriptor
                                      */
     void add (const unsigned int i, 
              const unsigned int j);
-    
+
+                                    /**
+                                     * Add several nonzero entries to the
+                                     * specified row of the matrix. If the
+                                     * entries already exist, nothing bad
+                                     * happens.
+                                     */
+    template <typename ForwardIterator>
+    void add_entries (const unsigned int row, 
+                     ForwardIterator    begin,
+                     ForwardIterator    end);
+
                                     /**
                                      * Check if a value at a certain
                                      * position may be non-zero.
@@ -417,6 +428,14 @@ class CompressedSparsityPattern : public Subscriptor
                                           */
         void add (const unsigned int col_num);
 
+                                         /**
+                                          * Add the columns specified by the
+                                          * iterator range to this line.
+                                          */
+        template <typename ForwardIterator>
+       void add_entries (ForwardIterator begin,
+                         ForwardIterator end);
+
                                          /**
                                           * Flush the cache my merging it with
                                           * the #entries array.
@@ -460,6 +479,19 @@ CompressedSparsityPattern::Line::add (const unsigned int j)
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSparsityPattern::Line::add_entries (ForwardIterator begin,
+                                             ForwardIterator end)
+{
+  // right now, just forward to the individual function.
+  for (ForwardIterator it = begin; it != end; ++it)
+    add (*it);
+}
+
+
+
 inline
 unsigned int
 CompressedSparsityPattern::n_rows () const
@@ -491,6 +523,20 @@ CompressedSparsityPattern::add (const unsigned int i,
 
 
 
+template <typename ForwardIterator>
+inline
+void
+CompressedSparsityPattern::add_entries (const unsigned int row,
+                                       ForwardIterator begin,
+                                       ForwardIterator end)
+{
+  Assert (row < rows, ExcIndexRange (row, 0, rows));
+
+  lines[row].add_entries (begin, end);
+}
+
+
+
 inline
 CompressedSparsityPattern::Line::Line ()
                 :
deal.II/lac/include/lac/sparsity_pattern.h
index fdb6b5f534bec587fdcec646b67dbf4999c2e3da..517c0966ddebde9434e83b36b5f8b2122c299300 100644 (file)
@@ -1054,6 +1054,20 @@ class SparsityPattern : public Subscriptor
                                      */
     void add (const unsigned int i, 
              const unsigned int j);
+
+                                    /**
+                                     * Add several nonzero entries to the
+                                     * specified matrix row.  This function
+                                     * may only be called for
+                                     * non-compressed sparsity patterns.
+                                     *
+                                     * If some of the entries already
+                                     * exist, nothing bad happens.
+                                     */
+    template <typename ForwardIterator>
+    void add_entries (const unsigned int row, 
+                     ForwardIterator    begin,
+                     ForwardIterator    end);
     
                                     /**
                                      * Make the sparsity pattern
deal.II/lac/include/lac/trilinos_sparsity_pattern.h
index a6dd7b29418b6aa67a7b776f87bd8aa53862b1d0..5fcdb2edf211442aaa622fcb9ae251de00c1e4ae 100755 (executable)
@@ -807,9 +807,10 @@ namespace TrilinosWrappers
                                         * Add several elements in one row to
                                         * the sparsity pattern.
                                         */
-      void add (const unsigned int    row,
-               const unsigned int    n_cols,
-               const unsigned int   *col_indices);
+      template <typename ForwardIterator>
+      void add_entries (const unsigned int row,
+                       ForwardIterator    begin,
+                       ForwardIterator    end);
 //@}
 /**
  * @name Access of underlying Trilinos data
@@ -1056,28 +1057,6 @@ namespace TrilinosWrappers
                                         */
       std::auto_ptr<Epetra_FECrsGraph> graph;
 
-                                       /**
-                                       * Scratch array that holds several
-                                       * indices that should be written
-                                       * into the same row of the sparsity
-                                       * pattern. This is to increase the
-                                       * speed of this function.
-                                       */
-      std::vector<unsigned int> cached_row_indices;
-
-                                      /**
-                                       * A number that tells how many
-                                       * indices currently are active in
-                                       * the cache.
-                                       */
-      unsigned int n_cached_elements;
-
-                                      /**
-                                       * The row that is currently in the
-                                       * cache.
-                                       */
-      unsigned int row_in_cache;
-
       friend class SparsityPatternIterators::const_iterator;
       friend class SparsityPatternIterators::const_iterator::Accessor;
   };
@@ -1329,43 +1308,23 @@ namespace TrilinosWrappers
   SparsityPattern::add (const unsigned int i,
                        const unsigned int j)
   {
-                                  // if we want to write an element to the
-                                  // row the cache is currently pointed to,
-                                  // we just append the data to the cache
-    if (i == row_in_cache)
-      {
-                                  // if the size is too small, extend the
-                                  // cache by 10 elements
-       if (n_cached_elements >= cached_row_indices.size())
-         cached_row_indices.resize(cached_row_indices.size() + 10);
-
-       cached_row_indices[n_cached_elements] = j;
-       ++n_cached_elements;
-       return;
-      }
-
-                                  // if we are to write another row data,
-                                  // we write the cache data into the
-                                  // sparsity pattern, and then call this
-                                  // function again
-    add (row_in_cache, n_cached_elements, &cached_row_indices[0]);
-    row_in_cache = i;
-    n_cached_elements = 0;
-    add (i,j);
+    add_entries (i, &j, &j+1);
   }
 
 
 
+  template <typename ForwardIterator>
   inline
   void
-  SparsityPattern::add (const unsigned int    row,
-                       const unsigned int    n_cols,
-                       const unsigned int   *col_indices)
+  SparsityPattern::add_entries (const unsigned int row,
+                               ForwardIterator    begin,
+                               ForwardIterator    end)
   {
-    if (n_cols == 0)
+    if (begin == end)
       return;
 
-    int * col_index_ptr = (int*)(col_indices);
+    int * col_index_ptr = (int*)(&*begin);
+    const int n_cols = static_cast<int>(end - begin);
     compressed = false;
 
     const int ierr = graph->InsertGlobalIndices (1, (int*)&row, 
deal.II/lac/source/sparsity_pattern.cc
index 4165e97f32f05002ec26e6f4e14bae1640e89252..63b2dfa2cf2981cd5b0ec9f50afc6b8d7ea90ae7 100644 (file)
@@ -864,6 +864,19 @@ SparsityPattern::add (const unsigned int i,
 }
 
 
+
+template <typename ForwardIterator>
+void
+SparsityPattern::add_entries (const unsigned int row,
+                             ForwardIterator    begin,
+                             ForwardIterator    end)
+{
+  // right now, just forward to the other function.
+  for (ForwardIterator it = begin; it != end; ++it)
+    add (row, *it);
+}
+
+
 bool
 SparsityPattern::exists (const unsigned int i,
                         const unsigned int j) const
@@ -1122,4 +1135,16 @@ partition (const unsigned int         n_partitions,
 template void SparsityPattern::copy_from<float> (const FullMatrix<float> &, bool);
 template void SparsityPattern::copy_from<double> (const FullMatrix<double> &, bool);
 
+template void SparsityPattern::add_entries<const unsigned int*> (const unsigned int,
+                                                                const unsigned int*,
+                                                                const unsigned int*);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::const_iterator> 
+(const unsigned int,
+ std::vector<unsigned int>::const_iterator,
+ std::vector<unsigned int>::const_iterator);
+template void SparsityPattern::add_entries<std::vector<unsigned int>::iterator> 
+(const unsigned int,
+ std::vector<unsigned int>::iterator,
+ std::vector<unsigned int>::iterator);
+
 DEAL_II_NAMESPACE_CLOSE
deal.II/lac/source/trilinos_sparsity_pattern.cc
index 2bac870be1f58c28743e56e4ddef9315d6d0d3c7..7d1a6f1ec5808fc74d13a89f027c900245e2310c 100755 (executable)
@@ -90,10 +90,7 @@ namespace TrilinosWrappers
                  col_map (row_map),
                  compressed (true),
                  graph (std::auto_ptr<Epetra_FECrsGraph>
-                        (new Epetra_FECrsGraph(View, row_map, col_map, 0))),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                        (new Epetra_FECrsGraph(View, row_map, col_map, 0)))
   {
     graph->FillComplete();
   }
@@ -124,10 +121,7 @@ namespace TrilinosWrappers
                                   (new Epetra_FECrsGraph(Copy, row_map, col_map, 
                                                          (int)n_entries_per_row, 
                                                          false)))
-                        ),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                        )
   {}
  
   SparsityPattern::SparsityPattern (const Epetra_Map                &InputMap,
@@ -148,10 +142,7 @@ namespace TrilinosWrappers
                                                 (int*)const_cast<unsigned int*>
                                                 (&(n_entries_per_row[row_map.MinMyGID()])),
                                                 false)))
-                        ),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                        )
   {}
 
   SparsityPattern::SparsityPattern (const Epetra_Map  &InputRowMap,
@@ -171,10 +162,7 @@ namespace TrilinosWrappers
                                   (new Epetra_FECrsGraph(Copy, row_map, col_map, 
                                                          (int)n_entries_per_row, 
                                                          false)))
-                        ),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                        )
   {}
 
   SparsityPattern::SparsityPattern (const Epetra_Map                &InputRowMap,
@@ -196,10 +184,7 @@ namespace TrilinosWrappers
                                                 (int*)const_cast<unsigned int*>
                                                 (&(n_entries_per_row[row_map.MinMyGID()])),
                                                 false)))
-                        ),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                        )
   {}
 
   SparsityPattern::SparsityPattern (const unsigned int m,
@@ -216,10 +201,7 @@ namespace TrilinosWrappers
                  compressed (false),
                  graph (std::auto_ptr<Epetra_FECrsGraph>
                         (new Epetra_FECrsGraph(Copy, row_map, col_map,
-                                               int(n_entries_per_row), false))),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                                               int(n_entries_per_row), false)))
   {}
 
   SparsityPattern::SparsityPattern (const unsigned int               m,
@@ -237,10 +219,7 @@ namespace TrilinosWrappers
                  graph (std::auto_ptr<Epetra_FECrsGraph>
                         (new Epetra_FECrsGraph(Copy, row_map, col_map, 
                          (int*)const_cast<unsigned int*>(&(n_entries_per_row[0])), 
-                                               false))),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                                               false)))
   {}
 
                                   // Copy function is currently not working
@@ -255,10 +234,7 @@ namespace TrilinosWrappers
                  col_map (InputSP.col_map),
                  compressed (false),
                  graph (std::auto_ptr<Epetra_FECrsGraph>
-                         (new Epetra_FECrsGraph(*InputSP.graph))),
-                 cached_row_indices (1),
-                 n_cached_elements (0),
-                 row_in_cache (0)
+                         (new Epetra_FECrsGraph(*InputSP.graph)))
   {}
   */
 
@@ -321,9 +297,6 @@ namespace TrilinosWrappers
     else
       graph = std::auto_ptr<Epetra_FECrsGraph>
        (new Epetra_FECrsGraph(Copy, row_map, col_map, n_entries_per_row, false));
-
-    n_cached_elements = 0;
-    row_in_cache = 0;
   }
 
 
@@ -380,9 +353,6 @@ namespace TrilinosWrappers
        (new Epetra_FECrsGraph(Copy, row_map, col_map,
                               n_entries_per_row[input_row_map.MinMyGID()], 
                               false));      
-
-    n_cached_elements = 0;
-    row_in_cache = 0;
   }
 
 
@@ -579,9 +549,6 @@ namespace TrilinosWrappers
 
     graph->FillComplete();
 
-    n_cached_elements = 0;
-    row_in_cache = 0;
-
     compressed = true;
   }
 
@@ -590,14 +557,6 @@ namespace TrilinosWrappers
   void
   SparsityPattern::compress ()
   {
-                                 // flush buffers
-    if (n_cached_elements > 0)
-      {
-       add (row_in_cache, n_cached_elements, &cached_row_indices[0]);
-       n_cached_elements = 0;
-       row_in_cache = 0;
-      }
-
     int ierr;
     ierr = graph->GlobalAssemble (col_map, row_map, true);
     
