From: Martin Kronbichler
Date: Tue, 14 Apr 2009 13:43:52 +0000 (+0000)
Subject: One more update of the local_to_global data structures. Now the overheads in these...
X-Git-Tag: v8.0.0~7856
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=a4e43831d878347df2fe89740692d0ebd261c0d5;p=dealii.git

One more update of the local_to_global data structures. Now the overheads in
these functions are reduced to a minimum when creating the sorted array of
global dofs.

git-svn-id: https://svn.dealii.org/trunk@18609 0785d39b-7218-0410-832d-ea1e28bc413d
---

diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h
index 175dd70fdc..45e8a1cc6a 100644
--- a/deal.II/lac/include/lac/constraint_matrix.templates.h
+++ b/deal.II/lac/include/lac/constraint_matrix.templates.h
@@ -961,47 +961,66 @@ ConstraintMatrix::distribute (VectorType &vec) const
                    // local_to_global functions.
 namespace internals
 {
+                   // this struct contains all the
+                   // information we need to store about
+                   // which global entries (global_row) are
+                   // given rise by local entries
+                   // (local_row) or some constraints.
   struct distributing
   {
     distributing (const unsigned int global_row = deal_II_numbers::invalid_unsigned_int,
                   const unsigned int local_row = deal_II_numbers::invalid_unsigned_int);
+    distributing (const distributing &in);
+    ~distributing ();
+    distributing & operator = (const distributing &in);
+    bool operator < (const distributing &in);
     unsigned int global_row;
     unsigned int local_row;
-    std::vector > constraints;
+    mutable std::vector > *constraints;
   };
+
   distributing::distributing (const unsigned int global_row,
                               const unsigned int local_row) :
     global_row (global_row),
-    local_row (local_row) {}
+    local_row (local_row),
+    constraints (0) {}
+
+  distributing::distributing (const distributing &in) :
+    constraints (0)
+  {*this = (in);}
+
+  distributing::~distributing ()
+  {
+    if (constraints != 0)
+      {
+        delete constraints;
+        constraints = 0;
+      }
+  }
 
-                   // a version of std::lower_bound for a
-                   // pair of unsigned int and distributing,
-                   // without taking into account the second
-                   // argument of the pair.
   inline
-  std::vector::iterator
-  lower_bound (std::vector::iterator begin,
-               std::vector::iterator end,
-               unsigned int val)
+  distributing & distributing::operator = (const distributing &in)
   {
-    unsigned int length = end - begin;
-    unsigned int half;
-    std::vector::iterator middle;
+    global_row = in.global_row;
+    local_row = in.local_row;
+    if (constraints != 0)
+      {
+        delete constraints;
+        constraints = 0;
+      }
 
-    while (length > 0)
+    if (in.constraints != 0)
      {
-        half = length >> 1;
-        middle = begin + half;
-        if (middle->global_row < val)
-          {
-            begin = middle;
-            ++begin;
-            length = length - half - 1;
-          }
-        else
-          length = half;
+        constraints = in.constraints;
+        in.constraints = 0;
      }
-    return begin;
+    return *this;
+  }
+
+  inline
+  bool distributing::operator < (const distributing &in)
+  {
+    return global_row < in.global_row;
   }
 
                    // a function that appends an additional
@@ -1009,40 +1028,47 @@ namespace internals
                    // a function that appends an additional
                    // row to the list of global rows or adds
                    // a value to an already existing
                    // row. Similar functionality as for
                    // std::map,
-                   // but here done for a vector of pairs,
-                   // and much faster.
+                   // but here done for a std::vector of
+                   // data type distributing, and much
+                   // faster.
  inline
  void insert_index (std::vector &my_indices,
                     const unsigned int row,
-                    const unsigned int local_row,
-                    const std::pair constraint
-                    = std::make_pair(0,0))
+                    const std::pair constraint)
  {
    typedef std::vector::iterator index_iterator;
    index_iterator pos, pos1;
+    distributing row_comparison (row);
+
+                   // check whether the list was really
+                   // sorted before entering here
+#ifdef DEBUG
+    for (unsigned int i=1; iglobal_row == row)
      pos1 = pos;
    else
-      pos1 = my_indices.insert(pos, distributing(row));
+      pos1 = my_indices.insert(pos, row_comparison);
  }
 
-    if (local_row == deal_II_numbers::invalid_unsigned_int)
-      pos1->constraints.push_back (constraint);
+    if (&*pos1->constraints == 0)
+      pos1->constraints = new std::vector > (1,constraint);
    else
-      pos1->local_row = local_row;
+      pos1->constraints->push_back (constraint);
  }
 
@@ -1068,7 +1094,7 @@ namespace internals
                    // constraints are really empty.
 #ifdef DEBUG
    for (unsigned int i=0; i
+  inline
+  void
+  make_block_starts (const BlockType &block_object,
+                     std::vector &row_indices,
+                     std::vector &block_starts)
+  {
+    Assert (block_starts.size() == block_object.n_block_rows() + 1,
+            ExcDimensionMismatch(block_starts.size(),
+                                 block_object.n_block_rows()+1));
+
+    typedef std::vector::iterator row_iterator;
+    row_iterator col_indices = row_indices.begin();
+
+    const unsigned int num_blocks = block_object.n_block_rows();
+
+                   // find end of rows.
+    block_starts[0] = 0;
+    for (unsigned int i=1;i &local_matrix,
                    // data (containing local columns +
                    // possible jumps from
                    // constraints). Choosing an STL map or
-                   // anything else I know of would be much
-                   // more expensive here!
  std::vector my_indices (n_local_dofs);
-  std::vector > constraint_lines;
-  constraint_lines.reserve(n_local_dofs);
+                   // anything else M.K. knows of would be
+                   // much more expensive here!
 
                    // cache whether we have to resolve any
                    // indirect rows generated from resolving
@@ -1226,7 +1288,6 @@ distribute_local_to_global (const FullMatrix &local_matrix,
          {
            have_indirect_rows = true;
            internals::insert_index(my_indices, position->entries[q].first,
-                                    deal_II_numbers::invalid_unsigned_int,
                                    std::make_pair (local_row,
                                                    position->entries[q].second));
          }
@@ -1270,12 +1331,15 @@ distribute_local_to_global (const FullMatrix &local_matrix,
  std::vector cols (n_actual_dofs);
  std::vector vals (n_actual_dofs);
 
+  typedef std::vector > constraint_format;
+
                    // now do the actual job.
  for (unsigned int i=0; i &local_matrix,
                    // element is zero.
      if (have_indirect_rows == false)
        {
-          Assert(loc_row < n_local_dofs,
-                 ExcInternalError());
+          Assert(loc_row < n_local_dofs, ExcInternalError());
+          const double * matrix_ptr = &local_matrix(loc_row, 0);
 
          for (unsigned int j=0; j < n_actual_dofs; ++j)
            {
              const unsigned int loc_col = my_indices[j].local_row;
-              Assert(loc_col < n_local_dofs,
-                     ExcInternalError());
+              Assert(loc_col < n_local_dofs, ExcInternalError());
 
-              if (local_matrix(loc_row,loc_col) != 0)
+              const double col_val = matrix_ptr[loc_col];
+              if (col_val != 0)
                {
-                  vals[col_counter] = local_matrix(loc_row,loc_col);
-                  cols[col_counter] = my_indices[j].global_row;
-                  col_counter++;
+                  *val_ptr++ = col_val;
+                  *col_ptr++ = my_indices[j].global_row;
                }
            }
 
@@ -1314,15 +1377,21 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                    // side.
          for (unsigned int i=0; iinhomogeneity *
-                      local_matrix(loc_row,constraint_lines[i].first);
+                      matrix_ptr[constraint_lines[i].first];
                }
            }
 
-                   // slower functions when there are
+                   // more difficult part when there are
                    // indirect references and when we need
                    // to do some more checks.
      else
        {
+          const double * matrix_ptr = 0;
+          if (loc_row != deal_II_numbers::invalid_unsigned_int)
+            {
+              Assert (loc_row < n_local_dofs, ExcInternalError());
+              matrix_ptr = &local_matrix(loc_row, 0);
+            }
          for (unsigned int j=0; j < n_actual_dofs; ++j)
            {
              double col_val;
@@ -1332,16 +1401,12 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                    // local matrix
              if (loc_row != deal_II_numbers::invalid_unsigned_int)
                {
-                  Assert (loc_row < n_local_dofs,
-                          ExcInternalError());
-
                    // case 1a: col has direct contribution
                    // in local matrix
                  if (loc_col != deal_II_numbers::invalid_unsigned_int)
                    {
-                      Assert (loc_col < n_local_dofs,
-                              ExcInternalError());
-                      col_val = local_matrix(loc_row,loc_col);
+                      Assert (loc_col < n_local_dofs, ExcInternalError());
+                      col_val = matrix_ptr[loc_col];
                    }
                    // case 1b: col has no direct
                    // contribution in local matrix
@@ -1350,11 +1415,14 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                    // account for indirect contributions by
                    // constraints
-                  for (unsigned int p=0; p &local_matrix,
                    // constraints in row, going trough the
                    // direct and indirect references in the
                    // given column.
-                  for (unsigned int q=0; q 0, ExcInternalError());
+
+                  for (unsigned int q=0; q &local_matrix,
                    // append it to the array of values.
              if (col_val != 0)
                {
-                  cols[col_counter] = my_indices[j].global_row;
-                  vals[col_counter] = col_val;
-                  col_counter++;
+                  *val_ptr++ = col_val;
+                  *col_ptr++ = my_indices[j].global_row;
                }
            }
 
@@ -1416,17 +1485,23 @@ distribute_local_to_global (const FullMatrix &local_matrix,
            val = local_vector(loc_row);
          for (unsigned int i=0; iinhomogeneity *
-                  local_matrix(loc_row,constraint_lines[i].first);
+                  matrix_ptr[constraint_lines[i].first];
            }
 
-          for (unsigned int q=0; q < my_indices[i].constraints.size(); ++q)
+          if (my_indices[i].constraints != 0)
            {
-              const unsigned int loc_row_q = my_indices[i].constraints[q].first;
-              double add_this = local_vector (loc_row_q);
-              for (unsigned int k=0; kinhomogeneity *
-                  local_matrix(loc_row_q,constraint_lines[k].first);
-              val += add_this * my_indices[i].constraints[q].second;
+              std::vector > &constraint_i =
+                *my_indices[i].constraints;
+
+              for (unsigned int q=0; qinhomogeneity *
+                      local_matrix(loc_row_q,constraint_lines[k].first);
+                  val += add_this * constraint_i[q].second;
+                }
            }
        }
    }
 
@@ -1436,8 +1511,11 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                    // process into the global matrix row and
                    // into the vector
      Threads::ThreadMutex::ScopedLock lock(mutex);
-      if (col_counter > 0)
-        global_matrix.add(row, col_counter, &cols[0], &vals[0], false, true);
+      const unsigned int n_values = col_ptr - &cols[0];
+      Assert (n_values == (unsigned int)(val_ptr - &vals[0]),
+              ExcInternalError());
+      if (n_values > 0)
+        global_matrix.add(row, n_values, &cols[0], &vals[0], false, true);
      if (val != 0)
        global_vector(row) += val;
    }
 
@@ -1490,7 +1568,6 @@ distribute_local_to_global (const FullMatrix &local_matrix,
  std::vector my_indices (n_local_dofs);
  std::vector > constraint_lines;
-  constraint_lines.reserve(n_local_dofs);
 
  bool have_indirect_rows = false;
  {
@@ -1535,7 +1612,6 @@ distribute_local_to_global (const FullMatrix &local_matrix,
          {
            have_indirect_rows = true;
            internals::insert_index(my_indices, position->entries[q].first,
-                                    deal_II_numbers::invalid_unsigned_int,
                                    std::make_pair (local_row,
                                                    position->entries[q].second));
          }
@@ -1555,31 +1631,11 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                    // additional construct that also takes
                    // care of block indices.
  std::vector block_starts(num_blocks+1, n_actual_dofs);
-  {
-    typedef std::vector::iterator row_iterator;
-    row_iterator col_indices = localized_indices.begin();
-
-                   // find end of rows.
-    block_starts[0] = 0;
-    for (unsigned int i=1;i cols (n_actual_dofs);
  std::vector vals (n_actual_dofs);
+  typedef std::vector > constraint_format;
 
                    // the basic difference to the
                    // non-block variant from now onwards
@@ -1600,26 +1656,31 @@ distribute_local_to_global (const FullMatrix &local_matrix,
          double * val_ptr = &vals[0];
          if (have_indirect_rows == false)
            {
-              Assert(loc_row < n_local_dofs,
-                     ExcInternalError());
+              Assert(loc_row < n_local_dofs, ExcInternalError());
+              const double * matrix_ptr = &local_matrix(loc_row, 0);
 
              for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j)
                {
                  const unsigned int loc_col = my_indices[j].local_row;
-                  Assert(loc_col < n_local_dofs,
-                         ExcInternalError());
+                  Assert(loc_col < n_local_dofs, ExcInternalError());
 
-                  const double mat_val = local_matrix(loc_row, loc_col);
-                  if (mat_val != 0)
+                  const double col_val = matrix_ptr[loc_col];
+                  if (col_val != 0)
                    {
+                      *val_ptr++ = col_val;
                      *col_ptr++ = localized_indices[j];
-                      *val_ptr++ = mat_val;
                    }
                }
            }
          else
            {
+              const double * matrix_ptr = 0;
+              if (loc_row != deal_II_numbers::invalid_unsigned_int)
+                {
+                  Assert (loc_row < n_local_dofs, ExcInternalError());
+                  matrix_ptr = &local_matrix(loc_row, 0);
+                }
              for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j)
                {
                  double col_val;
@@ -1627,47 +1688,50 @@ distribute_local_to_global (const FullMatrix &local_matrix,
                  if (loc_row != deal_II_numbers::invalid_unsigned_int)
                    {
-                      Assert (loc_row < n_local_dofs,
-                              ExcInternalError());
+                      col_val = loc_col != deal_II_numbers::invalid_unsigned_int ?
+                                matrix_ptr[loc_col] : 0;
 
-                      if (loc_col != deal_II_numbers::invalid_unsigned_int)
+                   // account for indirect contributions by
+                   // constraints
+                      if (my_indices[j].constraints != 0)
                        {
-                          Assert (loc_col < n_local_dofs,
-                                  ExcInternalError());
-                          col_val = local_matrix(loc_row,loc_col);
+                          constraint_format &constraint_j =
+                            *my_indices[j].constraints;
+
+                          for (unsigned int p=0; p &local_matrix,
                  local_matrix(loc_row,constraint_lines[i].first);
            }
 
-          for (unsigned int q=0; q < my_indices[i].constraints.size(); ++q)
+          if (my_indices[i].constraints != 0)
            {
-              const unsigned int loc_row_q = my_indices[i].constraints[q].first;
-              double add_this = local_vector (loc_row_q);
-              for (unsigned int k=0; kinhomogeneity *
-                  local_matrix(loc_row_q,constraint_lines[k].first);
-              val += add_this * my_indices[i].constraints[q].second;
+              std::vector > &constraint_i =
+                *my_indices[i].constraints;
+
+              for (unsigned int q=0; qinhomogeneity *
+                      local_matrix(loc_row_q,constraint_lines[k].first);
+                  val += add_this * constraint_i[q].second;
+                }
            }
      if (val != 0)
        {
@@ -1740,8 +1810,6 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
                             const Table<2,bool> &dof_mask,
                             internal::bool2type ) const
 {
-                   // similar to the function for distributing
-                   // matrix entries.
  Assert (sparsity_pattern.n_rows() == sparsity_pattern.n_cols(), ExcNotQuadratic());
  const unsigned int n_local_dofs = local_dof_indices.size();
@@ -1753,10 +1821,111 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
              ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs));
    }
 
-  std::vector my_indices (n_local_dofs);
+                   // if the dof mask is not active, all we
+                   // have to do is to add some indices in a
+                   // matrix format. To do this, we first
+                   // create an array of all the indices
+                   // that are to be added. these indices
+                   // are the local dof indices plus some
+                   // indices that come from constraints.
+  if (dof_mask_is_active == false)
+    {
+      std::vector actual_dof_indices (n_local_dofs);
+      unsigned int added_rows = 0;
+      bool have_indirect_rows = false;
+      std::vector > constraint_lines;
+      for (unsigned int i = 0; i::const_iterator
+            position = std::lower_bound (lines.begin(),
+                                         lines.end(),
+                                         index_comparison);
+          Assert (position->line == local_dof_indices[i],
+                  ExcInternalError());
+
+          constraint_lines.push_back (std::make_pair(i,&*position));
+        }
+      Assert (constraint_lines.size() + added_rows == n_local_dofs,
+              ExcInternalError());
+      actual_dof_indices.resize (added_rows);
+      std::sort (actual_dof_indices.begin(), actual_dof_indices.end());
+
+      const unsigned int n_constrained_dofs = constraint_lines.size();
+      for (unsigned int i=0; ientries.size(); ++q)
+          {
+            have_indirect_rows = true;
+            const unsigned int new_index = position->entries[q].first;
+            if (actual_dof_indices.back() < new_index)
+              {
+                actual_dof_indices.push_back(new_index);
+              }
+            else
+              {
+                std::vector::iterator it =
+                  std::lower_bound(actual_dof_indices.begin(),
+                                   actual_dof_indices.end(),
+                                   new_index);
+                if (*it != new_index)
+                  actual_dof_indices.insert(it, new_index);
+              }
+          }
+
+      Threads::ThreadMutex::ScopedLock lock(mutex);
+
+      if (keep_constrained_entries == true)
+        {
+          for (unsigned int j=0; j my_indices (n_local_dofs);
  std::vector > constraint_lines;
-  constraint_lines.reserve(n_local_dofs);
 
                    // cache whether we have to resolve any
                    // indirect rows generated from resolving
@@ -1810,7 +1979,6 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
          {
            have_indirect_rows = true;
            internals::insert_index(my_indices, position->entries[q].first,
-                                    deal_II_numbers::invalid_unsigned_int,
                                    std::make_pair (local_row,
                                                    position->entries[q].second));
          }
@@ -1827,18 +1995,10 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
    {
      for (unsigned int j=0; j &local_dof_indices,
                    // sparsity pattern.
  std::vector cols (n_actual_dofs);
 
-                   // easy case - we add all indices (i.e.,
-                   // the dof_mask is not active). so to
-                   // each global row we add all the
-                   // indices.
-  if (dof_mask_is_active == false)
-    {
-      for (unsigned int i=0; i::iterator col_ptr = cols.begin();
@@ -1895,8 +2033,7 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
          for (unsigned int j=0; j < n_actual_dofs; ++j)
            {
              const unsigned int loc_col = my_indices[j].local_row;
-              Assert(loc_col < n_local_dofs,
-                     ExcInternalError());
+              Assert(loc_col < n_local_dofs, ExcInternalError());
 
              if (dof_mask[loc_row][loc_col] == true)
                *col_ptr++ = my_indices[j].global_row;
@@ -1918,45 +2055,59 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
                    // local matrix
              if (loc_row != deal_II_numbers::invalid_unsigned_int)
                {
-                  Assert (loc_row < n_local_dofs,
-                          ExcInternalError());
+                  Assert (loc_row < n_local_dofs, ExcInternalError());
 
                    // case 1a: col has direct contribution
                    // in local matrix
                  if (loc_col != deal_II_numbers::invalid_unsigned_int)
                    {
-                      Assert (loc_col < n_local_dofs,
-                              ExcInternalError());
+                      Assert (loc_col < n_local_dofs, ExcInternalError());
                      if (dof_mask[loc_row][loc_col] == true)
                        goto add_this_index;
                    }
 
                    // account for indirect contributions by
                    // constraints
-                  for (unsigned int p=0; p > &constraint_j =
+                        *my_indices[j].constraints;
+
+                      for (unsigned int p=0; p > &constraint_i =
+            *my_indices[i].constraints;
+          for (unsigned int q=0; q > &constraint_j =
+                    *my_indices[j].constraints;
+
+                  for (unsigned int p=0; p &local_dof_indices,
              ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs));
    }
 
-  std::vector my_indices (n_local_dofs);
+                   // if the dof mask is not active, all we
+                   // have to do is to add some indices in a
+                   // matrix format. To do this, we first
+                   // create an array of all the indices
+                   // that are to be added. these indices
+                   // are the local dof indices plus some
+                   // indices that come from constraints.
+  if (dof_mask_is_active == false)
+    {
+      std::vector actual_dof_indices (n_local_dofs);
+      unsigned int added_rows = 0;
+      bool have_indirect_rows = false;
+      std::vector > constraint_lines;
+      for (unsigned int i = 0; i::const_iterator
+            position = std::lower_bound (lines.begin(),
+                                         lines.end(),
+                                         index_comparison);
+          Assert (position->line == local_dof_indices[i],
+                  ExcInternalError());
+
+          constraint_lines.push_back (std::make_pair(i,&*position));
+        }
+      Assert (constraint_lines.size() + added_rows == n_local_dofs,
+              ExcInternalError());
+      actual_dof_indices.resize (added_rows);
+      std::sort (actual_dof_indices.begin(), actual_dof_indices.end());
+
+      const unsigned int n_constrained_dofs = constraint_lines.size();
+      for (unsigned int i=0; ientries.size(); ++q)
+          {
+            have_indirect_rows = true;
+            const unsigned int new_index = position->entries[q].first;
+            if (actual_dof_indices.back() < new_index)
+              {
+                actual_dof_indices.push_back(new_index);
+              }
+            else
+              {
+                std::vector::iterator it =
+                  std::lower_bound(actual_dof_indices.begin(),
+                                   actual_dof_indices.end(),
+                                   new_index);
+                if (*it != new_index)
+                  actual_dof_indices.insert(it, new_index);
+              }
+          }
+
+      Threads::ThreadMutex::ScopedLock lock(mutex);
+
+      if (keep_constrained_entries == true)
+        {
+          for (unsigned int j=0; j block_starts(num_blocks+1, n_actual_dofs);
+      internals::make_block_starts (sparsity_pattern, actual_dof_indices,
+                                    block_starts);
+
+                   // easy operation - just go trough the
+                   // individual blocks and add the same
+                   // array for each row
+      Threads::ThreadMutex::ScopedLock lock(mutex);
+      for (unsigned int block=0; block::iterator index_it = actual_dof_indices.begin();
+          for (unsigned int block_col = 0; block_col my_indices (n_local_dofs);
  std::vector > constraint_lines;
-  constraint_lines.reserve(n_local_dofs);
 
                    // cache whether we have to resolve any
                    // indirect rows generated from resolving
@@ -2068,35 +2339,20 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
          {
            have_indirect_rows = true;
            internals::insert_index(my_indices, position->entries[q].first,
-                                    deal_II_numbers::invalid_unsigned_int,
                                    std::make_pair (local_row,
                                                    position->entries[q].second));
          }
 
  Threads::ThreadMutex::ScopedLock lock(mutex);
-                   // need to add the whole row and column
-                   // structure in case we keep constrained
-                   // entries. Unfortunately, we can't use
-                   // the nice matrix structure we use
-                   // elsewhere, so manually add those
-                   // indices one by one.
  if (keep_constrained_entries == true)
    {
      for (unsigned int j=0; j &local_dof_indices,
                    // additional construct that also takes
                    // care of block indices.
  std::vector block_starts(num_blocks+1, n_actual_dofs);
-  {
-    typedef std::vector::iterator row_iterator;
-    row_iterator col_indices = localized_indices.begin();
-
-                   // find end of rows.
-    block_starts[0] = 0;
-    for (unsigned int i=1;i::iterator index_it = localized_indices.begin();
-      for (unsigned int block_col = 0; block_col cols (n_actual_dofs);
@@ -2226,31 +2433,53 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
                        goto add_this_index;
                    }
 
-                  for (unsigned int p=0; p >
+                    &constraint_j = *my_indices[j].constraints;
+
+                  for (unsigned int p=0; p >
+            &constraint_i = *my_indices[i].constraints;
+          for (unsigned int q=0; q >
+                &constraint_j = *my_indices[j].constraints;
+
+              for (unsigned int p=0; p::add (const unsigned int row,
      Assert (this_cols[counter] == col_indices[i] || values[i] == 0,
              ExcInvalidIndex(row,col_indices[i]));
 
-      if (values[i] != 0)
-        val_ptr[counter] += values[i];
+      val_ptr[counter] += values[i];
    }
  Assert (counter < cols->row_length(row), ExcInternalError());
 }
@@ -399,8 +398,7 @@ SparseMatrix::add (const unsigned int row,
      Assert (this_cols[counter] == col_indices[i] || values[i] == 0,
              ExcInvalidIndex(row,col_indices[i]));
 
-      if (values[i] != 0)
-        val_ptr[counter] += values[i];
+      val_ptr[counter] += values[i];
    }
  Assert (counter < cols->row_length(row), ExcInternalError());
 }
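
The central trick in this patch is to keep the list of global rows in a sorted std::vector of "distributing" objects and to insert new rows with a binary search, instead of going through a std::map. The following stand-alone sketch illustrates that idea only; it is not the deal.II code itself: the struct and function names are invented, and the constraint list is stored by value rather than behind a heap-allocated pointer as in the patch.

// Minimal sketch of the sorted-vector insertion idea (hypothetical names).
#include <algorithm>
#include <cassert>
#include <iostream>
#include <utility>
#include <vector>

struct Entry
{
  unsigned int global_row;
  unsigned int local_row;
  std::vector<std::pair<unsigned int, double> > constraints;

  Entry (const unsigned int g) : global_row (g), local_row (0) {}

  // only the global row number matters for the ordering
  bool operator< (const Entry &other) const
  { return global_row < other.global_row; }
};

// Append a constraint (local_row, weight) to the entry for global row `row`,
// creating that entry first if it is not in the sorted list yet.
void insert_entry (std::vector<Entry> &entries,
                   const unsigned int  row,
                   const std::pair<unsigned int, double> &constraint)
{
  const Entry comparison (row);
  std::vector<Entry>::iterator pos
    = std::lower_bound (entries.begin(), entries.end(), comparison);

  if (pos == entries.end() || pos->global_row != row)
    pos = entries.insert (pos, comparison);

  pos->constraints.push_back (constraint);
}

int main ()
{
  std::vector<Entry> entries;
  insert_entry (entries, 42, std::make_pair (0u, 0.5));
  insert_entry (entries, 7,  std::make_pair (1u, 1.0));
  insert_entry (entries, 42, std::make_pair (2u, 0.5));

  // the list stays sorted by global row, so later lookups can again
  // use binary search
  for (unsigned int i = 0; i + 1 < entries.size(); ++i)
    assert (entries[i].global_row < entries[i + 1].global_row);

  for (unsigned int i = 0; i < entries.size(); ++i)
    std::cout << "row " << entries[i].global_row << " has "
              << entries[i].constraints.size() << " constraint(s)\n";
}

Because the array of global dofs is built in sorted order anyway, each insertion costs a binary search plus a vector insert, which for the small arrays arising from one cell is cheaper than the node allocations a std::map would perform.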
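The make_block_starts helper added above splits one sorted array of global indices into the ranges owned by the row blocks of a block matrix or block sparsity pattern. The sketch below shows the same computation on made-up data: it uses hypothetical, hard-coded block boundaries instead of querying the BlockSparseMatrix interface, so it only illustrates the lower_bound-based partitioning.

// Sketch of partitioning a sorted index array into per-block ranges
// (illustrative data, not the deal.II block classes).
#include <algorithm>
#include <iostream>
#include <vector>

int main ()
{
  // sorted global column indices, as produced by the code above
  const unsigned int idx[] = {1, 4, 7, 12, 15, 21};
  std::vector<unsigned int> indices (idx, idx + 6);

  // first global index of each block, plus one past the last block
  const unsigned int bounds[] = {0, 10, 20, 30};
  std::vector<unsigned int> block_boundaries (bounds, bounds + 4);

  const unsigned int n_blocks = block_boundaries.size() - 1;
  std::vector<unsigned int> block_starts (n_blocks + 1);

  block_starts[0] = 0;
  for (unsigned int b = 1; b <= n_blocks; ++b)
    block_starts[b] = std::lower_bound (indices.begin(), indices.end(),
                                        block_boundaries[b])
                      - indices.begin();

  // block b owns indices[block_starts[b]] ... indices[block_starts[b+1]-1]
  for (unsigned int b = 0; b < n_blocks; ++b)
    std::cout << "block " << b << ": positions [" << block_starts[b]
              << ", " << block_starts[b+1] << ")\n";
}

With the starts known, the same sorted index array can be handed to every block row in turn, which is what lets the block variants of the functions above reuse one pass over the local dofs.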