From: turcksin
Date: Wed, 10 Apr 2013 20:05:29 +0000 (+0000)
Subject: Merge from Mainline.
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=aefe2915285e7e44362a5c9f0c3712fd1a44bbdf;p=dealii-svn.git

Merge from Mainline.

git-svn-id: https://svn.dealii.org/branches/branch_bigger_global_dof_indices_4@29248 0785d39b-7218-0410-832d-ea1e28bc413d
---

aefe2915285e7e44362a5c9f0c3712fd1a44bbdf
diff --cc deal.II/include/deal.II/lac/vector.h
index 24c83ea22d,05ebaa680d..d8da052cf7
--- a/deal.II/include/deal.II/lac/vector.h
+++ b/deal.II/include/deal.II/lac/vector.h
@@@ -299,7 -299,7 +299,7 @@@ public
     * @p PETScWrappers::Vector class.
     *
     * For the PETSc vector wrapper class,
--   * thios function compresses the
++   * this function compresses the
     * underlying representation of the PETSc
     * object, i.e. flushes the buffers of
     * the vector object if it has any. This
diff --cc deal.II/source/lac/constraint_matrix.cc
index 0c3a96a578,711c024189..108f6b5dee
--- a/deal.II/source/lac/constraint_matrix.cc
+++ b/deal.II/source/lac/constraint_matrix.cc
@@@ -127,16 -127,12 +127,12 @@@ ConstraintMatrix::add_entrie
   ConstraintLine *line_ptr = &lines[lines_cache[calculate_line_index(line)]];
   Assert (line_ptr->line == line, ExcInternalError());

-  // if in debug mode, check whether an
-  // entry for this column already
-  // exists and if its the same as
-  // the one entered at present
+  // if in debug mode, check whether an entry for this column already
+  // exists and if its the same as the one entered at present
   //
-  // in any case: skip this entry if
-  // an entry for this column already
-  // exists, since we don't want to
-  // enter it twice
+  // in any case: skip this entry if an entry for this column already
+  // exists, since we don't want to enter it twice
-  for (std::vector<std::pair<unsigned int,double> >::const_iterator
+  for (std::vector<std::pair<size_type,double> >::const_iterator
        col_val_pair = col_val_pairs.begin();
        col_val_pair!=col_val_pairs.end(); ++col_val_pair)
     {
@@@ -195,24 -190,21 +190,21 @@@ void ConstraintMatrix::close (
   // sort the lines
   std::sort (lines.begin(), lines.end());

-  // update list of pointers and give the
-  // vector a sharp size since we won't
-  // modify the size any more after this
-  // point.
+  // update list of pointers and give the vector a sharp size since we
+  // won't modify the size any more after this point.
   {
-    std::vector<unsigned int> new_lines (lines_cache.size(),
-                                         numbers::invalid_unsigned_int);
-    unsigned int counter = 0;
+    std::vector<size_type> new_lines (lines_cache.size(),
+                                      numbers::invalid_size_type);
+    size_type counter = 0;
     for (std::vector<ConstraintLine>::const_iterator line=lines.begin();
          line!=lines.end(); ++line, ++counter)
       new_lines[calculate_line_index(line->line)] = counter;
     std::swap (lines_cache, new_lines);
   }

-  // in debug mode: check whether we really
-  // set the pointers correctly.
+  // in debug mode: check whether we really set the pointers correctly.
-  for (unsigned int i=0; i
B->C->A without the number of entries growing.
-        unsigned int n_replacements = 0;
+        size_type n_replacements = 0;
  #endif
-
-
-        // loop over all entries of
-        // this line (including
-        // ones that we have
-        // appended in this go
-        // around) and see whether
-        // they are further
-        // constrained. ignore
-        // elements that we don't
-        // store on the current
-        // processor
+        // loop over all entries of this line (including ones that we
+        // have appended in this go around) and see whether they are
+        // further constrained. ignore elements that we don't store on
+        // the current processor
-        unsigned int entry = 0;
+        size_type entry = 0;
         while (entry < line->entries.size())
           if (((local_lines.size() == 0)
                ||
@@@ -312,16 -278,12 +278,12 @@@
               && is_constrained (line->entries[entry].first))
             {
-              // ok, this entry is
-              // further
-              // constrained:
+              // ok, this entry is further constrained:
               chained_constraint_replaced = true;

-              // look up the chain
-              // of constraints for
-              // this entry
+              // look up the chain of constraints for this entry
-              const unsigned int dof_index = line->entries[entry].first;
-              const double weight = line->entries[entry].second;
+              const size_type dof_index = line->entries[entry].first;
+              const double weight = line->entries[entry].second;

               Assert (dof_index != line->line,
                       ExcMessage ("Cycle in constraints detected!"));
@@@ -450,16 -374,12 +374,12 @@@
       {
         std::sort (line->entries.begin(), line->entries.end());

-        // loop over the now sorted list and
-        // see whether any of the entries
-        // references the same dofs more than
-        // once in order to find how many
-        // non-duplicate entries we have. This
-        // lets us allocate the correct amount
-        // of memory for the constraint
-        // entries.
+        // loop over the now sorted list and see whether any of the entries
+        // references the same dofs more than once in order to find how many
+        // non-duplicate entries we have. This lets us allocate the correct
+        // amount of memory for the constraint entries.
-        unsigned int duplicates = 0;
-        for (unsigned int i=1; i<line->entries.size(); ++i)
+        size_type duplicates = 0;
+        for (size_type i=1; i<line->entries.size(); ++i)
           if (line->entries[i].first == line->entries[i-1].first)
             duplicates++;
@@@ -493,11 -410,9 +410,9 @@@
           Assert (new_entries.size() == line->entries.size() - duplicates,
                   ExcInternalError());

-          // make sure there are
-          // really no duplicates
-          // left and that the list
-          // is still sorted
+          // make sure there are really no duplicates left and that the
+          // list is still sorted
-          for (unsigned int j=1; j
          line->entries.swap (new_entries);
         }

-      // finally do the following
-      // check: if the sum of
-      // weights for the
-      // constraints is close to
-      // one, but not exactly
-      // one, then rescale all
-      // the weights so that they
-      // sum up to 1. this adds a
-      // little numerical
-      // stability and avoids all
-      // sorts of problems where
-      // the actual value is
-      // close to, but not quite
-      // what we expected
+      // finally do the following check: if the sum of weights for the
+      // constraints is close to one, but not exactly one, then rescale all
+      // the weights so that they sum up to 1. this adds a little numerical
+      // stability and avoids all sorts of problems where the actual value
+      // is close to, but not quite what we expected
       //
-      // the case where the
-      // weights don't quite sum
-      // up happens when we
-      // compute the
-      // interpolation weights
-      // "on the fly", i.e. not
-      // from precomputed
-      // tables. in this case,
-      // the interpolation
-      // weights are also subject
-      // to round-off
+      // the case where the weights don't quite sum up happens when we
+      // compute the interpolation weights "on the fly", i.e. not from
+      // precomputed tables. in this case, the interpolation weights are
+      // also subject to round-off
       double sum = 0;
-      for (unsigned int i=0; i<line->entries.size(); ++i)
+      for (size_type i=0; i<line->entries.size(); ++i)
         sum += line->entries[i].second;
       if ((sum != 1.0) && (std::fabs (sum-1.) < 1.e-13))
        {
@@@ -591,34 -484,25 +484,25 @@@ ConstraintMatrix::merge (const Constrai
   if (other_constraints.lines_cache.size() > lines_cache.size())
     lines_cache.resize(other_constraints.lines_cache.size(),
-                       numbers::invalid_unsigned_int);
+                       numbers::invalid_size_type);

-  // first action is to fold into the present
-  // object possible constraints in the
-  // second object. we don't strictly need to
-  // do this any more since the
-  // ConstraintMatrix has learned to deal
-  // with chains of constraints in the
-  // close() function, but we have
-  // traditionally done this and it's not
+  // first action is to fold into the present object possible constraints
+  // in the second object. we don't strictly need to do this any more since
+  // the ConstraintMatrix has learned to deal with chains of constraints in
+  // the close() function, but we have traditionally done this and it's not
   // overly hard to do.
   //
-  // for this, loop over all
-  // constraints and replace the
-  // constraint lines with a new one
-  // where constraints are replaced
-  // if necessary.
+  // for this, loop over all constraints and replace the constraint lines
+  // with a new one where constraints are replaced if necessary.
   ConstraintLine::Entries tmp;
   for (std::vector<ConstraintLine>::iterator line=lines.begin();
        line!=lines.end(); ++line)
     {
       tmp.clear ();
-      for (unsigned int i=0; i<line->entries.size(); ++i)
+      for (size_type i=0; i<line->entries.size(); ++i)
         {
-          // if the present dof is not
-          // constrained, or if we won't take
-          // the constraint from the other
-          // object, then simply copy it over
+          // if the present dof is not constrained, or if we won't take the
+          // constraint from the other object, then simply copy it over
           if (other_constraints.is_constrained(line->entries[i].first) == false
               ||
               ((merge_conflict_behavior != right_object_wins)
@@@ -797,10 -664,9 +664,9 @@@ void ConstraintMatrix::condense (const
             ++shift;
             ++next_constraint;
             if (next_constraint == lines.end())
-              // nothing more to do; finish rest
-              // of loop
+              // nothing more to do; finish rest of loop
               {
-                for (unsigned int i=row+1; i
 distribute(sparsity.n_rows(),
-             numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);

-  for (unsigned int c=0; c
is_valid_entry()); ++entry)
       {
-        const unsigned int column = entry->column();
+        const size_type column = entry->column();

-        if (distribute[column] != numbers::invalid_unsigned_int)
+        if (distribute[column] != numbers::invalid_size_type)
          {
-            // distribute entry
-            // at regular row
-            // @p{row} and
-            // irregular column
-            // sparsity.colnums[j]
+            // distribute entry at regular row @p{row} and irregular
+            // column sparsity.colnums[j]
-            for (unsigned int q=0;
+            for (size_type q=0;
                  q!=lines[distribute[column]].entries.size(); ++q)
               sparsity.add (row,
@@@ -932,21 -790,19 +790,19 @@@
       for (SparsityPattern::iterator entry = sparsity.begin(row);
            (entry != sparsity.end(row)) && entry->is_valid_entry(); ++entry)
         {
-          const unsigned int column = entry->column();
-          if (distribute[column] == numbers::invalid_unsigned_int)
+          const size_type column = entry->column();
+          if (distribute[column] == numbers::invalid_size_type)
-            // distribute entry at irregular
-            // row @p{row} and regular column
-            // sparsity.colnums[j]
+            // distribute entry at irregular row @p{row} and regular
+            // column sparsity.colnums[j]
-            for (unsigned int q=0;
+            for (size_type q=0;
                  q!=lines[distribute[row]].entries.size(); ++q)
               sparsity.add (lines[distribute[row]].entries[q].first,
                             column);
           else
-            // distribute entry at irregular
-            // row @p{row} and irregular column
-            // sparsity.get_column_numbers()[j]
+            // distribute entry at irregular row @p{row} and irregular
+            // column sparsity.get_column_numbers()[j]
-            for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
-              for (unsigned int q=0;
+            for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+              for (size_type q=0;
                    q!=lines[distribute[column]].entries.size(); ++q)
                 sparsity.add (lines[distribute[row]].entries[p].first,
                               lines[distribute[column]].entries[q].first);
@@@ -965,89 -821,46 +821,46 @@@ void ConstraintMatrix::condense (Compre
   Assert (sparsity.n_rows() == sparsity.n_cols(), ExcNotQuadratic());

-  // store for each index whether it must be
-  // distributed or not. If entry is
-  // numbers::invalid_size_type,
-  // no distribution is necessary.
-  // otherwise, the number states which line
-  // in the constraint matrix handles this
-  // index
+  // store for each index whether it must be distributed or not. If entry
+  // is numbers::invalid_unsigned_int, no distribution is necessary.
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
-  std::vector<unsigned int> distribute(sparsity.n_rows(),
-                                       numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);

-  for (unsigned int c=0; c
 distribute(sparsity.n_rows(),
-             numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);

-  for (unsigned int c=0; c
 distribute(sparsity.n_rows(),
-             numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute(sparsity.n_rows(),
+                                    numbers::invalid_size_type);

-  for (unsigned int c=0; c
 distribute (sparsity.n_rows(),
-              numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);

-  for (unsigned int c=0; c
+      const std::pair block_index = index_mapping.global_to_local(row);
-      const unsigned int block_row = block_index.first;
+      const size_type block_row = block_index.first;

-      if (distribute[row] == numbers::invalid_unsigned_int)
+      if (distribute[row] == numbers::invalid_size_type)
-        // regular line. loop over
-        // all columns and see
-        // whether this column must
-        // be distributed
+        // regular line. loop over all columns and see whether this column
+        // must be distributed
         {
-          // to loop over all entries
-          // in this row, we have to
-          // loop over all blocks in
-          // this blockrow and the
-          // corresponding row
-          // therein
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
-          for (unsigned int block_col=0; block_col
is_valid_entry(); ++entry)
               {
-                const unsigned int global_col
+                const size_type global_col
                   = index_mapping.local_to_global(block_col, entry->column());

-                if (distribute[global_col] != numbers::invalid_unsigned_int)
+                if (distribute[global_col] != numbers::invalid_size_type)
-                  // distribute entry at regular
-                  // row @p{row} and irregular column
-                  // global_col
+                  // distribute entry at regular row @p{row} and
+                  // irregular column global_col
                   {
-                    for (unsigned int q=0;
+                    for (size_type q=0;
                          q!=lines[distribute[global_col]].entries.size(); ++q)
                       sparsity.add (row,
                                     lines[distribute[global_col]].entries[q].first);
@@@ -1393,12 -1141,9 +1141,9 @@@
         }
       else
         {
-          // row must be
-          // distributed. split the
-          // whole row into the
-          // chunks defined by the
-          // blocks
+          // row must be distributed. split the whole row into the chunks
+          // defined by the blocks
-          for (unsigned int block_col=0; block_col
is_valid_entry(); ++entry)
               {
-                const unsigned int global_col
+                const size_type global_col
                   = index_mapping.local_to_global (block_col, entry->column());

-                if (distribute[global_col] == numbers::invalid_unsigned_int)
+                if (distribute[global_col] == numbers::invalid_size_type)
-                  // distribute entry at irregular
-                  // row @p{row} and regular column
-                  // global_col.
+                  // distribute entry at irregular row @p{row} and
+                  // regular column global_col.
                   {
-                    for (unsigned int q=0; q!=lines[distribute[row]].entries.size(); ++q)
+                    for (size_type q=0; q!=lines[distribute[row]].entries.size(); ++q)
                       sparsity.add (lines[distribute[row]].entries[q].first,
                                     global_col);
                   }
                 else
-                  // distribute entry at irregular
-                  // row @p{row} and irregular column
-                  // @p{global_col}
+                  // distribute entry at irregular row @p{row} and
+                  // irregular column @p{global_col}
                   {
-                    for (unsigned int p=0; p!=lines[distribute[row]].entries.size(); ++p)
-                      for (unsigned int q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
+                    for (size_type p=0; p!=lines[distribute[row]].entries.size(); ++p)
+                      for (size_type q=0; q!=lines[distribute[global_col]].entries.size(); ++q)
                         sparsity.add (lines[distribute[row]].entries[p].first,
                                       lines[distribute[global_col]].entries[q].first);
                   }
@@@ -1453,58 -1196,41 +1196,41 @@@ void ConstraintMatrix::condense (BlockC
   const BlockIndices &
     index_mapping = sparsity.get_column_indices();

-  const unsigned int n_blocks = sparsity.n_block_rows();
+  const size_type n_blocks = sparsity.n_block_rows();

-  // store for each index whether it must be
-  // distributed or not. If entry is
-  // numbers::invalid_size_type,
-  // no distribution is necessary.
-  // otherwise, the number states which line
-  // in the constraint matrix handles this
-  // index
+  // store for each index whether it must be distributed or not. If entry
+  // is numbers::invalid_unsigned_int, no distribution is necessary.
+  // otherwise, the number states which line in the constraint matrix
+  // handles this index
-  std::vector<unsigned int> distribute (sparsity.n_rows(),
-                                        numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);

-  for (unsigned int c=0; c
(c);

-  const unsigned int n_rows = sparsity.n_rows();
-  for (unsigned int row=0; row
+      const std::pair block_index = index_mapping.global_to_local(row);
-      const unsigned int block_row = block_index.first;
-      const unsigned int local_row = block_index.second;
+      const size_type block_row = block_index.first;
+      const size_type local_row = block_index.second;

-      if (distribute[row] == numbers::invalid_unsigned_int)
+      if (distribute[row] == numbers::invalid_size_type)
-        // regular line. loop over
-        // all columns and see
-        // whether this column must
-        // be distributed. note that
-        // as we proceed to
-        // distribute cols, the loop
-        // over cols may get longer.
+        // regular line. loop over all columns and see whether this column
+        // must be distributed. note that as we proceed to distribute cols,
+        // the loop over cols may get longer.
         //
-        // don't try to be clever
-        // here as in the algorithm
-        // for the
-        // CompressedSparsityPattern,
-        // as that would be much more
-        // complicated here. after
-        // all, we know that
-        // compressed patterns are
-        // inefficient...
+        // don't try to be clever here as in the algorithm for the
+        // CompressedSparsityPattern, as that would be much more
+        // complicated here. after all, we know that compressed patterns
+        // are inefficient...
         {
-          // to loop over all entries
-          // in this row, we have to
-          // loop over all blocks in
-          // this blockrow and the
-          // corresponding row
-          // therein
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
-          for (unsigned int block_col=0; block_col
 distribute (sparsity.n_rows(),
-              numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);

-  for (unsigned int c=0; c
(c);

-  const unsigned int n_rows = sparsity.n_rows();
-  for (unsigned int row=0; row
+      const std::pair block_index = index_mapping.global_to_local(row);
-      const unsigned int block_row = block_index.first;
-      const unsigned int local_row = block_index.second;
+      const size_type block_row = block_index.first;
+      const size_type local_row = block_index.second;

-      if (distribute[row] == numbers::invalid_unsigned_int)
+      if (distribute[row] == numbers::invalid_size_type)
-        // regular line. loop over
-        // all columns and see
-        // whether this column must
-        // be distributed. note that
-        // as we proceed to
-        // distribute cols, the loop
-        // over cols may get longer.
+        // regular line. loop over all columns and see whether this column
+        // must be distributed. note that as we proceed to distribute cols,
+        // the loop over cols may get longer.
         //
-        // don't try to be clever
-        // here as in the algorithm
-        // for the
-        // CompressedSparsityPattern,
-        // as that would be much more
-        // complicated here. after
-        // all, we know that
-        // compressed patterns are
-        // inefficient...
+        // don't try to be clever here as in the algorithm for the
+        // CompressedSparsityPattern, as that would be much more
+        // complicated here. after all, we know that compressed patterns
+        // are inefficient...
         {
-          // to loop over all entries
-          // in this row, we have to
-          // loop over all blocks in
-          // this blockrow and the
-          // corresponding row
-          // therein
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
-          for (unsigned int block_col=0; block_col
 distribute (sparsity.n_rows(),
-              numbers::invalid_unsigned_int);
+  std::vector<size_type> distribute (sparsity.n_rows(),
+                                     numbers::invalid_size_type);

-  for (unsigned int c=0; c
(c);

-  const unsigned int n_rows = sparsity.n_rows();
-  for (unsigned int row=0; row
+      const std::pair block_index = index_mapping.global_to_local(row);
-      const unsigned int block_row = block_index.first;
-      const unsigned int local_row = block_index.second;
+      const size_type block_row = block_index.first;
+      const size_type local_row = block_index.second;

-      if (distribute[row] == numbers::invalid_unsigned_int)
+      if (distribute[row] == numbers::invalid_size_type)
-        // regular line. loop over
-        // all columns and see
-        // whether this column must
-        // be distributed. note that
-        // as we proceed to
-        // distribute cols, the loop
-        // over cols may get longer.
+        // regular line. loop over all columns and see whether this column
+        // must be distributed. note that as we proceed to distribute cols,
+        // the loop over cols may get longer.
         //
-        // don't try to be clever
-        // here as in the algorithm
-        // for the
-        // CompressedSparsityPattern,
-        // as that would be much more
-        // complicated here. after
-        // all, we know that
-        // compressed patterns are
-        // inefficient...
+        // don't try to be clever here as in the algorithm for the
+        // CompressedSparsityPattern, as that would be much more
+        // complicated here. after all, we know that compressed patterns
+        // are inefficient...
         {
-          // to loop over all entries
-          // in this row, we have to
-          // loop over all blocks in
-          // this blockrow and the
-          // corresponding row
-          // therein
+          // to loop over all entries in this row, we have to loop over all
+          // blocks in this blockrow and the corresponding row therein
-          for (unsigned int block_col=0; block_col
+    const std::pair local_range = vec.local_range();
     my_indices.add_range (local_range.first,
                           local_range.second);
@@@ -1921,11 -1588,10 +1588,10 @@@
     for (constraint_iterator it = begin_my_constraints;
          it != end_my_constraints; ++it)
       {
-        // fill entry in line
-        // next_constraint.line by adding the
-        // different contributions
+        // fill entry in line next_constraint.line by adding the different
+        // contributions
         double new_value = it->inhomogeneity;
-        for (unsigned int i=0; i<it->entries.size(); ++i)
+        for (size_type i=0; i<it->entries.size(); ++i)
           new_value += (vec_distribute(it->entries[i].first) *
                         it->entries[i].second);
         vec(it->line) = new_value;
@@@ -1962,13 -1626,11 +1626,11 @@@ ConstraintMatrix::distribute (TrilinosW
     const constraint_iterator end_my_constraints
       = Utilities::lower_bound(lines.begin(),lines.end(),index_comparison);

-    // Here we search all the indices that we
-    // need to have read-access to - the local
-    // nodes and all the nodes that the
-    // constraints indicate. No caching done
-    // yet. would need some more clever data
-    // structures for doing that.
+    // Here we search all the indices that we need to have read-access to
+    // - the local nodes and all the nodes that the constraints indicate.
+    // No caching done yet. would need some more clever data structures
+    // for doing that.
-    const std::pair
+    const std::pair local_range = vec.block(block).local_range();
     my_indices.add_range (local_range.first,
                           local_range.second);
@@@ -2020,11 -1682,10 +1682,10 @@@
     for (constraint_iterator it = begin_my_constraints;
          it != end_my_constraints; ++it)
       {
-        // fill entry in line
-        // next_constraint.line by adding the
+        // fill entry in line next_constraint.line by adding the
         // different contributions
         double new_value = it->inhomogeneity;
-        for (unsigned int i=0; i<it->entries.size(); ++i)
+        for (size_type i=0; i<it->entries.size(); ++i)
           new_value += (vec_distribute(it->entries[i].first) *
                         it->entries[i].second);
         vec(it->line) = new_value;
@@@ -2096,11 -1753,10 +1753,10 @@@ ConstraintMatrix::distribute (PETScWrap
     for (constraint_iterator it = begin_my_constraints;
          it != end_my_constraints; ++it)
       {
-        // fill entry in line
-        // next_constraint.line by adding the
-        // different contributions
+        // fill entry in line next_constraint.line by adding the different
+        // contributions
         PetscScalar new_value = it->inhomogeneity;
-        for (unsigned int i=0; i<it->entries.size(); ++i)
+        for (size_type i=0; i<it->entries.size(); ++i)
           new_value += (PetscScalar(ghost_vec(it->entries[i].first)) *
                         it->entries[i].second);
         vec(it->line) = new_value;
@@@ -2139,17 -1794,15 +1794,16 @@@ bool ConstraintMatrix::is_identity_cons
-unsigned int ConstraintMatrix::max_constraint_indirections () const
+ConstraintMatrix::size_type
+ConstraintMatrix::max_constraint_indirections () const
 {
-  unsigned int return_value = 0;
+  size_type return_value = 0;
   for (std::vector<ConstraintLine>::const_iterator i=lines.begin();
        i!=lines.end(); ++i)
-    // use static cast, since
-    // typeof(size)==std::size_t, which is !=
+    // use static cast, since typeof(size)==std::size_t, which is !=
-    // unsigned int on AIX
+    // size_type on AIX
     return_value = std::max(return_value,
-                            static_cast<unsigned int>(i->entries.size()));
+                            static_cast<size_type>(i->entries.size()));

   return return_value;
 }
@@@ -2169,14 -1822,12 +1823,12 @@@ bool ConstraintMatrix::has_inhomogeneit
 void ConstraintMatrix::print (std::ostream &out) const
 {
-  for (unsigned int i=0; i!=lines.size(); ++i)
+  for (size_type i=0; i!=lines.size(); ++i)
     {
-      // output the list of
-      // constraints as pairs of dofs
-      // and their weights
+      // output the list of constraints as pairs of dofs and their weights
       if (lines[i].entries.size() > 0)
         {
-          for (unsigned int j=0; j
 0)
-          for (unsigned int j=0; j
" << lines[i].entries[j].first << "; // weight: " << lines[i].entries[j].second