From e4bee3d748299d02f0c2f1504c21ffb089861eb8 Mon Sep 17 00:00:00 2001 From: kronbichler Date: Thu, 26 Nov 2009 12:28:34 +0000 Subject: [PATCH] Apply changes from the distributed grid branch: Improve functions in IndexSet. Constructor for ConstraintMatrix with IndexSet. Rewrote code for local_to_global functions in order to make it more accessible. Provide some shortcut for deal.II matrices. git-svn-id: https://svn.dealii.org/trunk@20170 0785d39b-7218-0410-832d-ea1e28bc413d --- deal.II/base/include/base/index_set.h | 46 +- deal.II/base/source/index_set.cc | 4 +- deal.II/lac/include/lac/constraint_matrix.h | 244 +- .../include/lac/constraint_matrix.templates.h | 2133 ++++++++--------- .../lac/include/lac/sparse_matrix.templates.h | 1 + deal.II/lac/source/constraint_matrix.cc | 123 +- 6 files changed, 1340 insertions(+), 1211 deletions(-) diff --git a/deal.II/base/include/base/index_set.h b/deal.II/base/include/base/index_set.h index 4ef0d35edc..3f4dc6b0bb 100644 --- a/deal.II/base/include/base/index_set.h +++ b/deal.II/base/include/base/index_set.h @@ -304,7 +304,12 @@ class IndexSet && (range_1.end < range_2.end))); } - + + static bool end_compare(const IndexSet::Range & x, const IndexSet::Range & y) + { + return x.end < y.end; + } + friend inline bool operator== (const Range &range_1, const Range &range_2) @@ -353,6 +358,12 @@ class IndexSet * number than this value. */ unsigned int index_space_size; + + /** + * Actually perform the compress() + * operation. + */ + void do_compress() const; }; @@ -407,6 +418,19 @@ IndexSet::size () const } + +inline +void +IndexSet::compress () const +{ + if (is_compressed == true) + return; + + do_compress(); +} + + + inline void IndexSet::add_range (const unsigned int begin, @@ -588,17 +612,17 @@ IndexSet::index_within_set (const unsigned int n) const ExcMessage ("Given number is not an element of this set.")); Assert (n < size(), ExcIndexRange (n, 0, size())); -//TODO: this could be done more efficiently by using a binary search - for (std::vector::const_iterator p = ranges.begin(); - p != ranges.end(); ++p) - { - Assert (n >= p->begin, ExcInternalError()); - if (n < p->end) - return (n-p->begin) + p->nth_index_in_set; - } + Range r(n, n); - Assert (false, ExcInternalError()); - return numbers::invalid_unsigned_int; + std::vector::const_iterator p = std::lower_bound(ranges.begin(), + ranges.end(), + r, + Range::end_compare); + + Assert(p!=ranges.end(), ExcInternalError()); + Assert(p->begin<=n, ExcInternalError()); + Assert(nend, ExcInternalError()); + return (n-p->begin) + p->nth_index_in_set; } diff --git a/deal.II/base/source/index_set.cc b/deal.II/base/source/index_set.cc index db090467de..4523416f78 100644 --- a/deal.II/base/source/index_set.cc +++ b/deal.II/base/source/index_set.cc @@ -25,10 +25,8 @@ DEAL_II_NAMESPACE_OPEN void -IndexSet::compress () const +IndexSet::do_compress () const { - if (is_compressed == true) - return; // see if any of the // contiguous ranges can be // merged. since they are sorted by diff --git a/deal.II/lac/include/lac/constraint_matrix.h b/deal.II/lac/include/lac/constraint_matrix.h index 2f95b842ed..9d234d5400 100644 --- a/deal.II/lac/include/lac/constraint_matrix.h +++ b/deal.II/lac/include/lac/constraint_matrix.h @@ -10,6 +10,7 @@ // further information on this license. 
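//---------------------------------------------------------------------------
// A usage sketch (not part of the patch): exercising the rewritten
// IndexSet::index_within_set() through the public interface shown above.
// The index space and ranges are invented for illustration only.

#include <base/index_set.h>

void index_set_sketch ()
{
  dealii::IndexSet set (100);      // index space [0,100)
  set.add_range (10, 20);          // elements 10..19
  set.add_range (40, 45);          // elements 40..44
  set.compress ();                 // inline early-out, then do_compress()

  // element 42 sits at position 12 within the set; the new implementation
  // locates its range with std::lower_bound and Range::end_compare instead
  // of the former linear scan.
  const unsigned int position = set.index_within_set (42);   // == 12
  (void) position;
}
//---------------------------------------------------------------------------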
// //--------------------------------------------------------------------------- + #ifndef __deal2__constraint_matrix_h #define __deal2__constraint_matrix_h @@ -17,8 +18,8 @@ #include #include +#include #include -#include #include #include @@ -47,6 +48,11 @@ template class SparseMatrix; template class BlockSparseMatrix; class BlockIndices; +namespace internals +{ + struct GlobalRowsFromLocal; +} + /** * This class implements dealing with linear (possibly inhomogeneous) @@ -330,13 +336,23 @@ class ConstraintMatrix : public Subscriptor /** * Constructor */ - ConstraintMatrix (); + ConstraintMatrix (const IndexSet & local_constraints = IndexSet()); /** * Copy constructor */ ConstraintMatrix (const ConstraintMatrix &constraint_matrix); + /** + * Reinit the ConstraintMatrix + * object. This function is only relevant + * in the distributed case, to supply a + * different IndexSet. Otherwise this + * routine is equivalent to calling + * clear(). + */ + void reinit (const IndexSet & local_constraints = IndexSet()); + /** * @name Adding constraints * @{ @@ -1473,6 +1489,13 @@ class ConstraintMatrix : public Subscriptor */ struct ConstraintLine { + /** + * A data type in which we store the list + * of entries that make up the homogenous + * part of a constraint. + */ + typedef std::vector > Entries; + /** * Number of this line. Since only * very few lines are stored, we @@ -1492,7 +1515,7 @@ class ConstraintMatrix : public Subscriptor * applies as what is said for * ConstraintMatrix@p ::lines. */ - std::vector > entries; + Entries entries; /** * Value of the inhomogeneity. @@ -1552,21 +1575,23 @@ class ConstraintMatrix : public Subscriptor std::vector lines; /** - * A list of pointers that contains the - * address of the ConstraintLine of a - * constrained degree of freedom, or NULL - * if the degree of freedom is not - * constrained. The NULL return value - * returns thus whether there is a + * A list of unsigned integers that + * contains the position of the + * ConstraintLine of a constrained degree + * of freedom, or @p + * numbers::invalid_unsigned_int if the + * degree of freedom is not + * constrained. The @p invalid_unsigned + * int return value returns thus whether + * there is a constraint line for a given + * degree of freedom index. Note that + * this class has no notion of how many + * degrees of freedom there really are, + * so if we check whether there is a * constraint line for a given degree of - * freedom index. Note that this class - * has no notion of how many degrees of - * freedom there really are, so if we - * check whether there is a constraint - * line for a given degree of freedom, - * then this vector may actually be - * shorter than the index of the DoF we - * check for. + * freedom, then this vector may actually + * be shorter than the index of the DoF + * we check for. * * This field exists since when adding a * new constraint line we have to figure @@ -1606,7 +1631,17 @@ class ConstraintMatrix : public Subscriptor * contributions into vectors and * matrices. */ - std::vector lines_cache; + std::vector lines_cache; + + /** + * This IndexSet is used to limit the + * lines to save in the ContraintMatrix + * to a subset. This is necessary, + * because the lines_cache vector would + * become too big in a distributed + * calculation. + */ + IndexSet local_lines; /** * Store whether the arrays are sorted. 
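//---------------------------------------------------------------------------
// A usage sketch (not part of the patch): the intended use of the new
// IndexSet constructor and reinit() in a distributed computation. The index
// range and DoF numbers are invented; 'locally_relevant' stands for whatever
// subset of DoFs this process needs to store constraints for.

#include <base/index_set.h>
#include <lac/constraint_matrix.h>

void local_constraints_sketch ()
{
  dealii::IndexSet locally_relevant (1000000);
  locally_relevant.add_range (400000, 500000);
  locally_relevant.compress ();

  // lines_cache is addressed through calculate_line_index(), i.e. by the
  // position inside 'locally_relevant', so its length is proportional to
  // the local subset rather than the global number of DoFs.
  dealii::ConstraintMatrix constraints (locally_relevant);
  constraints.add_line (400123);
  constraints.add_entry (400123, 400124, 0.5);
  constraints.close ();
}
//---------------------------------------------------------------------------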
@@ -1614,6 +1649,13 @@ class ConstraintMatrix : public Subscriptor */ bool sorted; + /** + * Internal function to calculate the + * index of line @p line in the vector + * lines_cache using local_lines. + */ + unsigned int calculate_line_index(unsigned int line) const; + /** * Return @p true if the weight of an * entry (the second element of the @@ -1684,6 +1726,53 @@ class ConstraintMatrix : public Subscriptor const Table<2,bool> &dof_mask, internal::bool2type) const; + /** + * Internal helper function for + * distribute_local_to_global function. + */ + template + void + make_sorted_dof_list (const FullMatrix &local_matrix, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + internals::GlobalRowsFromLocal &global_rows, + std::vector &constrained_lines) const; + + /** + * Internal helper function for + * add_entries_local_to_global function. + */ + template + void + make_sorted_dof_list (const std::vector &local_dof_indices, + const bool keep_constrained_entries, + SparsityType &sparsity_pattern, + std::vector &active_dofs) const; + + /** + * Internal helper function for + * add_entries_local_to_global function. + */ + template + void + make_sorted_dof_list (const Table<2,bool> &dof_mask, + const std::vector &local_dof_indices, + const bool keep_constrained_entries, + SparsityType &sparsity_pattern, + internals::GlobalRowsFromLocal &global_rows) const; + + /** + * Internal helper function for + * distribute_local_to_global function. + */ + double + resolve_vector_entry (const unsigned int i, + const internals::GlobalRowsFromLocal &global_rows, + const Vector &local_vector, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix, + const std::vector &constrained_lines) const; + #ifdef DEAL_II_USE_TRILINOS //TODO: Make use of the following member thread safe /** @@ -1699,21 +1788,23 @@ class ConstraintMatrix : public Subscriptor /* ---------------- template and inline functions ----------------- */ inline -ConstraintMatrix::ConstraintMatrix () - : - lines (), - sorted (false) +ConstraintMatrix::ConstraintMatrix (const IndexSet &local_constraints) + : + lines (), + local_lines (local_constraints), + sorted (false) {} inline ConstraintMatrix::ConstraintMatrix (const ConstraintMatrix &constraint_matrix) - : - Subscriptor (), - lines (constraint_matrix.lines), - lines_cache (constraint_matrix.lines_cache), - sorted (constraint_matrix.sorted) + : + Subscriptor (), + lines (constraint_matrix.lines), + lines_cache (constraint_matrix.lines_cache), + local_lines (constraint_matrix.local_lines), + sorted (constraint_matrix.sorted) #ifdef DEAL_II_USE_TRILINOS ,vec_distribute () #endif @@ -1726,40 +1817,38 @@ ConstraintMatrix::add_line (const unsigned int line) { Assert (sorted==false, ExcMatrixIsClosed()); - + // the following can happen when we + // compute with distributed meshes + // and dof handlers and we + // constrain a degree of freedom + // whose number we don't have + // locally. 
if we don't abort here + // the program will try to allocate + // several terabytes of memory to + // resize the various arrays below + // :-) + Assert (line != numbers::invalid_unsigned_int, + ExcInternalError()); + const unsigned int line_index = calculate_line_index (line); // check whether line already exists; it // may, in which case we can just quit - if ((line < lines_cache.size()) - && - (lines_cache[line] != 0)) - { - Assert (lines_cache[line]->line == line, ExcInternalError()); - return; - } + if (is_constrained(line)) + return; // if necessary enlarge vector of // existing entries for cache - if (line >= lines_cache.size()) - lines_cache.resize (line+1,0); - // enlarge lines vector if necessary. - // need to reset the pointers to the - // ConstraintLine entries in that case, - // since new memory will be allocated - if (lines.size() == lines.capacity()) - { - lines.reserve (2*lines.size()+8); - std::vector::const_iterator it = lines.begin(); - for ( ; it != lines.end(); ++it) - lines_cache[it->line] = &*it; - } + if (line_index >= lines_cache.size()) + lines_cache.resize (std::max(2*static_cast(lines_cache.size()), + line_index+1), + numbers::invalid_unsigned_int); // push a new line to the end of the // list lines.push_back (ConstraintLine()); lines.back().line = line; lines.back().inhomogeneity = 0.; - lines_cache[line] = &lines.back(); + lines_cache[line_index] = lines.size()-1; } @@ -1782,10 +1871,11 @@ ConstraintMatrix::add_entry (const unsigned int line, // in any case: exit the function if an // entry for this column already exists, // since we don't want to enter it twice - ConstraintLine* line_ptr = const_cast(lines_cache[line]); - Assert (line_ptr != 0, ExcInternalError()); + Assert (lines_cache[calculate_line_index(line)] != numbers::invalid_unsigned_int, + ExcInternalError()); + ConstraintLine* line_ptr = &lines[lines_cache[calculate_line_index(line)]]; Assert (line_ptr->line == line, ExcInternalError()); - for (std::vector >::const_iterator + for (ConstraintLine::Entries::const_iterator p=line_ptr->entries.begin(); p != line_ptr->entries.end(); ++p) if (p->first == column) @@ -1805,7 +1895,7 @@ void ConstraintMatrix::set_inhomogeneity (const unsigned int line, const double value) { - ConstraintLine* line_ptr = const_cast(lines_cache[line]); + ConstraintLine* line_ptr = &lines[lines_cache[calculate_line_index(line)]]; line_ptr->inhomogeneity = value; } @@ -1815,9 +1905,10 @@ inline bool ConstraintMatrix::is_constrained (const unsigned int index) const { - return ((index < lines_cache.size()) + const unsigned int line_index = calculate_line_index(index); + return ((line_index < lines_cache.size()) && - (lines_cache[index] != 0)); + (lines_cache[line_index] != numbers::invalid_unsigned_int)); } @@ -1827,7 +1918,7 @@ bool ConstraintMatrix::is_inhomogeneously_constrained (const unsigned int index) const { return (is_constrained(index) && - lines_cache[index]->inhomogeneity != 0); + lines[lines_cache[calculate_line_index(index)]].inhomogeneity != 0); } @@ -1865,14 +1956,14 @@ void ConstraintMatrix:: global_vector(*local_indices_begin) += *local_vector_begin; else { - const ConstraintLine* position = - lines_cache[*local_indices_begin]; - for (unsigned int j=0; jentries.size(); ++j) + const ConstraintLine& position = + lines[lines_cache[calculate_line_index(*local_indices_begin)]]; + for (unsigned int j=0; jentries[j].first) == false, + Assert (is_constrained(position.entries[j].first) == false, ExcMessage ("Tried to distribute to a fixed dof.")); - 
global_vector(position->entries[j].first) - += *local_vector_begin * position->entries[j].second; + global_vector(position.entries[j].first) + += *local_vector_begin * position.entries[j].second; } } } @@ -1896,21 +1987,36 @@ void ConstraintMatrix::get_dof_values (const VectorType &global_vector, *local_vector_begin = global_vector(*local_indices_begin); else { - const ConstraintLine* position = - lines_cache[*local_indices_begin]; - typename VectorType::value_type value = position->inhomogeneity; - for (unsigned int j=0; jentries.size(); ++j) + const ConstraintLine & position = + lines[lines_cache[calculate_line_index(*local_indices_begin)]]; + typename VectorType::value_type value = position.inhomogeneity; + for (unsigned int j=0; jentries[j].first) == false, + Assert (is_constrained(position.entries[j].first) == false, ExcMessage ("Tried to distribute to a fixed dof.")); - value += (global_vector(position->entries[j].first) * - position->entries[j].second); + value += (global_vector(position.entries[j].first) * + position.entries[j].second); } *local_vector_begin = value; } } } + + +inline unsigned int +ConstraintMatrix::calculate_line_index(unsigned int line) const +{ + //IndexSet is unused (serial case) + if (!local_lines.size()) + return line; + + Assert(local_lines.is_element(line), ExcInternalError()); + + return local_lines.index_within_set(line); +} + + DEAL_II_NAMESPACE_CLOSE #endif diff --git a/deal.II/lac/include/lac/constraint_matrix.templates.h b/deal.II/lac/include/lac/constraint_matrix.templates.h index b5d535b48f..a9ea353b30 100644 --- a/deal.II/lac/include/lac/constraint_matrix.templates.h +++ b/deal.II/lac/include/lac/constraint_matrix.templates.h @@ -10,20 +10,17 @@ // further information on this license. // //--------------------------------------------------------------------------- -#ifndef __deal2__dof_constraints_templates_h -#define __deal2__dof_constraints_templates_h + +#ifndef __deal2__constraint_matrix_templates_h +#define __deal2__constraint_matrix_templates_h -#include #include -#include + +#include #include #include #include -#include -#include -#include -#include #include #include @@ -755,35 +752,31 @@ distribute_local_to_global (const Vector &local_vector, continue; } - const ConstraintLine * position = - lines_cache.size() <= local_dof_indices[i] ? 0 : - lines_cache[local_dof_indices[i]]; + const unsigned int line_index = calculate_line_index (local_dof_indices[i]); + const ConstraintLine * position = + lines_cache.size() <= line_index ? 
0 : &lines[lines_cache[line_index]]; const double val = position->inhomogeneity; if (val != 0) for (unsigned int j=0; jentries.size(); ++q) - { - Assert (is_constrained(position_j->entries[q].first) == false, - ExcMessage ("Tried to distribute to a fixed dof.")); - global_vector(position_j->entries[q].first) - -= val * position_j->entries[q].second * matrix_entry; - } - } - } + const ConstraintLine & position_j = + lines[lines_cache[calculate_line_index(local_dof_indices[j])]]; + for (unsigned int q=0; q > *constraints; + mutable unsigned int constraint_position; }; inline - distributing::distributing (const unsigned int global_row, + Distributing::Distributing (const unsigned int global_row, const unsigned int local_row) : global_row (global_row), local_row (local_row), - constraints (0) {} + constraint_position (numbers::invalid_unsigned_int) {} inline - distributing::distributing (const distributing &in) : - constraints (0) + Distributing::Distributing (const Distributing &in) : + constraint_position (numbers::invalid_unsigned_int) {*this = (in);} inline - distributing::~distributing () - { - if (constraints != 0) - { - delete constraints; - constraints = 0; - } - } - - inline - distributing & distributing::operator = (const distributing &in) + Distributing & Distributing::operator = (const Distributing &in) { global_row = in.global_row; local_row = in.local_row; // the constraints pointer should not // contain any data here. - Assert (constraints == 0, ExcInternalError()); + Assert (constraint_position == numbers::invalid_unsigned_int, ExcInternalError()); - if (in.constraints != 0) + if (in.constraint_position != numbers::invalid_unsigned_int) { - constraints = in.constraints; - in.constraints = 0; + constraint_position = in.constraint_position; + in.constraint_position = numbers::invalid_unsigned_int; } return *this; } - inline - bool distributing::operator < (const distributing &in) const + + + // this is a cache for constraints. 
+ // could use std::vector, but that + // needs a lot of memory allocations, + // is much more expensive + struct DataCache { - return global_row < in.global_row; - } + DataCache () : element_size (0), data (0) {}; + ~DataCache() { if (data != 0) {delete [] data;} }; + void reinit () { Assert (element_size == 0, ExcInternalError()); + element_size = 6; data = new std::pair [20*6]; + individual_size.resize(20), n_used_elements = 0;}; + unsigned int element_size; + std::pair * data; + std::vector individual_size; + unsigned int n_used_elements; + + unsigned int insert_new_index (const std::pair &pair) + { + if (element_size == 0) + reinit(); + if (n_used_elements == individual_size.size()) + { + std::pair * new_data = + new std::pair [2*individual_size.size()*element_size]; + memcpy (new_data, data, individual_size.size()*element_size* + sizeof(std::pair)); + delete [] data; + data = new_data; + individual_size.resize (2*individual_size.size(), 0); + } + unsigned int index = n_used_elements; + data[index*element_size] = pair; + individual_size[index] = 1; + ++n_used_elements; + return index; + } + + void append_index (const unsigned int index, + const std::pair &pair) + { + Assert (index < n_used_elements, ExcIndexRange (index, 0, n_used_elements)); + const unsigned int my_size = individual_size[index]; + if (my_size == element_size) + { + std::pair * new_data = + new std::pair [2*individual_size.size()*element_size]; + for (unsigned int i=0; i)); + delete [] data; + data = new_data; + element_size *= 2; + } + data[index*element_size+my_size] = pair; + individual_size[index]++; + }; + + unsigned int + get_size (const unsigned int index) const { return individual_size[index]; }; + + const std::pair * + get_entry (const unsigned int index) const { return &data[index*element_size]; }; + }; + + + + // collects all the global rows and + // their origin (direct/constraint) + // basically a vector of distributing + // and the data cache. with some + // specialized sort and insert functions. + struct GlobalRowsFromLocal + { + GlobalRowsFromLocal (const unsigned int local_dof_size) : + total_dof_indices (local_dof_size) {}; + void insert_index (const unsigned int global_row, + const unsigned int local_row, + const double constraint_value); + void sort (const unsigned int added_rows); + const unsigned int & n_additional_dofs (const unsigned int local_dofs); + unsigned int size () const { return total_dof_indices.size(); }; + unsigned int & global_row (const unsigned int loc_index) + { return total_dof_indices[loc_index].global_row; }; + unsigned int size (const unsigned int loc_index) const + { return (total_dof_indices[loc_index].constraint_position == + numbers::invalid_unsigned_int ? 
+ 0 : + data_cache.get_size(total_dof_indices[loc_index].constraint_position)); }; + const unsigned int & global_row (const unsigned int loc_index) const + { return total_dof_indices[loc_index].global_row; }; + const unsigned int & local_row (const unsigned int loc_index) const + { return total_dof_indices[loc_index].local_row; }; + unsigned int & local_row (const unsigned int loc_index) + { return total_dof_indices[loc_index].local_row; }; + unsigned int local_row (const unsigned int loc_index, + const unsigned int index_in_constraint) const + { return (data_cache.get_entry(total_dof_indices[loc_index].constraint_position) + [index_in_constraint]).first; }; + double constraint_value (const unsigned int loc_index, + const unsigned int index_in_constraint) const + { return (data_cache.get_entry(total_dof_indices[loc_index].constraint_position) + [index_in_constraint]).second; }; + bool have_indirect_rows () const { return data_cache.element_size; } + + std::vector total_dof_indices; + DataCache data_cache; + }; // a function that appends an additional // row to the list of values, or appends // a value to an already existing // row. Similar functionality as for - // std::map, + // std::map, // but here done for a std::vector of - // data type distributing, and much + // data type Distributing, and much // faster. inline void - insert_index (std::vector &my_indices, - const unsigned int row, - const std::pair constraint) + GlobalRowsFromLocal::insert_index (const unsigned int global_row, + const unsigned int local_row, + const double constraint_value) { - typedef std::vector::iterator index_iterator; + typedef std::vector::iterator index_iterator; index_iterator pos, pos1; - distributing row_value (row); + Distributing row_value (global_row); + std::pair constraint (local_row, constraint_value); // check whether the list was really // sorted before entering here #ifdef DEBUG - for (unsigned int i=1; iglobal_row == row) + pos = std::lower_bound (total_dof_indices.begin(), + total_dof_indices.end(), + row_value); + if (pos->global_row == global_row) pos1 = pos; else - pos1 = my_indices.insert(pos, row_value); + pos1 = total_dof_indices.insert(pos, row_value); } - if (&*pos1->constraints == 0) - pos1->constraints = - new std::vector > (1,constraint); + if (pos1->constraint_position == numbers::invalid_unsigned_int) + pos1->constraint_position = data_cache.insert_new_index (constraint); else - pos1->constraints->push_back (constraint); + data_cache.append_index (pos1->constraint_position, constraint); } - + inline + void + GlobalRowsFromLocal::sort (const unsigned int added_rows) + { // this sort algorithm sorts a vector of - // distributing elements, but does not + // Distributing elements, but does not // take the constraints into // account. this means that in case that // constraints are already inserted, this - // function does not work as - // expected. shellsort is very fast in + // function does not work as expected. + // we use shellsort, which is very fast in // case the indices are already sorted // (which is the usual case with DG // elements), and not too slow in other // cases - inline - void - list_shellsort (std::vector &my_indices) - { unsigned int i, j, j2, temp, templ, istep; - unsigned step; + unsigned int step; - // in debug mode, check whether the + // check whether the // constraints are really empty. 
+ total_dof_indices.resize(added_rows); #ifdef DEBUG - for (unsigned int i=0; i 0) { @@ -1104,19 +1198,19 @@ namespace internals istep = step; j = i; j2 = j-istep; - temp = my_indices[i].global_row; - templ = my_indices[i].local_row; - if (my_indices[j2].global_row > temp) + temp = total_dof_indices[i].global_row; + templ = total_dof_indices[i].local_row; + if (total_dof_indices[j2].global_row > temp) { - while ((j >= istep) && (my_indices[j2].global_row > temp)) + while ((j >= istep) && (total_dof_indices[j2].global_row > temp)) { - my_indices[j].global_row = my_indices[j2].global_row; - my_indices[j].local_row = my_indices[j2].local_row; + total_dof_indices[j].global_row = total_dof_indices[j2].global_row; + total_dof_indices[j].local_row = total_dof_indices[j2].local_row; j = j2; j2 -= istep; } - my_indices[j].global_row = temp; - my_indices[j].local_row = templ; + total_dof_indices[j].global_row = temp; + total_dof_indices[j].local_row = templ; } } step = step>>1; @@ -1130,6 +1224,45 @@ namespace internals template inline void + make_block_starts (const BlockType &block_object, + GlobalRowsFromLocal &global_rows, + std::vector &block_starts) + { + Assert (block_starts.size() == block_object.n_block_rows() + 1, + ExcDimensionMismatch(block_starts.size(), + block_object.n_block_rows()+1)); + + typedef std::vector::iterator row_iterator; + row_iterator block_indices = global_rows.total_dof_indices.begin(); + + const unsigned int num_blocks = block_object.n_block_rows(); + + // find end of rows. + block_starts[0] = 0; + for (unsigned int i=1;i + inline + void make_block_starts (const BlockType &block_object, std::vector &row_indices, std::vector &block_starts) @@ -1158,178 +1291,82 @@ namespace internals // transform row indices to local index // space for (unsigned int i=block_starts[1]; i -inline -void -ConstraintMatrix:: -distribute_local_to_global (const FullMatrix &local_matrix, - const Vector &local_vector, - const std::vector &local_dof_indices, - MatrixType &global_matrix, - VectorType &global_vector, - internal::bool2type) const -{ - // check whether we work on real vectors - // or we just used a dummy when calling - // the other function above. - const bool use_vectors = (local_vector.size() == 0 && - global_vector.size() == 0) ? false : true; - Assert (local_matrix.n() == local_dof_indices.size(), - ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size())); - Assert (local_matrix.m() == local_dof_indices.size(), - ExcDimensionMismatch(local_matrix.m(), local_dof_indices.size())); - Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); - if (use_vectors == true) - { - Assert (local_matrix.m() == local_vector.size(), - ExcDimensionMismatch(local_matrix.m(), local_vector.size())); - Assert (global_matrix.m() == global_vector.size(), - ExcDimensionMismatch(global_matrix.m(), global_vector.size())); - } - Assert (sorted == true, ExcMatrixNotClosed()); - const unsigned int n_local_dofs = local_dof_indices.size(); + // resolves constraints of one column + // at the innermost loop. 
goes through + // the origin of each global entry and + // finds out which data we need to collect + inline + double resolve_matrix_entry (const GlobalRowsFromLocal&global_rows, + const unsigned int i, + const unsigned int j, + const unsigned int loc_row, + const FullMatrix &local_matrix, + const double* matrix_ptr) + { + const unsigned int loc_col = global_rows.local_row(j); + double col_val; - double average_diagonal = 0; - for (unsigned int i=0; i my_indices (n_local_dofs); - std::vector > constraint_lines; + // case 2: row has no direct contribution in + // local matrix + else + col_val = 0; - // cache whether we have to resolve any - // indirect rows generated from resolving - // constrained dofs. - bool have_indirect_rows = false; - { - unsigned int added_rows = 0; - // first add the indices in an unsorted - // way and only keep track of the - // constraints that appear. They are - // resolved in a second step. - for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - Assert (lines_cache[local_dof_indices[i]]->line == local_dof_indices[i], - ExcInternalError()); + double add_this = loc_col != numbers::invalid_unsigned_int ? + local_matrix(global_rows.local_row(i,q), loc_col) : 0; + + for (unsigned int p=0; pentries.size(); ++q) - { - have_indirect_rows = true; - internals::insert_index(my_indices, position->entries[q].first, - std::make_pair - (local_row, position->entries[q].second)); - } - - // to make sure that the global matrix - // remains invertible, we need to do - // something with the diagonal - // elements. add the absolute value of - // the local matrix, so the resulting - // entry will always be positive and - // furthermore be in the same order of - // magnitude as the other elements of the - // matrix - // - // note that this also captures the - // special case that a dof is both - // constrained and fixed (this can happen - // for hanging nodes in 3d that also - // happen to be on the boundary). in that - // case, following the above program - // flow, it is realized that when - // distributing the row and column no - // elements of the matrix are actually - // touched if all the degrees of freedom - // to which this dof is constrained are - // also constrained (the usual case with - // hanging nodes in 3d). however, in the - // line below, we do actually do - // something with this dof - const typename MatrixType::value_type new_diagonal - = (std::fabs(local_matrix(local_row,local_row)) != 0 ? - std::fabs(local_matrix(local_row,local_row)) : average_diagonal); - global_matrix.add(global_row, global_row, new_diagonal); - } - const unsigned int n_actual_dofs = my_indices.size(); - - // create arrays for the column data - // (indices and values) that will then be - // written into the matrix. - std::vector cols (n_actual_dofs); - std::vector vals (n_actual_dofs); - typedef std::vector > constraint_format; - // now do the actual job. - for (unsigned int i=0; i + inline + void + resolve_matrix_row (const GlobalRowsFromLocal&global_rows, + const unsigned int i, + const unsigned int column_start, + const unsigned int column_end, + const FullMatrix &local_matrix, + unsigned int * &col_ptr, + number * &val_ptr) + { + Assert (global_rows.size() >= column_end, + ExcIndexRange (column_end, 0, global_rows.size())); + const unsigned int loc_row = global_rows.local_row(i); // fast function if there are no indirect // references to any of the local rows at @@ -1337,127 +1374,688 @@ distribute_local_to_global (const FullMatrix &local_matrix, // of checks). 
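//---------------------------------------------------------------------------
// A note (not part of the patch): written out as a formula, for global row
// I = global_row(i) and column J = global_row(j), with constraints
// x_I = sum_q c_{i,q} x_{K_q} + b_I (the inhomogeneity b only enters the
// right hand side via resolve_vector_entry()), resolve_matrix_entry()
// accumulates
//
//   A(I,J) += M(l_i, l_j)
//           + sum_p c_{j,p} M(l_i, l_{j,p})
//           + sum_q c_{i,q} M(l_{i,q}, l_j)
//           + sum_q sum_p c_{i,q} c_{j,p} M(l_{i,q}, l_{j,p}),
//
// where M is the local cell matrix, l_i = local_row(i) and
// l_{i,q} = local_row(i,q); terms whose direct local row or column is
// invalid_unsigned_int simply drop out.
//---------------------------------------------------------------------------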
the only check we actually // need to perform is whether the matrix // element is zero. - if (have_indirect_rows == false) - { - Assert(loc_row < n_local_dofs, ExcInternalError()); - const double * matrix_ptr = &local_matrix(loc_row, 0); - - for (unsigned int j=0; j < n_actual_dofs; ++j) - { - const unsigned int loc_col = my_indices[j].local_row; - Assert(loc_col < n_local_dofs, ExcInternalError()); - - const double col_val = matrix_ptr[loc_col]; - if (col_val != 0) - { - *val_ptr++ = static_cast - (col_val); - *col_ptr++ = my_indices[j].global_row; - } - } + if (global_rows.have_indirect_rows() == false) + { + Assert(loc_row < local_matrix.m(), ExcInternalError()); + const double * matrix_ptr = &local_matrix(loc_row, 0); - if (use_vectors == true) - { - val = local_vector(loc_row); - - // need to account for inhomogeneities - // here: thie corresponds to eliminating - // the respective column in the local - // matrix with value on the right hand - // side. - for (unsigned int i=0; iinhomogeneity * - matrix_ptr[constraint_lines[i].first]; - } - } + for (unsigned int j=column_start; j (col_val); + *col_ptr++ = global_rows.global_row(j); + } + } + } // more difficult part when there are // indirect references and when we need // to do some more checks. - else - { - const double * matrix_ptr = 0; - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_row < n_local_dofs, ExcInternalError()); - matrix_ptr = &local_matrix(loc_row, 0); - } - for (unsigned int j=0; j < n_actual_dofs; ++j) - { - double col_val; - const unsigned int loc_col = my_indices[j].local_row; + else + { + const double * matrix_ptr = 0; + if (loc_row != numbers::invalid_unsigned_int) + { + Assert (loc_row < local_matrix.m(), ExcInternalError()); + matrix_ptr = &local_matrix(loc_row, 0); + } + for (unsigned int j=column_start; j (col_val); + *col_ptr++ = global_rows.global_row(j); + } + } + } + } - // account for indirect contributions by - // constraints in column - if (my_indices[j].constraints != 0) - { - constraint_format &constraint_j = *my_indices[j].constraints; - for (unsigned int p=0; p 0, ExcInternalError()); + // specialized function that can write + // into the row of a SparseMatrix + template + inline + void add_value (const double value, + const unsigned int row, + const unsigned int column, + const unsigned int * col_ptr, + const bool are_on_diagonal, + unsigned int &counter, + number *val_ptr) + { + if (value != 0.) 
+ { + if (are_on_diagonal) + { + val_ptr[0] += value; + return; + } + while (col_ptr[counter] < column) + ++counter; + Assert (col_ptr[counter] == column, + typename SparseMatrix::ExcInvalidIndex(row, column)); + val_ptr[counter] += static_cast(value); + } + } - for (unsigned int q=0; q + inline + void + resolve_matrix_row (const GlobalRowsFromLocal&global_rows, + const unsigned int i, + const unsigned int column_start, + const unsigned int column_end, + const FullMatrix &local_matrix, + SparseMatrix *sparse_matrix) + { + Assert (global_rows.size() >= column_end, + ExcIndexRange (column_end, 0, global_rows.size())); + const unsigned int row = global_rows.global_row(i); + const unsigned int loc_row = global_rows.local_row(i); + const SparsityPattern & sparsity = sparse_matrix->get_sparsity_pattern(); + const std::size_t * row_start = sparsity.get_rowstart_indices(); + const unsigned int * sparsity_struct = sparsity.get_column_numbers(); + const unsigned int * col_ptr = &sparsity_struct[row_start[row]]; + number * val_ptr = &sparse_matrix->global_entry (row_start[row]); + const bool optimize_diagonal = sparsity.optimize_diagonal(); + unsigned int counter = optimize_diagonal; + + // distinguish three cases about what + // can happen (in order to avoid if() + // at the innermost loop position) + // for checking whether the diagonal is + // the first element of the row + if (!optimize_diagonal) // case 1: no diagonal optimization + { + if (global_rows.have_indirect_rows() == false) + { + Assert(loc_row < local_matrix.m(), + ExcIndexRange(loc_row, 0, local_matrix.m())); + const double * matrix_ptr = &local_matrix(loc_row, 0); + + for (unsigned int j=column_start; j=column_start && i &dof_mask, + std::vector::iterator &col_ptr) + { + const unsigned int loc_row = global_rows.local_row(i); + + // fast function if there are no indirect + // references to any of the local rows at + // all on this set of dofs + if (global_rows.have_indirect_rows() == false) + { + Assert(loc_row < dof_mask.n_rows(), + ExcInternalError()); + + for (unsigned int j=column_start; j - (col_val); - *col_ptr++ = my_indices[j].global_row; - } + add_this_index: + *col_ptr++ = global_rows.global_row(j); + } + } + } + + +} // end of namespace internals + + + + // Basic idea of setting up a list of + // all global dofs: first find all rows and columns + // that we are going to write touch, + // and then go through the + // lines and collect all the local rows that + // are related to it. +template +inline +void +ConstraintMatrix:: +make_sorted_dof_list (const FullMatrix &local_matrix, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + internals::GlobalRowsFromLocal &global_rows, + std::vector &constrained_lines) const +{ + const unsigned int n_local_dofs = local_dof_indices.size(); + + double average_diagonal = 0; + for (unsigned int i=0; i(n_local_dofs); + + // when distributing the local data to + // the global matrix, we can quite + // cheaply sort the indices (obviously, + // this introduces the need for + // allocating some memory on the way, but + // we need to do this only for rows, + // whereas the distribution process + // itself goes over rows and + // columns). This has the advantage that + // when writing into the global matrix, + // we can make use of the sortedness. + + // so the first step is to create a + // sorted list of all row values that are + // possible. 
these values are either the + // rows from unconstrained dofs, or some + // indices introduced by dofs constrained + // to a combination of some other + // dofs. regarding the data type, choose + // an STL vector of a pair of unsigned + // ints (for global columns) and internal + // data (containing local columns + + // possible jumps from + // constraints). Choosing an STL map or + // anything else M.K. knows of would be + // much more expensive here! + + // cache whether we have to resolve any + // indirect rows generated from resolving + // constrained dofs. + unsigned int added_rows = 0; + bool have_inhomogeneities = false; + + // first add the indices in an unsorted + // way and only keep track of the + // constraints that appear. They are + // resolved in a second step. + for (unsigned int i = 0; i +inline +void +ConstraintMatrix:: + make_sorted_dof_list (const std::vector &local_dof_indices, + const bool keep_constrained_entries, + SparsityType &sparsity_pattern, + std::vector &actual_dof_indices) const +{ + const unsigned int n_local_dofs = local_dof_indices.size(); + unsigned int added_rows = 0; + for (unsigned int i = 0; i0; --i) + { + const unsigned int local_row = actual_dof_indices.back(); + actual_dof_indices.pop_back(); + const unsigned int global_row = local_dof_indices[local_row]; + const ConstraintLine & position = + lines[lines_cache[calculate_line_index(global_row)]]; + for (unsigned int q=0; q::iterator it = + std::lower_bound(actual_dof_indices.begin(), + actual_dof_indices.end()-i+1, + new_index); + if (*it != new_index) + actual_dof_indices.insert(it, new_index); } + } + + if (keep_constrained_entries == true) + { + for (unsigned int j=0; j +inline +void +ConstraintMatrix:: + make_sorted_dof_list (const Table<2,bool> &dof_mask, + const std::vector &local_dof_indices, + const bool keep_constrained_entries, + SparsityType &sparsity_pattern, + internals::GlobalRowsFromLocal &global_rows) const +{ + // cache whether we have to resolve any + // indirect rows generated from resolving + // constrained dofs. + std::vector constrained_lines; + unsigned int added_rows = 0; + const unsigned int n_local_dofs = local_dof_indices.size(); + + for (unsigned int i = 0; ientries.size(); ++q) + global_rows.insert_index (position->entries[q].first, + local_row, + position->entries[q].second); + + // need to add the whole row and column + // structure in case we keep constrained + // entries. Unfortunately, we can't use + // the nice matrix structure we use + // elsewhere, so manually add those + // indices one by one. + if (keep_constrained_entries == true) + { + for (unsigned int j=0; j &local_vector, + const std::vector &local_dof_indices, + const FullMatrix &local_matrix, + const std::vector &constrained_lines) const +{ + // Resolve the constraints from the vector and + // apply inhomogeneities. + const unsigned int loc_row = global_rows.local_row(i); + const unsigned int n_inhomogeneous_dofs = constrained_lines.size(); + double val = 0; + if (loc_row != numbers::invalid_unsigned_int) + { + val = local_vector(loc_row); + for (unsigned int i=0; i +void +ConstraintMatrix:: +distribute_local_to_global (const FullMatrix &local_matrix, + const Vector &local_vector, + const std::vector &local_dof_indices, + MatrixType &global_matrix, + VectorType &global_vector, + internal::bool2type) const +{ + // check whether we work on real vectors + // or we just used a dummy when calling + // the other function above. 
+ const bool use_vectors = (local_vector.size() == 0 && + global_vector.size() == 0) ? false : true; + typedef typename MatrixType::value_type number; + const bool use_dealii_matrix = + types_are_equal >::value; + + Assert (local_matrix.n() == local_dof_indices.size(), + ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size())); + Assert (local_matrix.m() == local_dof_indices.size(), + ExcDimensionMismatch(local_matrix.m(), local_dof_indices.size())); + Assert (global_matrix.m() == global_matrix.n(), ExcNotQuadratic()); + if (use_vectors == true) + { + Assert (local_matrix.m() == local_vector.size(), + ExcDimensionMismatch(local_matrix.m(), local_vector.size())); + Assert (global_matrix.m() == global_vector.size(), + ExcDimensionMismatch(global_matrix.m(), global_vector.size())); + } + Assert (sorted == true, ExcMatrixNotClosed()); + + const unsigned int n_local_dofs = local_dof_indices.size(); + internals::GlobalRowsFromLocal global_rows (n_local_dofs); + std::vector constrained_lines; + + make_sorted_dof_list (local_matrix, local_dof_indices, global_matrix, + global_rows, constrained_lines); + + const unsigned int n_actual_dofs = global_rows.size(); + + // create arrays for the column data + // (indices and values) that will then be + // written into the matrix. Shortcut for + // deal.II sparse matrix + std::vector cols; + std::vector vals; + SparseMatrix * sparse_matrix + = dynamic_cast *>(&global_matrix); + if (use_dealii_matrix == false) + { + cols.resize (n_actual_dofs); + vals.resize (n_actual_dofs); + } + + // now do the actual job. + for (unsigned int i=0; i 0) + global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); + } + else + resolve_matrix_row (global_rows, i, 0, n_actual_dofs, + local_matrix, sparse_matrix); // now to the vectors. besides doing the // same job as we did above (i.e., @@ -1467,54 +2065,23 @@ distribute_local_to_global (const FullMatrix &local_matrix, // corresponds to eliminating the // respective column in the local matrix // with value on the right hand side. - if (use_vectors == true) - { - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_row < n_local_dofs, - ExcInternalError()); - val = local_vector(loc_row); - for (unsigned int i=0; iinhomogeneity * - matrix_ptr[constraint_lines[i].first]; - } - - if (my_indices[i].constraints != 0) - { - std::vector > &constraint_i = - *my_indices[i].constraints; - - for (unsigned int q=0; qinhomogeneity * - local_matrix(loc_row_q,constraint_lines[k].first); - val += add_this * constraint_i[q].second; - } - } - } + if (use_vectors == true) + { + const double val = resolve_vector_entry (i, global_rows, + local_vector, + local_dof_indices, + local_matrix, + constrained_lines); + + if (val != 0) + global_vector(row) += static_cast(val); } - - // finally, write all the information - // that accumulated under the given - // process into the global matrix row and - // into the vector - const unsigned int n_values = col_ptr - &cols[0]; - Assert (n_values == (unsigned int)(val_ptr - &vals[0]), - ExcInternalError()); - if (n_values > 0) - global_matrix.add(row, n_values, &cols[0], &vals[0], false, true); - if (val != 0) - global_vector(row) += static_cast(val); } } template -inline void ConstraintMatrix:: distribute_local_to_global (const FullMatrix &local_matrix, @@ -1531,6 +2098,9 @@ distribute_local_to_global (const FullMatrix &local_matrix, const bool use_vectors = (local_vector.size() == 0 && global_vector.size() == 0) ? 
false : true; + typedef typename MatrixType::value_type number; + const bool use_dealii_matrix = + types_are_equal >::value; Assert (local_matrix.n() == local_dof_indices.size(), ExcDimensionMismatch(local_matrix.n(), local_dof_indices.size())); @@ -1549,225 +2119,85 @@ distribute_local_to_global (const FullMatrix &local_matrix, Assert (sorted == true, ExcMatrixNotClosed()); const unsigned int n_local_dofs = local_dof_indices.size(); - const unsigned int num_blocks = global_matrix.n_block_rows(); + internals::GlobalRowsFromLocal global_rows (n_local_dofs); + std::vector constrained_lines; - double average_diagonal = 0; - for (unsigned int i=0; i my_indices (n_local_dofs); - std::vector > constraint_lines; - - bool have_indirect_rows = false; - { - unsigned int added_rows = 0; - for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - } - Assert (constraint_lines.size() + added_rows == n_local_dofs, - ExcInternalError()); - my_indices.resize (added_rows); - } - internals::list_shellsort (my_indices); - - const unsigned int n_constrained_dofs = constraint_lines.size(); - for (unsigned int i=0; i global_indices; + if (use_vectors == true) { - const unsigned int local_row = constraint_lines[i].first; - const unsigned int global_row = local_dof_indices[local_row]; - const ConstraintLine * position = constraint_lines[i].second; - for (unsigned int q=0; qentries.size(); ++q) - { - have_indirect_rows = true; - internals::insert_index(my_indices, position->entries[q].first, - std::make_pair - (local_row, position->entries[q].second)); - } - - const typename MatrixType::value_type new_diagonal - = (std::fabs(local_matrix(local_row,local_row)) != 0 ? - std::fabs(local_matrix(local_row,local_row)) : average_diagonal); - global_matrix.add(global_row, global_row, new_diagonal); + global_indices.resize(n_actual_dofs); + for (unsigned int i=0; i localized_indices (n_actual_dofs); - for (unsigned int i=0; i block_starts(num_blocks+1, n_actual_dofs); - internals::make_block_starts (global_matrix, localized_indices, block_starts); + internals::make_block_starts (global_matrix, global_rows, block_starts); - std::vector cols (n_actual_dofs); - std::vector vals (n_actual_dofs); - typedef std::vector > constraint_format; + std::vector cols; + std::vector vals; + if (use_dealii_matrix == false) + { + cols.resize (n_actual_dofs); + vals.resize (n_actual_dofs); + } // the basic difference to the // non-block variant from now onwards // is that we go through the blocks // of the matrix separately. for (unsigned int block=0; block (col_val); - *col_ptr++ = localized_indices[j]; - } - } - } - - else - { - const double * matrix_ptr = 0; - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_row < n_local_dofs, ExcInternalError()); - matrix_ptr = &local_matrix(loc_row, 0); - } - for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j) - { - double col_val; - const unsigned int loc_col = my_indices[j].local_row; - - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - col_val = loc_col != deal_II_numbers::invalid_unsigned_int ? - matrix_ptr[loc_col] : 0; - - // account for indirect contributions by - // constraints - if (my_indices[j].constraints != 0) - { - constraint_format &constraint_j = - *my_indices[j].constraints; - - for (unsigned int p=0; p(col_val); - } - } - } - - // finally, write all the information - // that accumulated under the given - // process into the global matrix row and - // into the vector. 
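//---------------------------------------------------------------------------
// A usage sketch (not part of the patch): the caller-side pattern served by
// the rewritten distribute_local_to_global(). The cell data and the global
// objects are assumed to come from the usual assembly loop; only the call
// itself is shown.

#include <lac/constraint_matrix.h>
#include <lac/full_matrix.h>
#include <lac/sparse_matrix.h>
#include <lac/vector.h>
#include <vector>

void copy_local_to_global_sketch (const dealii::ConstraintMatrix   &constraints,
                                  const dealii::FullMatrix<double> &cell_matrix,
                                  const dealii::Vector<double>     &cell_rhs,
                                  const std::vector<unsigned int>  &local_dof_indices,
                                  dealii::SparseMatrix<double>     &system_matrix,
                                  dealii::Vector<double>           &system_rhs)
{
  // constrained rows/columns are eliminated on the fly, inhomogeneities are
  // moved to the right hand side, and constrained diagonal entries receive a
  // positive dummy value so the global matrix stays regular. For a deal.II
  // SparseMatrix the resolved rows are written directly through its sparsity
  // pattern (the shortcut announced in the commit message).
  constraints.distribute_local_to_global (cell_matrix, cell_rhs,
                                          local_dof_indices,
                                          system_matrix, system_rhs);
}
//---------------------------------------------------------------------------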
For the block matrix, - // go trough the individual blocks and - // look which entries we need to set. - const unsigned int n_values = col_ptr - &cols[0]; - Assert (n_values == (unsigned int)(val_ptr - &vals[0]), - ExcInternalError()); - if (n_values > 0) - global_matrix.block(block, block_col).add(row, n_values, - &cols[0], &vals[0], - false, true); - } + { + const unsigned int next_block = block_starts[block+1]; + for (unsigned int i=block_starts[block]; iinhomogeneity * - local_matrix(loc_row,constraint_lines[i].first); + if (n_values > 0) + global_matrix.block(block, block_col).add(row, n_values, + &cols[0], &vals[0], + false, true); } - - if (my_indices[i].constraints != 0) + else { - std::vector > &constraint_i = - *my_indices[i].constraints; - - for (unsigned int q=0; qinhomogeneity * - local_matrix(loc_row_q,constraint_lines[k].first); - val += add_this * constraint_i[q].second; - } + SparseMatrix * sparse_matrix + = dynamic_cast *>(&global_matrix.block(block, + block_col)); + Assert (sparse_matrix != 0, ExcInternalError()); + resolve_matrix_row (global_rows, i, start_block, + end_block, local_matrix, sparse_matrix); } + } + + if (use_vectors == true) + { + const double val = resolve_vector_entry (i, global_rows, + local_vector, + local_dof_indices, + local_matrix, + constrained_lines); + if (val != 0) - global_vector(my_indices[i].global_row) += + global_vector(global_indices[i]) += static_cast(val); } } @@ -1777,7 +2207,6 @@ distribute_local_to_global (const FullMatrix &local_matrix, template -inline void ConstraintMatrix:: add_entries_local_to_global (const std::vector &local_dof_indices, @@ -1807,71 +2236,14 @@ add_entries_local_to_global (const std::vector &local_dof_indices, if (dof_mask_is_active == false) { std::vector actual_dof_indices (n_local_dofs); - unsigned int added_rows = 0; - bool have_indirect_rows = false; - std::vector > constraint_lines; - for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - } - Assert (constraint_lines.size() + added_rows == n_local_dofs, - ExcInternalError()); - actual_dof_indices.resize (added_rows); - std::sort (actual_dof_indices.begin(), actual_dof_indices.end()); - - const unsigned int n_constrained_dofs = constraint_lines.size(); - for (unsigned int i=0; ientries.size(); ++q) - { - have_indirect_rows = true; - const unsigned int new_index = position->entries[q].first; - if (actual_dof_indices.back() < new_index) - { - actual_dof_indices.push_back(new_index); - } - else - { - std::vector::iterator it = - std::lower_bound(actual_dof_indices.begin(), - actual_dof_indices.end(), - new_index); - if (*it != new_index) - actual_dof_indices.insert(it, new_index); - } - } - - if (keep_constrained_entries == true) - { - for (unsigned int j=0; j &local_dof_indices, // gets similar to the function for // distributing matrix entries, see there // for additional comments. - std::vector my_indices (n_local_dofs); - std::vector > constraint_lines; - - // cache whether we have to resolve any - // indirect rows generated from resolving - // constrained dofs. - bool have_indirect_rows = false; - { - unsigned int added_rows = 0; - // first add the indices in an unsorted - // way and only keep track of the - // constraints that appear. They are - // resolved in a second step. 
- for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - } - Assert (constraint_lines.size() + added_rows == n_local_dofs, - ExcInternalError()); - my_indices.resize (added_rows); - } - internals::list_shellsort (my_indices); - - // now in the second step actually - // resolve the constraints - const unsigned int n_constrained_dofs = constraint_lines.size(); - for (unsigned int i=0; ientries.size(); ++q) - { - have_indirect_rows = true; - internals::insert_index(my_indices, position->entries[q].first, - std::make_pair - (local_row, position->entries[q].second)); - } - - // need to add the whole row and column - // structure in case we keep constrained - // entries. Unfortunately, we can't use - // the nice matrix structure we use - // elsewhere, so manually add those - // indices one by one. - if (keep_constrained_entries == true) - { - for (unsigned int j=0; j &local_dof_indices, for (unsigned int i=0; i::iterator col_ptr = cols.begin(); - const unsigned int row = my_indices[i].global_row; - const unsigned int loc_row = my_indices[i].local_row; - - // fast function if there are no indirect - // references to any of the local rows at - // all on this set of dofs - if (have_indirect_rows == false) - { - Assert(loc_row < n_local_dofs, - ExcInternalError()); - - for (unsigned int j=0; j < n_actual_dofs; ++j) - { - const unsigned int loc_col = my_indices[j].local_row; - Assert(loc_col < n_local_dofs, ExcInternalError()); - - if (dof_mask[loc_row][loc_col] == true) - *col_ptr++ = my_indices[j].global_row; - } - } - - // slower functions when there are - // indirect references and when we need - // to do some more checks. - else - { - for (unsigned int j=0; j < n_actual_dofs; ++j) - { - const unsigned int loc_col = my_indices[j].local_row; - - bool add_this = false; - - // case 1: row has direct contribution in - // local matrix - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_row < n_local_dofs, ExcInternalError()); - - // case 1a: col has direct contribution - // in local matrix - if (loc_col != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_col < n_local_dofs, ExcInternalError()); - if (dof_mask[loc_row][loc_col] == true) - goto add_this_index; - } - - // account for indirect contributions by - // constraints - if (my_indices[j].constraints != 0) - { - std::vector > &constraint_j = - *my_indices[j].constraints; - - for (unsigned int p=0; p > &constraint_i = - *my_indices[i].constraints; - for (unsigned int q=0; q > &constraint_j = - *my_indices[j].constraints; - - for (unsigned int p=0; p &local_dof_indices, template -inline void ConstraintMatrix:: add_entries_local_to_global (const std::vector &local_dof_indices, @@ -2110,75 +2316,11 @@ add_entries_local_to_global (const std::vector &local_dof_indices, ExcDimensionMismatch(dof_mask.n_cols(), n_local_dofs)); } - // if the dof mask is not active, all we - // have to do is to add some indices in a - // matrix format. To do this, we first - // create an array of all the indices - // that are to be added. these indices - // are the local dof indices plus some - // indices that come from constraints. 
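//---------------------------------------------------------------------------
// A usage sketch (not part of the patch): the matching call for setting up
// the sparsity pattern. 'csp' stands for any compressed sparsity pattern
// class accepted as SparsityType.

#include <lac/constraint_matrix.h>
#include <lac/compressed_sparsity_pattern.h>
#include <vector>

void add_cell_couplings_sketch (const dealii::ConstraintMatrix    &constraints,
                                const std::vector<unsigned int>   &local_dof_indices,
                                dealii::CompressedSparsityPattern &csp)
{
  // couple all cell DoFs, resolving constrained rows/columns to the DoFs
  // they are constrained to; with 'false', constrained rows keep only their
  // diagonal entry instead of the full local couplings.
  constraints.add_entries_local_to_global (local_dof_indices, csp,
                                           /*keep_constrained_entries=*/ false);
}
//---------------------------------------------------------------------------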
if (dof_mask_is_active == false) { std::vector actual_dof_indices (n_local_dofs); - unsigned int added_rows = 0; - bool have_indirect_rows = false; - std::vector > constraint_lines; - for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - } - Assert (constraint_lines.size() + added_rows == n_local_dofs, - ExcInternalError()); - actual_dof_indices.resize (added_rows); - std::sort (actual_dof_indices.begin(), actual_dof_indices.end()); - - const unsigned int n_constrained_dofs = constraint_lines.size(); - for (unsigned int i=0; ientries.size(); ++q) - { - have_indirect_rows = true; - const unsigned int new_index = position->entries[q].first; - if (actual_dof_indices.back() < new_index) - { - actual_dof_indices.push_back(new_index); - } - else - { - std::vector::iterator it = - std::lower_bound(actual_dof_indices.begin(), - actual_dof_indices.end(), - new_index); - if (*it != new_index) - actual_dof_indices.insert(it, new_index); - } - } - - if (keep_constrained_entries == true) - { - for (unsigned int j=0; j &local_dof_indices, internals::make_block_starts (sparsity_pattern, actual_dof_indices, block_starts); - // easy operation - just go trough the - // individual blocks and add the same - // array for each row for (unsigned int block=0; block &local_dof_indices, // difficult case with dof_mask, similar // to the distribute_local_to_global // function for block matrices - std::vector my_indices (n_local_dofs); - std::vector > constraint_lines; - - // cache whether we have to resolve any - // indirect rows generated from resolving - // constrained dofs. - bool have_indirect_rows = false; - { - unsigned int added_rows = 0; - // first add the indices in an unsorted - // way and only keep track of the - // constraints that appear. They are - // resolved in a second step. 
- for (unsigned int i = 0; i(i,lines_cache[local_dof_indices[i]])); - } - Assert (constraint_lines.size() + added_rows == n_local_dofs, - ExcInternalError()); - my_indices.resize (added_rows); - } - internals::list_shellsort (my_indices); - - // now in the second step actually - // resolve the constraints - const unsigned int n_constrained_dofs = constraint_lines.size(); - for (unsigned int i=0; ientries.size(); ++q) - { - have_indirect_rows = true; - internals::insert_index(my_indices, position->entries[q].first, - std::make_pair - (local_row, position->entries[q].second)); - } - - if (keep_constrained_entries == true) - { - for (unsigned int j=0; j localized_indices (n_actual_dofs); - for (unsigned int i=0; i block_starts(num_blocks+1, n_actual_dofs); - internals::make_block_starts(sparsity_pattern, localized_indices, + internals::make_block_starts(sparsity_pattern, global_rows, block_starts); std::vector cols (n_actual_dofs); @@ -2308,107 +2379,15 @@ add_entries_local_to_global (const std::vector &local_dof_indices, const unsigned int next_block = block_starts[block+1]; for (unsigned int i=block_starts[block]; i::iterator col_ptr = cols.begin(); - if (have_indirect_rows == false) - { - Assert(loc_row < n_local_dofs, - ExcInternalError()); - - for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j) - { - const unsigned int loc_col = my_indices[j].local_row; - Assert(loc_col < n_local_dofs, - ExcInternalError()); - - if (dof_mask[loc_row][loc_col] == true) - *col_ptr++ = localized_indices[j]; - } - } - - // have indirect references by - // constraints, resolve them - else - { - for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j) - { - const unsigned int loc_col = my_indices[j].local_row; - - bool add_this = false; - - if (loc_row != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_row < n_local_dofs, - ExcInternalError()); - - if (loc_col != deal_II_numbers::invalid_unsigned_int) - { - Assert (loc_col < n_local_dofs, - ExcInternalError()); - if (dof_mask[loc_row][loc_col] == true) - goto add_this_index; - } - - // account for indirect contributions by - // constraints - if (my_indices[j].constraints != 0) - { - std::vector > - &constraint_j = *my_indices[j].constraints; - - for (unsigned int p=0; p > - &constraint_i = *my_indices[i].constraints; - for (unsigned int q=0; q > - &constraint_j = *my_indices[j].constraints; - - for (unsigned int p=0; p #include -#include #include #include #include -#include #include -#include #include #include #include @@ -100,7 +97,7 @@ ConstraintMatrix::add_entries (const unsigned int line, Assert (sorted==false, ExcMatrixIsClosed()); Assert (is_constrained(line), ExcLineInexistant(line)); - ConstraintLine * line_ptr = const_cast(lines_cache[line]); + ConstraintLine * line_ptr = &lines[lines_cache[calculate_line_index(line)]]; Assert (line_ptr->line == line, ExcInternalError()); // if the loop didn't break, then @@ -156,18 +153,21 @@ void ConstraintMatrix::close () // modify the size any more after this // point. { - std::vector new_lines (lines_cache.size()); + std::vector new_lines (lines_cache.size(), + numbers::invalid_unsigned_int); + unsigned int counter = 0; for (std::vector::const_iterator line=lines.begin(); - line!=lines.end(); ++line) - new_lines[line->line] = &*line; + line!=lines.end(); ++line, ++counter) + new_lines[calculate_line_index(line->line)] = counter; std::swap (lines_cache, new_lines); } // in debug mode: check whether we really // set the pointers correctly. 
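//---------------------------------------------------------------------------
// A usage sketch (not part of the patch): the basic life cycle that close()
// finalizes, with lines_cache now storing positions into 'lines' instead of
// pointers. Indices and weights are invented.

#include <lac/constraint_matrix.h>
#include <lac/vector.h>

void basic_constraints_sketch ()
{
  dealii::ConstraintMatrix constraints;
  constraints.add_line (3);                 // x_3 = 0.5 * x_7 + 1.0
  constraints.add_entry (3, 7, 0.5);
  constraints.set_inhomogeneity (3, 1.0);
  constraints.close ();                     // sorts lines, rebuilds lines_cache

  dealii::Vector<double> solution (10);
  // ... assemble and solve the constrained system ...
  constraints.distribute (solution);        // afterwards: set x_3 from x_7
}
//---------------------------------------------------------------------------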
@@ -2308,107 +2379,15 @@ add_entries_local_to_global (const std::vector &local_dof_indices,
       const unsigned int next_block = block_starts[block+1];
       for (unsigned int i=block_starts[block]; i::iterator col_ptr = cols.begin();
-          if (have_indirect_rows == false)
-            {
-              Assert(loc_row < n_local_dofs,
-                     ExcInternalError());
-
-              for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j)
-                {
-                  const unsigned int loc_col = my_indices[j].local_row;
-                  Assert(loc_col < n_local_dofs,
-                         ExcInternalError());
-
-                  if (dof_mask[loc_row][loc_col] == true)
-                    *col_ptr++ = localized_indices[j];
-                }
-            }
-
-          // have indirect references by
-          // constraints, resolve them
-          else
-            {
-              for (unsigned int j=block_starts[block_col]; j < next_block_col; ++j)
-                {
-                  const unsigned int loc_col = my_indices[j].local_row;
-
-                  bool add_this = false;
-
-                  if (loc_row != deal_II_numbers::invalid_unsigned_int)
-                    {
-                      Assert (loc_row < n_local_dofs,
-                              ExcInternalError());
-
-                      if (loc_col != deal_II_numbers::invalid_unsigned_int)
-                        {
-                          Assert (loc_col < n_local_dofs,
-                                  ExcInternalError());
-                          if (dof_mask[loc_row][loc_col] == true)
-                            goto add_this_index;
-                        }
-
-                      // account for indirect contributions by
-                      // constraints
-                      if (my_indices[j].constraints != 0)
-                        {
-                          std::vector >
-                            &constraint_j = *my_indices[j].constraints;
-
-                          for (unsigned int p=0; p >
-                            &constraint_i = *my_indices[i].constraints;
-                          for (unsigned int q=0; q >
-                            &constraint_j = *my_indices[j].constraints;
-
-                          for (unsigned int p=0; p
 #include 
-#include 
 #include 
 #include 
 #include 
-#include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -100,7 +97,7 @@ ConstraintMatrix::add_entries (const unsigned int line,
   Assert (sorted==false, ExcMatrixIsClosed());
   Assert (is_constrained(line), ExcLineInexistant(line));
-  ConstraintLine * line_ptr = const_cast(lines_cache[line]);
+  ConstraintLine * line_ptr = &lines[lines_cache[calculate_line_index(line)]];
   Assert (line_ptr->line == line, ExcInternalError());
   // if the loop didn't break, then
@@ -156,18 +153,21 @@ void ConstraintMatrix::close ()
   // modify the size any more after this
   // point.
   {
-    std::vector new_lines (lines_cache.size());
+    std::vector new_lines (lines_cache.size(),
+                           numbers::invalid_unsigned_int);
+    unsigned int counter = 0;
     for (std::vector::const_iterator line=lines.begin();
-         line!=lines.end(); ++line)
-      new_lines[line->line] = &*line;
+         line!=lines.end(); ++line, ++counter)
+      new_lines[calculate_line_index(line->line)] = counter;
     std::swap (lines_cache, new_lines);
   }
   // in debug mode: check whether we really
   // set the pointers correctly.
   for (unsigned int i=0; iline, ExcInternalError());
+    if (lines_cache[i] != numbers::invalid_unsigned_int)
+      Assert (i == calculate_line_index(lines[lines_cache[i]].line),
+              ExcInternalError());
   // first, strip zero entries, as we
   // have to do that only once
@@ -241,7 +241,8 @@ void ConstraintMatrix::close ()
             Assert (dof_index != line->line,
                     ExcMessage ("Cycle in constraints detected!"));
-            const ConstraintLine * constrained_line = lines_cache[dof_index];
+            const ConstraintLine * constrained_line =
+              &lines[lines_cache[calculate_line_index(dof_index)]];
             Assert (constrained_line->line == dof_index,
                     ExcInternalError());
@@ -464,15 +465,15 @@ void ConstraintMatrix::close ()
   // that is also constrained
   for (std::vector::const_iterator line=lines.begin();
        line!=lines.end(); ++line)
-    for (std::vector >::const_iterator entry=line->entries.begin();
+    for (std::vector >::const_iterator
+           entry=line->entries.begin();
          entry!=line->entries.end(); ++entry)
       {
        // make sure that
       // entry->first is not the
        // index of a line itself
-       const ConstraintLine * it = entry->first < lines_cache.size() ?
-         lines_cache[entry->first] : 0;
-       Assert (it == 0,
+       const bool is_circle = is_constrained(entry->first);
+       Assert (is_circle == false,
                ExcDoFConstrainedToConstrainedDoF(line->line, entry->first));
      };
 #endif
@@ -484,6 +485,10 @@ void ConstraintMatrix::close ()
 void ConstraintMatrix::merge (const ConstraintMatrix &other_constraints)
 {
+  //TODO: this doesn't work with IndexSets yet. [TH]
+  AssertThrow(local_lines.size()==0, ExcNotImplemented());
+  AssertThrow(other_constraints.local_lines.size()==0, ExcNotImplemented());
+
   // first check whether the
   // constraints in the two objects
   // are for different degrees of
@@ -540,7 +545,8 @@ void ConstraintMatrix::merge (const ConstraintMatrix &other_constraints)
   sorted = false;
   if (other_constraints.lines_cache.size() > lines_cache.size())
-    lines_cache.resize(other_constraints.lines_cache.size());
+    lines_cache.resize(other_constraints.lines_cache.size(),
+                       numbers::invalid_unsigned_int);
   // first action is to fold into the
   // present object possible
@@ -694,9 +700,10 @@ void ConstraintMatrix::merge (const ConstraintMatrix &other_constraints)
   // afterwards as well. otherwise
   // leave everything in the unsorted
   // state
+  unsigned int counter = 0;
   for (std::vector::const_iterator line=lines.begin();
-       line!=lines.end(); ++line)
-    lines_cache[line->line] = &*line;
+       line!=lines.end(); ++line, ++counter)
+    lines_cache[line->line] = counter;
   if (object_was_sorted == true)
     close ();
 }
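In the hunks above, lines_cache changes from holding ConstraintLine pointers to holding positions into the lines vector, with numbers::invalid_unsigned_int marking unconstrained degrees of freedom. A self-contained sketch of that pattern (illustrative names; the real code additionally translates global indices through the local_lines IndexSet via calculate_line_index, which is omitted here):

#include <vector>

// Position-based constraint cache: cache[i] holds the position of the
// constraint on degree of freedom i inside a separate 'lines' array, or an
// invalid marker if that degree of freedom is unconstrained.
struct ConstraintCacheSketch
{
  // plays the role of numbers::invalid_unsigned_int
  static unsigned int invalid_entry () { return static_cast<unsigned int>(-1); }

  std::vector<unsigned int> cache;   // plays the role of lines_cache

  void set (const unsigned int dof_index, const unsigned int position)
  {
    if (dof_index >= cache.size())
      cache.resize (dof_index + 1, invalid_entry());
    cache[dof_index] = position;
  }

  bool is_constrained (const unsigned int dof_index) const
  {
    return (dof_index < cache.size() &&
            cache[dof_index] != invalid_entry());
  }

  // position inside 'lines'; only meaningful if is_constrained() is true
  unsigned int position (const unsigned int dof_index) const
  {
    return cache[dof_index];
  }
};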
@@ -705,7 +712,11 @@ void ConstraintMatrix::shift (const unsigned int offset)
 {
-  lines_cache.insert (lines_cache.begin(), offset, 0);
+  //TODO: this doesn't work with IndexSets yet. [TH]
+  AssertThrow(local_lines.size()==0, ExcNotImplemented());
+
+  lines_cache.insert (lines_cache.begin(), offset,
+                      numbers::invalid_unsigned_int);
   for (std::vector::iterator i = lines.begin();
        i != lines.end(); i++)
@@ -728,7 +739,7 @@ void ConstraintMatrix::clear ()
   }
   {
-    std::vector tmp;
+    std::vector tmp;
     lines_cache.swap (tmp);
   }
@@ -744,6 +755,14 @@
+void ConstraintMatrix::reinit (const IndexSet & local_constraints)
+{
+  local_lines = local_constraints;
+  clear();
+}
+
+
+
 void ConstraintMatrix::condense (const SparsityPattern &uncondensed,
                                  SparsityPattern &condensed) const
 {
@@ -1864,12 +1883,11 @@ ConstraintMatrix::distribute (TrilinosWrappers::MPI::Vector &vec) const
 {
   typedef std::vector::const_iterator constraint_iterator;
   ConstraintLine index_comparison;
-  std::pair local_range = vec.local_range();
-  index_comparison.line = local_range.first;
+  index_comparison.line = vec.local_range().first;
   const constraint_iterator begin_my_constraints =
     std::lower_bound (lines.begin(),lines.end(),index_comparison);
-  index_comparison.line = local_range.second;
+  index_comparison.line = vec.local_range().second;
   const constraint_iterator end_my_constraints =
     std::lower_bound(lines.begin(),lines.end(),index_comparison);
@@ -1881,41 +1899,44 @@
   // with a vector for further use.
   if (!vec_distribute || vec_distribute->size()!=vec.size())
     {
-      std::vector my_indices(vec.local_size());
-      unsigned int index2 = 0;
-
-      for(unsigned int i=local_range.first;i
+        local_range = vec.local_range();
+
+      my_indices.add_range (local_range.first, local_range.second);
+
+      std::set individual_indices;
+      for (constraint_iterator it = begin_my_constraints;
           it != end_my_constraints; ++it)
        for (unsigned int i=0; ientries.size(); ++i)
         if ((it->entries[i].first < local_range.first) || (it->entries[i].first >= local_range.second))
-          my_indices.push_back (it->entries[i].first);
-
-      // sort and compress out duplicates
-      std::sort(my_indices.begin(),my_indices.end());
-      index2 = 1;
-      for(unsigned int index1=1;index1entries[i].first);
+
+      my_indices.add_indices (individual_indices.begin(),
+                              individual_indices.end());
-      Epetra_Map map_exchange
-        = Epetra_Map(-1,index2,(int*)&my_indices[0],0,
-                     vec.trilinos_vector().Comm());
-      vec_distribute.reset (new TrilinosWrappers::MPI::Vector(map_exchange));
+#ifdef DEAL_II_COMPILER_SUPPORTS_MPI
+      const Epetra_MpiComm *mpi_comm
+        = dynamic_cast(&vec.trilinos_vector().Comm());
+
+      Assert (mpi_comm != 0, ExcInternalError());
+
+      vec_distribute.reset (new TrilinosWrappers::MPI::Vector
+                            (my_indices.make_trilinos_map (mpi_comm->Comm(),
+                                                           true)));
+#else
+      vec_distribute.reset (new TrilinosWrappers::MPI::Vector
+                            (my_indices.make_trilinos_map (MPI_COMM_WORLD,
+                                                           true)));
+#endif
     }
   // here we import the data
   vec_distribute->reinit(vec,false,true);
-  for (constraint_iterator it = begin_my_constraints;
+  for (constraint_iterator it = begin_my_constraints;
        it != end_my_constraints; ++it)
     {
       // fill entry in line
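The rewritten distribute() above gathers into an IndexSet the locally owned range of the vector plus every constraint entry that is owned by another processor, and then builds the Epetra import map from that set. The gathering step can be sketched with plain STL containers as follows (names and the flat constraint_entries input are assumptions of this sketch):

#include <algorithm>
#include <set>
#include <utility>
#include <vector>

// Collect the indices a processor needs: its own contiguous range plus all
// off-processor ("ghost") entries referenced by its constraints.  The real
// code stores the result in an IndexSet and converts it to an Epetra map.
std::vector<unsigned int>
collect_needed_indices (const std::pair<unsigned int,unsigned int> &local_range,
                        const std::vector<unsigned int> &constraint_entries)
{
  std::vector<unsigned int> needed;
  for (unsigned int i=local_range.first; i<local_range.second; ++i)
    needed.push_back (i);                          // locally owned range

  std::set<unsigned int> ghosts;                   // set removes duplicates
  for (unsigned int i=0; i<constraint_entries.size(); ++i)
    if (constraint_entries[i] <  local_range.first ||
        constraint_entries[i] >= local_range.second)
      ghosts.insert (constraint_entries[i]);

  needed.insert (needed.end(), ghosts.begin(), ghosts.end());
  std::sort (needed.begin(), needed.end());        // keep a sorted index list
  return needed;
}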
@@ -1944,14 +1965,14 @@ bool ConstraintMatrix::is_identity_constrained (const unsigned int index) const
   if (is_constrained(index) == false)
     return false;
-  const ConstraintLine * p = lines_cache[index];
-  Assert (p->line == index, ExcInternalError());
+  const ConstraintLine & p = lines[lines_cache[calculate_line_index(index)]];
+  Assert (p.line == index, ExcInternalError());
   // return if an entry for this
   // line was found and if it has
   // only one entry equal to 1.0
-  return ((p->entries.size() == 1) &&
-          (p->entries[0].second == 1.0));
+  return ((p.entries.size() == 1) &&
+          (p.entries[0].second == 1.0));
 }
-- 
2.39.5
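A hypothetical usage example of the IndexSet-aware reinit() introduced by this patch; the header paths follow the tree layout above, and the sizes and ranges are made up for illustration:

#include <base/index_set.h>
#include <lac/constraint_matrix.h>

void setup_constraints ()
{
  // the set of lines this processor wants to store constraints for
  IndexSet relevant (1000000);        // total number of degrees of freedom
  relevant.add_range (4000, 5000);    // locally relevant range (made up)
  relevant.compress ();

  ConstraintMatrix constraints;
  constraints.reinit (relevant);      // sets local_lines and clears the object
  // ... add constraints and call close() as before ...
}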