From: Wolfgang Bangerth Date: Fri, 22 May 2020 02:34:18 +0000 (-0600) Subject: Rename namespace internals:: to internal::AffineConstraints::. X-Git-Tag: v9.3.0-rc1~1592^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F10306%2Fhead;p=dealii.git Rename namespace internals:: to internal::AffineConstraints::. The AffineConstraints class and friends put things into a namespace 'internals'. That's an uncommon name: We typically use 'internal::'. But a lot of the things in that namespace also have rather generic names, so do as we often do in such cases: further qualify the name to indicate what class the internals correspond to. --- diff --git a/include/deal.II/lac/affine_constraints.h b/include/deal.II/lac/affine_constraints.h index 993f59b072..c65ae12fc1 100644 --- a/include/deal.II/lac/affine_constraints.h +++ b/include/deal.II/lac/affine_constraints.h @@ -48,11 +48,14 @@ class SparseMatrix; template class BlockSparseMatrix; -namespace internals +namespace internal { - template - class GlobalRowsFromLocal; -} + namespace AffineConstraints + { + template + class GlobalRowsFromLocal; + } +} // namespace internal namespace internal { @@ -1560,9 +1563,9 @@ private: * the global row indices. */ void - make_sorted_row_list( - const std::vector & local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const; + make_sorted_row_list(const std::vector &local_dof_indices, + internal::AffineConstraints::GlobalRowsFromLocal + &global_rows) const; /** * Internal helper function for add_entries_local_to_global function. @@ -1581,11 +1584,11 @@ private: template typename ProductType::type resolve_vector_entry( - const size_type i, - const internals::GlobalRowsFromLocal &global_rows, - const Vector & local_vector, - const std::vector & local_dof_indices, - const FullMatrix & local_matrix) const; + const size_type i, + const internal::AffineConstraints::GlobalRowsFromLocal &global_rows, + const Vector & local_vector, + const std::vector & local_dof_indices, + const FullMatrix &local_matrix) const; }; /* ---------------- template and inline functions ----------------- */ diff --git a/include/deal.II/lac/affine_constraints.templates.h b/include/deal.II/lac/affine_constraints.templates.h index 95b8188723..2c0da3a2e3 100644 --- a/include/deal.II/lac/affine_constraints.templates.h +++ b/include/deal.II/lac/affine_constraints.templates.h @@ -2325,1147 +2325,1161 @@ AffineConstraints::distribute(VectorType &vec) const } // Some helper definitions for the local_to_global functions. -namespace internals +namespace internal { - using size_type = types::global_dof_index; - - // this struct contains all the information we need to store about each of - // the global entries (global_row): are they obtained directly by some local - // entry (local_row) or some constraints (constraint_position). This is not - // directly used in the user code, but accessed via the GlobalRowsFromLocal. - // - // The actions performed here correspond to reshaping the constraint - // information from global degrees of freedom to local ones (i.e., - // cell-related DoFs), and also transforming the constraint information from - // compressed row storage (each local dof that is constrained has a list of - // constraint entries associated to it) into compressed column storage based - // on the cell-related DoFs (we have a list of global degrees of freedom, - // and to each we have a list of local rows where the entries come from). 
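As a freestanding illustration of the row-to-column reshaping this comment describes (not part of the patch; names and numbers are invented, and constraints are reduced to plain (global dof, weight) pairs):

#include <cstddef>
#include <map>
#include <utility>
#include <vector>

int main()
{
  using size_type = std::size_t;

  // compressed row storage: each local dof lists the (global dof, weight)
  // pairs it resolves to
  const std::vector<std::vector<std::pair<size_type, double>>> rows = {
    {{7, 1.0}},            // local dof 0 -> global dof 7, direct
    {{3, 0.5}, {7, 0.5}}}; // local dof 1 is constrained to dofs 3 and 7

  // compressed column storage: each global dof lists the (local row, weight)
  // pairs that contribute to it
  std::map<size_type, std::vector<std::pair<size_type, double>>> columns;
  for (size_type local = 0; local < rows.size(); ++local)
    for (const auto &entry : rows[local])
      columns[entry.first].emplace_back(local, entry.second);

  // columns[7] == {(0, 1.0), (1, 0.5)}; the std::map keeps global indices
  // sorted, which GlobalRowsFromLocal instead does by hand for speed
  return 0;
}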
To - // increase the speed, we additionally store whether an entry is generated - // directly from the local degrees of freedom or whether it comes from a - // constraint. - struct Distributing - { - Distributing(const size_type global_row = numbers::invalid_size_type, - const size_type local_row = numbers::invalid_size_type); - Distributing(const Distributing &in); - Distributing & - operator=(const Distributing &in); - bool - operator<(const Distributing &in) const - { - return global_row < in.global_row; - } - - size_type global_row; - size_type local_row; - mutable size_type constraint_position; - }; - - inline Distributing::Distributing(const size_type global_row, - const size_type local_row) - : global_row(global_row) - , local_row(local_row) - , constraint_position(numbers::invalid_size_type) - {} - - inline Distributing::Distributing(const Distributing &in) - : constraint_position(numbers::invalid_size_type) - { - *this = (in); - } - - inline Distributing & - Distributing::operator=(const Distributing &in) + namespace AffineConstraints { - global_row = in.global_row; - local_row = in.local_row; - // the constraints pointer should not contain any data here. - Assert(constraint_position == numbers::invalid_size_type, - ExcInternalError()); + using size_type = types::global_dof_index; - if (in.constraint_position != numbers::invalid_size_type) + // this struct contains all the information we need to store about each of + // the global entries (global_row): are they obtained directly by some local + // entry (local_row) or some constraints (constraint_position). This is not + // directly used in the user code, but accessed via the GlobalRowsFromLocal. + // + // The actions performed here correspond to reshaping the constraint + // information from global degrees of freedom to local ones (i.e., + // cell-related DoFs), and also transforming the constraint information from + // compressed row storage (each local dof that is constrained has a list of + // constraint entries associated to it) into compressed column storage based + // on the cell-related DoFs (we have a list of global degrees of freedom, + // and to each we have a list of local rows where the entries come from). To + // increase the speed, we additionally store whether an entry is generated + // directly from the local degrees of freedom or whether it comes from a + // constraint. + struct Distributing + { + Distributing(const size_type global_row = numbers::invalid_size_type, + const size_type local_row = numbers::invalid_size_type); + Distributing(const Distributing &in); + Distributing & + operator=(const Distributing &in); + bool + operator<(const Distributing &in) const { - constraint_position = in.constraint_position; - in.constraint_position = numbers::invalid_size_type; + return global_row < in.global_row; } - return *this; - } - // this is a cache for constraints that are encountered on a local level. - // The functionality is similar to - // std::vector > >, but tuned so that - // frequent memory allocation for each entry is avoided. The data is put - // into a std::vector > and the row length is kept - // fixed at row_length. Both the number of rows and the row length can - // change is this structure is filled. In that case, the data is - // rearranged. This is not directly used in the user code, but accessed - // via the GlobalRowsFromLocal. 
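A minimal freestanding analogue of the cache just described (illustrative only; the real DataCache starts with a row_length of 8 and stores a configurable pair type):

#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct ToyCache
{
  std::size_t row_length = 2; // small stride so the demo overflows quickly
  std::vector<std::pair<std::size_t, double>> data;  // all rows, flat
  std::vector<std::size_t>                    sizes; // fill level per row

  std::size_t new_row(const std::pair<std::size_t, double> &p)
  {
    sizes.push_back(1);
    data.resize(sizes.size() * row_length);
    data[(sizes.size() - 1) * row_length] = p;
    return sizes.size() - 1;
  }

  void append(std::size_t row, const std::pair<std::size_t, double> &p)
  {
    if (sizes[row] == row_length) // row full: double the stride and move
      {                           // every row to its new slot, back to front
        data.resize(2 * data.size());
        for (std::size_t i = sizes.size() - 1; i > 0; --i)
          std::move_backward(data.begin() + i * row_length,
                             data.begin() + i * row_length + sizes[i],
                             data.begin() + 2 * i * row_length + sizes[i]);
        row_length *= 2;
      }
    data[row * row_length + sizes[row]] = p;
    ++sizes[row];
  }
};

int main()
{
  ToyCache   cache;
  const auto r = cache.new_row({1, 0.5});
  cache.append(r, {2, 0.5});
  cache.append(r, {4, 1.0}); // triggers the doubling path
  return 0;
}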
- template - struct DataCache - { - DataCache() - : row_length(8) + size_type global_row; + size_type local_row; + mutable size_type constraint_position; + }; + + inline Distributing::Distributing(const size_type global_row, + const size_type local_row) + : global_row(global_row) + , local_row(local_row) + , constraint_position(numbers::invalid_size_type) {} - void - reinit() + inline Distributing::Distributing(const Distributing &in) + : constraint_position(numbers::invalid_size_type) { - individual_size.resize(0); - data.resize(0); + *this = (in); } - size_type - insert_new_index(const std::pair &pair) + inline Distributing & + Distributing::operator=(const Distributing &in) { - Assert(row_length > 0, ExcInternalError()); - const unsigned int index = individual_size.size(); - individual_size.push_back(1); - data.resize(individual_size.size() * row_length); - data[index * row_length] = pair; - individual_size[index] = 1; - return index; - } + global_row = in.global_row; + local_row = in.local_row; + // the constraints pointer should not contain any data here. + Assert(constraint_position == numbers::invalid_size_type, + ExcInternalError()); - void - append_index(const size_type index, - const std::pair &pair) - { - AssertIndexRange(index, individual_size.size()); - const size_type my_length = individual_size[index]; - if (my_length == row_length) + if (in.constraint_position != numbers::invalid_size_type) { - AssertDimension(data.size(), individual_size.size() * row_length); - // no space left in this row, need to double row_length and - // rearrange the data items. Move all items to the right except the - // first one, starting at the back. Since individual_size contains - // at least one element when we get here, subtracting 1 works fine. - data.resize(2 * data.size()); - for (size_type i = individual_size.size() - 1; i > 0; --i) - { - const auto ptr = data.data(); - std::move_backward(ptr + i * row_length, - ptr + i * row_length + individual_size[i], - ptr + i * 2 * row_length + individual_size[i]); - } - row_length *= 2; + constraint_position = in.constraint_position; + in.constraint_position = numbers::invalid_size_type; } - data[index * row_length + my_length] = pair; - individual_size[index] = my_length + 1; - } - - size_type - get_size(const size_type index) const - { - return individual_size[index]; + return *this; } - const std::pair * - get_entry(const size_type index) const - { - return &data[index * row_length]; - } - - size_type row_length; - - std::vector> data; - - std::vector individual_size; - }; - - // collects all the global rows from a local contribution (cell) and their - // origin (direct/constraint). this is basically a vector consisting of - // "Distributing" structs using access via the DataCache. Provides some - // specialized sort and insert functions. - // - // in case there are no constraints, this is basically a list of pairs - // with the first index being the global index and the second - // index the local index. The list is sorted with respect to the global - // index. - // - // in case there are constraints, a global dof might get a contribution also - // because it gets data from a constrained dof. This means that a global dof - // might also have indirect contributions from a local dof via a constraint, - // besides the direct ones. 
- // - // The actions performed here correspond to reshaping the constraint - // information from global degrees of freedom to local ones (i.e., - // cell-related DoFs), and also transforming the constraint information from - // compressed row storage (each local dof that is constrained has a list of - // constraint entries associated to it) into compressed column storage based - // on the cell-related DoFs (we have a list of global degrees of freedom, - // and to each we have a list of local rows where the entries come from). To - // increase the speed, we additionally store whether an entry is generated - // directly from the local degrees of freedom or whether it comes from a - // constraint. - template - class GlobalRowsFromLocal - { - public: - GlobalRowsFromLocal() - : n_active_rows(0) - , n_inhomogeneous_rows(0) - {} - - void - reinit(const size_type n_local_rows) - { - total_row_indices.resize(n_local_rows); - for (unsigned int i = 0; i < n_local_rows; ++i) - total_row_indices[i].constraint_position = numbers::invalid_size_type; - n_active_rows = n_local_rows; - n_inhomogeneous_rows = 0; - data_cache.reinit(); - } - - // implemented below - void - insert_index(const size_type global_row, - const size_type local_row, - const number constraint_value); - void - sort(); - - // Print object for debugging purpose - void - print(std::ostream &os) + // this is a cache for constraints that are encountered on a local level. + // The functionality is similar to + // std::vector > >, but tuned so that + // frequent memory allocation for each entry is avoided. The data is put + // into a std::vector > and the row length is kept + // fixed at row_length. Both the number of rows and the row length can + // change is this structure is filled. In that case, the data is + // rearranged. This is not directly used in the user code, but accessed + // via the GlobalRowsFromLocal. + template + struct DataCache { - os << "Active rows " << n_active_rows << std::endl - << "Constr rows " << n_constraints() << std::endl - << "Inhom rows " << n_inhomogeneous_rows << std::endl - << "Local: "; - for (const auto &total_row_index : total_row_indices) - os << ' ' << std::setw(4) << total_row_index.local_row; - os << std::endl << "Global:"; - for (const auto &total_row_index : total_row_indices) - os << ' ' << std::setw(4) << total_row_index.global_row; - os << std::endl << "ConPos:"; - for (const auto &total_row_index : total_row_indices) - os << ' ' << std::setw(4) << total_row_index.constraint_position; - os << std::endl; - } - - // return all kind of information on the constraints + DataCache() + : row_length(8) + {} - // returns the number of global indices in the struct - size_type - size() const - { - return n_active_rows; - } + void + reinit() + { + individual_size.resize(0); + data.resize(0); + } - // returns the number of constraints that are associated to the - // counter_index-th entry in the list - size_type - size(const size_type counter_index) const - { - return (total_row_indices[counter_index].constraint_position == - numbers::invalid_size_type ? 
- 0 : - data_cache.get_size( - total_row_indices[counter_index].constraint_position)); - } + size_type + insert_new_index(const std::pair &pair) + { + Assert(row_length > 0, ExcInternalError()); + const unsigned int index = individual_size.size(); + individual_size.push_back(1); + data.resize(individual_size.size() * row_length); + data[index * row_length] = pair; + individual_size[index] = 1; + return index; + } - // returns the global row of the counter_index-th entry in the list - size_type - global_row(const size_type counter_index) const - { - return total_row_indices[counter_index].global_row; - } + void + append_index(const size_type index, + const std::pair &pair) + { + AssertIndexRange(index, individual_size.size()); + const size_type my_length = individual_size[index]; + if (my_length == row_length) + { + AssertDimension(data.size(), individual_size.size() * row_length); + // no space left in this row, need to double row_length and + // rearrange the data items. Move all items to the right except the + // first one, starting at the back. Since individual_size contains + // at least one element when we get here, subtracting 1 works fine. + data.resize(2 * data.size()); + for (size_type i = individual_size.size() - 1; i > 0; --i) + { + const auto ptr = data.data(); + std::move_backward(ptr + i * row_length, + ptr + i * row_length + individual_size[i], + ptr + i * 2 * row_length + + individual_size[i]); + } + row_length *= 2; + } + data[index * row_length + my_length] = pair; + individual_size[index] = my_length + 1; + } - // returns the global row of the counter_index-th entry in the list - size_type & - global_row(const size_type counter_index) - { - return total_row_indices[counter_index].global_row; - } + size_type + get_size(const size_type index) const + { + return individual_size[index]; + } - // returns the local row in the cell matrix associated with the - // counter_index-th entry in the list. Returns invalid_size_type for - // constrained rows - size_type - local_row(const size_type counter_index) const - { - return total_row_indices[counter_index].local_row; - } + const std::pair * + get_entry(const size_type index) const + { + return &data[index * row_length]; + } - // writable index - size_type & - local_row(const size_type counter_index) - { - return total_row_indices[counter_index].local_row; - } + size_type row_length; - // returns the local row in the cell matrix associated with the - // counter_index-th entry in the list in the index_in_constraint-th - // position of constraints - size_type - local_row(const size_type counter_index, - const size_type index_in_constraint) const - { - return (data_cache.get_entry(total_row_indices[counter_index] - .constraint_position)[index_in_constraint]) - .first; - } + std::vector> data; - // returns the value of the constraint in the counter_index-th entry in - // the list in the index_in_constraint-th position of constraints - number - constraint_value(const size_type counter_index, - const size_type index_in_constraint) const - { - return (data_cache.get_entry(total_row_indices[counter_index] - .constraint_position)[index_in_constraint]) - .second; - } + std::vector individual_size; + }; - // returns whether there is one row with indirect contributions (i.e., - // there has been at least one constraint with non-trivial ConstraintLine) - bool - have_indirect_rows() const + // collects all the global rows from a local contribution (cell) and their + // origin (direct/constraint). 
this is basically a vector consisting of + // "Distributing" structs using access via the DataCache. Provides some + // specialized sort and insert functions. + // + // in case there are no constraints, this is basically a list of pairs + // with the first index being the global index and the second + // index the local index. The list is sorted with respect to the global + // index. + // + // in case there are constraints, a global dof might get a contribution also + // because it gets data from a constrained dof. This means that a global dof + // might also have indirect contributions from a local dof via a constraint, + // besides the direct ones. + // + // The actions performed here correspond to reshaping the constraint + // information from global degrees of freedom to local ones (i.e., + // cell-related DoFs), and also transforming the constraint information from + // compressed row storage (each local dof that is constrained has a list of + // constraint entries associated to it) into compressed column storage based + // on the cell-related DoFs (we have a list of global degrees of freedom, + // and to each we have a list of local rows where the entries come from). To + // increase the speed, we additionally store whether an entry is generated + // directly from the local degrees of freedom or whether it comes from a + // constraint. + template + class GlobalRowsFromLocal { - return data_cache.individual_size.empty() == false; - } + public: + GlobalRowsFromLocal() + : n_active_rows(0) + , n_inhomogeneous_rows(0) + {} - // append an entry that is constrained. This means that there is one less - // nontrivial row - void - insert_constraint(const size_type constrained_local_dof) - { - --n_active_rows; - total_row_indices[n_active_rows].local_row = constrained_local_dof; - total_row_indices[n_active_rows].global_row = numbers::invalid_size_type; - } + void + reinit(const size_type n_local_rows) + { + total_row_indices.resize(n_local_rows); + for (unsigned int i = 0; i < n_local_rows; ++i) + total_row_indices[i].constraint_position = numbers::invalid_size_type; + n_active_rows = n_local_rows; + n_inhomogeneous_rows = 0; + data_cache.reinit(); + } - // returns the number of constrained dofs in the structure. 
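The bookkeeping behind insert_constraint and n_constraints can be shown with a plain vector (a sketch with invented indices, not the class itself): constrained local dofs are parked at the back of the array while the active count shrinks, so active and constrained entries share one allocation.

#include <cassert>
#include <cstddef>
#include <vector>

int main()
{
  // one slot per local dof of a cell; value = local dof index
  std::vector<std::size_t> total_rows = {0, 1, 2, 3};
  std::size_t              n_active   = total_rows.size();

  // "insert_constraint": move a constrained dof behind the active range
  const auto park = [&](std::size_t constrained_local_dof) {
    --n_active;
    total_rows[n_active] = constrained_local_dof;
  };
  park(1); // local dof 1 is constrained

  assert(n_active == 3);
  assert(total_rows.size() - n_active == 1); // == n_constraints()
  return 0;
}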
Constrained - // dofs do not contribute directly to the matrix, but are needed in order - // to set matrix diagonals and resolve inhomogeneities - size_type - n_constraints() const - { - return total_row_indices.size() - n_active_rows; - } + // implemented below + void + insert_index(const size_type global_row, + const size_type local_row, + const number constraint_value); + void + sort(); + + // Print object for debugging purpose + void + print(std::ostream &os) + { + os << "Active rows " << n_active_rows << std::endl + << "Constr rows " << n_constraints() << std::endl + << "Inhom rows " << n_inhomogeneous_rows << std::endl + << "Local: "; + for (const auto &total_row_index : total_row_indices) + os << ' ' << std::setw(4) << total_row_index.local_row; + os << std::endl << "Global:"; + for (const auto &total_row_index : total_row_indices) + os << ' ' << std::setw(4) << total_row_index.global_row; + os << std::endl << "ConPos:"; + for (const auto &total_row_index : total_row_indices) + os << ' ' << std::setw(4) << total_row_index.constraint_position; + os << std::endl; + } - // returns the number of constrained dofs in the structure that have an - // inhomogeneity - size_type - n_inhomogeneities() const - { - return n_inhomogeneous_rows; - } + // return all kind of information on the constraints - // tells the structure that the ith constraint is - // inhomogeneous. inhomogeneous constraints contribute to right hand - // sides, so to have fast access to them, put them before homogeneous - // constraints - void - set_ith_constraint_inhomogeneous(const size_type i) - { - Assert(i >= n_inhomogeneous_rows, ExcInternalError()); - std::swap(total_row_indices[n_active_rows + i], - total_row_indices[n_active_rows + n_inhomogeneous_rows]); - n_inhomogeneous_rows++; - } + // returns the number of global indices in the struct + size_type + size() const + { + return n_active_rows; + } - // the local row where constraint number i was detected, to find that row - // easily when the GlobalRowsToLocal has been set up - size_type - constraint_origin(size_type i) const - { - return total_row_indices[n_active_rows + i].local_row; - } + // returns the number of constraints that are associated to the + // counter_index-th entry in the list + size_type + size(const size_type counter_index) const + { + return (total_row_indices[counter_index].constraint_position == + numbers::invalid_size_type ? + 0 : + data_cache.get_size( + total_row_indices[counter_index].constraint_position)); + } - // a vector that contains all the global ids and the corresponding local - // ids as well as a pointer to that data where we store how to resolve - // constraints. - std::vector total_row_indices; + // returns the global row of the counter_index-th entry in the list + size_type + global_row(const size_type counter_index) const + { + return total_row_indices[counter_index].global_row; + } - private: - // holds the actual data from the constraints - DataCache data_cache; + // returns the global row of the counter_index-th entry in the list + size_type & + global_row(const size_type counter_index) + { + return total_row_indices[counter_index].global_row; + } - // how many rows there are, constraints disregarded - size_type n_active_rows; + // returns the local row in the cell matrix associated with the + // counter_index-th entry in the list. 
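Read together, local_row() and constraint_value() let callers reassemble a global quantity from its direct and indirect parts. A tiny numeric sketch (numbers invented): global dof 7 receives local_rhs[0] directly, plus half of local_rhs[1] because local dof 1 is constrained as u_1 = 0.5 u_3 + 0.5 u_7.

#include <cassert>
#include <vector>

int main()
{
  const std::vector<double> local_rhs = {1.0, 2.0};

  double global_7 = local_rhs[0]; // direct: local dof 0 is global dof 7

  const double weight = 0.5;         // constraint_value() for dof 7's entry
  global_7 += weight * local_rhs[1]; // indirect, via constrained local dof 1

  assert(global_7 == 2.0);
  return 0;
}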
Returns invalid_size_type for + // constrained rows + size_type + local_row(const size_type counter_index) const + { + return total_row_indices[counter_index].local_row; + } - // the number of rows with inhomogeneous constraints - size_type n_inhomogeneous_rows; - }; + // writable index + size_type & + local_row(const size_type counter_index) + { + return total_row_indices[counter_index].local_row; + } - // a function that appends an additional row to the list of values, or - // appends a value to an already existing row. Similar functionality as for - // std::map, but here done for a - // std::vector, much faster for short lists as we have them - // here - template - inline void - GlobalRowsFromLocal::insert_index(const size_type global_row, - const size_type local_row, - const number constraint_value) - { - using index_iterator = std::vector::iterator; - index_iterator pos, pos1; - Distributing row_value(global_row); - std::pair constraint(local_row, constraint_value); - - // check whether the list was really sorted before entering here - for (size_type i = 1; i < n_active_rows; ++i) - Assert(total_row_indices[i - 1] < total_row_indices[i], - ExcInternalError()); + // returns the local row in the cell matrix associated with the + // counter_index-th entry in the list in the index_in_constraint-th + // position of constraints + size_type + local_row(const size_type counter_index, + const size_type index_in_constraint) const + { + return (data_cache.get_entry( + total_row_indices[counter_index] + .constraint_position)[index_in_constraint]) + .first; + } - pos = Utilities::lower_bound(total_row_indices.begin(), - total_row_indices.begin() + n_active_rows, - row_value); - if (pos->global_row == global_row) - pos1 = pos; - else + // returns the value of the constraint in the counter_index-th entry in + // the list in the index_in_constraint-th position of constraints + number + constraint_value(const size_type counter_index, + const size_type index_in_constraint) const { - pos1 = total_row_indices.insert(pos, row_value); - ++n_active_rows; + return (data_cache.get_entry( + total_row_indices[counter_index] + .constraint_position)[index_in_constraint]) + .second; } - if (pos1->constraint_position == numbers::invalid_size_type) - pos1->constraint_position = data_cache.insert_new_index(constraint); - else - data_cache.append_index(pos1->constraint_position, constraint); - } + // returns whether there is one row with indirect contributions (i.e., + // there has been at least one constraint with non-trivial ConstraintLine) + bool + have_indirect_rows() const + { + return data_cache.individual_size.empty() == false; + } - // this sort algorithm sorts std::vector, but does not take - // the constraints into account. this means that in case that constraints - // are already inserted, this function does not work as expected. Use - // shellsort, which is very fast in case the indices are already sorted - // (which is the usual case with DG elements), and not too slow in other - // cases - template - inline void - GlobalRowsFromLocal::sort() - { - size_type i, j, j2, temp, templ, istep; - size_type step; + // append an entry that is constrained. This means that there is one less + // nontrivial row + void + insert_constraint(const size_type constrained_local_dof) + { + --n_active_rows; + total_row_indices[n_active_rows].local_row = constrained_local_dof; + total_row_indices[n_active_rows].global_row = + numbers::invalid_size_type; + } - // check whether the constraints are really empty. 
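For reference, the shellsort that sort() implements looks like this once stripped of the paired local_row bookkeeping (a generic sketch, not the deal.II code): for already sorted input, as is typical for DG dof lists, the inner loop never moves anything.

#include <cstddef>
#include <vector>

void shellsort(std::vector<unsigned int> &v)
{
  const std::size_t length = v.size();
  // gap-insertion sort with gaps length/2, length/4, ..., 1
  for (std::size_t step = length / 2; step > 0; step /= 2)
    for (std::size_t i = step; i < length; ++i)
      {
        const unsigned int temp = v[i];
        std::size_t        j    = i;
        while (j >= step && v[j - step] > temp)
          {
            v[j] = v[j - step]; // shift larger element up by one gap
            j -= step;
          }
        v[j] = temp;
      }
}

int main()
{
  std::vector<unsigned int> v = {4, 1, 3, 2};
  shellsort(v); // v == {1, 2, 3, 4}
  return 0;
}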
- const size_type length = size(); + // returns the number of constrained dofs in the structure. Constrained + // dofs do not contribute directly to the matrix, but are needed in order + // to set matrix diagonals and resolve inhomogeneities + size_type + n_constraints() const + { + return total_row_indices.size() - n_active_rows; + } - // make sure that we are in the range of the vector - AssertIndexRange(length, total_row_indices.size() + 1); - for (size_type i = 0; i < length; ++i) - Assert(total_row_indices[i].constraint_position == - numbers::invalid_size_type, - ExcInternalError()); + // returns the number of constrained dofs in the structure that have an + // inhomogeneity + size_type + n_inhomogeneities() const + { + return n_inhomogeneous_rows; + } - step = length / 2; - while (step > 0) + // tells the structure that the ith constraint is + // inhomogeneous. inhomogeneous constraints contribute to right hand + // sides, so to have fast access to them, put them before homogeneous + // constraints + void + set_ith_constraint_inhomogeneous(const size_type i) { - for (i = step; i < length; i++) - { - istep = step; - j = i; - j2 = j - istep; - temp = total_row_indices[i].global_row; - templ = total_row_indices[i].local_row; - if (total_row_indices[j2].global_row > temp) - { - while ((j >= istep) && - (total_row_indices[j2].global_row > temp)) - { - total_row_indices[j].global_row = - total_row_indices[j2].global_row; - total_row_indices[j].local_row = - total_row_indices[j2].local_row; - j = j2; - j2 -= istep; - } - total_row_indices[j].global_row = temp; - total_row_indices[j].local_row = templ; - } - } - step = step >> 1; + Assert(i >= n_inhomogeneous_rows, ExcInternalError()); + std::swap(total_row_indices[n_active_rows + i], + total_row_indices[n_active_rows + n_inhomogeneous_rows]); + n_inhomogeneous_rows++; } - } - /** - * Scratch data that is used during calls to distribute_local_to_global and - * add_entries_local_to_global. In order to avoid frequent memory - * allocation, we keep the data alive from one call to the next in a static - * variable. Since we want to allow for different number types in matrices, - * this is a template. - * - * Since each thread gets its private version of scratch data out of the - * ThreadLocalStorage, no conflicting access can occur. For this to be - * valid, we need to make sure that no call within - * distribute_local_to_global is made that by itself can spawn tasks. - * Otherwise, we might end up in a situation where several threads fight for - * the data. - * - * Access to the scratch data is only through the accessor class which - * handles the access as well as marking the data as used. - */ - template - class AffineConstraintsData - { - public: - struct ScratchData - { - /** - * Constructor, does nothing. - */ - ScratchData() - : in_use(false) - {} + // the local row where constraint number i was detected, to find that row + // easily when the GlobalRowsToLocal has been set up + size_type + constraint_origin(size_type i) const + { + return total_row_indices[n_active_rows + i].local_row; + } - /** - * Copy constructor, does nothing - */ - ScratchData(const ScratchData &) - : in_use(false) - {} + // a vector that contains all the global ids and the corresponding local + // ids as well as a pointer to that data where we store how to resolve + // constraints. + std::vector total_row_indices; - /** - * Stores whether the data is currently in use. 
- */ - bool in_use; + private: + // holds the actual data from the constraints + DataCache data_cache; - /** - * Temporary array for column indices - */ - std::vector columns; + // how many rows there are, constraints disregarded + size_type n_active_rows; - /** - * Temporary array for column values - */ - std::vector values; + // the number of rows with inhomogeneous constraints + size_type n_inhomogeneous_rows; + }; - /** - * Temporary array for block start indices - */ - std::vector block_starts; + // a function that appends an additional row to the list of values, or + // appends a value to an already existing row. Similar functionality as for + // std::map, but here done for a + // std::vector, much faster for short lists as we have them + // here + template + inline void + GlobalRowsFromLocal::insert_index(const size_type global_row, + const size_type local_row, + const number constraint_value) + { + using index_iterator = std::vector::iterator; + index_iterator pos, pos1; + Distributing row_value(global_row); + std::pair constraint(local_row, constraint_value); + + // check whether the list was really sorted before entering here + for (size_type i = 1; i < n_active_rows; ++i) + Assert(total_row_indices[i - 1] < total_row_indices[i], + ExcInternalError()); - /** - * Temporary array for vector indices - */ - std::vector vector_indices; + pos = Utilities::lower_bound(total_row_indices.begin(), + total_row_indices.begin() + n_active_rows, + row_value); + if (pos->global_row == global_row) + pos1 = pos; + else + { + pos1 = total_row_indices.insert(pos, row_value); + ++n_active_rows; + } - /** - * Temporary array for vector values - */ - std::vector vector_values; + if (pos1->constraint_position == numbers::invalid_size_type) + pos1->constraint_position = data_cache.insert_new_index(constraint); + else + data_cache.append_index(pos1->constraint_position, constraint); + } - /** - * Data array for reorder row/column indices. - */ - GlobalRowsFromLocal global_rows; + // this sort algorithm sorts std::vector, but does not take + // the constraints into account. this means that in case that constraints + // are already inserted, this function does not work as expected. Use + // shellsort, which is very fast in case the indices are already sorted + // (which is the usual case with DG elements), and not too slow in other + // cases + template + inline void + GlobalRowsFromLocal::sort() + { + size_type i, j, j2, temp, templ, istep; + size_type step; - /** - * Data array for reorder row/column indices. - */ - GlobalRowsFromLocal global_columns; - }; + // check whether the constraints are really empty. 
+ const size_type length = size(); + // make sure that we are in the range of the vector + AssertIndexRange(length, total_row_indices.size() + 1); + for (size_type i = 0; i < length; ++i) + Assert(total_row_indices[i].constraint_position == + numbers::invalid_size_type, + ExcInternalError()); + step = length / 2; + while (step > 0) + { + for (i = step; i < length; i++) + { + istep = step; + j = i; + j2 = j - istep; + temp = total_row_indices[i].global_row; + templ = total_row_indices[i].local_row; + if (total_row_indices[j2].global_row > temp) + { + while ((j >= istep) && + (total_row_indices[j2].global_row > temp)) + { + total_row_indices[j].global_row = + total_row_indices[j2].global_row; + total_row_indices[j].local_row = + total_row_indices[j2].local_row; + j = j2; + j2 -= istep; + } + total_row_indices[j].global_row = temp; + total_row_indices[j].local_row = templ; + } + } + step = step >> 1; + } + } /** - * Accessor class to guard access to scratch_data + * Scratch data that is used during calls to distribute_local_to_global and + * add_entries_local_to_global. In order to avoid frequent memory + * allocation, we keep the data alive from one call to the next in a static + * variable. Since we want to allow for different number types in matrices, + * this is a template. + * + * Since each thread gets its private version of scratch data out of the + * ThreadLocalStorage, no conflicting access can occur. For this to be + * valid, we need to make sure that no call within + * distribute_local_to_global is made that by itself can spawn tasks. + * Otherwise, we might end up in a situation where several threads fight for + * the data. + * + * Access to the scratch data is only through the accessor class which + * handles the access as well as marking the data as used. */ - class ScratchDataAccessor + template + class AffineConstraintsData { public: - /** - * Constructor. Grabs the scratch data object on the current thread and - * marks it as used. - */ - ScratchDataAccessor() - : my_scratch_data(&AffineConstraintsData::scratch_data.get()) + struct ScratchData { - Assert(my_scratch_data->in_use == false, - ExcMessage( - "Access to thread-local scratch data tried, but it is already " - "in use")); - my_scratch_data->in_use = true; - } + /** + * Constructor, does nothing. + */ + ScratchData() + : in_use(false) + {} + + /** + * Copy constructor, does nothing + */ + ScratchData(const ScratchData &) + : in_use(false) + {} + + /** + * Stores whether the data is currently in use. + */ + bool in_use; + + /** + * Temporary array for column indices + */ + std::vector columns; + + /** + * Temporary array for column values + */ + std::vector values; + + /** + * Temporary array for block start indices + */ + std::vector block_starts; + + /** + * Temporary array for vector indices + */ + std::vector vector_indices; + + /** + * Temporary array for vector values + */ + std::vector vector_values; + + /** + * Data array for reorder row/column indices. + */ + GlobalRowsFromLocal global_rows; + + /** + * Data array for reorder row/column indices. + */ + GlobalRowsFromLocal global_columns; + }; - /** - * Destructor. Mark scratch data as available again. - */ - ~ScratchDataAccessor() - { - my_scratch_data->in_use = false; - } - /** - * Dereferencing operator. - */ - ScratchData &operator*() - { - return *my_scratch_data; - } /** - * Dereferencing operator. 
+ * Accessor class to guard access to scratch_data */ - ScratchData *operator->() + class ScratchDataAccessor { - return my_scratch_data; - } + public: + /** + * Constructor. Grabs the scratch data object on the current thread and + * marks it as used. + */ + ScratchDataAccessor() + : my_scratch_data(&AffineConstraintsData::scratch_data.get()) + { + Assert( + my_scratch_data->in_use == false, + ExcMessage( + "Access to thread-local scratch data tried, but it is already " + "in use")); + my_scratch_data->in_use = true; + } + + /** + * Destructor. Mark scratch data as available again. + */ + ~ScratchDataAccessor() + { + my_scratch_data->in_use = false; + } + + /** + * Dereferencing operator. + */ + ScratchData &operator*() + { + return *my_scratch_data; + } + + /** + * Dereferencing operator. + */ + ScratchData *operator->() + { + return my_scratch_data; + } + + private: + ScratchData *my_scratch_data; + }; private: - ScratchData *my_scratch_data; + /** + * The actual data object that contains a scratch data for each thread. + */ + static Threads::ThreadLocalStorage scratch_data; }; - private: - /** - * The actual data object that contains a scratch data for each thread. - */ - static Threads::ThreadLocalStorage scratch_data; - }; + // function for block matrices: Find out where in the list of local dofs + // (sorted according to global ids) the individual blocks start. + // Transform the global indices to block-local indices in order to be + // able to use functions like vector.block(1)(block_local_id), instead of + // vector(global_id). This avoids transforming indices one-by-one later + // on. + template + inline void + make_block_starts(const BlockType & block_object, + GlobalRowsFromLocal &global_rows, + std::vector & block_starts) + { + AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); - // function for block matrices: Find out where in the list of local dofs - // (sorted according to global ids) the individual blocks start. - // Transform the global indices to block-local indices in order to be - // able to use functions like vector.block(1)(block_local_id), instead of - // vector(global_id). This avoids transforming indices one-by-one later - // on. - template - inline void - make_block_starts(const BlockType & block_object, - GlobalRowsFromLocal &global_rows, - std::vector & block_starts) - { - AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); + using row_iterator = std::vector::iterator; + row_iterator block_indices = global_rows.total_row_indices.begin(); - using row_iterator = std::vector::iterator; - row_iterator block_indices = global_rows.total_row_indices.begin(); + const size_type num_blocks = block_object.n_block_rows(); + const size_type n_active_rows = global_rows.size(); - const size_type num_blocks = block_object.n_block_rows(); - const size_type n_active_rows = global_rows.size(); + // find end of rows. 
+ block_starts[0] = 0; + for (size_type i = 1; i < num_blocks; ++i) + { + row_iterator first_block = Utilities::lower_bound( + block_indices, + global_rows.total_row_indices.begin() + n_active_rows, + Distributing(block_object.get_row_indices().block_start(i))); + block_starts[i] = first_block - global_rows.total_row_indices.begin(); + block_indices = first_block; + } + block_starts[num_blocks] = n_active_rows; + + // transform row indices to block-local index space + for (size_type i = block_starts[1]; i < n_active_rows; ++i) + global_rows.global_row(i) = + block_object.get_row_indices() + .global_to_local(global_rows.global_row(i)) + .second; + } - // find end of rows. - block_starts[0] = 0; - for (size_type i = 1; i < num_blocks; ++i) - { - row_iterator first_block = Utilities::lower_bound( - block_indices, - global_rows.total_row_indices.begin() + n_active_rows, - Distributing(block_object.get_row_indices().block_start(i))); - block_starts[i] = first_block - global_rows.total_row_indices.begin(); - block_indices = first_block; - } - block_starts[num_blocks] = n_active_rows; + // same as before, but for std::vector instead of + // GlobalRowsFromLocal. Used in functions for sparsity patterns. + template + inline void + make_block_starts(const BlockType & block_object, + std::vector &row_indices, + std::vector &block_starts) + { + AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); - // transform row indices to block-local index space - for (size_type i = block_starts[1]; i < n_active_rows; ++i) - global_rows.global_row(i) = block_object.get_row_indices() - .global_to_local(global_rows.global_row(i)) - .second; - } + using row_iterator = std::vector::iterator; + row_iterator col_indices = row_indices.begin(); - // same as before, but for std::vector instead of - // GlobalRowsFromLocal. Used in functions for sparsity patterns. - template - inline void - make_block_starts(const BlockType & block_object, - std::vector &row_indices, - std::vector &block_starts) - { - AssertDimension(block_starts.size(), block_object.n_block_rows() + 1); + const size_type num_blocks = block_object.n_block_rows(); - using row_iterator = std::vector::iterator; - row_iterator col_indices = row_indices.begin(); + // find end of rows. + block_starts[0] = 0; + for (size_type i = 1; i < num_blocks; ++i) + { + row_iterator first_block = + Utilities::lower_bound(col_indices, + row_indices.end(), + block_object.get_row_indices().block_start( + i)); + block_starts[i] = first_block - row_indices.begin(); + col_indices = first_block; + } + block_starts[num_blocks] = row_indices.size(); - const size_type num_blocks = block_object.n_block_rows(); + // transform row indices to local index space + for (size_type i = block_starts[1]; i < row_indices.size(); ++i) + row_indices[i] = + block_object.get_row_indices().global_to_local(row_indices[i]).second; + } - // find end of rows. - block_starts[0] = 0; - for (size_type i = 1; i < num_blocks; ++i) - { - row_iterator first_block = - Utilities::lower_bound(col_indices, - row_indices.end(), - block_object.get_row_indices().block_start(i)); - block_starts[i] = first_block - row_indices.begin(); - col_indices = first_block; - } - block_starts[num_blocks] = row_indices.size(); + // resolves constraints of one column at the innermost loop. goes through + // the origin of each global entry and finds out which data we need to + // collect. 
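Spelled out for one pair of global dofs, the value resolve_matrix_entry accumulates is the direct entry plus three constraint-weighted combinations; with invented numbers (row weight c and column weight d, both attached to local dof 1):

#include <cassert>

int main()
{
  const double A[2][2] = {{4.0, 1.0}, {1.0, 2.0}}; // local cell matrix

  const double c = 0.5;  // weight of the row constraint
  const double d = 0.25; // weight of the column constraint

  const double entry = A[0][0]            // direct row, direct column
                       + d * A[0][1]      // direct row, constrained column
                       + c * A[1][0]      // constrained row, direct column
                       + c * d * A[1][1]; // constrained row and column
  assert(entry == 5.0);
  return 0;
}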
+ template + static inline number + resolve_matrix_entry(const GlobalRowsFromLocal &global_rows, + const GlobalRowsFromLocal &global_cols, + const size_type i, + const size_type j, + const size_type loc_row, + const FullMatrix & local_matrix) + { + const size_type loc_col = global_cols.local_row(j); + number col_val; - // transform row indices to local index space - for (size_type i = block_starts[1]; i < row_indices.size(); ++i) - row_indices[i] = - block_object.get_row_indices().global_to_local(row_indices[i]).second; - } + // case 1: row has direct contribution in local matrix. decide whether col + // has a direct contribution. if not, set the value to zero. + if (loc_row != numbers::invalid_size_type) + { + col_val = ((loc_col != numbers::invalid_size_type) ? + local_matrix(loc_row, loc_col) : + 0); + + // account for indirect contributions by constraints in column + for (size_type p = 0; p < global_cols.size(j); ++p) + col_val += (local_matrix(loc_row, global_cols.local_row(j, p)) * + global_cols.constraint_value(j, p)); + } - // resolves constraints of one column at the innermost loop. goes through - // the origin of each global entry and finds out which data we need to - // collect. - template - static inline number - resolve_matrix_entry(const GlobalRowsFromLocal &global_rows, - const GlobalRowsFromLocal &global_cols, - const size_type i, - const size_type j, - const size_type loc_row, - const FullMatrix & local_matrix) - { - const size_type loc_col = global_cols.local_row(j); - number col_val; + // case 2: row has no direct contribution in local matrix + else + col_val = 0; - // case 1: row has direct contribution in local matrix. decide whether col - // has a direct contribution. if not, set the value to zero. - if (loc_row != numbers::invalid_size_type) - { - col_val = ((loc_col != numbers::invalid_size_type) ? - local_matrix(loc_row, loc_col) : - 0); - - // account for indirect contributions by constraints in column - for (size_type p = 0; p < global_cols.size(j); ++p) - col_val += (local_matrix(loc_row, global_cols.local_row(j, p)) * - global_cols.constraint_value(j, p)); - } + // account for indirect contributions by constraints in row, going trough + // the direct and indirect references in the given column. + for (size_type q = 0; q < global_rows.size(i); ++q) + { + number add_this = + (loc_col != numbers::invalid_size_type) ? + local_matrix(global_rows.local_row(i, q), loc_col) : + 0; + + for (size_type p = 0; p < global_cols.size(j); ++p) + add_this += (local_matrix(global_rows.local_row(i, q), + global_cols.local_row(j, p)) * + global_cols.constraint_value(j, p)); + col_val += add_this * global_rows.constraint_value(i, q); + } + return col_val; + } - // case 2: row has no direct contribution in local matrix - else - col_val = 0; + // computes all entries that need to be written into global_rows[i]. Lists + // the resulting values in val_ptr, and the corresponding column indices in + // col_ptr. + template + inline void + resolve_matrix_row(const GlobalRowsFromLocal &global_rows, + const GlobalRowsFromLocal &global_cols, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix & local_matrix, + size_type *& col_ptr, + number *& val_ptr) + { + if (column_end == column_start) + return; - // account for indirect contributions by constraints in row, going trough - // the direct and indirect references in the given column. 
- for (size_type q = 0; q < global_rows.size(i); ++q) - { - number add_this = (loc_col != numbers::invalid_size_type) ? - local_matrix(global_rows.local_row(i, q), loc_col) : - 0; - - for (size_type p = 0; p < global_cols.size(j); ++p) - add_this += (local_matrix(global_rows.local_row(i, q), - global_cols.local_row(j, p)) * - global_cols.constraint_value(j, p)); - col_val += add_this * global_rows.constraint_value(i, q); - } - return col_val; - } + AssertIndexRange(column_end - 1, global_cols.size()); + const size_type loc_row = global_rows.local_row(i); - // computes all entries that need to be written into global_rows[i]. Lists - // the resulting values in val_ptr, and the corresponding column indices in - // col_ptr. - template - inline void - resolve_matrix_row(const GlobalRowsFromLocal &global_rows, - const GlobalRowsFromLocal &global_cols, - const size_type i, - const size_type column_start, - const size_type column_end, - const FullMatrix & local_matrix, - size_type *& col_ptr, - number *& val_ptr) - { - if (column_end == column_start) - return; + // fast function if there are no indirect references to any of the local + // rows at all on this set of dofs (saves a lot of checks). the only check + // we actually need to perform is whether the matrix element is zero. + if (global_rows.have_indirect_rows() == false && + global_cols.have_indirect_rows() == false) + { + AssertIndexRange(loc_row, local_matrix.m()); + const number *matrix_ptr = &local_matrix(loc_row, 0); - AssertIndexRange(column_end - 1, global_cols.size()); - const size_type loc_row = global_rows.local_row(i); + for (size_type j = column_start; j < column_end; ++j) + { + const size_type loc_col = global_cols.local_row(j); + AssertIndexRange(loc_col, local_matrix.n()); + const number col_val = matrix_ptr[loc_col]; + if (col_val != number()) + { + *val_ptr++ = static_cast(col_val); + *col_ptr++ = global_cols.global_row(j); + } + } + } - // fast function if there are no indirect references to any of the local - // rows at all on this set of dofs (saves a lot of checks). the only check - // we actually need to perform is whether the matrix element is zero. - if (global_rows.have_indirect_rows() == false && - global_cols.have_indirect_rows() == false) - { - AssertIndexRange(loc_row, local_matrix.m()); - const number *matrix_ptr = &local_matrix(loc_row, 0); + // more difficult part when there are indirect references and when we need + // to do some more checks. + else + { + for (size_type j = column_start; j < column_end; ++j) + { + number col_val = resolve_matrix_entry( + global_rows, global_cols, i, j, loc_row, local_matrix); - for (size_type j = column_start; j < column_end; ++j) - { - const size_type loc_col = global_cols.local_row(j); - AssertIndexRange(loc_col, local_matrix.n()); - const number col_val = matrix_ptr[loc_col]; - if (col_val != number()) - { - *val_ptr++ = static_cast(col_val); - *col_ptr++ = global_cols.global_row(j); - } - } - } + // if we got some nontrivial value, append it to the array of + // values. + if (col_val != number()) + { + *val_ptr++ = static_cast(col_val); + *col_ptr++ = global_cols.global_row(j); + } + } + } + } - // more difficult part when there are indirect references and when we need - // to do some more checks. - else + // specialized function that can write into the row of a + // SparseMatrix. 
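The helper announced here exploits the fact that both the stored row and the incoming updates are sorted by column, so a single forward-moving cursor suffices; a sketch with plain vectors standing in for the sparse row (names invented):

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main()
{
  // one stored sparse row: sorted column indices and parallel values
  const std::vector<std::size_t> cols = {0, 3, 5, 9};
  std::vector<double>            vals = {1.0, 0.0, 2.0, 0.0};

  // column-sorted updates; each column must exist in the sparsity pattern
  const std::vector<std::pair<std::size_t, double>> updates = {{3, 0.5},
                                                               {9, 1.5}};

  std::size_t pos = 0; // advances monotonically, never rescans the row
  for (const auto &u : updates)
    {
      while (cols[pos] < u.first)
        ++pos;
      assert(cols[pos] == u.first && "column missing from pattern");
      vals[pos] += u.second;
    }
  // vals == {1.0, 0.5, 2.0, 1.5}
  return 0;
}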
+ namespace dealiiSparseMatrix + { + template + static inline void + add_value(const LocalType value, + const size_type row, + const size_type column, + SparseMatrixIterator &matrix_values) { - for (size_type j = column_start; j < column_end; ++j) + (void)row; + if (value != LocalType()) { - number col_val = resolve_matrix_entry( - global_rows, global_cols, i, j, loc_row, local_matrix); - - // if we got some nontrivial value, append it to the array of - // values. - if (col_val != number()) - { - *val_ptr++ = static_cast(col_val); - *col_ptr++ = global_cols.global_row(j); - } + while (matrix_values->column() < column) + ++matrix_values; + Assert(matrix_values->column() == column, + typename SparseMatrix< + typename SparseMatrixIterator::MatrixType::value_type>:: + ExcInvalidIndex(row, column)); + matrix_values->value() += value; } } - } + } // namespace dealiiSparseMatrix - // specialized function that can write into the row of a - // SparseMatrix. - namespace dealiiSparseMatrix - { - template - static inline void - add_value(const LocalType value, - const size_type row, - const size_type column, - SparseMatrixIterator &matrix_values) + // similar as before, now with shortcut for deal.II sparse matrices. this + // lets us avoid using extra arrays, and does all the operations just in + // place, i.e., in the respective matrix row + template + inline void + resolve_matrix_row(const GlobalRowsFromLocal &global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const FullMatrix & local_matrix, + SparseMatrix * sparse_matrix) { - (void)row; - if (value != LocalType()) - { - while (matrix_values->column() < column) - ++matrix_values; - Assert(matrix_values->column() == column, - typename SparseMatrix< - typename SparseMatrixIterator::MatrixType::value_type>:: - ExcInvalidIndex(row, column)); - matrix_values->value() += value; - } - } - } // namespace dealiiSparseMatrix + if (column_end == column_start) + return; - // similar as before, now with shortcut for deal.II sparse matrices. this - // lets us avoid using extra arrays, and does all the operations just in - // place, i.e., in the respective matrix row - template - inline void - resolve_matrix_row(const GlobalRowsFromLocal &global_rows, - const size_type i, - const size_type column_start, - const size_type column_end, - const FullMatrix & local_matrix, - SparseMatrix * sparse_matrix) - { - if (column_end == column_start) - return; + AssertIndexRange(column_end - 1, global_rows.size()); + const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern(); - AssertIndexRange(column_end - 1, global_rows.size()); - const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern(); + if (sparsity.n_nonzero_elements() == 0) + return; - if (sparsity.n_nonzero_elements() == 0) - return; + const size_type row = global_rows.global_row(i); + const size_type loc_row = global_rows.local_row(i); - const size_type row = global_rows.global_row(i); - const size_type loc_row = global_rows.local_row(i); + typename SparseMatrix::iterator matrix_values = + sparse_matrix->begin(row); + const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols(); - typename SparseMatrix::iterator matrix_values = - sparse_matrix->begin(row); - const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols(); + // distinguish three cases about what can happen for checking whether the + // diagonal is the first element of the row. 
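One assumption doing quiet work in the three cases below: for square patterns, deal.II's SparsityPattern stores each row's diagonal entry in the first slot. A toy version of the "check inside the loop" case:

#include <cstddef>
#include <utility>
#include <vector>

int main()
{
  // row 4 of a square pattern: diagonal column first, then sorted columns
  const std::vector<std::size_t> cols = {4, 0, 7};
  std::vector<double>            vals = {0.0, 0.0, 0.0};

  const std::vector<std::pair<std::size_t, double>> updates = {{0, 1.0},
                                                               {4, 3.0},
                                                               {7, 2.0}};

  std::size_t pos = 1; // start past the diagonal slot (the ++matrix_values)
  for (const auto &u : updates)
    if (u.first == 4)
      vals[0] += u.second; // diagonal goes to the dedicated first slot
    else
      {
        while (cols[pos] < u.first)
          ++pos;
        vals[pos] += u.second;
      }
  // vals == {3.0, 1.0, 2.0}
  return 0;
}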
this avoids if statements at + // the innermost loop positions - // distinguish three cases about what can happen for checking whether the - // diagonal is the first element of the row. this avoids if statements at - // the innermost loop positions + if (!optimize_diagonal) // case 1: no diagonal optimization in matrix + { + if (global_rows.have_indirect_rows() == false) + { + AssertIndexRange(loc_row, local_matrix.m()); + const number *matrix_ptr = &local_matrix(loc_row, 0); - if (!optimize_diagonal) // case 1: no diagonal optimization in matrix - { - if (global_rows.have_indirect_rows() == false) - { - AssertIndexRange(loc_row, local_matrix.m()); - const number *matrix_ptr = &local_matrix(loc_row, 0); + for (size_type j = column_start; j < column_end; ++j) + { + const size_type loc_col = global_rows.local_row(j); + const number col_val = matrix_ptr[loc_col]; + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + } + } + else + { + for (size_type j = column_start; j < column_end; ++j) + { + number col_val = resolve_matrix_entry( + global_rows, global_rows, i, j, loc_row, local_matrix); + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + } + } + } + else if (i >= column_start && i < column_end) // case 2: can split loop + { + ++matrix_values; // jump over diagonal element + if (global_rows.have_indirect_rows() == false) + { + AssertIndexRange(loc_row, local_matrix.m()); + const number *matrix_ptr = &local_matrix(loc_row, 0); - for (size_type j = column_start; j < column_end; ++j) - { - const size_type loc_col = global_rows.local_row(j); - const number col_val = matrix_ptr[loc_col]; - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - } - else - { - for (size_type j = column_start; j < column_end; ++j) - { - number col_val = resolve_matrix_entry( - global_rows, global_rows, i, j, loc_row, local_matrix); - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - } - } - else if (i >= column_start && i < column_end) // case 2: can split loop - { - ++matrix_values; // jump over diagonal element - if (global_rows.have_indirect_rows() == false) - { - AssertIndexRange(loc_row, local_matrix.m()); - const number *matrix_ptr = &local_matrix(loc_row, 0); + sparse_matrix->begin(row)->value() += matrix_ptr[loc_row]; + for (size_type j = column_start; j < i; ++j) + { + const size_type loc_col = global_rows.local_row(j); + const number col_val = matrix_ptr[loc_col]; + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + } + for (size_type j = i + 1; j < column_end; ++j) + { + const size_type loc_col = global_rows.local_row(j); + const number col_val = matrix_ptr[loc_col]; + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + } + } + else + { + sparse_matrix->begin(row)->value() += resolve_matrix_entry( + global_rows, global_rows, i, i, loc_row, local_matrix); + for (size_type j = column_start; j < i; ++j) + { + number col_val = resolve_matrix_entry( + global_rows, global_rows, i, j, loc_row, local_matrix); + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + } + for (size_type j = i + 1; j < column_end; ++j) + { + number col_val = resolve_matrix_entry( + global_rows, global_rows, i, j, loc_row, local_matrix); + dealiiSparseMatrix::add_value(col_val, + row, + global_rows.global_row(j), + matrix_values); + 
} + } + } + // case 3: can't say - need to check inside the loop + else if (global_rows.have_indirect_rows() == false) + { + ++matrix_values; // jump over diagonal element + AssertIndexRange(loc_row, local_matrix.m()); + const number *matrix_ptr = &local_matrix(loc_row, 0); - sparse_matrix->begin(row)->value() += matrix_ptr[loc_row]; - for (size_type j = column_start; j < i; ++j) - { - const size_type loc_col = global_rows.local_row(j); - const number col_val = matrix_ptr[loc_col]; - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - for (size_type j = i + 1; j < column_end; ++j) - { - const size_type loc_col = global_rows.local_row(j); - const number col_val = matrix_ptr[loc_col]; - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - } - else - { - sparse_matrix->begin(row)->value() += resolve_matrix_entry( - global_rows, global_rows, i, i, loc_row, local_matrix); - for (size_type j = column_start; j < i; ++j) - { - number col_val = resolve_matrix_entry( - global_rows, global_rows, i, j, loc_row, local_matrix); + for (size_type j = column_start; j < column_end; ++j) + { + const size_type loc_col = global_rows.local_row(j); + const number col_val = matrix_ptr[loc_col]; + if (row == global_rows.global_row(j)) + sparse_matrix->begin(row)->value() += col_val; + else dealiiSparseMatrix::add_value(col_val, row, global_rows.global_row(j), matrix_values); - } - for (size_type j = i + 1; j < column_end; ++j) - { - number col_val = resolve_matrix_entry( - global_rows, global_rows, i, j, loc_row, local_matrix); + } + } + else + { + ++matrix_values; // jump over diagonal element + for (size_type j = column_start; j < column_end; ++j) + { + number col_val = resolve_matrix_entry( + global_rows, global_rows, i, j, loc_row, local_matrix); + if (row == global_rows.global_row(j)) + sparse_matrix->begin(row)->value() += col_val; + else dealiiSparseMatrix::add_value(col_val, row, global_rows.global_row(j), matrix_values); - } - } - } - // case 3: can't say - need to check inside the loop - else if (global_rows.have_indirect_rows() == false) - { - ++matrix_values; // jump over diagonal element - AssertIndexRange(loc_row, local_matrix.m()); - const number *matrix_ptr = &local_matrix(loc_row, 0); + } + } + } - for (size_type j = column_start; j < column_end; ++j) - { - const size_type loc_col = global_rows.local_row(j); - const number col_val = matrix_ptr[loc_col]; - if (row == global_rows.global_row(j)) - sparse_matrix->begin(row)->value() += col_val; - else - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - } - else - { - ++matrix_values; // jump over diagonal element - for (size_type j = column_start; j < column_end; ++j) - { - number col_val = resolve_matrix_entry( - global_rows, global_rows, i, j, loc_row, local_matrix); - if (row == global_rows.global_row(j)) - sparse_matrix->begin(row)->value() += col_val; - else - dealiiSparseMatrix::add_value(col_val, - row, - global_rows.global_row(j), - matrix_values); - } - } - } + // Same function to resolve all entries that will be added to the given + // global row global_rows[i] as before, now for sparsity pattern + template + inline void + resolve_matrix_row(const GlobalRowsFromLocal &global_rows, + const size_type i, + const size_type column_start, + const size_type column_end, + const Table<2, bool> & dof_mask, + std::vector::iterator & col_ptr) + { + if (column_end == column_start) + return; - // Same function to 
resolve all entries that will be added to the given - // global row global_rows[i] as before, now for sparsity pattern - template - inline void - resolve_matrix_row(const GlobalRowsFromLocal &global_rows, - const size_type i, - const size_type column_start, - const size_type column_end, - const Table<2, bool> & dof_mask, - std::vector::iterator & col_ptr) - { - if (column_end == column_start) - return; + const size_type loc_row = global_rows.local_row(i); - const size_type loc_row = global_rows.local_row(i); + // fast function if there are no indirect references to any of the local + // rows at all on this set of dofs + if (global_rows.have_indirect_rows() == false) + { + Assert(loc_row < dof_mask.n_rows(), ExcInternalError()); - // fast function if there are no indirect references to any of the local - // rows at all on this set of dofs - if (global_rows.have_indirect_rows() == false) - { - Assert(loc_row < dof_mask.n_rows(), ExcInternalError()); + for (size_type j = column_start; j < column_end; ++j) + { + const size_type loc_col = global_rows.local_row(j); + Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); - for (size_type j = column_start; j < column_end; ++j) - { - const size_type loc_col = global_rows.local_row(j); - Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); + if (dof_mask(loc_row, loc_col) == true) + *col_ptr++ = global_rows.global_row(j); + } + } - if (dof_mask(loc_row, loc_col) == true) - *col_ptr++ = global_rows.global_row(j); - } - } + // slower functions when there are indirect references and when we need to + // do some more checks. + else + { + for (size_type j = column_start; j < column_end; ++j) + { + const size_type loc_col = global_rows.local_row(j); + if (loc_row != numbers::invalid_size_type) + { + Assert(loc_row < dof_mask.n_rows(), ExcInternalError()); + if (loc_col != numbers::invalid_size_type) + { + Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); + if (dof_mask(loc_row, loc_col) == true) + goto add_this_index; + } - // slower functions when there are indirect references and when we need to - // do some more checks. 
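The else branch here handles the indirect case: a constrained local dof no longer corresponds to a single (row, column) pair in dof_mask but to the whole set of dofs it resolves to, so an entry has to be kept if *any* combination of resolved row and resolved column couples. The `goto add_this_index` is simply an early exit from that nested search; the following is a minimal standalone sketch of the same predicate, with hypothetical names (ResolvedDof, couples) and plain std containers in place of the deal.II types, not the library implementation:

    #include <cstddef>
    #include <vector>

    // Hypothetical simplification: a dof is either direct (one local
    // index) or resolved into several local indices by a constraint.
    struct ResolvedDof
    {
      std::vector<std::size_t> local_indices; // one entry if unconstrained
    };

    // Keep the sparsity entry if any (row, column) combination of the two
    // resolved dofs couples according to the boolean mask.
    bool couples(const std::vector<std::vector<bool>> &mask,
                 const ResolvedDof &                   row,
                 const ResolvedDof &                   col)
    {
      for (const std::size_t r : row.local_indices)
        for (const std::size_t c : col.local_indices)
          if (mask[r][c])
            return true; // plays the role of 'goto add_this_index'
      return false;
    }

The early return is exactly what the goto buys in the real code: once one coupling combination is found, none of the remaining combinations need to be inspected.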
- else - { - for (size_type j = column_start; j < column_end; ++j) - { - const size_type loc_col = global_rows.local_row(j); - if (loc_row != numbers::invalid_size_type) - { - Assert(loc_row < dof_mask.n_rows(), ExcInternalError()); - if (loc_col != numbers::invalid_size_type) - { - Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); - if (dof_mask(loc_row, loc_col) == true) + for (size_type p = 0; p < global_rows.size(j); ++p) + if (dof_mask(loc_row, global_rows.local_row(j, p)) == true) goto add_this_index; - } + } - for (size_type p = 0; p < global_rows.size(j); ++p) - if (dof_mask(loc_row, global_rows.local_row(j, p)) == true) - goto add_this_index; - } + for (size_type q = 0; q < global_rows.size(i); ++q) + { + if (loc_col != numbers::invalid_size_type) + { + Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); + if (dof_mask(global_rows.local_row(i, q), loc_col) == + true) + goto add_this_index; + } - for (size_type q = 0; q < global_rows.size(i); ++q) - { - if (loc_col != numbers::invalid_size_type) - { - Assert(loc_col < dof_mask.n_cols(), ExcInternalError()); - if (dof_mask(global_rows.local_row(i, q), loc_col) == true) + for (size_type p = 0; p < global_rows.size(j); ++p) + if (dof_mask(global_rows.local_row(i, q), + global_rows.local_row(j, p)) == true) goto add_this_index; - } - - for (size_type p = 0; p < global_rows.size(j); ++p) - if (dof_mask(global_rows.local_row(i, q), - global_rows.local_row(j, p)) == true) - goto add_this_index; - } + } - continue; - // if we got some nontrivial value, append it to the array of - // values. - add_this_index: - *col_ptr++ = global_rows.global_row(j); - } - } - } + continue; + // if we got some nontrivial value, append it to the array of + // values. + add_this_index: + *col_ptr++ = global_rows.global_row(j); + } + } + } - // to make sure that the global matrix remains invertible, we need to do - // something with the diagonal elements. Add the average of the - // absolute values of the local matrix diagonals, so the resulting entry - // will always be positive and furthermore be in the same order of magnitude - // as the other elements of the matrix. If all local matrix diagonals are - // zero, add the l1 norm of the local matrix divided by the matrix size - // to the diagonal of the global matrix. If the entire local matrix is zero, - // add 1 to the diagonal of the global matrix. - // - // note that this also captures the special case that a dof is both - // constrained and fixed (this can happen for hanging nodes in 3d that also - // happen to be on the boundary). in that case, following the program flow - // in distribute_local_to_global, it is realized that when distributing the - // row and column no elements of the matrix are actually touched if all the - // degrees of freedom to which this dof is constrained are also constrained - // (the usual case with hanging nodes in 3d). 
however, in the line below, we - // do actually do something with this dof - template - inline void - set_matrix_diagonals( - const internals::GlobalRowsFromLocal &global_rows, - const std::vector & local_dof_indices, - const FullMatrix & local_matrix, - const AffineConstraints & constraints, - MatrixType & global_matrix, - VectorType & global_vector, - bool use_inhomogeneities_for_rhs) - { - if (global_rows.n_constraints() > 0) - { - number average_diagonal = number(); - for (size_type i = 0; i < local_matrix.m(); ++i) - average_diagonal += std::abs(local_matrix(i, i)); - average_diagonal /= static_cast(local_matrix.m()); + // to make sure that the global matrix remains invertible, we need to do + // something with the diagonal elements. Add the average of the + // absolute values of the local matrix diagonals, so the resulting entry + // will always be positive and furthermore be in the same order of magnitude + // as the other elements of the matrix. If all local matrix diagonals are + // zero, add the l1 norm of the local matrix divided by the matrix size + // to the diagonal of the global matrix. If the entire local matrix is zero, + // add 1 to the diagonal of the global matrix. + // + // note that this also captures the special case that a dof is both + // constrained and fixed (this can happen for hanging nodes in 3d that also + // happen to be on the boundary). in that case, following the program flow + // in distribute_local_to_global, it is realized that when distributing the + // row and column no elements of the matrix are actually touched if all the + // degrees of freedom to which this dof is constrained are also constrained + // (the usual case with hanging nodes in 3d). however, in the line below, we + // do actually do something with this dof + template + inline void + set_matrix_diagonals( + const internal::AffineConstraints::GlobalRowsFromLocal + & global_rows, + const std::vector & local_dof_indices, + const FullMatrix & local_matrix, + const dealii::AffineConstraints &constraints, + MatrixType & global_matrix, + VectorType & global_vector, + bool use_inhomogeneities_for_rhs) + { + if (global_rows.n_constraints() > 0) + { + number average_diagonal = number(); + for (size_type i = 0; i < local_matrix.m(); ++i) + average_diagonal += std::abs(local_matrix(i, i)); + average_diagonal /= static_cast(local_matrix.m()); - // handle the case that all diagonal elements are zero - if (average_diagonal == static_cast(0.)) - { - average_diagonal = static_cast(local_matrix.l1_norm()) / - static_cast(local_matrix.m()); - // if the entire matrix is zero, use 1. for the diagonal - if (average_diagonal == static_cast(0.)) - average_diagonal = static_cast(1.); - } + // handle the case that all diagonal elements are zero + if (average_diagonal == static_cast(0.)) + { + average_diagonal = static_cast(local_matrix.l1_norm()) / + static_cast(local_matrix.m()); + // if the entire matrix is zero, use 1. for the diagonal + if (average_diagonal == static_cast(0.)) + average_diagonal = static_cast(1.); + } - for (size_type i = 0; i < global_rows.n_constraints(); i++) - { - const size_type local_row = global_rows.constraint_origin(i); - const size_type global_row = local_dof_indices[local_row]; - const number new_diagonal = - (std::abs(local_matrix(local_row, local_row)) != 0. ? 
- std::abs(local_matrix(local_row, local_row)) : - average_diagonal); - global_matrix.add(global_row, global_row, new_diagonal); - - // if the use_inhomogeneities_for_rhs flag is set to true, the - // inhomogeneities are used to create the global vector. instead - // of fill in a zero in the ith components with an inhomogeneity, - // we set those to: inhomogeneity(i)*global_matrix (i,i). - if (use_inhomogeneities_for_rhs == true) - global_vector(global_row) += - new_diagonal * constraints.get_inhomogeneity(global_row); - } - } - } + for (size_type i = 0; i < global_rows.n_constraints(); i++) + { + const size_type local_row = global_rows.constraint_origin(i); + const size_type global_row = local_dof_indices[local_row]; + const number new_diagonal = + (std::abs(local_matrix(local_row, local_row)) != 0. ? + std::abs(local_matrix(local_row, local_row)) : + average_diagonal); + global_matrix.add(global_row, global_row, new_diagonal); + + // if the use_inhomogeneities_for_rhs flag is set to true, the + // inhomogeneities are used to create the global vector. instead + // of fill in a zero in the ith components with an inhomogeneity, + // we set those to: inhomogeneity(i)*global_matrix (i,i). + if (use_inhomogeneities_for_rhs == true) + global_vector(global_row) += + new_diagonal * constraints.get_inhomogeneity(global_row); + } + } + } - // similar function as the one above for setting matrix diagonals, but now - // doing that for sparsity patterns when setting them up using - // add_entries_local_to_global. In case we keep constrained entries, add all - // the rows and columns related to the constrained dof, otherwise just add - // the diagonal - template - inline void - set_sparsity_diagonals( - const internals::GlobalRowsFromLocal &global_rows, - const std::vector & local_dof_indices, - const Table<2, bool> & dof_mask, - const bool keep_constrained_entries, - SparsityPatternType & sparsity_pattern) - { - // if we got constraints, need to add the diagonal element and, if the - // user requested so, also the rest of the entries in rows and columns - // that have been left out above - if (global_rows.n_constraints() > 0) - { - for (size_type i = 0; i < global_rows.n_constraints(); i++) - { - const size_type local_row = global_rows.constraint_origin(i); - const size_type global_row = local_dof_indices[local_row]; - if (keep_constrained_entries == true) - { - for (size_type j = 0; j < local_dof_indices.size(); ++j) - { - if (dof_mask(local_row, j) == true) - sparsity_pattern.add(global_row, local_dof_indices[j]); - if (dof_mask(j, local_row) == true) - sparsity_pattern.add(local_dof_indices[j], global_row); - } - } - else - // don't keep constrained entries - just add the diagonal. - sparsity_pattern.add(global_row, global_row); - } - } - } + // similar function as the one above for setting matrix diagonals, but now + // doing that for sparsity patterns when setting them up using + // add_entries_local_to_global. 
In case we keep constrained entries, add all + // the rows and columns related to the constrained dof, otherwise just add + // the diagonal + template + inline void + set_sparsity_diagonals( + const internal::AffineConstraints::GlobalRowsFromLocal + & global_rows, + const std::vector &local_dof_indices, + const Table<2, bool> & dof_mask, + const bool keep_constrained_entries, + SparsityPatternType & sparsity_pattern) + { + // if we got constraints, need to add the diagonal element and, if the + // user requested so, also the rest of the entries in rows and columns + // that have been left out above + if (global_rows.n_constraints() > 0) + { + for (size_type i = 0; i < global_rows.n_constraints(); i++) + { + const size_type local_row = global_rows.constraint_origin(i); + const size_type global_row = local_dof_indices[local_row]; + if (keep_constrained_entries == true) + { + for (size_type j = 0; j < local_dof_indices.size(); ++j) + { + if (dof_mask(local_row, j) == true) + sparsity_pattern.add(global_row, local_dof_indices[j]); + if (dof_mask(j, local_row) == true) + sparsity_pattern.add(local_dof_indices[j], global_row); + } + } + else + // don't keep constrained entries - just add the diagonal. + sparsity_pattern.add(global_row, global_row); + } + } + } -} // end of namespace internals + } // end of namespace AffineConstraints +} // end of namespace internal // Basic idea of setting up a list of // all global dofs: first find all rows and columns @@ -3476,8 +3490,8 @@ namespace internals template void AffineConstraints::make_sorted_row_list( - const std::vector & local_dof_indices, - internals::GlobalRowsFromLocal &global_rows) const + const std::vector & local_dof_indices, + internal::AffineConstraints::GlobalRowsFromLocal &global_rows) const { const size_type n_local_dofs = local_dof_indices.size(); AssertDimension(n_local_dofs, global_rows.size()); @@ -3593,11 +3607,11 @@ template template inline typename ProductType::type AffineConstraints::resolve_vector_entry( - const size_type i, - const internals::GlobalRowsFromLocal &global_rows, - const Vector & local_vector, - const std::vector & local_dof_indices, - const FullMatrix & local_matrix) const + const size_type i, + const internal::AffineConstraints::GlobalRowsFromLocal &global_rows, + const Vector & local_vector, + const std::vector & local_dof_indices, + const FullMatrix &local_matrix) const { const size_type loc_row = global_rows.local_row(i); const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities(); @@ -3667,10 +3681,10 @@ AffineConstraints::distribute_local_to_global( const size_type n_local_dofs = local_dof_indices.size(); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internal::AffineConstraints::AffineConstraintsData< + number>::ScratchDataAccessor scratch_data; - internals::GlobalRowsFromLocal &global_rows = + internal::AffineConstraints::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); make_sorted_row_list(local_dof_indices, global_rows); @@ -3715,21 +3729,21 @@ AffineConstraints::distribute_local_to_global( // cast is uncritical here and only used to avoid compiler // warnings. 
We never access a non-double array number *val_ptr = vals.data(); - internals::resolve_matrix_row(global_rows, - global_rows, - i, - 0, - n_actual_dofs, - local_matrix, - col_ptr, - val_ptr); + internal::AffineConstraints::resolve_matrix_row(global_rows, + global_rows, + i, + 0, + n_actual_dofs, + local_matrix, + col_ptr, + val_ptr); const size_type n_values = col_ptr - cols.data(); if (n_values > 0) global_matrix.add( row, n_values, cols.data(), vals.data(), false, true); } else - internals::resolve_matrix_row( + internal::AffineConstraints::resolve_matrix_row( global_rows, i, 0, n_actual_dofs, local_matrix, sparse_matrix); // now to the vectors. besides doing the same job as we did above (i.e., @@ -3777,13 +3791,14 @@ AffineConstraints::distribute_local_to_global( } } - internals::set_matrix_diagonals(global_rows, - local_dof_indices, - local_matrix, - *this, - global_matrix, - global_vector, - use_inhomogeneities_for_rhs); + internal::AffineConstraints::set_matrix_diagonals( + global_rows, + local_dof_indices, + local_matrix, + *this, + global_matrix, + global_vector, + use_inhomogeneities_for_rhs); } // similar function as above, but now specialized for block matrices. See the @@ -3817,11 +3832,11 @@ AffineConstraints::distribute_local_to_global( } Assert(sorted == true, ExcMatrixNotClosed()); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internal::AffineConstraints::AffineConstraintsData< + number>::ScratchDataAccessor scratch_data; const size_type n_local_dofs = local_dof_indices.size(); - internals::GlobalRowsFromLocal &global_rows = + internal::AffineConstraints::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); @@ -3840,7 +3855,9 @@ AffineConstraints::distribute_local_to_global( const size_type num_blocks = global_matrix.n_block_rows(); std::vector &block_starts = scratch_data->block_starts; block_starts.resize(num_blocks + 1); - internals::make_block_starts(global_matrix, global_rows, block_starts); + internal::AffineConstraints::make_block_starts(global_matrix, + global_rows, + block_starts); std::vector &cols = scratch_data->columns; std::vector & vals = scratch_data->values; @@ -3868,14 +3885,14 @@ AffineConstraints::distribute_local_to_global( { size_type *col_ptr = cols.data(); number * val_ptr = vals.data(); - internals::resolve_matrix_row(global_rows, - global_rows, - i, - start_block, - end_block, - local_matrix, - col_ptr, - val_ptr); + internal::AffineConstraints::resolve_matrix_row(global_rows, + global_rows, + i, + start_block, + end_block, + local_matrix, + col_ptr, + val_ptr); const size_type n_values = col_ptr - cols.data(); if (n_values > 0) global_matrix.block(block, block_col) @@ -3888,12 +3905,13 @@ AffineConstraints::distribute_local_to_global( dynamic_cast *>( &global_matrix.block(block, block_col)); Assert(sparse_matrix != nullptr, ExcInternalError()); - internals::resolve_matrix_row(global_rows, - i, - start_block, - end_block, - local_matrix, - sparse_matrix); + internal::AffineConstraints::resolve_matrix_row( + global_rows, + i, + start_block, + end_block, + local_matrix, + sparse_matrix); } } @@ -3909,13 +3927,14 @@ AffineConstraints::distribute_local_to_global( } } - internals::set_matrix_diagonals(global_rows, - local_dof_indices, - local_matrix, - *this, - global_matrix, - global_vector, - use_inhomogeneities_for_rhs); + internal::AffineConstraints::set_matrix_diagonals( + global_rows, + local_dof_indices, + local_matrix, + *this, + global_matrix, + 
global_vector, + use_inhomogeneities_for_rhs); } @@ -3949,14 +3968,14 @@ AffineConstraints::distribute_local_to_global( const size_type n_local_row_dofs = row_indices.size(); const size_type n_local_col_dofs = col_indices.size(); - typename internals::AffineConstraintsData< + typename internal::AffineConstraints::AffineConstraintsData< typename MatrixType::value_type>::ScratchDataAccessor scratch_data; - internals::GlobalRowsFromLocal &global_rows = + internal::AffineConstraints::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_row_dofs); - internals::GlobalRowsFromLocal &global_cols = + internal::AffineConstraints::GlobalRowsFromLocal &global_cols = scratch_data->global_columns; global_cols.reinit(n_local_col_dofs); @@ -3981,14 +4000,14 @@ AffineConstraints::distribute_local_to_global( // calculate all the data that will be written into the matrix row. size_type *col_ptr = cols.data(); number * val_ptr = vals.data(); - internals::resolve_matrix_row(global_rows, - global_cols, - i, - 0, - n_actual_col_dofs, - local_matrix, - col_ptr, - val_ptr); + internal::AffineConstraints::resolve_matrix_row(global_rows, + global_cols, + i, + 0, + n_actual_col_dofs, + local_matrix, + col_ptr, + val_ptr); const size_type n_values = col_ptr - cols.data(); if (n_values > 0) global_matrix.add(row, n_values, cols.data(), vals.data(), false, true); @@ -4009,8 +4028,8 @@ AffineConstraints::add_entries_local_to_global( ExcNotQuadratic()); const size_type n_local_dofs = local_dof_indices.size(); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internal::AffineConstraints::AffineConstraintsData< + number>::ScratchDataAccessor scratch_data; const bool dof_mask_is_active = (dof_mask.n_rows() == n_local_dofs); if (dof_mask_is_active == true) @@ -4060,7 +4079,7 @@ AffineConstraints::add_entries_local_to_global( // complicated case: we need to filter out some indices. then the function // gets similar to the function for distributing matrix entries, see there // for additional comments. 
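Before the filtered code below, it may help to state what the unfiltered path above already did in its simplest form: every local dof is first replaced by the set of global dofs it resolves to under the constraints, and the sparsity pattern then receives the full cross coupling of that resolved index set. A compressed sketch under that assumption, with a hypothetical name (add_cell_couplings) and a plain std::map standing in for the AffineConstraints storage; the real code reuses scratch arrays and keeps the indices sorted:

    #include <cstddef>
    #include <map>
    #include <vector>

    using size_type = std::size_t;

    // Expand each local dof into the global dofs it resolves to (itself
    // if unconstrained, otherwise the dofs of its constraint line), then
    // couple every resolved dof with every other resolved dof.
    template <typename SparsityPatternLike>
    void add_cell_couplings(
      const std::vector<size_type> &                     local_dof_indices,
      const std::map<size_type, std::vector<size_type>> &constraint_lines,
      SparsityPatternLike &                              sparsity_pattern)
    {
      std::vector<size_type> actual_dof_indices;
      for (const size_type dof : local_dof_indices)
        {
          const auto line = constraint_lines.find(dof);
          if (line == constraint_lines.end())
            actual_dof_indices.push_back(dof); // unconstrained: keep as is
          else
            for (const size_type resolved : line->second)
              actual_dof_indices.push_back(resolved); // constrained: expand
        }

      // full tensor product of the resolved index set
      for (const size_type row : actual_dof_indices)
        sparsity_pattern.add_entries(row,
                                     actual_dof_indices.begin(),
                                     actual_dof_indices.end());
    }

The dof_mask variant that follows refines this by dropping (row, column) pairs whose shape functions do not couple at all.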
- internals::GlobalRowsFromLocal &global_rows = + internal::AffineConstraints::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); make_sorted_row_list(local_dof_indices, global_rows); @@ -4075,7 +4094,7 @@ AffineConstraints::add_entries_local_to_global( { std::vector::iterator col_ptr = cols.begin(); const size_type row = global_rows.global_row(i); - internals::resolve_matrix_row( + internal::AffineConstraints::resolve_matrix_row( global_rows, i, 0, n_actual_dofs, dof_mask, col_ptr); // finally, write all the information that accumulated under the given @@ -4083,11 +4102,11 @@ AffineConstraints::add_entries_local_to_global( if (col_ptr != cols.begin()) sparsity_pattern.add_entries(row, cols.begin(), col_ptr, true); } - internals::set_sparsity_diagonals(global_rows, - local_dof_indices, - dof_mask, - keep_constrained_entries, - sparsity_pattern); + internal::AffineConstraints::set_sparsity_diagonals(global_rows, + local_dof_indices, + dof_mask, + keep_constrained_entries, + sparsity_pattern); } template @@ -4165,8 +4184,8 @@ AffineConstraints::add_entries_local_to_global( const size_type n_local_dofs = local_dof_indices.size(); const size_type num_blocks = sparsity_pattern.n_block_rows(); - typename internals::AffineConstraintsData::ScratchDataAccessor - scratch_data; + typename internal::AffineConstraints::AffineConstraintsData< + number>::ScratchDataAccessor scratch_data; const bool dof_mask_is_active = (dof_mask.n_rows() == n_local_dofs); if (dof_mask_is_active == true) @@ -4184,9 +4203,9 @@ AffineConstraints::add_entries_local_to_global( // additional construct that also takes care of block indices. std::vector &block_starts = scratch_data->block_starts; block_starts.resize(num_blocks + 1); - internals::make_block_starts(sparsity_pattern, - actual_dof_indices, - block_starts); + internal::AffineConstraints::make_block_starts(sparsity_pattern, + actual_dof_indices, + block_starts); for (size_type block = 0; block < num_blocks; ++block) { @@ -4232,7 +4251,7 @@ AffineConstraints::add_entries_local_to_global( // difficult case with dof_mask, similar to the distribute_local_to_global // function for block matrices - internals::GlobalRowsFromLocal &global_rows = + internal::AffineConstraints::GlobalRowsFromLocal &global_rows = scratch_data->global_rows; global_rows.reinit(n_local_dofs); make_sorted_row_list(local_dof_indices, global_rows); @@ -4241,7 +4260,9 @@ AffineConstraints::add_entries_local_to_global( // additional construct that also takes care of block indices. 
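The call below only has to locate, inside the already sorted list of global rows, the position where each block of the block sparsity pattern begins; with sorted input that reduces to one binary search per block. A standalone sketch of the idea, with a hypothetical signature (the deal.II helper additionally rewrites the indices to within-block numbering):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    using size_type = std::size_t;

    // Given sorted global row indices and the first global index owned by
    // each block (plus a past-the-end sentinel), return the position in
    // 'rows' where each block starts.
    std::vector<size_type>
    make_block_starts_sketch(const std::vector<size_type> &rows,
                             const std::vector<size_type> &block_begin_indices)
    {
      std::vector<size_type> block_starts;
      block_starts.reserve(block_begin_indices.size());
      for (const size_type first_index : block_begin_indices)
        block_starts.push_back(static_cast<size_type>(
          std::lower_bound(rows.begin(), rows.end(), first_index) -
          rows.begin()));
      return block_starts;
    }

Sorting the row list once up front (in make_sorted_row_list) is what makes this per-block lookup cheap for every cell.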
std::vector &block_starts = scratch_data->block_starts; block_starts.resize(num_blocks + 1); - internals::make_block_starts(sparsity_pattern, global_rows, block_starts); + internal::AffineConstraints::make_block_starts(sparsity_pattern, + global_rows, + block_starts); std::vector &cols = scratch_data->columns; cols.resize(n_actual_dofs); @@ -4259,7 +4280,7 @@ AffineConstraints::add_entries_local_to_global( const size_type begin_block = block_starts[block_col], end_block = block_starts[block_col + 1]; std::vector::iterator col_ptr = cols.begin(); - internals::resolve_matrix_row( + internal::AffineConstraints::resolve_matrix_row( global_rows, i, begin_block, end_block, dof_mask, col_ptr); sparsity_pattern.block(block, block_col) @@ -4268,11 +4289,11 @@ AffineConstraints::add_entries_local_to_global( } } - internals::set_sparsity_diagonals(global_rows, - local_dof_indices, - dof_mask, - keep_constrained_entries, - sparsity_pattern); + internal::AffineConstraints::set_sparsity_diagonals(global_rows, + local_dof_indices, + dof_mask, + keep_constrained_entries, + sparsity_pattern); } DEAL_II_NAMESPACE_CLOSE diff --git a/include/deal.II/matrix_free/dof_info.h b/include/deal.II/matrix_free/dof_info.h index d684ab304b..d0150d06d8 100644 --- a/include/deal.II/matrix_free/dof_info.h +++ b/include/deal.II/matrix_free/dof_info.h @@ -174,7 +174,7 @@ namespace internal read_dof_indices( const std::vector &local_indices, const std::vector & lexicographic_inv, - const AffineConstraints & constraints, + const dealii::AffineConstraints & constraints, const unsigned int cell_number, ConstraintValues & constraint_values, bool & cell_at_boundary); diff --git a/include/deal.II/matrix_free/dof_info.templates.h b/include/deal.II/matrix_free/dof_info.templates.h index 1599ea1611..f596f2e8b2 100644 --- a/include/deal.II/matrix_free/dof_info.templates.h +++ b/include/deal.II/matrix_free/dof_info.templates.h @@ -214,7 +214,7 @@ namespace internal DoFInfo::read_dof_indices( const std::vector &local_indices, const std::vector & lexicographic_inv, - const AffineConstraints & constraints, + const dealii::AffineConstraints & constraints, const unsigned int cell_number, ConstraintValues & constraint_values, bool & cell_at_subdomain_boundary) diff --git a/source/lac/affine_constraints.cc b/source/lac/affine_constraints.cc index aa8055fe6a..767ff5fc9f 100644 --- a/source/lac/affine_constraints.cc +++ b/source/lac/affine_constraints.cc @@ -137,8 +137,10 @@ INSTANTIATE_DLTG_MATRIX(TrilinosWrappers::BlockSparseMatrix); * place). 
 */
-namespace internals
+namespace internal
 {
+  namespace AffineConstraints
+  {
 #define SCRATCH_INITIALIZER(number, Name)                                     \
   AffineConstraintsData<number>::ScratchData scratch_data_initializer_##Name; \
   template <>                                                                 \
   AffineConstraintsData<number>::scratch_data(                                \
     scratch_data_initializer_##Name)

-  SCRATCH_INITIALIZER(double, d);
-  SCRATCH_INITIALIZER(float, f);
+    SCRATCH_INITIALIZER(double, d);
+    SCRATCH_INITIALIZER(float, f);

 #ifdef DEAL_II_WITH_COMPLEX_VALUES
-  SCRATCH_INITIALIZER(std::complex<double>, cd);
-  SCRATCH_INITIALIZER(std::complex<float>, cf);
+    SCRATCH_INITIALIZER(std::complex<double>, cd);
+    SCRATCH_INITIALIZER(std::complex<float>, cf);
 #endif
 #undef SCRATCH_INITIALIZER
-} // namespace internals
+  } // namespace AffineConstraints
+} // namespace internal

 DEAL_II_NAMESPACE_CLOSE

diff --git a/source/matrix_free/dof_info.cc b/source/matrix_free/dof_info.cc
index 105b44d590..3b7e462fa1 100644
--- a/source/matrix_free/dof_info.cc
+++ b/source/matrix_free/dof_info.cc
@@ -35,7 +35,7 @@ namespace internal
   DoFInfo::read_dof_indices<double>(
     const std::vector<types::global_dof_index> &,
     const std::vector<unsigned int> &,
-    const AffineConstraints<double> &,
+    const dealii::AffineConstraints<double> &,
     const unsigned int,
     ConstraintValues<double> &,
     bool &);
@@ -44,7 +44,7 @@ namespace internal
   DoFInfo::read_dof_indices<float>(
     const std::vector<types::global_dof_index> &,
     const std::vector<unsigned int> &,
-    const AffineConstraints<float> &,
+    const dealii::AffineConstraints<float> &,
     const unsigned int,
     ConstraintValues<double> &,
     bool &);
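The diagonal fix-up documented earlier in set_matrix_diagonals can be stated compactly: rows and columns of constrained dofs are emptied during distribution, so a positive value of roughly the local matrix's own magnitude is placed on the diagonal to keep the global matrix invertible. The following mirrors just that scale computation from the patch as a hypothetical free function (diagonal_scale) for any FullMatrix-like type with m(), l1_norm() and operator()(i, j); it is a sketch, not part of the library:

    #include <cmath>
    #include <cstddef>

    // Pick a positive value of the same order of magnitude as the local
    // matrix: the mean absolute diagonal if nonzero, otherwise the l1
    // norm divided by the matrix size, otherwise 1.
    template <typename number, typename LocalMatrixLike>
    number diagonal_scale(const LocalMatrixLike &local_matrix)
    {
      number average = number();
      for (std::size_t i = 0; i < local_matrix.m(); ++i)
        average += std::abs(local_matrix(i, i));
      average /= static_cast<number>(local_matrix.m());

      if (average == static_cast<number>(0.)) // all diagonal entries zero
        average = static_cast<number>(local_matrix.l1_norm()) /
                  static_cast<number>(local_matrix.m());
      if (average == static_cast<number>(0.)) // entire matrix is zero
        average = static_cast<number>(1.);
      return average;
    }

A dof whose own diagonal entry is nonzero keeps the absolute value of that entry instead, so the cascade above only kicks in for rows that the elimination has emptied completely.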