}
// Some helper definitions for the local_to_global functions.
-namespace internals
+namespace internal
{
- using size_type = types::global_dof_index;
-
- // this struct contains all the information we need to store about each of
- // the global entries (global_row): are they obtained directly by some local
- // entry (local_row) or some constraints (constraint_position). This is not
- // directly used in the user code, but accessed via the GlobalRowsFromLocal.
- //
- // The actions performed here correspond to reshaping the constraint
- // information from global degrees of freedom to local ones (i.e.,
- // cell-related DoFs), and also transforming the constraint information from
- // compressed row storage (each local dof that is constrained has a list of
- // constraint entries associated to it) into compressed column storage based
- // on the cell-related DoFs (we have a list of global degrees of freedom,
- // and to each we have a list of local rows where the entries come from). To
- // increase the speed, we additionally store whether an entry is generated
- // directly from the local degrees of freedom or whether it comes from a
- // constraint.
- struct Distributing
- {
- Distributing(const size_type global_row = numbers::invalid_size_type,
- const size_type local_row = numbers::invalid_size_type);
- Distributing(const Distributing &in);
- Distributing &
- operator=(const Distributing &in);
- bool
- operator<(const Distributing &in) const
- {
- return global_row < in.global_row;
- }
-
- size_type global_row;
- size_type local_row;
- mutable size_type constraint_position;
- };
-
- inline Distributing::Distributing(const size_type global_row,
- const size_type local_row)
- : global_row(global_row)
- , local_row(local_row)
- , constraint_position(numbers::invalid_size_type)
- {}
-
- inline Distributing::Distributing(const Distributing &in)
- : constraint_position(numbers::invalid_size_type)
- {
- *this = (in);
- }
-
- inline Distributing &
- Distributing::operator=(const Distributing &in)
+ namespace AffineConstraints
{
- global_row = in.global_row;
- local_row = in.local_row;
- // the constraints pointer should not contain any data here.
- Assert(constraint_position == numbers::invalid_size_type,
- ExcInternalError());
+ using size_type = types::global_dof_index;
- if (in.constraint_position != numbers::invalid_size_type)
+ // this struct contains all the information we need to store about each of
+ // the global entries (global_row): are they obtained directly from some
+ // local entry (local_row) or via some constraint (constraint_position)?
+ // This is not directly used in the user code, but accessed via the
+ // GlobalRowsFromLocal.
+ //
+ // The actions performed here correspond to reshaping the constraint
+ // information from global degrees of freedom to local ones (i.e.,
+ // cell-related DoFs), and also transforming the constraint information from
+ // compressed row storage (each local dof that is constrained has a list of
+ // constraint entries associated with it) into compressed column storage based
+ // on the cell-related DoFs (we have a list of global degrees of freedom,
+ // and to each we have a list of local rows where the entries come from). To
+ // increase the speed, we additionally store whether an entry is generated
+ // directly from the local degrees of freedom or whether it comes from a
+ // constraint.
+ struct Distributing
+ {
+ Distributing(const size_type global_row = numbers::invalid_size_type,
+ const size_type local_row = numbers::invalid_size_type);
+ Distributing(const Distributing &in);
+ Distributing &
+ operator=(const Distributing &in);
+ bool
+ operator<(const Distributing &in) const
{
- constraint_position = in.constraint_position;
- in.constraint_position = numbers::invalid_size_type;
+ return global_row < in.global_row;
}
- return *this;
- }
- // this is a cache for constraints that are encountered on a local level.
- // The functionality is similar to
- // std::vector<std::vector<std::pair<uint,double> > >, but tuned so that
- // frequent memory allocation for each entry is avoided. The data is put
- // into a std::vector<std::pair<uint,double> > and the row length is kept
- // fixed at row_length. Both the number of rows and the row length can
- // change is this structure is filled. In that case, the data is
- // rearranged. This is not directly used in the user code, but accessed
- // via the GlobalRowsFromLocal.
- template <typename number>
- struct DataCache
- {
- DataCache()
- : row_length(8)
+ size_type global_row;
+ size_type local_row;
+ mutable size_type constraint_position;
+ };
+
+ inline Distributing::Distributing(const size_type global_row,
+ const size_type local_row)
+ : global_row(global_row)
+ , local_row(local_row)
+ , constraint_position(numbers::invalid_size_type)
{}
- void
- reinit()
+ inline Distributing::Distributing(const Distributing &in)
+ : constraint_position(numbers::invalid_size_type)
{
- individual_size.resize(0);
- data.resize(0);
+ *this = in;
}
- size_type
- insert_new_index(const std::pair<size_type, number> &pair)
+ inline Distributing &
+ Distributing::operator=(const Distributing &in)
{
- Assert(row_length > 0, ExcInternalError());
- const unsigned int index = individual_size.size();
- individual_size.push_back(1);
- data.resize(individual_size.size() * row_length);
- data[index * row_length] = pair;
- individual_size[index] = 1;
- return index;
- }
+ global_row = in.global_row;
+ local_row = in.local_row;
+ // the constraints pointer should not contain any data here.
+ Assert(constraint_position == numbers::invalid_size_type,
+ ExcInternalError());
- void
- append_index(const size_type index,
- const std::pair<size_type, number> &pair)
- {
- AssertIndexRange(index, individual_size.size());
- const size_type my_length = individual_size[index];
- if (my_length == row_length)
+ if (in.constraint_position != numbers::invalid_size_type)
{
- AssertDimension(data.size(), individual_size.size() * row_length);
- // no space left in this row, need to double row_length and
- // rearrange the data items. Move all items to the right except the
- // first one, starting at the back. Since individual_size contains
- // at least one element when we get here, subtracting 1 works fine.
- data.resize(2 * data.size());
- for (size_type i = individual_size.size() - 1; i > 0; --i)
- {
- const auto ptr = data.data();
- std::move_backward(ptr + i * row_length,
- ptr + i * row_length + individual_size[i],
- ptr + i * 2 * row_length + individual_size[i]);
- }
- row_length *= 2;
+ constraint_position = in.constraint_position;
+ in.constraint_position = numbers::invalid_size_type;
}
- data[index * row_length + my_length] = pair;
- individual_size[index] = my_length + 1;
- }
-
- size_type
- get_size(const size_type index) const
- {
- return individual_size[index];
+ return *this;
}
- const std::pair<size_type, number> *
- get_entry(const size_type index) const
- {
- return &data[index * row_length];
- }
-
- size_type row_length;
-
- std::vector<std::pair<size_type, number>> data;
-
- std::vector<size_type> individual_size;
- };
-
- // collects all the global rows from a local contribution (cell) and their
- // origin (direct/constraint). this is basically a vector consisting of
- // "Distributing" structs using access via the DataCache. Provides some
- // specialized sort and insert functions.
- //
- // in case there are no constraints, this is basically a list of pairs
- // <uint,unit> with the first index being the global index and the second
- // index the local index. The list is sorted with respect to the global
- // index.
- //
- // in case there are constraints, a global dof might get a contribution also
- // because it gets data from a constrained dof. This means that a global dof
- // might also have indirect contributions from a local dof via a constraint,
- // besides the direct ones.
- //
- // The actions performed here correspond to reshaping the constraint
- // information from global degrees of freedom to local ones (i.e.,
- // cell-related DoFs), and also transforming the constraint information from
- // compressed row storage (each local dof that is constrained has a list of
- // constraint entries associated to it) into compressed column storage based
- // on the cell-related DoFs (we have a list of global degrees of freedom,
- // and to each we have a list of local rows where the entries come from). To
- // increase the speed, we additionally store whether an entry is generated
- // directly from the local degrees of freedom or whether it comes from a
- // constraint.
- template <typename number>
- class GlobalRowsFromLocal
- {
- public:
- GlobalRowsFromLocal()
- : n_active_rows(0)
- , n_inhomogeneous_rows(0)
- {}
-
- void
- reinit(const size_type n_local_rows)
- {
- total_row_indices.resize(n_local_rows);
- for (unsigned int i = 0; i < n_local_rows; ++i)
- total_row_indices[i].constraint_position = numbers::invalid_size_type;
- n_active_rows = n_local_rows;
- n_inhomogeneous_rows = 0;
- data_cache.reinit();
- }
-
- // implemented below
- void
- insert_index(const size_type global_row,
- const size_type local_row,
- const number constraint_value);
- void
- sort();
-
- // Print object for debugging purpose
- void
- print(std::ostream &os)
+ // this is a cache for constraints that are encountered on a local level.
+ // The functionality is similar to
+ // std::vector<std::vector<std::pair<uint,double> > >, but tuned so that
+ // frequent memory allocation for each entry is avoided. The data is put
+ // into a std::vector<std::pair<uint,double> > and the row length is kept
+ // fixed at row_length. Both the number of rows and the row length can
+ // change as this structure is filled. In that case, the data is
+ // rearranged. This is not directly used in the user code, but accessed
+ // via the GlobalRowsFromLocal.
+ template <typename number>
+ struct DataCache
{
- os << "Active rows " << n_active_rows << std::endl
- << "Constr rows " << n_constraints() << std::endl
- << "Inhom rows " << n_inhomogeneous_rows << std::endl
- << "Local: ";
- for (const auto &total_row_index : total_row_indices)
- os << ' ' << std::setw(4) << total_row_index.local_row;
- os << std::endl << "Global:";
- for (const auto &total_row_index : total_row_indices)
- os << ' ' << std::setw(4) << total_row_index.global_row;
- os << std::endl << "ConPos:";
- for (const auto &total_row_index : total_row_indices)
- os << ' ' << std::setw(4) << total_row_index.constraint_position;
- os << std::endl;
- }
-
- // return all kind of information on the constraints
+ DataCache()
+ : row_length(8)
+ {}
- // returns the number of global indices in the struct
- size_type
- size() const
- {
- return n_active_rows;
- }
+ void
+ reinit()
+ {
+ individual_size.resize(0);
+ data.resize(0);
+ }
- // returns the number of constraints that are associated to the
- // counter_index-th entry in the list
- size_type
- size(const size_type counter_index) const
- {
- return (total_row_indices[counter_index].constraint_position ==
- numbers::invalid_size_type ?
- 0 :
- data_cache.get_size(
- total_row_indices[counter_index].constraint_position));
- }
+ size_type
+ insert_new_index(const std::pair<size_type, number> &pair)
+ {
+ Assert(row_length > 0, ExcInternalError());
+ const unsigned int index = individual_size.size();
+ individual_size.push_back(1);
+ data.resize(individual_size.size() * row_length);
+ data[index * row_length] = pair;
+ individual_size[index] = 1;
+ return index;
+ }
- // returns the global row of the counter_index-th entry in the list
- size_type
- global_row(const size_type counter_index) const
- {
- return total_row_indices[counter_index].global_row;
- }
+ void
+ append_index(const size_type index,
+ const std::pair<size_type, number> &pair)
+ {
+ AssertIndexRange(index, individual_size.size());
+ const size_type my_length = individual_size[index];
+ if (my_length == row_length)
+ {
+ AssertDimension(data.size(), individual_size.size() * row_length);
+ // no space left in this row, need to double row_length and
+ // rearrange the data items. Move all items to the right except the
+ // first one, starting at the back. Since individual_size contains
+ // at least one element when we get here, subtracting 1 works fine.
+ data.resize(2 * data.size());
+ for (size_type i = individual_size.size() - 1; i > 0; --i)
+ {
+ const auto ptr = data.data();
+ std::move_backward(ptr + i * row_length,
+ ptr + i * row_length + individual_size[i],
+ ptr + i * 2 * row_length +
+ individual_size[i]);
+ }
+ row_length *= 2;
+ }
+ data[index * row_length + my_length] = pair;
+ individual_size[index] = my_length + 1;
+ }
- // returns the global row of the counter_index-th entry in the list
- size_type &
- global_row(const size_type counter_index)
- {
- return total_row_indices[counter_index].global_row;
- }
+ size_type
+ get_size(const size_type index) const
+ {
+ return individual_size[index];
+ }
- // returns the local row in the cell matrix associated with the
- // counter_index-th entry in the list. Returns invalid_size_type for
- // constrained rows
- size_type
- local_row(const size_type counter_index) const
- {
- return total_row_indices[counter_index].local_row;
- }
+ const std::pair<size_type, number> *
+ get_entry(const size_type index) const
+ {
+ return &data[index * row_length];
+ }
- // writable index
- size_type &
- local_row(const size_type counter_index)
- {
- return total_row_indices[counter_index].local_row;
- }
+ size_type row_length;
- // returns the local row in the cell matrix associated with the
- // counter_index-th entry in the list in the index_in_constraint-th
- // position of constraints
- size_type
- local_row(const size_type counter_index,
- const size_type index_in_constraint) const
- {
- return (data_cache.get_entry(total_row_indices[counter_index]
- .constraint_position)[index_in_constraint])
- .first;
- }
+ std::vector<std::pair<size_type, number>> data;
- // returns the value of the constraint in the counter_index-th entry in
- // the list in the index_in_constraint-th position of constraints
- number
- constraint_value(const size_type counter_index,
- const size_type index_in_constraint) const
- {
- return (data_cache.get_entry(total_row_indices[counter_index]
- .constraint_position)[index_in_constraint])
- .second;
- }
+ std::vector<size_type> individual_size;
+ };
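+
+ // Usage sketch (illustrative values): each logical row occupies a slot of
+ // row_length entries inside the flat 'data' array, and individual_size[i]
+ // records how many of those entries are in use:
+ //
+ //   DataCache<double> cache;
+ //   const size_type r = cache.insert_new_index({5, 0.5});
+ //   cache.append_index(r, {7, 0.25});
+ //   // cache.get_size(r) == 2 and cache.get_entry(r)[1] == {7, 0.25}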
- // returns whether there is one row with indirect contributions (i.e.,
- // there has been at least one constraint with non-trivial ConstraintLine)
- bool
- have_indirect_rows() const
+ // collects all the global rows from a local contribution (cell) and their
+ // origin (direct/constraint). this is basically a vector of
+ // "Distributing" structs whose constraint data is accessed via the
+ // DataCache. Provides some specialized sort and insert functions.
+ //
+ // in case there are no constraints, this is basically a list of pairs
+ // <uint,uint> with the first index being the global index and the second
+ // index the local index. The list is sorted with respect to the global
+ // index.
+ //
+ // in case there are constraints, a global dof might get a contribution also
+ // because it gets data from a constrained dof. This means that a global dof
+ // might also have indirect contributions from a local dof via a constraint,
+ // besides the direct ones.
+ //
+ // The actions performed here correspond to reshaping the constraint
+ // information from global degrees of freedom to local ones (i.e.,
+ // cell-related DoFs), and also transforming the constraint information from
+ // compressed row storage (each local dof that is constrained has a list of
+ // constraint entries associated with it) into compressed column storage based
+ // on the cell-related DoFs (we have a list of global degrees of freedom,
+ // and to each we have a list of local rows where the entries come from). To
+ // increase the speed, we additionally store whether an entry is generated
+ // directly from the local degrees of freedom or whether it comes from a
+ // constraint.
+ template <typename number>
+ class GlobalRowsFromLocal
{
- return data_cache.individual_size.empty() == false;
- }
+ public:
+ GlobalRowsFromLocal()
+ : n_active_rows(0)
+ , n_inhomogeneous_rows(0)
+ {}
- // append an entry that is constrained. This means that there is one less
- // nontrivial row
- void
- insert_constraint(const size_type constrained_local_dof)
- {
- --n_active_rows;
- total_row_indices[n_active_rows].local_row = constrained_local_dof;
- total_row_indices[n_active_rows].global_row = numbers::invalid_size_type;
- }
+ void
+ reinit(const size_type n_local_rows)
+ {
+ total_row_indices.resize(n_local_rows);
+ for (unsigned int i = 0; i < n_local_rows; ++i)
+ total_row_indices[i].constraint_position = numbers::invalid_size_type;
+ n_active_rows = n_local_rows;
+ n_inhomogeneous_rows = 0;
+ data_cache.reinit();
+ }
- // returns the number of constrained dofs in the structure. Constrained
- // dofs do not contribute directly to the matrix, but are needed in order
- // to set matrix diagonals and resolve inhomogeneities
- size_type
- n_constraints() const
- {
- return total_row_indices.size() - n_active_rows;
- }
+ // implemented below
+ void
+ insert_index(const size_type global_row,
+ const size_type local_row,
+ const number constraint_value);
+ void
+ sort();
+
+ // Print the object for debugging purposes
+ void
+ print(std::ostream &os)
+ {
+ os << "Active rows " << n_active_rows << std::endl
+ << "Constr rows " << n_constraints() << std::endl
+ << "Inhom rows " << n_inhomogeneous_rows << std::endl
+ << "Local: ";
+ for (const auto &total_row_index : total_row_indices)
+ os << ' ' << std::setw(4) << total_row_index.local_row;
+ os << std::endl << "Global:";
+ for (const auto &total_row_index : total_row_indices)
+ os << ' ' << std::setw(4) << total_row_index.global_row;
+ os << std::endl << "ConPos:";
+ for (const auto &total_row_index : total_row_indices)
+ os << ' ' << std::setw(4) << total_row_index.constraint_position;
+ os << std::endl;
+ }
- // returns the number of constrained dofs in the structure that have an
- // inhomogeneity
- size_type
- n_inhomogeneities() const
- {
- return n_inhomogeneous_rows;
- }
+ // returns all kinds of information about the constraints
- // tells the structure that the ith constraint is
- // inhomogeneous. inhomogeneous constraints contribute to right hand
- // sides, so to have fast access to them, put them before homogeneous
- // constraints
- void
- set_ith_constraint_inhomogeneous(const size_type i)
- {
- Assert(i >= n_inhomogeneous_rows, ExcInternalError());
- std::swap(total_row_indices[n_active_rows + i],
- total_row_indices[n_active_rows + n_inhomogeneous_rows]);
- n_inhomogeneous_rows++;
- }
+ // returns the number of global indices in the struct
+ size_type
+ size() const
+ {
+ return n_active_rows;
+ }
- // the local row where constraint number i was detected, to find that row
- // easily when the GlobalRowsToLocal has been set up
- size_type
- constraint_origin(size_type i) const
- {
- return total_row_indices[n_active_rows + i].local_row;
- }
+ // returns the number of constraints that are associated with the
+ // counter_index-th entry in the list
+ size_type
+ size(const size_type counter_index) const
+ {
+ return (total_row_indices[counter_index].constraint_position ==
+ numbers::invalid_size_type ?
+ 0 :
+ data_cache.get_size(
+ total_row_indices[counter_index].constraint_position));
+ }
- // a vector that contains all the global ids and the corresponding local
- // ids as well as a pointer to that data where we store how to resolve
- // constraints.
- std::vector<Distributing> total_row_indices;
+ // returns the global row of the counter_index-th entry in the list
+ size_type
+ global_row(const size_type counter_index) const
+ {
+ return total_row_indices[counter_index].global_row;
+ }
- private:
- // holds the actual data from the constraints
- DataCache<number> data_cache;
+ // returns the global row of the counter_index-th entry in the list
+ size_type &
+ global_row(const size_type counter_index)
+ {
+ return total_row_indices[counter_index].global_row;
+ }
- // how many rows there are, constraints disregarded
- size_type n_active_rows;
+ // returns the local row in the cell matrix associated with the
+ // counter_index-th entry in the list. Returns invalid_size_type for
+ // constrained rows
+ size_type
+ local_row(const size_type counter_index) const
+ {
+ return total_row_indices[counter_index].local_row;
+ }
- // the number of rows with inhomogeneous constraints
- size_type n_inhomogeneous_rows;
- };
+ // writable index
+ size_type &
+ local_row(const size_type counter_index)
+ {
+ return total_row_indices[counter_index].local_row;
+ }
- // a function that appends an additional row to the list of values, or
- // appends a value to an already existing row. Similar functionality as for
- // std::map<size_type,Distributing>, but here done for a
- // std::vector<Distributing>, much faster for short lists as we have them
- // here
- template <typename number>
- inline void
- GlobalRowsFromLocal<number>::insert_index(const size_type global_row,
- const size_type local_row,
- const number constraint_value)
- {
- using index_iterator = std::vector<Distributing>::iterator;
- index_iterator pos, pos1;
- Distributing row_value(global_row);
- std::pair<size_type, number> constraint(local_row, constraint_value);
-
- // check whether the list was really sorted before entering here
- for (size_type i = 1; i < n_active_rows; ++i)
- Assert(total_row_indices[i - 1] < total_row_indices[i],
- ExcInternalError());
+ // returns the local row in the cell matrix associated with the
+ // counter_index-th entry in the list in the index_in_constraint-th
+ // position of constraints
+ size_type
+ local_row(const size_type counter_index,
+ const size_type index_in_constraint) const
+ {
+ return (data_cache.get_entry(
+ total_row_indices[counter_index]
+ .constraint_position)[index_in_constraint])
+ .first;
+ }
- pos = Utilities::lower_bound(total_row_indices.begin(),
- total_row_indices.begin() + n_active_rows,
- row_value);
- if (pos->global_row == global_row)
- pos1 = pos;
- else
+ // returns the value of the constraint in the counter_index-th entry in
+ // the list in the index_in_constraint-th position of constraints
+ number
+ constraint_value(const size_type counter_index,
+ const size_type index_in_constraint) const
{
- pos1 = total_row_indices.insert(pos, row_value);
- ++n_active_rows;
+ return (data_cache.get_entry(
+ total_row_indices[counter_index]
+ .constraint_position)[index_in_constraint])
+ .second;
}
- if (pos1->constraint_position == numbers::invalid_size_type)
- pos1->constraint_position = data_cache.insert_new_index(constraint);
- else
- data_cache.append_index(pos1->constraint_position, constraint);
- }
+ // returns whether there is at least one row with indirect contributions
+ // (i.e., there has been at least one constraint with a non-trivial
+ // ConstraintLine)
+ bool
+ have_indirect_rows() const
+ {
+ return data_cache.individual_size.empty() == false;
+ }
- // this sort algorithm sorts std::vector<Distributing>, but does not take
- // the constraints into account. this means that in case that constraints
- // are already inserted, this function does not work as expected. Use
- // shellsort, which is very fast in case the indices are already sorted
- // (which is the usual case with DG elements), and not too slow in other
- // cases
- template <typename number>
- inline void
- GlobalRowsFromLocal<number>::sort()
- {
- size_type i, j, j2, temp, templ, istep;
- size_type step;
+ // append an entry that is constrained. This means that there is one less
+ // nontrivial row
+ void
+ insert_constraint(const size_type constrained_local_dof)
+ {
+ --n_active_rows;
+ total_row_indices[n_active_rows].local_row = constrained_local_dof;
+ total_row_indices[n_active_rows].global_row =
+ numbers::invalid_size_type;
+ }
- // check whether the constraints are really empty.
- const size_type length = size();
+ // returns the number of constrained dofs in the structure. Constrained
+ // dofs do not contribute directly to the matrix, but are needed in order
+ // to set matrix diagonals and resolve inhomogeneities
+ size_type
+ n_constraints() const
+ {
+ return total_row_indices.size() - n_active_rows;
+ }
- // make sure that we are in the range of the vector
- AssertIndexRange(length, total_row_indices.size() + 1);
- for (size_type i = 0; i < length; ++i)
- Assert(total_row_indices[i].constraint_position ==
- numbers::invalid_size_type,
- ExcInternalError());
+ // returns the number of constrained dofs in the structure that have an
+ // inhomogeneity
+ size_type
+ n_inhomogeneities() const
+ {
+ return n_inhomogeneous_rows;
+ }
- step = length / 2;
- while (step > 0)
+ // tells the structure that the ith constraint is inhomogeneous.
+ // inhomogeneous constraints contribute to right-hand sides, so to have
+ // fast access to them, they are put before the homogeneous constraints
+ void
+ set_ith_constraint_inhomogeneous(const size_type i)
{
- for (i = step; i < length; i++)
- {
- istep = step;
- j = i;
- j2 = j - istep;
- temp = total_row_indices[i].global_row;
- templ = total_row_indices[i].local_row;
- if (total_row_indices[j2].global_row > temp)
- {
- while ((j >= istep) &&
- (total_row_indices[j2].global_row > temp))
- {
- total_row_indices[j].global_row =
- total_row_indices[j2].global_row;
- total_row_indices[j].local_row =
- total_row_indices[j2].local_row;
- j = j2;
- j2 -= istep;
- }
- total_row_indices[j].global_row = temp;
- total_row_indices[j].local_row = templ;
- }
- }
- step = step >> 1;
+ Assert(i >= n_inhomogeneous_rows, ExcInternalError());
+ std::swap(total_row_indices[n_active_rows + i],
+ total_row_indices[n_active_rows + n_inhomogeneous_rows]);
+ n_inhomogeneous_rows++;
}
- }
- /**
- * Scratch data that is used during calls to distribute_local_to_global and
- * add_entries_local_to_global. In order to avoid frequent memory
- * allocation, we keep the data alive from one call to the next in a static
- * variable. Since we want to allow for different number types in matrices,
- * this is a template.
- *
- * Since each thread gets its private version of scratch data out of the
- * ThreadLocalStorage, no conflicting access can occur. For this to be
- * valid, we need to make sure that no call within
- * distribute_local_to_global is made that by itself can spawn tasks.
- * Otherwise, we might end up in a situation where several threads fight for
- * the data.
- *
- * Access to the scratch data is only through the accessor class which
- * handles the access as well as marking the data as used.
- */
- template <typename number>
- class AffineConstraintsData
- {
- public:
- struct ScratchData
- {
- /**
- * Constructor, does nothing.
- */
- ScratchData()
- : in_use(false)
- {}
+ // the local row where constraint number i was detected, to find that row
+ // easily when the GlobalRowsFromLocal has been set up
+ size_type
+ constraint_origin(size_type i) const
+ {
+ return total_row_indices[n_active_rows + i].local_row;
+ }
- /**
- * Copy constructor, does nothing
- */
- ScratchData(const ScratchData &)
- : in_use(false)
- {}
+ // a vector that contains all the global ids and the corresponding local
+ // ids, as well as a pointer into the data cache that stores how to
+ // resolve constraints.
+ std::vector<Distributing> total_row_indices;
- /**
- * Stores whether the data is currently in use.
- */
- bool in_use;
+ private:
+ // holds the actual data from the constraints
+ DataCache<number> data_cache;
- /**
- * Temporary array for column indices
- */
- std::vector<size_type> columns;
+ // how many rows there are, constraints disregarded
+ size_type n_active_rows;
- /**
- * Temporary array for column values
- */
- std::vector<number> values;
+ // the number of rows with inhomogeneous constraints
+ size_type n_inhomogeneous_rows;
+ };
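+
+ // A sketch of the intended call order, with made-up indices (the actual
+ // driver code lives in the local_to_global functions this file provides
+ // helpers for):
+ //
+ //   GlobalRowsFromLocal<double> rows;
+ //   rows.reinit(n_dofs_per_cell);
+ //   // set rows.global_row(i) / rows.local_row(i) for unconstrained dofs,
+ //   // call rows.insert_constraint(i) for constrained ones, then
+ //   rows.sort();                       // order active rows by global id
+ //   rows.insert_index(g, i, weight);   // attach a constraint entry to g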
- /**
- * Temporary array for block start indices
- */
- std::vector<size_type> block_starts;
+ // a function that appends an additional row to the list of values, or
+ // appends a value to an already existing row. Similar functionality to
+ // std::map<size_type,Distributing>, but implemented here for a
+ // std::vector<Distributing>, which is much faster for the short lists we
+ // have here
+ template <typename number>
+ inline void
+ GlobalRowsFromLocal<number>::insert_index(const size_type global_row,
+ const size_type local_row,
+ const number constraint_value)
+ {
+ using index_iterator = std::vector<Distributing>::iterator;
+ index_iterator pos, pos1;
+ Distributing row_value(global_row);
+ std::pair<size_type, number> constraint(local_row, constraint_value);
+
+ // check whether the list was really sorted before entering here
+ for (size_type i = 1; i < n_active_rows; ++i)
+ Assert(total_row_indices[i - 1] < total_row_indices[i],
+ ExcInternalError());
- /**
- * Temporary array for vector indices
- */
- std::vector<size_type> vector_indices;
+ pos = Utilities::lower_bound(total_row_indices.begin(),
+ total_row_indices.begin() + n_active_rows,
+ row_value);
+ if (pos->global_row == global_row)
+ pos1 = pos;
+ else
+ {
+ pos1 = total_row_indices.insert(pos, row_value);
+ ++n_active_rows;
+ }
- /**
- * Temporary array for vector values
- */
- std::vector<number> vector_values;
+ if (pos1->constraint_position == numbers::invalid_size_type)
+ pos1->constraint_position = data_cache.insert_new_index(constraint);
+ else
+ data_cache.append_index(pos1->constraint_position, constraint);
+ }
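+
+ // For example (made-up numbers): if the sorted active list currently
+ // holds the global rows {2, 5, 9}, then insert_index(5, l, w) appends the
+ // pair (l, w) to the constraint cache of the existing entry 5, whereas
+ // insert_index(7, l, w) first inserts a new entry 7 between 5 and 9 and
+ // then attaches the pair to it.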
- /**
- * Data array for reorder row/column indices.
- */
- GlobalRowsFromLocal<number> global_rows;
+ // this sort algorithm sorts std::vector<Distributing>, but does not take
+ // the constraints into account. this means that if constraints have
+ // already been inserted, this function does not work as expected. We use
+ // shellsort, which is very fast when the indices are already almost
+ // sorted (the usual case with DG elements), and not too slow in other
+ // cases
+ template <typename number>
+ inline void
+ GlobalRowsFromLocal<number>::sort()
+ {
+ size_type i, j, j2, temp, templ, istep;
+ size_type step;
- /**
- * Data array for reorder row/column indices.
- */
- GlobalRowsFromLocal<number> global_columns;
- };
+ // check whether the constraints are really empty.
+ const size_type length = size();
+ // make sure that we are in the range of the vector
+ AssertIndexRange(length, total_row_indices.size() + 1);
+ for (size_type i = 0; i < length; ++i)
+ Assert(total_row_indices[i].constraint_position ==
+ numbers::invalid_size_type,
+ ExcInternalError());
+ step = length / 2;
+ while (step > 0)
+ {
+ for (i = step; i < length; i++)
+ {
+ istep = step;
+ j = i;
+ j2 = j - istep;
+ temp = total_row_indices[i].global_row;
+ templ = total_row_indices[i].local_row;
+ if (total_row_indices[j2].global_row > temp)
+ {
+ while ((j >= istep) &&
+ (total_row_indices[j2].global_row > temp))
+ {
+ total_row_indices[j].global_row =
+ total_row_indices[j2].global_row;
+ total_row_indices[j].local_row =
+ total_row_indices[j2].local_row;
+ j = j2;
+ j2 -= istep;
+ }
+ total_row_indices[j].global_row = temp;
+ total_row_indices[j].local_row = templ;
+ }
+ }
+ step = step >> 1;
+ }
+ }
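+
+ // A short trace (illustrative): for global rows [9, 2, 5], length is 3
+ // and the step sequence is 1, 0, so the algorithm degenerates to an
+ // insertion sort: [9, 2, 5] -> [2, 9, 5] -> [2, 5, 9]. The local_row
+ // values travel along with their global_row.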
/**
- * Accessor class to guard access to scratch_data
+ * Scratch data that is used during calls to distribute_local_to_global and
+ * add_entries_local_to_global. In order to avoid frequent memory
+ * allocation, we keep the data alive from one call to the next in a static
+ * variable. Since we want to allow for different number types in matrices,
+ * this is a template.
+ *
+ * Since each thread gets its private version of scratch data out of the
+ * ThreadLocalStorage, no conflicting access can occur. For this to be
+ * valid, we need to make sure that no call within
+ * distribute_local_to_global is made that by itself can spawn tasks.
+ * Otherwise, we might end up in a situation where several threads fight for
+ * the data.
+ *
+ * Access to the scratch data is only through the accessor class which
+ * handles the access as well as marking the data as used.
*/
- class ScratchDataAccessor
+ template <typename number>
+ class AffineConstraintsData
{
public:
- /**
- * Constructor. Grabs the scratch data object on the current thread and
- * marks it as used.
- */
- ScratchDataAccessor()
- : my_scratch_data(&AffineConstraintsData::scratch_data.get())
+ struct ScratchData
{
- Assert(my_scratch_data->in_use == false,
- ExcMessage(
- "Access to thread-local scratch data tried, but it is already "
- "in use"));
- my_scratch_data->in_use = true;
- }
+ /**
+ * Constructor, does nothing.
+ */
+ ScratchData()
+ : in_use(false)
+ {}
+
+ /**
+ * Copy constructor, does nothing
+ */
+ ScratchData(const ScratchData &)
+ : in_use(false)
+ {}
+
+ /**
+ * Stores whether the data is currently in use.
+ */
+ bool in_use;
+
+ /**
+ * Temporary array for column indices
+ */
+ std::vector<size_type> columns;
+
+ /**
+ * Temporary array for column values
+ */
+ std::vector<number> values;
+
+ /**
+ * Temporary array for block start indices
+ */
+ std::vector<size_type> block_starts;
+
+ /**
+ * Temporary array for vector indices
+ */
+ std::vector<size_type> vector_indices;
+
+ /**
+ * Temporary array for vector values
+ */
+ std::vector<number> vector_values;
+
+ /**
+ * Data array for reorder row/column indices.
+ */
+ GlobalRowsFromLocal<number> global_rows;
+
+ /**
+ * Data array for reorder row/column indices.
+ */
+ GlobalRowsFromLocal<number> global_columns;
+ };
- /**
- * Destructor. Mark scratch data as available again.
- */
- ~ScratchDataAccessor()
- {
- my_scratch_data->in_use = false;
- }
- /**
- * Dereferencing operator.
- */
- ScratchData &operator*()
- {
- return *my_scratch_data;
- }
/**
- * Dereferencing operator.
+ * Accessor class to guard access to scratch_data
*/
- ScratchData *operator->()
+ class ScratchDataAccessor
{
- return my_scratch_data;
- }
+ public:
+ /**
+ * Constructor. Grabs the scratch data object on the current thread and
+ * marks it as used.
+ */
+ ScratchDataAccessor()
+ : my_scratch_data(&AffineConstraintsData::scratch_data.get())
+ {
+ Assert(
+ my_scratch_data->in_use == false,
+ ExcMessage(
+ "Access to thread-local scratch data tried, but it is already "
+ "in use"));
+ my_scratch_data->in_use = true;
+ }
+
+ /**
+ * Destructor. Mark scratch data as available again.
+ */
+ ~ScratchDataAccessor()
+ {
+ my_scratch_data->in_use = false;
+ }
+
+ /**
+ * Dereferencing operator.
+ */
+ ScratchData &operator*()
+ {
+ return *my_scratch_data;
+ }
+
+ /**
+ * Dereferencing operator.
+ */
+ ScratchData *operator->()
+ {
+ return my_scratch_data;
+ }
+
+ private:
+ ScratchData *my_scratch_data;
+ };
private:
- ScratchData *my_scratch_data;
+ /**
+ * The actual data object that contains a scratch data for each thread.
+ */
+ static Threads::ThreadLocalStorage<ScratchData> scratch_data;
};
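+
+ // Usage sketch (illustrative): ScratchDataAccessor is an RAII guard
+ // around the thread-local scratch data,
+ //
+ //   AffineConstraintsData<double>::ScratchDataAccessor scratch;
+ //   scratch->columns.clear();   // data is marked in_use for this scope
+ //   // ...fill scratch->columns, scratch->values...
+ //   // released again when 'scratch' goes out of scope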
- private:
- /**
- * The actual data object that contains a scratch data for each thread.
- */
- static Threads::ThreadLocalStorage<ScratchData> scratch_data;
- };
+ // function for block matrices: Find out where in the list of local dofs
+ // (sorted according to global ids) the individual blocks start.
+ // Transform the global indices to block-local indices in order to be
+ // able to use functions like vector.block(1)(block_local_id), instead of
+ // vector(global_id). This avoids transforming indices one-by-one later
+ // on.
+ template <typename number, class BlockType>
+ inline void
+ make_block_starts(const BlockType & block_object,
+ GlobalRowsFromLocal<number> &global_rows,
+ std::vector<size_type> & block_starts)
+ {
+ AssertDimension(block_starts.size(), block_object.n_block_rows() + 1);
- // function for block matrices: Find out where in the list of local dofs
- // (sorted according to global ids) the individual blocks start.
- // Transform the global indices to block-local indices in order to be
- // able to use functions like vector.block(1)(block_local_id), instead of
- // vector(global_id). This avoids transforming indices one-by-one later
- // on.
- template <typename number, class BlockType>
- inline void
- make_block_starts(const BlockType & block_object,
- GlobalRowsFromLocal<number> &global_rows,
- std::vector<size_type> & block_starts)
- {
- AssertDimension(block_starts.size(), block_object.n_block_rows() + 1);
+ using row_iterator = std::vector<Distributing>::iterator;
+ row_iterator block_indices = global_rows.total_row_indices.begin();
- using row_iterator = std::vector<Distributing>::iterator;
- row_iterator block_indices = global_rows.total_row_indices.begin();
+ const size_type num_blocks = block_object.n_block_rows();
+ const size_type n_active_rows = global_rows.size();
- const size_type num_blocks = block_object.n_block_rows();
- const size_type n_active_rows = global_rows.size();
+ // find end of rows.
+ block_starts[0] = 0;
+ for (size_type i = 1; i < num_blocks; ++i)
+ {
+ row_iterator first_block = Utilities::lower_bound(
+ block_indices,
+ global_rows.total_row_indices.begin() + n_active_rows,
+ Distributing(block_object.get_row_indices().block_start(i)));
+ block_starts[i] = first_block - global_rows.total_row_indices.begin();
+ block_indices = first_block;
+ }
+ block_starts[num_blocks] = n_active_rows;
+
+ // transform row indices to block-local index space
+ for (size_type i = block_starts[1]; i < n_active_rows; ++i)
+ global_rows.global_row(i) =
+ block_object.get_row_indices()
+ .global_to_local(global_rows.global_row(i))
+ .second;
+ }
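+
+ // Worked example (made-up numbers): for a 2x2 block matrix whose second
+ // block starts at global index 100, the sorted rows {3, 17, 104, 120}
+ // give block_starts = {0, 2, 4}; the last two rows are then renumbered to
+ // their block-local indices 4 and 20.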
- // find end of rows.
- block_starts[0] = 0;
- for (size_type i = 1; i < num_blocks; ++i)
- {
- row_iterator first_block = Utilities::lower_bound(
- block_indices,
- global_rows.total_row_indices.begin() + n_active_rows,
- Distributing(block_object.get_row_indices().block_start(i)));
- block_starts[i] = first_block - global_rows.total_row_indices.begin();
- block_indices = first_block;
- }
- block_starts[num_blocks] = n_active_rows;
+ // same as before, but for std::vector<uint> instead of
+ // GlobalRowsFromLocal. Used in functions for sparsity patterns.
+ template <class BlockType>
+ inline void
+ make_block_starts(const BlockType & block_object,
+ std::vector<size_type> &row_indices,
+ std::vector<size_type> &block_starts)
+ {
+ AssertDimension(block_starts.size(), block_object.n_block_rows() + 1);
- // transform row indices to block-local index space
- for (size_type i = block_starts[1]; i < n_active_rows; ++i)
- global_rows.global_row(i) = block_object.get_row_indices()
- .global_to_local(global_rows.global_row(i))
- .second;
- }
+ using row_iterator = std::vector<size_type>::iterator;
+ row_iterator col_indices = row_indices.begin();
- // same as before, but for std::vector<uint> instead of
- // GlobalRowsFromLocal. Used in functions for sparsity patterns.
- template <class BlockType>
- inline void
- make_block_starts(const BlockType & block_object,
- std::vector<size_type> &row_indices,
- std::vector<size_type> &block_starts)
- {
- AssertDimension(block_starts.size(), block_object.n_block_rows() + 1);
+ const size_type num_blocks = block_object.n_block_rows();
- using row_iterator = std::vector<size_type>::iterator;
- row_iterator col_indices = row_indices.begin();
+ // find end of rows.
+ block_starts[0] = 0;
+ for (size_type i = 1; i < num_blocks; ++i)
+ {
+ row_iterator first_block =
+ Utilities::lower_bound(col_indices,
+ row_indices.end(),
+ block_object.get_row_indices().block_start(
+ i));
+ block_starts[i] = first_block - row_indices.begin();
+ col_indices = first_block;
+ }
+ block_starts[num_blocks] = row_indices.size();
- const size_type num_blocks = block_object.n_block_rows();
+ // transform row indices to local index space
+ for (size_type i = block_starts[1]; i < row_indices.size(); ++i)
+ row_indices[i] =
+ block_object.get_row_indices().global_to_local(row_indices[i]).second;
+ }
- // find end of rows.
- block_starts[0] = 0;
- for (size_type i = 1; i < num_blocks; ++i)
- {
- row_iterator first_block =
- Utilities::lower_bound(col_indices,
- row_indices.end(),
- block_object.get_row_indices().block_start(i));
- block_starts[i] = first_block - row_indices.begin();
- col_indices = first_block;
- }
- block_starts[num_blocks] = row_indices.size();
+ // resolves constraints of one column in the innermost loop. goes through
+ // the origin of each global entry and finds out which data we need to
+ // collect.
+ template <typename number>
+ static inline number
+ resolve_matrix_entry(const GlobalRowsFromLocal<number> &global_rows,
+ const GlobalRowsFromLocal<number> &global_cols,
+ const size_type i,
+ const size_type j,
+ const size_type loc_row,
+ const FullMatrix<number> & local_matrix)
+ {
+ const size_type loc_col = global_cols.local_row(j);
+ number col_val;
- // transform row indices to local index space
- for (size_type i = block_starts[1]; i < row_indices.size(); ++i)
- row_indices[i] =
- block_object.get_row_indices().global_to_local(row_indices[i]).second;
- }
+ // case 1: row has direct contribution in local matrix. decide whether col
+ // has a direct contribution. if not, set the value to zero.
+ if (loc_row != numbers::invalid_size_type)
+ {
+ col_val = ((loc_col != numbers::invalid_size_type) ?
+ local_matrix(loc_row, loc_col) :
+ 0);
+
+ // account for indirect contributions by constraints in column
+ for (size_type p = 0; p < global_cols.size(j); ++p)
+ col_val += (local_matrix(loc_row, global_cols.local_row(j, p)) *
+ global_cols.constraint_value(j, p));
+ }
- // resolves constraints of one column at the innermost loop. goes through
- // the origin of each global entry and finds out which data we need to
- // collect.
- template <typename number>
- static inline number
- resolve_matrix_entry(const GlobalRowsFromLocal<number> &global_rows,
- const GlobalRowsFromLocal<number> &global_cols,
- const size_type i,
- const size_type j,
- const size_type loc_row,
- const FullMatrix<number> & local_matrix)
- {
- const size_type loc_col = global_cols.local_row(j);
- number col_val;
+ // case 2: row has no direct contribution in local matrix
+ else
+ col_val = 0;
- // case 1: row has direct contribution in local matrix. decide whether col
- // has a direct contribution. if not, set the value to zero.
- if (loc_row != numbers::invalid_size_type)
- {
- col_val = ((loc_col != numbers::invalid_size_type) ?
- local_matrix(loc_row, loc_col) :
- 0);
-
- // account for indirect contributions by constraints in column
- for (size_type p = 0; p < global_cols.size(j); ++p)
- col_val += (local_matrix(loc_row, global_cols.local_row(j, p)) *
- global_cols.constraint_value(j, p));
- }
+ // account for indirect contributions by constraints in row, going through
+ // the direct and indirect references in the given column.
+ for (size_type q = 0; q < global_rows.size(i); ++q)
+ {
+ number add_this =
+ (loc_col != numbers::invalid_size_type) ?
+ local_matrix(global_rows.local_row(i, q), loc_col) :
+ 0;
+
+ for (size_type p = 0; p < global_cols.size(j); ++p)
+ add_this += (local_matrix(global_rows.local_row(i, q),
+ global_cols.local_row(j, p)) *
+ global_cols.constraint_value(j, p));
+ col_val += add_this * global_rows.constraint_value(i, q);
+ }
+ return col_val;
+ }
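+
+ // In formula form (a sketch of what the code above computes): with row
+ // constraint weights c_q attached to local rows r_q and column weights
+ // d_p attached to local columns s_p, the resolved entry is
+ //
+ //   val = M(loc_row, loc_col) + sum_p M(loc_row, s_p) * d_p
+ //       + sum_q c_q * ( M(r_q, loc_col) + sum_p M(r_q, s_p) * d_p ),
+ //
+ // where any missing direct contribution enters as zero.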
- // case 2: row has no direct contribution in local matrix
- else
- col_val = 0;
+ // computes all entries that need to be written into global_rows[i]. Lists
+ // the resulting values in val_ptr, and the corresponding column indices in
+ // col_ptr.
+ template <typename number>
+ inline void
+ resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
+ const GlobalRowsFromLocal<number> &global_cols,
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
+ const FullMatrix<number> & local_matrix,
+ size_type *& col_ptr,
+ number *& val_ptr)
+ {
+ if (column_end == column_start)
+ return;
- // account for indirect contributions by constraints in row, going trough
- // the direct and indirect references in the given column.
- for (size_type q = 0; q < global_rows.size(i); ++q)
- {
- number add_this = (loc_col != numbers::invalid_size_type) ?
- local_matrix(global_rows.local_row(i, q), loc_col) :
- 0;
-
- for (size_type p = 0; p < global_cols.size(j); ++p)
- add_this += (local_matrix(global_rows.local_row(i, q),
- global_cols.local_row(j, p)) *
- global_cols.constraint_value(j, p));
- col_val += add_this * global_rows.constraint_value(i, q);
- }
- return col_val;
- }
+ AssertIndexRange(column_end - 1, global_cols.size());
+ const size_type loc_row = global_rows.local_row(i);
- // computes all entries that need to be written into global_rows[i]. Lists
- // the resulting values in val_ptr, and the corresponding column indices in
- // col_ptr.
- template <typename number>
- inline void
- resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
- const GlobalRowsFromLocal<number> &global_cols,
- const size_type i,
- const size_type column_start,
- const size_type column_end,
- const FullMatrix<number> & local_matrix,
- size_type *& col_ptr,
- number *& val_ptr)
- {
- if (column_end == column_start)
- return;
+ // fast function if there are no indirect references to any of the local
+ // rows at all on this set of dofs (saves a lot of checks). the only check
+ // we actually need to perform is whether the matrix element is zero.
+ if (global_rows.have_indirect_rows() == false &&
+ global_cols.have_indirect_rows() == false)
+ {
+ AssertIndexRange(loc_row, local_matrix.m());
+ const number *matrix_ptr = &local_matrix(loc_row, 0);
- AssertIndexRange(column_end - 1, global_cols.size());
- const size_type loc_row = global_rows.local_row(i);
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ const size_type loc_col = global_cols.local_row(j);
+ AssertIndexRange(loc_col, local_matrix.n());
+ const number col_val = matrix_ptr[loc_col];
+ if (col_val != number())
+ {
+ *val_ptr++ = static_cast<number>(col_val);
+ *col_ptr++ = global_cols.global_row(j);
+ }
+ }
+ }
- // fast function if there are no indirect references to any of the local
- // rows at all on this set of dofs (saves a lot of checks). the only check
- // we actually need to perform is whether the matrix element is zero.
- if (global_rows.have_indirect_rows() == false &&
- global_cols.have_indirect_rows() == false)
- {
- AssertIndexRange(loc_row, local_matrix.m());
- const number *matrix_ptr = &local_matrix(loc_row, 0);
+ // more difficult part when there are indirect references and when we need
+ // to do some more checks.
+ else
+ {
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ number col_val = resolve_matrix_entry(
+ global_rows, global_cols, i, j, loc_row, local_matrix);
- for (size_type j = column_start; j < column_end; ++j)
- {
- const size_type loc_col = global_cols.local_row(j);
- AssertIndexRange(loc_col, local_matrix.n());
- const number col_val = matrix_ptr[loc_col];
- if (col_val != number())
- {
- *val_ptr++ = static_cast<number>(col_val);
- *col_ptr++ = global_cols.global_row(j);
- }
- }
- }
+ // if we got some nontrivial value, append it to the array of
+ // values.
+ if (col_val != number())
+ {
+ *val_ptr++ = static_cast<number>(col_val);
+ *col_ptr++ = global_cols.global_row(j);
+ }
+ }
+ }
+ }
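+
+ // Caller-side sketch (hypothetical buffers): col_ptr and val_ptr are
+ // advanced past every entry written, so the caller can recover the
+ // number of produced entries from the pointer offset:
+ //
+ //   std::vector<size_type> col_buf(n);
+ //   std::vector<double>    val_buf(n);
+ //   size_type *col_ptr = col_buf.data();
+ //   double    *val_ptr = val_buf.data();
+ //   resolve_matrix_row(global_rows, global_cols, i, 0,
+ //                      global_cols.size(), local_matrix, col_ptr, val_ptr);
+ //   const size_type n_entries = col_ptr - col_buf.data();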
- // more difficult part when there are indirect references and when we need
- // to do some more checks.
- else
+ // specialized function that can write into the row of a
+ // SparseMatrix<number>.
+ namespace dealiiSparseMatrix
+ {
+ template <typename SparseMatrixIterator, typename LocalType>
+ static inline void
+ add_value(const LocalType value,
+ const size_type row,
+ const size_type column,
+ SparseMatrixIterator &matrix_values)
{
- for (size_type j = column_start; j < column_end; ++j)
+ (void)row;
+ if (value != LocalType())
{
- number col_val = resolve_matrix_entry(
- global_rows, global_cols, i, j, loc_row, local_matrix);
-
- // if we got some nontrivial value, append it to the array of
- // values.
- if (col_val != number())
- {
- *val_ptr++ = static_cast<number>(col_val);
- *col_ptr++ = global_cols.global_row(j);
- }
+ while (matrix_values->column() < column)
+ ++matrix_values;
+ Assert(matrix_values->column() == column,
+ typename SparseMatrix<
+ typename SparseMatrixIterator::MatrixType::value_type>::
+ ExcInvalidIndex(row, column));
+ matrix_values->value() += value;
}
}
- }
+ } // namespace dealiiSparseMatrix
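+
+ // Usage note (as read off the code above): add_value assumes that the
+ // iterator walks a row whose sparsity pattern already contains 'column',
+ // and that successive calls use increasing column indices, since the
+ // while loop only ever advances the iterator:
+ //
+ //   auto it = matrix.begin(row);
+ //   dealiiSparseMatrix::add_value(0.5,  row, c1, it);  // requires c1 < c2
+ //   dealiiSparseMatrix::add_value(0.25, row, c2, it);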
- // specialized function that can write into the row of a
- // SparseMatrix<number>.
- namespace dealiiSparseMatrix
- {
- template <typename SparseMatrixIterator, typename LocalType>
- static inline void
- add_value(const LocalType value,
- const size_type row,
- const size_type column,
- SparseMatrixIterator &matrix_values)
+ // similar to the function above, now with a shortcut for deal.II sparse
+ // matrices. this lets us avoid using extra arrays, and performs all the
+ // operations in place, i.e., in the respective matrix row
+ template <typename number>
+ inline void
+ resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
+ const FullMatrix<number> & local_matrix,
+ SparseMatrix<number> * sparse_matrix)
{
- (void)row;
- if (value != LocalType())
- {
- while (matrix_values->column() < column)
- ++matrix_values;
- Assert(matrix_values->column() == column,
- typename SparseMatrix<
- typename SparseMatrixIterator::MatrixType::value_type>::
- ExcInvalidIndex(row, column));
- matrix_values->value() += value;
- }
- }
- } // namespace dealiiSparseMatrix
+ if (column_end == column_start)
+ return;
- // similar as before, now with shortcut for deal.II sparse matrices. this
- // lets us avoid using extra arrays, and does all the operations just in
- // place, i.e., in the respective matrix row
- template <typename number>
- inline void
- resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
- const size_type i,
- const size_type column_start,
- const size_type column_end,
- const FullMatrix<number> & local_matrix,
- SparseMatrix<number> * sparse_matrix)
- {
- if (column_end == column_start)
- return;
+ AssertIndexRange(column_end - 1, global_rows.size());
+ const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern();
- AssertIndexRange(column_end - 1, global_rows.size());
- const SparsityPattern &sparsity = sparse_matrix->get_sparsity_pattern();
+ if (sparsity.n_nonzero_elements() == 0)
+ return;
- if (sparsity.n_nonzero_elements() == 0)
- return;
+ const size_type row = global_rows.global_row(i);
+ const size_type loc_row = global_rows.local_row(i);
- const size_type row = global_rows.global_row(i);
- const size_type loc_row = global_rows.local_row(i);
+ typename SparseMatrix<number>::iterator matrix_values =
+ sparse_matrix->begin(row);
+ const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();
- typename SparseMatrix<number>::iterator matrix_values =
- sparse_matrix->begin(row);
- const bool optimize_diagonal = sparsity.n_rows() == sparsity.n_cols();
+ // distinguish three cases for checking whether the diagonal is the first
+ // element of the row. this avoids if statements at the innermost loop
+ // positions
- // distinguish three cases about what can happen for checking whether the
- // diagonal is the first element of the row. this avoids if statements at
- // the innermost loop positions
+ if (!optimize_diagonal) // case 1: no diagonal optimization in matrix
+ {
+ if (global_rows.have_indirect_rows() == false)
+ {
+ AssertIndexRange(loc_row, local_matrix.m());
+ const number *matrix_ptr = &local_matrix(loc_row, 0);
- if (!optimize_diagonal) // case 1: no diagonal optimization in matrix
- {
- if (global_rows.have_indirect_rows() == false)
- {
- AssertIndexRange(loc_row, local_matrix.m());
- const number *matrix_ptr = &local_matrix(loc_row, 0);
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ const number col_val = matrix_ptr[loc_col];
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ }
+ else
+ {
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ number col_val = resolve_matrix_entry(
+ global_rows, global_rows, i, j, loc_row, local_matrix);
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ }
+ }
+ else if (i >= column_start && i < column_end) // case 2: can split loop
+ {
+ ++matrix_values; // jump over diagonal element
+ if (global_rows.have_indirect_rows() == false)
+ {
+ AssertIndexRange(loc_row, local_matrix.m());
+ const number *matrix_ptr = &local_matrix(loc_row, 0);
- for (size_type j = column_start; j < column_end; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- const number col_val = matrix_ptr[loc_col];
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- }
- else
- {
- for (size_type j = column_start; j < column_end; ++j)
- {
- number col_val = resolve_matrix_entry(
- global_rows, global_rows, i, j, loc_row, local_matrix);
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- }
- }
- else if (i >= column_start && i < column_end) // case 2: can split loop
- {
- ++matrix_values; // jump over diagonal element
- if (global_rows.have_indirect_rows() == false)
- {
- AssertIndexRange(loc_row, local_matrix.m());
- const number *matrix_ptr = &local_matrix(loc_row, 0);
+ sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
+ for (size_type j = column_start; j < i; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ const number col_val = matrix_ptr[loc_col];
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ for (size_type j = i + 1; j < column_end; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ const number col_val = matrix_ptr[loc_col];
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ }
+ else
+ {
+ sparse_matrix->begin(row)->value() += resolve_matrix_entry(
+ global_rows, global_rows, i, i, loc_row, local_matrix);
+ for (size_type j = column_start; j < i; ++j)
+ {
+ number col_val = resolve_matrix_entry(
+ global_rows, global_rows, i, j, loc_row, local_matrix);
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ for (size_type j = i + 1; j < column_end; ++j)
+ {
+ number col_val = resolve_matrix_entry(
+ global_rows, global_rows, i, j, loc_row, local_matrix);
+ dealiiSparseMatrix::add_value(col_val,
+ row,
+ global_rows.global_row(j),
+ matrix_values);
+ }
+ }
+ }
+ // case 3: can't say - need to check inside the loop
+ else if (global_rows.have_indirect_rows() == false)
+ {
+ ++matrix_values; // jump over diagonal element
+ AssertIndexRange(loc_row, local_matrix.m());
+ const number *matrix_ptr = &local_matrix(loc_row, 0);
- sparse_matrix->begin(row)->value() += matrix_ptr[loc_row];
- for (size_type j = column_start; j < i; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- const number col_val = matrix_ptr[loc_col];
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- for (size_type j = i + 1; j < column_end; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- const number col_val = matrix_ptr[loc_col];
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- }
- else
- {
- sparse_matrix->begin(row)->value() += resolve_matrix_entry(
- global_rows, global_rows, i, i, loc_row, local_matrix);
- for (size_type j = column_start; j < i; ++j)
- {
- number col_val = resolve_matrix_entry(
- global_rows, global_rows, i, j, loc_row, local_matrix);
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ const number col_val = matrix_ptr[loc_col];
+ if (row == global_rows.global_row(j))
+ sparse_matrix->begin(row)->value() += col_val;
+ else
dealiiSparseMatrix::add_value(col_val,
row,
global_rows.global_row(j),
matrix_values);
- }
- for (size_type j = i + 1; j < column_end; ++j)
- {
- number col_val = resolve_matrix_entry(
- global_rows, global_rows, i, j, loc_row, local_matrix);
+ }
+ }
+ else
+ {
+ ++matrix_values; // jump over diagonal element
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ number col_val = resolve_matrix_entry(
+ global_rows, global_rows, i, j, loc_row, local_matrix);
+ if (row == global_rows.global_row(j))
+ sparse_matrix->begin(row)->value() += col_val;
+ else
dealiiSparseMatrix::add_value(col_val,
row,
global_rows.global_row(j),
matrix_values);
- }
- }
- }
- // case 3: can't say - need to check inside the loop
- else if (global_rows.have_indirect_rows() == false)
- {
- ++matrix_values; // jump over diagonal element
- AssertIndexRange(loc_row, local_matrix.m());
- const number *matrix_ptr = &local_matrix(loc_row, 0);
+ }
+ }
+ }
- for (size_type j = column_start; j < column_end; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- const number col_val = matrix_ptr[loc_col];
- if (row == global_rows.global_row(j))
- sparse_matrix->begin(row)->value() += col_val;
- else
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- }
- else
- {
- ++matrix_values; // jump over diagonal element
- for (size_type j = column_start; j < column_end; ++j)
- {
- number col_val = resolve_matrix_entry(
- global_rows, global_rows, i, j, loc_row, local_matrix);
- if (row == global_rows.global_row(j))
- sparse_matrix->begin(row)->value() += col_val;
- else
- dealiiSparseMatrix::add_value(col_val,
- row,
- global_rows.global_row(j),
- matrix_values);
- }
- }
- }
+ // Same function as the one above for resolving all entries that will be
+ // added to the given global row global_rows[i], but now for the sparsity
+ // pattern
+ template <typename number>
+ inline void
+ resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
+ const size_type i,
+ const size_type column_start,
+ const size_type column_end,
+ const Table<2, bool> & dof_mask,
+ std::vector<size_type>::iterator & col_ptr)
+ {
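+ // nothing to do if the range of columns is empty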
+ if (column_end == column_start)
+ return;
- // Same function to resolve all entries that will be added to the given
- // global row global_rows[i] as before, now for sparsity pattern
- template <typename number>
- inline void
- resolve_matrix_row(const GlobalRowsFromLocal<number> &global_rows,
- const size_type i,
- const size_type column_start,
- const size_type column_end,
- const Table<2, bool> & dof_mask,
- std::vector<size_type>::iterator & col_ptr)
- {
- if (column_end == column_start)
- return;
+ const size_type loc_row = global_rows.local_row(i);
- const size_type loc_row = global_rows.local_row(i);
+ // fast path if there are no indirect references to any of the local
+ // rows on this set of dofs, i.e., no entry stems from a constraint
+ if (global_rows.have_indirect_rows() == false)
+ {
+ Assert(loc_row < dof_mask.n_rows(), ExcInternalError());
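+ // each global row/column corresponds to exactly one local dof here,
+ // so the dof_mask can be queried directly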
- // fast function if there are no indirect references to any of the local
- // rows at all on this set of dofs
- if (global_rows.have_indirect_rows() == false)
- {
- Assert(loc_row < dof_mask.n_rows(), ExcInternalError());
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
- for (size_type j = column_start; j < column_end; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
+ if (dof_mask(loc_row, loc_col) == true)
+ *col_ptr++ = global_rows.global_row(j);
+ }
+ }
- if (dof_mask(loc_row, loc_col) == true)
- *col_ptr++ = global_rows.global_row(j);
- }
- }
+ // slower path when there are indirect references and we therefore need
+ // to do some more checks.
+ else
+ {
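+ // a global entry (i,j) can receive contributions from several pairs
+ // of local rows: the direct local row/column (if the global row or
+ // column stems from an unconstrained dof, i.e., loc_row/loc_col is
+ // valid) and all local rows from which constraints contribute,
+ // accessed via global_rows.local_row(i, q) and
+ // global_rows.local_row(j, p). e.g., a hanging-node constraint
+ // x_k = 0.5 x_a + 0.5 x_b makes local row k contribute to the global
+ // rows of both a and b. as soon as one pair couples according to
+ // dof_mask, the column is added and the remaining checks are skipped.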
+ for (size_type j = column_start; j < column_end; ++j)
+ {
+ const size_type loc_col = global_rows.local_row(j);
+ if (loc_row != numbers::invalid_size_type)
+ {
+ Assert(loc_row < dof_mask.n_rows(), ExcInternalError());
+ if (loc_col != numbers::invalid_size_type)
+ {
+ Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
+ if (dof_mask(loc_row, loc_col) == true)
+ goto add_this_index;
+ }
- // slower functions when there are indirect references and when we need to
- // do some more checks.
- else
- {
- for (size_type j = column_start; j < column_end; ++j)
- {
- const size_type loc_col = global_rows.local_row(j);
- if (loc_row != numbers::invalid_size_type)
- {
- Assert(loc_row < dof_mask.n_rows(), ExcInternalError());
- if (loc_col != numbers::invalid_size_type)
- {
- Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
- if (dof_mask(loc_row, loc_col) == true)
+ for (size_type p = 0; p < global_rows.size(j); ++p)
+ if (dof_mask(loc_row, global_rows.local_row(j, p)) == true)
goto add_this_index;
- }
+ }
- for (size_type p = 0; p < global_rows.size(j); ++p)
- if (dof_mask(loc_row, global_rows.local_row(j, p)) == true)
- goto add_this_index;
- }
+ for (size_type q = 0; q < global_rows.size(i); ++q)
+ {
+ if (loc_col != numbers::invalid_size_type)
+ {
+ Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
+ if (dof_mask(global_rows.local_row(i, q), loc_col) ==
+ true)
+ goto add_this_index;
+ }
- for (size_type q = 0; q < global_rows.size(i); ++q)
- {
- if (loc_col != numbers::invalid_size_type)
- {
- Assert(loc_col < dof_mask.n_cols(), ExcInternalError());
- if (dof_mask(global_rows.local_row(i, q), loc_col) == true)
+ for (size_type p = 0; p < global_rows.size(j); ++p)
+ if (dof_mask(global_rows.local_row(i, q),
+ global_rows.local_row(j, p)) == true)
goto add_this_index;
- }
-
- for (size_type p = 0; p < global_rows.size(j); ++p)
- if (dof_mask(global_rows.local_row(i, q),
- global_rows.local_row(j, p)) == true)
- goto add_this_index;
- }
+ }
- continue;
- // if we got some nontrivial value, append it to the array of
- // values.
- add_this_index:
- *col_ptr++ = global_rows.global_row(j);
- }
- }
- }
+ continue;
+ // if any pair of contributing local rows couples according to
+ // dof_mask, we jump here and append the global column index
+ add_this_index:
+ *col_ptr++ = global_rows.global_row(j);
+ }
+ }
+ }
- // to make sure that the global matrix remains invertible, we need to do
- // something with the diagonal elements. Add the average of the
- // absolute values of the local matrix diagonals, so the resulting entry
- // will always be positive and furthermore be in the same order of magnitude
- // as the other elements of the matrix. If all local matrix diagonals are
- // zero, add the l1 norm of the local matrix divided by the matrix size
- // to the diagonal of the global matrix. If the entire local matrix is zero,
- // add 1 to the diagonal of the global matrix.
- //
- // note that this also captures the special case that a dof is both
- // constrained and fixed (this can happen for hanging nodes in 3d that also
- // happen to be on the boundary). in that case, following the program flow
- // in distribute_local_to_global, it is realized that when distributing the
- // row and column no elements of the matrix are actually touched if all the
- // degrees of freedom to which this dof is constrained are also constrained
- // (the usual case with hanging nodes in 3d). however, in the line below, we
- // do actually do something with this dof
- template <typename number, typename MatrixType, typename VectorType>
- inline void
- set_matrix_diagonals(
- const internals::GlobalRowsFromLocal<number> &global_rows,
- const std::vector<size_type> & local_dof_indices,
- const FullMatrix<number> & local_matrix,
- const AffineConstraints<number> & constraints,
- MatrixType & global_matrix,
- VectorType & global_vector,
- bool use_inhomogeneities_for_rhs)
- {
- if (global_rows.n_constraints() > 0)
- {
- number average_diagonal = number();
- for (size_type i = 0; i < local_matrix.m(); ++i)
- average_diagonal += std::abs(local_matrix(i, i));
- average_diagonal /= static_cast<number>(local_matrix.m());
+ // to make sure that the global matrix remains invertible, we need to do
+ // something with the diagonal elements. Add the average of the
+ // absolute values of the local matrix diagonals, so the resulting entry
+ // will always be positive and furthermore be in the same order of magnitude
+ // as the other elements of the matrix. If all local matrix diagonals are
+ // zero, add the l1 norm of the local matrix divided by the matrix size
+ // to the diagonal of the global matrix. If the entire local matrix is zero,
+ // add 1 to the diagonal of the global matrix.
+ //
+ // note that this also captures the special case that a dof is both
+ // constrained and fixed (this can happen for hanging nodes in 3d that also
+ // happen to be on the boundary). in that case, when following the program
+ // flow in distribute_local_to_global, one realizes that no element of the
+ // matrix is actually touched when distributing the row and column if all
+ // the degrees of freedom to which this dof is constrained are themselves
+ // constrained (the usual case with hanging nodes in 3d). the loop below,
+ // however, does touch this dof
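+ //
+ // for example, local diagonal values (0, 2, -4) give an average of
+ // (|0|+|2|+|-4|)/3 = 2: a constrained row whose own diagonal is zero
+ // receives 2, whereas one with diagonal -4 receives |-4| = 4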
+ template <typename number, typename MatrixType, typename VectorType>
+ inline void
+ set_matrix_diagonals(
+ const internal::AffineConstraints::GlobalRowsFromLocal<number>
+ & global_rows,
+ const std::vector<size_type> & local_dof_indices,
+ const FullMatrix<number> & local_matrix,
+ const dealii::AffineConstraints<number> &constraints,
+ MatrixType & global_matrix,
+ VectorType & global_vector,
+ bool use_inhomogeneities_for_rhs)
+ {
+ if (global_rows.n_constraints() > 0)
+ {
+ number average_diagonal = number();
+ for (size_type i = 0; i < local_matrix.m(); ++i)
+ average_diagonal += std::abs(local_matrix(i, i));
+ average_diagonal /= static_cast<number>(local_matrix.m());
- // handle the case that all diagonal elements are zero
- if (average_diagonal == static_cast<number>(0.))
- {
- average_diagonal = static_cast<number>(local_matrix.l1_norm()) /
- static_cast<number>(local_matrix.m());
- // if the entire matrix is zero, use 1. for the diagonal
- if (average_diagonal == static_cast<number>(0.))
- average_diagonal = static_cast<number>(1.);
- }
+ // handle the case that all diagonal elements are zero
+ if (average_diagonal == static_cast<number>(0.))
+ {
+ average_diagonal = static_cast<number>(local_matrix.l1_norm()) /
+ static_cast<number>(local_matrix.m());
+ // if the entire matrix is zero, use 1. for the diagonal
+ if (average_diagonal == static_cast<number>(0.))
+ average_diagonal = static_cast<number>(1.);
+ }
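+ // for each constrained dof, add a positive value to the diagonal of
+ // the global matrix: the absolute value of the local diagonal entry
+ // if it is nonzero, and the average computed above otherwise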
- for (size_type i = 0; i < global_rows.n_constraints(); i++)
- {
- const size_type local_row = global_rows.constraint_origin(i);
- const size_type global_row = local_dof_indices[local_row];
- const number new_diagonal =
- (std::abs(local_matrix(local_row, local_row)) != 0. ?
- std::abs(local_matrix(local_row, local_row)) :
- average_diagonal);
- global_matrix.add(global_row, global_row, new_diagonal);
-
- // if the use_inhomogeneities_for_rhs flag is set to true, the
- // inhomogeneities are used to create the global vector. instead
- // of fill in a zero in the ith components with an inhomogeneity,
- // we set those to: inhomogeneity(i)*global_matrix (i,i).
- if (use_inhomogeneities_for_rhs == true)
- global_vector(global_row) +=
- new_diagonal * constraints.get_inhomogeneity(global_row);
- }
- }
- }
+ for (size_type i = 0; i < global_rows.n_constraints(); ++i)
+ {
+ const size_type local_row = global_rows.constraint_origin(i);
+ const size_type global_row = local_dof_indices[local_row];
+ const number new_diagonal =
+ (std::abs(local_matrix(local_row, local_row)) != 0. ?
+ std::abs(local_matrix(local_row, local_row)) :
+ average_diagonal);
+ global_matrix.add(global_row, global_row, new_diagonal);
+
+ // if the use_inhomogeneities_for_rhs flag is set to true, the
+ // inhomogeneities are used to create the global vector: instead
+ // of filling in a zero for the i-th component of a constrained
+ // dof, we set it to inhomogeneity(i)*global_matrix(i,i).
+ if (use_inhomogeneities_for_rhs == true)
+ global_vector(global_row) +=
+ new_diagonal * constraints.get_inhomogeneity(global_row);
+ }
+ }
+ }
- // similar function as the one above for setting matrix diagonals, but now
- // doing that for sparsity patterns when setting them up using
- // add_entries_local_to_global. In case we keep constrained entries, add all
- // the rows and columns related to the constrained dof, otherwise just add
- // the diagonal
- template <typename number, typename SparsityPatternType>
- inline void
- set_sparsity_diagonals(
- const internals::GlobalRowsFromLocal<number> &global_rows,
- const std::vector<size_type> & local_dof_indices,
- const Table<2, bool> & dof_mask,
- const bool keep_constrained_entries,
- SparsityPatternType & sparsity_pattern)
- {
- // if we got constraints, need to add the diagonal element and, if the
- // user requested so, also the rest of the entries in rows and columns
- // that have been left out above
- if (global_rows.n_constraints() > 0)
- {
- for (size_type i = 0; i < global_rows.n_constraints(); i++)
- {
- const size_type local_row = global_rows.constraint_origin(i);
- const size_type global_row = local_dof_indices[local_row];
- if (keep_constrained_entries == true)
- {
- for (size_type j = 0; j < local_dof_indices.size(); ++j)
- {
- if (dof_mask(local_row, j) == true)
- sparsity_pattern.add(global_row, local_dof_indices[j]);
- if (dof_mask(j, local_row) == true)
- sparsity_pattern.add(local_dof_indices[j], global_row);
- }
- }
- else
- // don't keep constrained entries - just add the diagonal.
- sparsity_pattern.add(global_row, global_row);
- }
- }
- }
+ // similar function to the one above for setting matrix diagonals, but now
+ // doing that for sparsity patterns when setting them up using
+ // add_entries_local_to_global. if constrained entries are to be kept, add
+ // all the rows and columns related to the constrained dof; otherwise just
+ // add the diagonal
+ template <typename number, typename SparsityPatternType>
+ inline void
+ set_sparsity_diagonals(
+ const internal::AffineConstraints::GlobalRowsFromLocal<number>
+ & global_rows,
+ const std::vector<size_type> &local_dof_indices,
+ const Table<2, bool> & dof_mask,
+ const bool keep_constrained_entries,
+ SparsityPatternType & sparsity_pattern)
+ {
+ // if we got constraints, we need to add the diagonal element and, if the
+ // user requested it, also the rest of the entries in rows and columns
+ // that have been left out above
+ if (global_rows.n_constraints() > 0)
+ {
+ for (size_type i = 0; i < global_rows.n_constraints(); ++i)
+ {
+ const size_type local_row = global_rows.constraint_origin(i);
+ const size_type global_row = local_dof_indices[local_row];
+ if (keep_constrained_entries == true)
+ {
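+ // add all couplings in the row and column of the constrained dof
+ // that the dof_mask marks as potentially nonzero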
+ for (size_type j = 0; j < local_dof_indices.size(); ++j)
+ {
+ if (dof_mask(local_row, j) == true)
+ sparsity_pattern.add(global_row, local_dof_indices[j]);
+ if (dof_mask(j, local_row) == true)
+ sparsity_pattern.add(local_dof_indices[j], global_row);
+ }
+ }
+ else
+ // don't keep constrained entries - just add the diagonal.
+ sparsity_pattern.add(global_row, global_row);
+ }
+ }
+ }
-} // end of namespace internals
+ } // end of namespace AffineConstraints
+} // end of namespace internal
// Basic idea of setting up a list of
// all global dofs: first find all rows and columns
template <typename number>
void
AffineConstraints<number>::make_sorted_row_list(
- const std::vector<size_type> & local_dof_indices,
- internals::GlobalRowsFromLocal<number> &global_rows) const
+ const std::vector<size_type> & local_dof_indices,
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows) const
{
const size_type n_local_dofs = local_dof_indices.size();
AssertDimension(n_local_dofs, global_rows.size());
template <typename MatrixScalar, typename VectorScalar>
inline typename ProductType<VectorScalar, MatrixScalar>::type
AffineConstraints<number>::resolve_vector_entry(
- const size_type i,
- const internals::GlobalRowsFromLocal<number> &global_rows,
- const Vector<VectorScalar> & local_vector,
- const std::vector<size_type> & local_dof_indices,
- const FullMatrix<MatrixScalar> & local_matrix) const
+ const size_type i,
+ const internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows,
+ const Vector<VectorScalar> & local_vector,
+ const std::vector<size_type> & local_dof_indices,
+ const FullMatrix<MatrixScalar> &local_matrix) const
{
const size_type loc_row = global_rows.local_row(i);
const size_type n_inhomogeneous_rows = global_rows.n_inhomogeneities();
const size_type n_local_dofs = local_dof_indices.size();
- typename internals::AffineConstraintsData<number>::ScratchDataAccessor
- scratch_data;
+ typename internal::AffineConstraints::AffineConstraintsData<
+ number>::ScratchDataAccessor scratch_data;
- internals::GlobalRowsFromLocal<number> &global_rows =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows =
scratch_data->global_rows;
global_rows.reinit(n_local_dofs);
make_sorted_row_list(local_dof_indices, global_rows);
// cast is uncritical here and only used to avoid compiler
// warnings. We never access a non-double array
number *val_ptr = vals.data();
- internals::resolve_matrix_row(global_rows,
- global_rows,
- i,
- 0,
- n_actual_dofs,
- local_matrix,
- col_ptr,
- val_ptr);
+ internal::AffineConstraints::resolve_matrix_row(global_rows,
+ global_rows,
+ i,
+ 0,
+ n_actual_dofs,
+ local_matrix,
+ col_ptr,
+ val_ptr);
const size_type n_values = col_ptr - cols.data();
if (n_values > 0)
global_matrix.add(
row, n_values, cols.data(), vals.data(), false, true);
}
else
- internals::resolve_matrix_row(
+ internal::AffineConstraints::resolve_matrix_row(
global_rows, i, 0, n_actual_dofs, local_matrix, sparse_matrix);
// now to the vectors. besides doing the same job as we did above (i.e.,
}
}
- internals::set_matrix_diagonals(global_rows,
- local_dof_indices,
- local_matrix,
- *this,
- global_matrix,
- global_vector,
- use_inhomogeneities_for_rhs);
+ internal::AffineConstraints::set_matrix_diagonals(
+ global_rows,
+ local_dof_indices,
+ local_matrix,
+ *this,
+ global_matrix,
+ global_vector,
+ use_inhomogeneities_for_rhs);
}
// similar function as above, but now specialized for block matrices. See the
}
Assert(sorted == true, ExcMatrixNotClosed());
- typename internals::AffineConstraintsData<number>::ScratchDataAccessor
- scratch_data;
+ typename internal::AffineConstraints::AffineConstraintsData<
+ number>::ScratchDataAccessor scratch_data;
const size_type n_local_dofs = local_dof_indices.size();
- internals::GlobalRowsFromLocal<number> &global_rows =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows =
scratch_data->global_rows;
global_rows.reinit(n_local_dofs);
const size_type num_blocks = global_matrix.n_block_rows();
std::vector<size_type> &block_starts = scratch_data->block_starts;
block_starts.resize(num_blocks + 1);
- internals::make_block_starts(global_matrix, global_rows, block_starts);
+ internal::AffineConstraints::make_block_starts(global_matrix,
+ global_rows,
+ block_starts);
std::vector<size_type> &cols = scratch_data->columns;
std::vector<number> & vals = scratch_data->values;
{
size_type *col_ptr = cols.data();
number * val_ptr = vals.data();
- internals::resolve_matrix_row(global_rows,
- global_rows,
- i,
- start_block,
- end_block,
- local_matrix,
- col_ptr,
- val_ptr);
+ internal::AffineConstraints::resolve_matrix_row(global_rows,
+ global_rows,
+ i,
+ start_block,
+ end_block,
+ local_matrix,
+ col_ptr,
+ val_ptr);
const size_type n_values = col_ptr - cols.data();
if (n_values > 0)
global_matrix.block(block, block_col)
dynamic_cast<SparseMatrix<number> *>(
&global_matrix.block(block, block_col));
Assert(sparse_matrix != nullptr, ExcInternalError());
- internals::resolve_matrix_row(global_rows,
- i,
- start_block,
- end_block,
- local_matrix,
- sparse_matrix);
+ internal::AffineConstraints::resolve_matrix_row(
+ global_rows,
+ i,
+ start_block,
+ end_block,
+ local_matrix,
+ sparse_matrix);
}
}
}
}
- internals::set_matrix_diagonals(global_rows,
- local_dof_indices,
- local_matrix,
- *this,
- global_matrix,
- global_vector,
- use_inhomogeneities_for_rhs);
+ internal::AffineConstraints::set_matrix_diagonals(
+ global_rows,
+ local_dof_indices,
+ local_matrix,
+ *this,
+ global_matrix,
+ global_vector,
+ use_inhomogeneities_for_rhs);
}
const size_type n_local_row_dofs = row_indices.size();
const size_type n_local_col_dofs = col_indices.size();
- typename internals::AffineConstraintsData<
+ typename internal::AffineConstraints::AffineConstraintsData<
typename MatrixType::value_type>::ScratchDataAccessor scratch_data;
- internals::GlobalRowsFromLocal<number> &global_rows =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows =
scratch_data->global_rows;
global_rows.reinit(n_local_row_dofs);
- internals::GlobalRowsFromLocal<number> &global_cols =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_cols =
scratch_data->global_columns;
global_cols.reinit(n_local_col_dofs);
// calculate all the data that will be written into the matrix row.
size_type *col_ptr = cols.data();
number * val_ptr = vals.data();
- internals::resolve_matrix_row(global_rows,
- global_cols,
- i,
- 0,
- n_actual_col_dofs,
- local_matrix,
- col_ptr,
- val_ptr);
+ internal::AffineConstraints::resolve_matrix_row(global_rows,
+ global_cols,
+ i,
+ 0,
+ n_actual_col_dofs,
+ local_matrix,
+ col_ptr,
+ val_ptr);
const size_type n_values = col_ptr - cols.data();
if (n_values > 0)
global_matrix.add(row, n_values, cols.data(), vals.data(), false, true);
ExcNotQuadratic());
const size_type n_local_dofs = local_dof_indices.size();
- typename internals::AffineConstraintsData<number>::ScratchDataAccessor
- scratch_data;
+ typename internal::AffineConstraints::AffineConstraintsData<
+ number>::ScratchDataAccessor scratch_data;
const bool dof_mask_is_active = (dof_mask.n_rows() == n_local_dofs);
if (dof_mask_is_active == true)
// complicated case: we need to filter out some indices. then the function
// gets similar to the function for distributing matrix entries, see there
// for additional comments.
- internals::GlobalRowsFromLocal<number> &global_rows =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows =
scratch_data->global_rows;
global_rows.reinit(n_local_dofs);
make_sorted_row_list(local_dof_indices, global_rows);
{
std::vector<size_type>::iterator col_ptr = cols.begin();
const size_type row = global_rows.global_row(i);
- internals::resolve_matrix_row(
+ internal::AffineConstraints::resolve_matrix_row(
global_rows, i, 0, n_actual_dofs, dof_mask, col_ptr);
// finally, write all the information that accumulated under the given
if (col_ptr != cols.begin())
sparsity_pattern.add_entries(row, cols.begin(), col_ptr, true);
}
- internals::set_sparsity_diagonals(global_rows,
- local_dof_indices,
- dof_mask,
- keep_constrained_entries,
- sparsity_pattern);
+ internal::AffineConstraints::set_sparsity_diagonals(global_rows,
+ local_dof_indices,
+ dof_mask,
+ keep_constrained_entries,
+ sparsity_pattern);
}
template <typename number>
const size_type n_local_dofs = local_dof_indices.size();
const size_type num_blocks = sparsity_pattern.n_block_rows();
- typename internals::AffineConstraintsData<number>::ScratchDataAccessor
- scratch_data;
+ typename internal::AffineConstraints::AffineConstraintsData<
+ number>::ScratchDataAccessor scratch_data;
const bool dof_mask_is_active = (dof_mask.n_rows() == n_local_dofs);
if (dof_mask_is_active == true)
// additional construct that also takes care of block indices.
std::vector<size_type> &block_starts = scratch_data->block_starts;
block_starts.resize(num_blocks + 1);
- internals::make_block_starts(sparsity_pattern,
- actual_dof_indices,
- block_starts);
+ internal::AffineConstraints::make_block_starts(sparsity_pattern,
+ actual_dof_indices,
+ block_starts);
for (size_type block = 0; block < num_blocks; ++block)
{
// difficult case with dof_mask, similar to the distribute_local_to_global
// function for block matrices
- internals::GlobalRowsFromLocal<number> &global_rows =
+ internal::AffineConstraints::GlobalRowsFromLocal<number> &global_rows =
scratch_data->global_rows;
global_rows.reinit(n_local_dofs);
make_sorted_row_list(local_dof_indices, global_rows);
// additional construct that also takes care of block indices.
std::vector<size_type> &block_starts = scratch_data->block_starts;
block_starts.resize(num_blocks + 1);
- internals::make_block_starts(sparsity_pattern, global_rows, block_starts);
+ internal::AffineConstraints::make_block_starts(sparsity_pattern,
+ global_rows,
+ block_starts);
std::vector<size_type> &cols = scratch_data->columns;
cols.resize(n_actual_dofs);
const size_type begin_block = block_starts[block_col],
end_block = block_starts[block_col + 1];
std::vector<size_type>::iterator col_ptr = cols.begin();
- internals::resolve_matrix_row(
+ internal::AffineConstraints::resolve_matrix_row(
global_rows, i, begin_block, end_block, dof_mask, col_ptr);
sparsity_pattern.block(block, block_col)
}
}
- internals::set_sparsity_diagonals(global_rows,
- local_dof_indices,
- dof_mask,
- keep_constrained_entries,
- sparsity_pattern);
+ internal::AffineConstraints::set_sparsity_diagonals(global_rows,
+ local_dof_indices,
+ dof_mask,
+ keep_constrained_entries,
+ sparsity_pattern);
}
DEAL_II_NAMESPACE_CLOSE