inline void
AlignedVector<T>::shrink_to_fit()
{
-# ifdef DEBUG
- Assert(replicated_across_communicator == false,
- ExcAlignedVectorChangeAfterReplication());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(replicated_across_communicator == false,
+ ExcAlignedVectorChangeAfterReplication());
+ }
const size_type used_size = used_elements_end - elements.get();
const size_type allocated_size = allocated_elements_end - elements.get();
if (allocated_size > used_size)
// **** Consistency check ****
// At this point, each process should have a copy of the data.
// Verify this in some sort of round-about way
-# ifdef DEBUG
- replicated_across_communicator = true;
- const std::vector<char> packed_data = Utilities::pack(*this);
- const int hash =
- std::accumulate(packed_data.begin(), packed_data.end(), int(0));
- Assert(Utilities::MPI::max(hash, communicator) == hash, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ replicated_across_communicator = true;
+ const std::vector<char> packed_data = Utilities::pack(*this);
+ const int hash =
+ std::accumulate(packed_data.begin(), packed_data.end(), int(0));
+ Assert(Utilities::MPI::max(hash, communicator) == hash,
+ ExcInternalError());
+ }
# else
// No MPI -> nothing to replicate
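For reference, a minimal sketch of the constexpr machinery that the converted hunks rely on. This is an illustration assuming deal.II's config header, not a quote of it; only the names LibraryBuildMode, library_build_mode, and running_in_debug_mode() are taken from the usage visible in this patch:

enum class LibraryBuildMode
{
  debug,
  release
};

#ifdef DEBUG
constexpr LibraryBuildMode library_build_mode = LibraryBuildMode::debug;
#else
constexpr LibraryBuildMode library_build_mode = LibraryBuildMode::release;
#endif

constexpr bool
running_in_debug_mode()
{
  // True exactly when the library was configured in debug mode.
  return library_build_mode == LibraryBuildMode::debug;
}

Since the condition is a constant expression, the compiler generates no code for the discarded branch, but both branches must still compile in either mode; this is why variables that are only read inside a debug-only branch are declared [[maybe_unused]] in the hunks below.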
inline ArrayView<ElementType, MemorySpaceType>::ArrayView(
value_type *starting_element,
const std::size_t n_elements)
- :
-#ifdef DEBUG
- starting_element(n_elements > 0 ? starting_element : nullptr)
-#else
- starting_element(starting_element)
-#endif
+ : // In debug mode, make sure that n_elements>0 and, if it is not, set
+ // the pointer to nullptr so as to trigger a segfault if anyone ever
+ // tries to access elements of the array. In release mode, just take the
+ // pointer as given.
+ starting_element((library_build_mode == LibraryBuildMode::release) ||
+ (n_elements > 0) ?
+ starting_element :
+ nullptr)
, n_elements(n_elements)
{}
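A hypothetical usage sketch for the constructor above (the function, vector, and view names are made up) illustrating the debug-mode behavior described in the new comment:

#include <deal.II/base/array_view.h>

#include <vector>

using namespace dealii;

void example()
{
  std::vector<double> data(10, 1.0);

  // Regular case: pointer and length are stored as given.
  ArrayView<double> view(data.data(), data.size());

  // Zero-length view: in debug mode the stored pointer becomes nullptr,
  // so an accidental element access fails loudly instead of silently
  // reading past the end; in release mode the original pointer is kept.
  ArrayView<double> empty_view(data.data(), 0);
}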
ArrayView<ElementType, MemorySpaceType>::reinit(value_type *starting_element,
const std::size_t n_elements)
{
-#ifdef DEBUG
- if (n_elements > 0)
- this->starting_element = starting_element;
+ if constexpr (running_in_debug_mode())
+ {
+ if (n_elements > 0)
+ this->starting_element = starting_element;
+ else
+ this->starting_element = nullptr;
+ }
else
- this->starting_element = nullptr;
-#else
- this->starting_element = starting_element;
-#endif
+ {
+ this->starting_element = starting_element;
+ }
this->n_elements = n_elements;
}
* The following code can be used to query the I/O method.
* @code
* auto dataset = group.create_dataset<double>("name", dimensions);
- * #ifdef DEBUG
- * dataset.set_query_io_mode(true);
- * #endif
+ * if constexpr (running_in_debug_mode())
+ * dataset.set_query_io_mode(true);
*
* if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
* {
v = r.nth_index_in_set + r.end - r.begin;
}
-#ifdef DEBUG
- size_type s = 0;
- for (const auto &range : ranges)
- s += (range.end - range.begin);
- Assert(s == v, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ size_type s = 0;
+ for (const auto &range : ranges)
+ s += (range.end - range.begin);
+ Assert(s == v, ExcInternalError());
+ }
return v;
}
#ifdef DEAL_II_WITH_MPI
if (job_supports_mpi())
{
-# ifdef DEBUG
- {
- const unsigned int rank = this_mpi_process(mpi_communicator);
- unsigned int size = values.size();
- unsigned int size_min = 0;
- unsigned int size_max = 0;
- int ierr2 = 0;
- ierr2 = MPI_Reduce(&size,
- &size_min,
- 1,
- MPI_UNSIGNED,
- MPI_MIN,
- 0,
- mpi_communicator);
- AssertThrowMPI(ierr2);
- ierr2 = MPI_Reduce(&size,
- &size_max,
- 1,
- MPI_UNSIGNED,
- MPI_MAX,
- 0,
- mpi_communicator);
- AssertThrowMPI(ierr2);
- if (rank == 0)
- Assert(size_min == size_max,
- ExcMessage(
- "values has different size across MPI processes."));
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ const unsigned int rank = this_mpi_process(mpi_communicator);
+ unsigned int size = values.size();
+ unsigned int size_min = 0;
+ unsigned int size_max = 0;
+ int ierr2 = 0;
+ ierr2 = MPI_Reduce(&size,
+ &size_min,
+ 1,
+ MPI_UNSIGNED,
+ MPI_MIN,
+ 0,
+ mpi_communicator);
+ AssertThrowMPI(ierr2);
+ ierr2 = MPI_Reduce(&size,
+ &size_max,
+ 1,
+ MPI_UNSIGNED,
+ MPI_MAX,
+ 0,
+ mpi_communicator);
+ AssertThrowMPI(ierr2);
+ if (rank == 0)
+ Assert(
+ size_min == size_max,
+ ExcMessage(
+ "values has different size across MPI processes."));
+ }
+ }
const int ierr =
MPI_Allreduce(values != output ? values.data() : MPI_IN_PLACE,
static_cast<void *>(output.data()),
AssertThrowMPI(ierr);
}
-# ifdef DEBUG
- // note: IBarrier seems to make problem during testing, this
- // additional Barrier seems to help
- ierr = MPI_Barrier(comm);
- AssertThrowMPI(ierr);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // note: IBarrier seems to cause problems during testing; this
+ // additional Barrier seems to help
+ ierr = MPI_Barrier(comm);
+ AssertThrowMPI(ierr);
+ }
}
# endif
}
inline void
ObserverPointer<T, P>::swap(ObserverPointer<T, Q> &other)
{
-#ifdef DEBUG
- ObserverPointer<T, P> aux(pointer, id);
- *this = other;
- other = aux;
-#else
- std::swap(pointer, other.pointer);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ ObserverPointer<T, P> aux(pointer, id);
+ *this = other;
+ other = aux;
+ }
+ else
+ {
+ std::swap(pointer, other.pointer);
+ }
}
// function, in the sense that we simply populate our array of independent
// values with a meaningful number. However, in this case we need to
// double check that we're not registering these variables twice
-# ifdef DEBUG
- const std::vector<unsigned int> index_set(
- internal::extract_field_component_indices<dim>(extractor));
- for (const unsigned int index : index_set)
+ if constexpr (running_in_debug_mode())
{
- Assert(
- this->registered_independent_variable_values[index] == false,
- ExcMessage(
- "Overlapping indices for independent variables. "
- "One or more indices associated with the field that "
- "is being registered as an independent variable have "
- "already been associated with another field. This suggests "
- "that the component offsets used to construct their counterpart "
- "extractors are incompatible with one another. Make sure that "
- "the first component for each extractor properly takes into "
- "account the dimensionality of the preceding fields."));
+ const std::vector<unsigned int> index_set(
+ internal::extract_field_component_indices<dim>(extractor));
+ for (const unsigned int index : index_set)
+ {
+ Assert(
+ this->registered_independent_variable_values[index] == false,
+ ExcMessage(
+ "Overlapping indices for independent variables. "
+ "One or more indices associated with the field that "
+ "is being registered as an independent variable have "
+ "already been associated with another field. This suggests "
+ "that the component offsets used to construct their counterpart "
+ "extractors are incompatible with one another. Make sure that "
+ "the first component for each extractor properly takes into "
+ "account the dimensionality of the preceding fields."));
+ }
}
-# endif
set_independent_variable(value, extractor);
}
this->neighbor_index(i),
this->dof_handler);
-#ifdef DEBUG
- if (q.state() != IteratorState::past_the_end)
- Assert(q->used(), ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (q.state() != IteratorState::past_the_end)
+ Assert(q->used(), ExcInternalError());
+ }
return q;
}
TriaIterator<DoFCellAccessor<dimension_, space_dimension_, level_dof_access>>
q(this->tria, this->level() + 1, this->child_index(i), this->dof_handler);
-#ifdef DEBUG
- if (q.state() != IteratorState::past_the_end)
- Assert(q->used(), ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (q.state() != IteratorState::past_the_end)
+ Assert(q->used(), ExcInternalError());
+ }
return q;
}
, dof_renumbering(dof_renumbering)
, quadrature_renumbering(quadrature_renumbering)
{
-// Check that the renumbering vectors are valid.
-# ifdef DEBUG
- // While for dofs we admit invalid values, this is not the case for
- // quadrature points.
- for (const auto i : dof_renumbering)
- Assert(i < n_inner_dofs || i == numbers::invalid_unsigned_int,
- ExcIndexRange(i, 0, n_inner_dofs));
-
- for (const auto q : quadrature_renumbering)
- AssertIndexRange(q, n_inner_quadrature_points);
-# endif
+ // Check that the renumbering vectors are valid.
+ if constexpr (running_in_debug_mode())
+ {
+ // While for dofs we admit invalid values, this is not the case for
+ // quadrature points.
+ for (const auto i : dof_renumbering)
+ Assert(i < n_inner_dofs || i == numbers::invalid_unsigned_int,
+ ExcIndexRange(i, 0, n_inner_dofs));
+
+ for (const auto q : quadrature_renumbering)
+ AssertIndexRange(q, n_inner_quadrature_points);
+ }
}
if (name_end < name.size())
name.erase(name_end);
- // Ensure that the element we are looking for isn't in the map
- // yet. This only requires us to read the map, so it can happen
- // in a shared locked state
-#ifdef DEBUG
- {
- std::shared_lock<std::shared_mutex> lock(
- internal::FEToolsAddFENameHelper::fe_name_map_lock);
-
- Assert(
- internal::FEToolsAddFENameHelper::get_fe_name_map()[dim][spacedim].find(
- name) ==
- internal::FEToolsAddFENameHelper::get_fe_name_map()[dim][spacedim]
- .end(),
- ExcMessage(
- "Cannot change existing element in finite element name list"));
- }
-#endif
+ // Ensure that the element we are looking for isn't in the map
+ // yet. This only requires us to read the map, so it can happen
+ // in a shared locked state
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ std::shared_lock<std::shared_mutex> lock(
+ internal::FEToolsAddFENameHelper::fe_name_map_lock);
+
+ Assert(
+ internal::FEToolsAddFENameHelper::get_fe_name_map()[dim][spacedim]
+ .find(name) ==
+ internal::FEToolsAddFENameHelper::get_fe_name_map()[dim][spacedim]
+ .end(),
+ ExcMessage(
+ "Cannot change existing element in finite element name list"));
+ }
+ }
// Insert the normalized name into the map. This changes the map, so it
const IndexSet u2_elements = u2.locally_owned_elements();
-#ifdef DEBUG
- const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
- const IndexSet &dof2_local_dofs = dof2.locally_owned_dofs();
- const IndexSet u1_elements = u1.locally_owned_elements();
- Assert(u1_elements == dof1_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
- Assert(u2_elements == dof2_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
+ const IndexSet &dof2_local_dofs = dof2.locally_owned_dofs();
+ const IndexSet u1_elements = u1.locally_owned_elements();
+ Assert(u1_elements == dof1_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ Assert(u2_elements == dof2_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ }
// allocate vectors at maximal
// size. will be reinited in inner
ExcDimensionMismatch(cell1->get_fe().n_components(),
cell2->get_fe().n_components()));
-#ifdef DEBUG
- // For continuous elements on grids with hanging nodes we need
- // hanging node constraints. Consequently, when the elements are
- // continuous no hanging node constraints are allowed.
- const bool hanging_nodes_not_allowed =
- ((cell2->get_fe().n_dofs_per_vertex() != 0) &&
- (constraints.n_constraints() == 0));
-
- if (hanging_nodes_not_allowed)
- for (const unsigned int face : cell1->face_indices())
- Assert(cell1->at_boundary(face) ||
- cell1->neighbor(face)->level() == cell1->level(),
- ExcHangingNodesNotAllowed());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // For continuous elements on grids with hanging nodes we need
+ // hanging node constraints. Consequently, when the elements are
+ // continuous no hanging node constraints are allowed.
+ const bool hanging_nodes_not_allowed =
+ ((cell2->get_fe().n_dofs_per_vertex() != 0) &&
+ (constraints.n_constraints() == 0));
+
+ if (hanging_nodes_not_allowed)
+ for (const unsigned int face : cell1->face_indices())
+ Assert(cell1->at_boundary(face) ||
+ cell1->neighbor(face)->level() == cell1->level(),
+ ExcHangingNodesNotAllowed());
+ }
const unsigned int dofs_per_cell1 = cell1->get_fe().n_dofs_per_cell();
const unsigned int dofs_per_cell2 = cell2->get_fe().n_dofs_per_cell();
Assert(u1_interpolated.size() == dof1.n_dofs(),
ExcDimensionMismatch(u1_interpolated.size(), dof1.n_dofs()));
-#ifdef DEBUG
- const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
- const IndexSet u1_elements = u1.locally_owned_elements();
- const IndexSet u1_interpolated_elements =
- u1_interpolated.locally_owned_elements();
- Assert(u1_elements == dof1_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
- Assert(u1_interpolated_elements == dof1_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
+ const IndexSet u1_elements = u1.locally_owned_elements();
+ const IndexSet u1_interpolated_elements =
+ u1_interpolated.locally_owned_elements();
+ Assert(u1_elements == dof1_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ Assert(u1_interpolated_elements == dof1_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ }
Vector<typename OutVector::value_type> u1_local(
dof1.get_fe_collection().max_dofs_per_cell());
if ((cell->subdomain_id() == subdomain_id) ||
(subdomain_id == numbers::invalid_subdomain_id))
{
-#ifdef DEBUG
- // For continuous elements on grids with hanging nodes we need
- // hanging node constraints. Consequently, when the elements are
- // continuous no hanging node constraints are allowed.
- const bool hanging_nodes_not_allowed =
- (cell->get_fe().n_dofs_per_vertex() != 0) ||
- (fe2.n_dofs_per_vertex() != 0);
-
- if (hanging_nodes_not_allowed)
- for (const unsigned int face : cell->face_indices())
- Assert(cell->at_boundary(face) ||
- cell->neighbor(face)->level() == cell->level(),
- ExcHangingNodesNotAllowed());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // For continuous elements on grids with hanging nodes we need
+ // hanging node constraints. Consequently, when the elements are
+ // continuous no hanging node constraints are allowed.
+ const bool hanging_nodes_not_allowed =
+ (cell->get_fe().n_dofs_per_vertex() != 0) ||
+ (fe2.n_dofs_per_vertex() != 0);
+
+ if (hanging_nodes_not_allowed)
+ for (const unsigned int face : cell->face_indices())
+ Assert(cell->at_boundary(face) ||
+ cell->neighbor(face)->level() == cell->level(),
+ ExcHangingNodesNotAllowed());
+ }
const unsigned int dofs_per_cell1 = cell->get_fe().n_dofs_per_cell();
Assert(u1_difference.size() == dof1.n_dofs(),
ExcDimensionMismatch(u1_difference.size(), dof1.n_dofs()));
-#ifdef DEBUG
- const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
- const IndexSet u1_elements = u1.locally_owned_elements();
- const IndexSet u1_difference_elements =
- u1_difference.locally_owned_elements();
- Assert(u1_elements == dof1_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
- Assert(u1_difference_elements == dof1_local_dofs,
- ExcMessage("The provided vector and DoF handler should have the same"
- " index sets."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const IndexSet &dof1_local_dofs = dof1.locally_owned_dofs();
+ const IndexSet u1_elements = u1.locally_owned_elements();
+ const IndexSet u1_difference_elements =
+ u1_difference.locally_owned_elements();
+ Assert(u1_elements == dof1_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ Assert(u1_difference_elements == dof1_local_dofs,
+ ExcMessage(
+ "The provided vector and DoF handler should have the same"
+ " index sets."));
+ }
const unsigned int dofs_per_cell = dof1.get_fe().n_dofs_per_cell();
if ((cell->subdomain_id() == subdomain_id) ||
(subdomain_id == numbers::invalid_subdomain_id))
{
-#ifdef DEBUG
- // For continuous elements on grids with hanging nodes we need
- // hanging node constraints. Consequently, when the elements are
- // continuous no hanging node constraints are allowed.
- const bool hanging_nodes_not_allowed =
- (dof1.get_fe().n_dofs_per_vertex() != 0) ||
- (fe2.n_dofs_per_vertex() != 0);
-
- if (hanging_nodes_not_allowed)
- for (const unsigned int face : cell->face_indices())
- Assert(cell->at_boundary(face) ||
- cell->neighbor(face)->level() == cell->level(),
- ExcHangingNodesNotAllowed());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // For continuous elements on grids with hanging nodes we need
+ // hanging node constraints. Consequently, when the elements are
+ // continuous no hanging node constraints are allowed.
+ const bool hanging_nodes_not_allowed =
+ (dof1.get_fe().n_dofs_per_vertex() != 0) ||
+ (fe2.n_dofs_per_vertex() != 0);
+
+ if (hanging_nodes_not_allowed)
+ for (const unsigned int face : cell->face_indices())
+ Assert(cell->at_boundary(face) ||
+ cell->neighbor(face)->level() == cell->level(),
+ ExcHangingNodesNotAllowed());
+ }
cell->get_dof_values(u1, u1_local);
difference_matrix.vmult(u1_diff_local, u1_local);
// loop over cells and create CRS
for (const auto &cell : cells)
{
-#ifdef DEBUG
- auto vertices_unique = cell.vertices;
- std::sort(vertices_unique.begin(), vertices_unique.end());
- vertices_unique.erase(std::unique(vertices_unique.begin(),
- vertices_unique.end()),
- vertices_unique.end());
-
- Assert(vertices_unique.size() == cell.vertices.size(),
- ExcMessage(
- "The definition of a cell refers to the same vertex several "
- "times. This is not possible. A common reason is that "
- "CellData::vertices has a size that does not match the "
- "size expected from the reference cell. Please resize "
- "CellData::vertices or use the appropriate constructor of "
- "CellData."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ auto vertices_unique = cell.vertices;
+ std::sort(vertices_unique.begin(), vertices_unique.end());
+ vertices_unique.erase(std::unique(vertices_unique.begin(),
+ vertices_unique.end()),
+ vertices_unique.end());
+
+ Assert(
+ vertices_unique.size() == cell.vertices.size(),
+ ExcMessage(
+ "The definition of a cell refers to the same vertex several "
+ "times. This is not possible. A common reason is that "
+ "CellData::vertices has a size that does not match the "
+ "size expected from the reference cell. Please resize "
+ "CellData::vertices or use the appropriate constructor of "
+ "CellData."));
+ }
const ReferenceCell reference_cell =
ReferenceCell::n_vertices_to_type(dim, cell.vertices.size());
// outside the old ball.
}
}
-# ifdef DEBUG
- bool all_vertices_within_ball = true;
+ if constexpr (running_in_debug_mode())
+ {
+ bool all_vertices_within_ball = true;
- // Set all_vertices_within_ball false if any of the vertices of the object
- // are geometrically outside the ball
- for (const unsigned int v : this->vertex_indices())
- if (center.distance(this->vertex(v)) >
- radius + 100. * std::numeric_limits<double>::epsilon())
- {
- all_vertices_within_ball = false;
- break;
- }
- // If all the vertices are not within the ball throw error
- Assert(all_vertices_within_ball, ExcInternalError());
-# endif
+ // Set all_vertices_within_ball false if any of the vertices of the object
+ // are geometrically outside the ball
+ for (const unsigned int v : this->vertex_indices())
+ if (center.distance(this->vertex(v)) >
+ radius + 100. * std::numeric_limits<double>::epsilon())
+ {
+ all_vertices_within_ball = false;
+ break;
+ }
+ // If not all of the vertices are within the ball, throw an error
+ Assert(all_vertices_within_ball, ExcInternalError());
+ }
return std::make_pair(center, radius);
}
const TriaRawIterator<OtherAccessor> &i)
: TriaRawIterator<Accessor>(i.accessor)
{
-#ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
}
template <typename Accessor>
TriaIterator<Accessor>::TriaIterator(const OtherAccessor &a)
: TriaRawIterator<Accessor>(a)
{
-#ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
}
template <typename Accessor>
const TriaRawIterator<OtherAccessor> &i)
: TriaIterator<Accessor>(i)
{
-#ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
}
inline TriaIterator<Accessor>::TriaIterator(const TriaRawIterator<Accessor> &i)
: TriaRawIterator<Accessor>(i.accessor)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
}
const typename Accessor::AccessorData *local_data)
: TriaRawIterator<Accessor>(parent, level, index, local_data)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
}
const typename Accessor::AccessorData *local_data)
: TriaRawIterator<Accessor>(tria_accessor, local_data)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
}
TriaIterator<Accessor>::operator=(const TriaRawIterator<Accessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
return *this;
}
TriaIterator<Accessor>::operator=(const TriaRawIterator<OtherAccessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || used)"
- // used() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || used)"
+ // used() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used(), ExcAssignmentOfUnusedObject());
+ }
return *this;
}
const TriaRawIterator<Accessor> &i)
: TriaIterator<Accessor>(i)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
}
const TriaIterator<Accessor> &i)
: TriaIterator<Accessor>(i)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
}
const typename Accessor::AccessorData *local_data)
: TriaIterator<Accessor>(parent, level, index, local_data)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
}
const typename Accessor::AccessorData *local_data)
: TriaIterator<Accessor>(tria_accessor, local_data)
{
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
}
TriaActiveIterator<Accessor>::operator=(const TriaRawIterator<Accessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used() && this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used() && this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
return *this;
}
TriaActiveIterator<Accessor>::operator=(const TriaRawIterator<OtherAccessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.used() && this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.used() && this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
return *this;
}
TriaActiveIterator<Accessor>::operator=(const TriaIterator<OtherAccessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
return *this;
}
TriaActiveIterator<Accessor>::operator=(const TriaIterator<Accessor> &i)
{
this->accessor.copy_from(i.accessor);
-# ifdef DEBUG
- // do this like this, because:
- // if we write
- // "Assert (IteratorState::past_the_end || !has_children())"
- // has_children() is called anyway, even if
- // state==IteratorState::past_the_end, and will then
- // throw the exception!
- if (this->state() != IteratorState::past_the_end)
- Assert(this->accessor.has_children() == false,
- ExcAssignmentOfInactiveObject());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // do this like this, because:
+ // if we write
+ // "Assert (IteratorState::past_the_end || !has_children())"
+ // has_children() is called anyway, even if
+ // state==IteratorState::past_the_end, and will then
+ // throw the exception!
+ if (this->state() != IteratorState::past_the_end)
+ Assert(this->accessor.has_children() == false,
+ ExcAssignmentOfInactiveObject());
+ }
return *this;
}
// of Q4 elements in 3d, and so should cover the vast majority of
// cases. If we have a constraint with more dependencies, then
// that's just going to require a heap allocation.
-#ifdef DEBUG
- {
- boost::container::small_vector<size_type, 25> column_indices;
- column_indices.reserve(dependencies.size());
- for (const auto &d : dependencies)
- column_indices.emplace_back(d.first);
- std::sort(column_indices.begin(), column_indices.end());
- Assert(std::adjacent_find(column_indices.begin(), column_indices.end()) ==
- column_indices.end(),
- ExcMessage(
- "You are trying to insert a constraint that lists the same "
- "degree of freedom more than once on the right hand side. This is "
- "not allowed."));
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ boost::container::small_vector<size_type, 25> column_indices;
+ column_indices.reserve(dependencies.size());
+ for (const auto &d : dependencies)
+ column_indices.emplace_back(d.first);
+ std::sort(column_indices.begin(), column_indices.end());
+ Assert(
+ std::adjacent_find(column_indices.begin(), column_indices.end()) ==
+ column_indices.end(),
+ ExcMessage(
+ "You are trying to insert a constraint that lists the same "
+ "degree of freedom more than once on the right hand side. This is "
+ "not allowed."));
+ }
+ }
// The following can happen when we compute with distributed meshes and dof
-#ifdef DEBUG
// In debug mode we are computing an estimate for the maximum number
// of constraints so that we can bail out if there is a cycle in the
// constraints (which is easier than searching for cycles in the graph).
// Let us figure out the largest dof index. This is an upper bound for the
// number of constraints because it is an approximation for the number of dofs
// in our system.
- size_type largest_idx = 0;
- for (const ConstraintLine &line : lines)
- for (const std::pair<size_type, number> &entry : line.entries)
- largest_idx = std::max(largest_idx, entry.first);
-#endif
+ [[maybe_unused]] size_type largest_idx = 0;
+ if constexpr (running_in_debug_mode())
+ {
+ for (const ConstraintLine &line : lines)
+ for (const std::pair<size_type, number> &entry : line.entries)
+ largest_idx = std::max(largest_idx, entry.first);
+ }
- // replace references to dofs that are themselves constrained. note that
- // because we may replace references to other dofs that may themselves be
- // constrained to third ones, we have to iterate over all this until we
- // replace no chains of constraints any more
- //
- // the iteration replaces references to constrained degrees of freedom by
- // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
- // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appear
- // twice. we will throw this duplicate out in the following step, where
- // we sort the list so that throwing out duplicates becomes much more
- // efficient. also, we have to do it only once, rather than in each
- // iteration
-#ifdef DEBUG
- size_type iteration = 0;
-#endif
- bool chained_constraint_replaced = false;
- std::vector<bool> line_finalized(lines.size(), false);
+ // replace references to dofs that are themselves constrained. note that
+ // because we may replace references to other dofs that may themselves be
+ // constrained to third ones, we have to iterate over all this until we
+ // replace no chains of constraints any more
+ //
+ // the iteration replaces references to constrained degrees of freedom by
+ // second-order references. for example if x3=x0/2+x2/2 and x2=x0/2+x1/2,
+ // then the new list will be x3=x0/2+x0/4+x1/4. note that x0 appears
+ // twice. we will throw this duplicate out in the following step, where
+ // we sort the list so that throwing out duplicates becomes much more
+ // efficient. also, we have to do it only once, rather than in each
+ // iteration
+ [[maybe_unused]] size_type iteration = 0;
+ bool chained_constraint_replaced = false;
+ std::vector<bool> line_finalized(lines.size(), false);
do
{
chained_constraint_replaced = false;
{
ConstraintLine &line = lines[line_index];
-#ifdef DEBUG
// we need to keep track of how many replacements we do in this
// line, because we can end up in a cycle A->B->C->A without the
// number of entries growing.
- size_type n_replacements = 0;
-#endif
+ [[maybe_unused]] size_type n_replacements = 0;
// loop over all entries of this line (including ones that we
// have appended in this go around) and see whether they are
constrained_line.entries[i].first,
constrained_line.entries[i].second * weight);
-#ifdef DEBUG
- // keep track of how many entries we replace in this
- // line. If we do more than there are constraints or
- // dofs in our system, we must have a cycle.
- ++n_replacements;
- Assert(n_replacements / 2 < largest_idx,
- ExcMessage("Cycle in constraints detected!"));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // keep track of how many entries we replace in this
+ // line. If we do more than there are constraints or
+ // dofs in our system, we must have a cycle.
+ ++n_replacements;
+ Assert(n_replacements / 2 < largest_idx,
+ ExcMessage(
+ "Cycle in constraints detected!"));
+ }
}
else
// the DoF that we encountered is not constrained by a
}
}
-#ifdef DEBUG
- // increase iteration count. note that we should not iterate more
- // times than there are constraints, since this puts a natural upper
- // bound on the length of constraint chains
- ++iteration;
- Assert(iteration <= lines.size() + 1, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // increase iteration count. note that we should not iterate more
+ // times than there are constraints, since this puts a natural upper
+ // bound on the length of constraint chains
+ ++iteration;
+ Assert(iteration <= lines.size() + 1, ExcInternalError());
+ }
}
while (chained_constraint_replaced == true);
entry.first += offset;
}
-#ifdef DEBUG
- // make sure that lines, lines_cache and local_lines
- // are still linked correctly
- for (size_type index = 0; index < lines_cache.size(); ++index)
- Assert(lines_cache[index] == numbers::invalid_size_type ||
- calculate_line_index(lines[lines_cache[index]].index) == index,
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // make sure that lines, lines_cache and local_lines
+ // are still linked correctly
+ for (size_type index = 0; index < lines_cache.size(); ++index)
+ Assert(lines_cache[index] == numbers::invalid_size_type ||
+ calculate_line_index(lines[lines_cache[index]].index) == index,
+ ExcInternalError());
+ }
}
for (const ConstraintLine &line : lines)
if (mask.is_element(line.index))
{
-#ifdef DEBUG
- for (const std::pair<size_type, number> &entry : line.entries)
+ if constexpr (running_in_debug_mode())
{
- Assert(
- mask.is_element(entry.first),
- ExcMessage(
- "In creating a view of an AffineConstraints "
- "object, the constraint on degree of freedom " +
- std::to_string(line.index) + " (which corresponds to the " +
- std::to_string(mask.index_within_set(line.index)) +
- "th degree of freedom selected in the mask) "
- "is constrained against degree of freedom " +
- std::to_string(entry.first) +
- ", but this degree of freedom is not listed in the mask and "
- "consequently cannot be transcribed into the index space "
- "of the output object."));
+ for (const std::pair<size_type, number> &entry : line.entries)
+ {
+ Assert(
+ mask.is_element(entry.first),
+ ExcMessage(
+ "In creating a view of an AffineConstraints "
+ "object, the constraint on degree of freedom " +
+ std::to_string(line.index) + " (which corresponds to the " +
+ std::to_string(mask.index_within_set(line.index)) +
+ "th degree of freedom selected in the mask) "
+ "is constrained against degree of freedom " +
+ std::to_string(entry.first) +
+ ", but this degree of freedom is not listed in the mask and "
+ "consequently cannot be transcribed into the index space "
+ "of the output object."));
+ }
}
-#endif
std::vector<std::pair<size_type, number>> translated_entries =
line.entries;
}
- // Check that the set of indices we will import is a superset of
- // the locally-owned ones. This *should* be the case if, as one
- // would expect, the AffineConstraint object was initialized
- // with a locally-relevant index set that is indeed a superset
- // of the locally-owned indices. But you never know what people
- // pass as arguments...
-#ifdef DEBUG
- if (needed_elements_for_distribute != IndexSet())
+ // Check that the set of indices we will import is a superset of
+ // the locally-owned ones. This *should* be the case if, as one
+ // would expect, the AffineConstraint object was initialized
+ // with a locally-relevant index set that is indeed a superset
+ // of the locally-owned indices. But you never know what people
+ // pass as arguments...
+ if constexpr (running_in_debug_mode())
{
- Assert(vec_owned_elements.size() ==
- needed_elements_for_distribute.size(),
- ExcMessage("You have previously initialized this "
- "AffineConstraints object with an index set "
- "that stated that vectors have size " +
- std::to_string(locally_owned_dofs.size()) +
- " entries, but you are now calling "
- "AffineConstraints::distribute() with a vector "
- "of size " +
- std::to_string(vec_owned_elements.size()) +
- "."));
-
- for (const auto i : vec_owned_elements)
- Assert(needed_elements_for_distribute.is_element(i),
- ExcInternalError());
+ if (needed_elements_for_distribute != IndexSet())
+ {
+ Assert(vec_owned_elements.size() ==
+ needed_elements_for_distribute.size(),
+ ExcMessage(
+ "You have previously initialized this "
+ "AffineConstraints object with an index set "
+ "that stated that vectors have size " +
+ std::to_string(locally_owned_dofs.size()) +
+ " entries, but you are now calling "
+ "AffineConstraints::distribute() with a vector "
+ "of size " +
+ std::to_string(vec_owned_elements.size()) + "."));
+
+ for (const auto i : vec_owned_elements)
+ Assert(needed_elements_for_distribute.is_element(i),
+ ExcInternalError());
+ }
}
-#endif
VectorType ghosted_vector;
return_op.block = [&block_matrix](unsigned int i,
unsigned int j) -> BlockType {
-#ifdef DEBUG
- const unsigned int m = block_matrix.n_block_rows();
- const unsigned int n = block_matrix.n_block_cols();
- AssertIndexRange(i, m);
- AssertIndexRange(j, n);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const unsigned int m = block_matrix.n_block_rows();
+ const unsigned int n = block_matrix.n_block_cols();
+ AssertIndexRange(i, m);
+ AssertIndexRange(j, n);
+ }
return BlockType(block_matrix.block(i, j));
};
return_op.block = [&block_matrix](unsigned int i,
unsigned int j) -> BlockType {
-#ifdef DEBUG
- const unsigned int m = block_matrix.n_block_rows();
- const unsigned int n = block_matrix.n_block_cols();
- Assert(m == n, ExcDimensionMismatch(m, n));
- AssertIndexRange(i, m);
- AssertIndexRange(j, n);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const unsigned int m = block_matrix.n_block_rows();
+ const unsigned int n = block_matrix.n_block_cols();
+ Assert(m == n, ExcDimensionMismatch(m, n));
+ AssertIndexRange(i, m);
+ AssertIndexRange(j, n);
+ }
if (i == j)
return BlockType(block_matrix.block(i, j));
else
temporary_data.column_values[col_index.first][local_index] = value;
}
-# ifdef DEBUG
- // If in debug mode, do a check whether
- // the right length has been obtained.
- size_type length = 0;
- for (unsigned int i = 0; i < this->n_block_cols(); ++i)
- length += temporary_data.counter_within_block[i];
- Assert(length <= n_cols, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // If in debug mode, check whether
+ // the right length has been obtained.
+ size_type length = 0;
+ for (unsigned int i = 0; i < this->n_block_cols(); ++i)
+ length += temporary_data.counter_within_block[i];
+ Assert(length <= n_cols, ExcInternalError());
+ }
// Now we found out about where the
// individual columns should start and
// efficiently.
if (col_indices_are_sorted == true)
{
-# ifdef DEBUG
- // check whether indices really are
- // sorted.
- size_type before = col_indices[0];
- for (size_type i = 1; i < n_cols; ++i)
- if (col_indices[i] <= before)
- {
- Assert(false,
- ExcMessage("Flag col_indices_are_sorted is set, but "
- "indices appear to not be sorted."));
- }
- else
- before = col_indices[i];
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check whether indices really are
+ // sorted.
+ size_type before = col_indices[0];
+ for (size_type i = 1; i < n_cols; ++i)
+ if (col_indices[i] <= before)
+ {
+ Assert(false,
+ ExcMessage("Flag col_indices_are_sorted is set, but "
+ "indices appear to not be sorted."));
+ }
+ else
+ before = col_indices[i];
+ }
const std::pair<unsigned int, size_type> row_index =
this->row_block_indices.global_to_local(row);
temporary_data.column_values[col_index.first][local_index] = value;
}
-# ifdef DEBUG
- // If in debug mode, do a check whether
- // the right length has been obtained.
- size_type length = 0;
- for (unsigned int i = 0; i < this->n_block_cols(); ++i)
- length += temporary_data.counter_within_block[i];
- Assert(length <= n_cols, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // If in debug mode, check whether
+ // the right length has been obtained.
+ size_type length = 0;
+ for (unsigned int i = 0; i < this->n_block_cols(); ++i)
+ length += temporary_data.counter_within_block[i];
+ Assert(length <= n_cols, ExcInternalError());
+ }
// Now we found out about where the
// individual columns should start and
block_column_indices[0].push_back(local_index);
// Check that calculation:
-#ifdef DEBUG
- {
- auto check_block_and_col = column_indices.global_to_local(*it);
- Assert(current_block == check_block_and_col.first,
- ExcInternalError());
- Assert(local_index == check_block_and_col.second,
- ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ auto check_block_and_col = column_indices.global_to_local(*it);
+ Assert(current_block == check_block_and_col.first,
+ ExcInternalError());
+ Assert(local_index == check_block_and_col.second,
+ ExcInternalError());
+ }
+ }
}
// add whatever is left over:
sub_objects[row_index.first][current_block]->add_entries(
{
#ifdef DEAL_II_WITH_MPI
-# ifdef DEBUG
- Assert(Utilities::MPI::job_supports_mpi() ||
- (update_ghost_values_requests.empty() &&
- compress_requests.empty()),
- ExcInternalError());
-
- // make sure that there are not outstanding requests from updating
- // ghost values or compress
- if (update_ghost_values_requests.size() > 0)
- {
- int flag = 1;
- const int ierr = MPI_Testall(update_ghost_values_requests.size(),
- update_ghost_values_requests.data(),
- &flag,
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- Assert(flag == 1,
- ExcMessage(
- "MPI found unfinished update_ghost_values() requests "
- "when calling swap, which is not allowed."));
- }
- if (compress_requests.size() > 0)
+ if constexpr (running_in_debug_mode())
{
- int flag = 1;
- const int ierr = MPI_Testall(compress_requests.size(),
- compress_requests.data(),
- &flag,
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- Assert(flag == 1,
- ExcMessage("MPI found unfinished compress() requests "
- "when calling swap, which is not allowed."));
+ Assert(Utilities::MPI::job_supports_mpi() ||
+ (update_ghost_values_requests.empty() &&
+ compress_requests.empty()),
+ ExcInternalError());
+
+ // make sure that there are not outstanding requests from updating
+ // ghost values or compress
+ if (update_ghost_values_requests.size() > 0)
+ {
+ int flag = 1;
+ const int ierr = MPI_Testall(update_ghost_values_requests.size(),
+ update_ghost_values_requests.data(),
+ &flag,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ Assert(flag == 1,
+ ExcMessage(
+ "MPI found unfinished update_ghost_values() requests "
+ "when calling swap, which is not allowed."));
+ }
+ if (compress_requests.size() > 0)
+ {
+ int flag = 1;
+ const int ierr = MPI_Testall(compress_requests.size(),
+ compress_requests.data(),
+ &flag,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ Assert(flag == 1,
+ ExcMessage("MPI found unfinished compress() requests "
+ "when calling swap, which is not allowed."));
+ }
}
-# endif
std::swap(compress_requests, v.compress_requests);
std::swap(update_ghost_values_requests, v.update_ghost_values_requests);
Vector<Number,
MemorySpaceType>::assert_no_residual_content_in_ghost_region() const
{
-#ifdef DEBUG
- // This should only be called for non-ghosted vectors
- Assert(!vector_is_ghosted, ExcInternalError());
+ if constexpr (running_in_debug_mode())
+ {
+ // This should only be called for non-ghosted vectors
+ Assert(!vector_is_ghosted, ExcInternalError());
- // Run a reduction over the ghost range only to find out whether some
- // entries are non-zero
- real_type sum = real_type();
- dealii::internal::VectorOperations::
- functions<Number, Number, MemorySpaceType>::norm_1(
- thread_loop_partitioner,
- partitioner->n_ghost_indices(),
- sum,
- data,
- partitioner->locally_owned_size());
-
- Assert(sum == real_type(),
- ExcMessage("You called a vector space operation like add(), "
- "scale(), operator* for a non-ghosted vector, which "
- "will not update the content in the memory locations "
- "reserved for ghost values. However, a non-zero "
- "content was detected for some of those entries, which "
- "can lead to an invalid state of the vector. Please "
- "call Vector::compress(VectorOperation::add) or "
- "Vector::zero_out_ghost_values() before calling a "
- "vector space operation to avoid this problem."));
-#endif
+ // Run a reduction over the ghost range only to find out whether some
+ // entries are non-zero
+ real_type sum = real_type();
+ dealii::internal::VectorOperations::
+ functions<Number, Number, MemorySpaceType>::norm_1(
+ thread_loop_partitioner,
+ partitioner->n_ghost_indices(),
+ sum,
+ data,
+ partitioner->locally_owned_size());
+
+ Assert(sum == real_type(),
+ ExcMessage(
+ "You called a vector space operation like add(), "
+ "scale(), operator* for a non-ghosted vector, which "
+ "will not update the content in the memory locations "
+ "reserved for ghost values. However, a non-zero "
+ "content was detected for some of those entries, which "
+ "can lead to an invalid state of the vector. Please "
+ "call Vector::compress(VectorOperation::add) or "
+ "Vector::zero_out_ghost_values() before calling a "
+ "vector space operation to avoid this problem."));
+ }
}
// leave it at this. While it may
// not be the most efficient way,
// it is at least thread safe.
- // #ifdef DEBUG
+ // if constexpr (running_in_debug_mode()){
Assert(bi.first == row, ExcBlockIndexMismatch(bi.first, row));
for (size_type j = 0; j < n_cols; ++j)
matrix.add(bi.second, bj.second, values[j]);
}
- // #endif
+ // }
}
BlockVector::has_ghost_elements() const
{
bool ghosted = block(0).has_ghost_elements();
-# ifdef DEBUG
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ Assert(block(i).has_ghost_elements() == ghosted,
+ ExcInternalError());
+ }
return ghosted;
}
inline void
close_matrix(Mat &matrix)
{
-# ifdef DEBUG
- set_matrix_option(matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
-# else
- set_matrix_option(matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ set_matrix_option(matrix, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_TRUE);
+ }
+ else
+ {
+ set_matrix_option(matrix, MAT_NEW_NONZERO_LOCATIONS, PETSC_FALSE);
+ }
}
}
// Apply inverse diagonal
this->inverse_vmult(block, x_cell, b_cell);
-#ifdef DEBUG
- for (unsigned int i = 0; i < x_cell.size(); ++i)
+ if constexpr (running_in_debug_mode())
{
- AssertIsFinite(x_cell(i));
+ for (unsigned int i = 0; i < x_cell.size(); ++i)
+ {
+ AssertIsFinite(x_cell(i));
+ }
}
-#endif
// Store in result vector
row = additional_data->block_list.begin(block);
for (size_type row_cell = 0; row_cell < bs; ++row_cell, ++row)
{
// check whether the given indices are
// really sorted
-#ifdef DEBUG
- for (size_type i = 1; i < n_cols; ++i)
- Assert(col_indices[i] > col_indices[i - 1],
- ExcMessage(
- "List of indices is unsorted or contains duplicates."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (size_type i = 1; i < n_cols; ++i)
+ Assert(col_indices[i] > col_indices[i - 1],
+ ExcMessage(
+ "List of indices is unsorted or contains duplicates."));
+ }
const size_type *this_cols = &cols->colnums[cols->rowstart[row]];
const size_type row_length_1 = cols->row_length(row) - 1;
const number value = number(values[j]);
AssertIsFinite(value);
-#ifdef DEBUG
- if (elide_zero_values == true && value == number())
- continue;
-#else
- if (value == number())
- continue;
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (elide_zero_values == true && value == number())
+ continue;
+ }
+ else
+ {
+ if (value == number())
+ continue;
+ }
// check whether the next index to add is
// the next present index in the sparsity
void
AssertNoZerosOnDiagonal(const SparseMatrix<number> &matrix)
{
-#ifdef DEBUG
- for (typename SparseMatrix<number>::size_type row = 0; row < matrix.m();
- ++row)
- Assert(matrix.diag_element(row) != number(),
- ExcMessage(
- "There is a zero on the diagonal of this matrix "
- "in row " +
- std::to_string(row) +
- ". The preconditioner you selected cannot work if that "
- "is the case because one of its steps requires "
- "division by the diagonal elements of the matrix."
- "\n\n"
- "You should check whether you have correctly "
- "assembled the matrix that you use for this "
- "preconditioner. If it is correct that there are "
- "zeros on the diagonal, then you will have to chose "
- "a different preconditioner."));
-#else
- (void)matrix;
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (typename SparseMatrix<number>::size_type row = 0;
+ row < matrix.m();
+ ++row)
+ Assert(matrix.diag_element(row) != number(),
+ ExcMessage(
+ "There is a zero on the diagonal of this matrix "
+ "in row " +
+ std::to_string(row) +
+ ". The preconditioner you selected cannot work if that "
+ "is the case because one of its steps requires "
+ "division by the diagonal elements of the matrix."
+ "\n\n"
+ "You should check whether you have correctly "
+ "assembled the matrix that you use for this "
+ "preconditioner. If it is correct that there are "
+                      "zeros on the diagonal, then you will have to choose "
+ "a different preconditioner."));
+ }
+ else
+ {
+ (void)matrix;
+ }
}
} // namespace SparseMatrixImplementation
} // namespace internal
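Unlike an #ifdef, both branches of an if constexpr must remain well-formed in every build, which is why the release branch above keeps the (void)matrix; cast to mark the otherwise unused parameter as used. A stripped-down illustration of that pattern, reusing the running_in_debug_mode() helper sketched earlier and purely hypothetical names:

#include <cassert>
#include <vector>

template <typename Number>
void assert_no_zeros(const std::vector<Number> &diagonal)
{
  if constexpr (running_in_debug_mode())
    {
      for (const Number &d : diagonal)
        assert(d != Number());
    }
  else
    {
      // Nothing to check in release builds, but keep the parameter
      // formally used, mirroring the (void)matrix; cast above.
      (void)diagonal;
    }
}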
BlockVector::has_ghost_elements() const
{
bool ghosted = block(0).has_ghost_elements();
-# ifdef DEBUG
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ Assert(block(i).has_ghost_elements() == ghosted,
+ ExcInternalError());
+ }
return ghosted;
}
const MPI_Comm communicator,
const bool exchange_data)
{
-# ifdef DEBUG
- std::vector<typename TpetraTypes::MapType<MemorySpace>> tpetra_maps;
- for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
- tpetra_maps.push_back(
- parallel_partitioning[i]
- .template make_tpetra_map<
- typename TpetraTypes::NodeType<MemorySpace>>(communicator,
- false));
-
- Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_rows(),
- ExcDimensionMismatch(tpetra_maps.size(),
- block_sparsity_pattern.n_block_rows()));
- Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_cols(),
- ExcDimensionMismatch(tpetra_maps.size(),
- block_sparsity_pattern.n_block_cols()));
-
- const size_type n_block_rows = tpetra_maps.size();
- (void)n_block_rows;
-
- Assert(n_block_rows == block_sparsity_pattern.n_block_rows(),
- ExcDimensionMismatch(n_block_rows,
- block_sparsity_pattern.n_block_rows()));
- Assert(n_block_rows == block_sparsity_pattern.n_block_cols(),
- ExcDimensionMismatch(n_block_rows,
- block_sparsity_pattern.n_block_cols()));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ std::vector<typename TpetraTypes::MapType<MemorySpace>> tpetra_maps;
+ for (size_type i = 0; i < block_sparsity_pattern.n_block_rows(); ++i)
+ tpetra_maps.push_back(
+ parallel_partitioning[i]
+ .template make_tpetra_map<
+ typename TpetraTypes::NodeType<MemorySpace>>(communicator,
+ false));
+
+ Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_rows(),
+ ExcDimensionMismatch(tpetra_maps.size(),
+ block_sparsity_pattern.n_block_rows()));
+ Assert(tpetra_maps.size() == block_sparsity_pattern.n_block_cols(),
+ ExcDimensionMismatch(tpetra_maps.size(),
+ block_sparsity_pattern.n_block_cols()));
+
+ const size_type n_block_rows = tpetra_maps.size();
+ (void)n_block_rows;
+
+ Assert(n_block_rows == block_sparsity_pattern.n_block_rows(),
+ ExcDimensionMismatch(n_block_rows,
+ block_sparsity_pattern.n_block_rows()));
+ Assert(n_block_rows == block_sparsity_pattern.n_block_cols(),
+ ExcDimensionMismatch(n_block_rows,
+ block_sparsity_pattern.n_block_cols()));
+ }
// Call the other basic reinit function, ...
BlockVector<Number, MemorySpace>::has_ghost_elements() const
{
bool ghosted = this->block(0).has_ghost_elements();
-# ifdef DEBUG
- for (unsigned int i = 0; i < this->n_blocks(); ++i)
- Assert(this->block(i).has_ghost_elements() == ghosted,
- ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = 0; i < this->n_blocks(); ++i)
+ Assert(this->block(i).has_ghost_elements() == ghosted,
+ ExcInternalError());
+ }
return ghosted;
}
{
Assert(m() == n(), ExcNotQuadratic());
-# ifdef DEBUG
- // use operator() in debug mode because it checks if this is a valid
- // element (in parallel)
- return operator()(i, i);
-# else
- // Trilinos doesn't seem to have a more efficient way to access the
- // diagonal than by just using the standard el(i,j) function.
- return el(i, i);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // use operator() in debug mode because it checks if this is a valid
+ // element (in parallel)
+ return operator()(i, i);
+ }
+ else
+ {
+ // Trilinos doesn't seem to have a more efficient way to access the
+ // diagonal than by just using the standard el(i,j) function.
+ return el(i, i);
+ }
}
} // namespace TpetraWrappers
has_ghost = (vector->getMap()->isOneToOne() == false);
-# ifdef DEBUG
- MPI_Comm comm = Utilities::Trilinos::teuchos_comm_to_mpi_comm(
- vector->getMap()->getComm());
- const size_type n_elements_global =
- Utilities::MPI::sum(vector->getLocalLength(), comm);
- Assert(has_ghost || n_elements_global == size(), ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ MPI_Comm comm = Utilities::Trilinos::teuchos_comm_to_mpi_comm(
+ vector->getMap()->getComm());
+ const size_type n_elements_global =
+ Utilities::MPI::sum(vector->getLocalLength(), comm);
+ Assert(has_ghost || n_elements_global == size(), ExcInternalError());
+ }
}
const size_type begin = vector->getMap()->getMinGlobalIndex();
const size_type end = vector->getMap()->getMaxGlobalIndex() + 1;
-# ifdef DEBUG
- const size_type n_local_elements =
-# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
- vector->getMap()->getLocalNumElements();
-# else
- vector->getMap()->getNodeNumElements();
-# endif
- Assert(
- end - begin == n_local_elements,
- ExcMessage(
- "This function only makes sense if the elements that this "
- "vector stores on the current processor form a contiguous range. "
- "This does not appear to be the case for the current vector."));
+ if constexpr (running_in_debug_mode())
+ {
+ const size_type n_local_elements =
+# if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
+ vector->getMap()->getLocalNumElements();
+# else
+ vector->getMap()->getNodeNumElements();
# endif
+ Assert(
+ end - begin == n_local_elements,
+ ExcMessage(
+ "This function only makes sense if the elements that this "
+ "vector stores on the current processor form a contiguous range. "
+ "This does not appear to be the case for the current vector."));
+ }
return std::make_pair(begin, end);
}
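Note that only the debug/release distinction moves to if constexpr; preprocessor conditionals that select between different Trilinos APIs, such as the DEAL_II_TRILINOS_VERSION_GTE check above, have to stay at the preprocessor level because only one of the two calls exists for a given Trilinos version. A small sketch of that mixed situation (the template parameter stands in for the concrete Tpetra map type):

template <typename MapType>
std::size_t n_locally_owned_elements(const MapType &map)
{
  // API selection must remain a preprocessor choice: the call that does
  // not exist for the configured Trilinos version may not even compile.
#if DEAL_II_TRILINOS_VERSION_GTE(14, 0, 0)
  return map.getLocalNumElements();
#else
  return map.getNodeNumElements();
#endif
}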
{
use_active_cells = mg_level == numbers::invalid_unsigned_int;
-# ifdef DEBUG
- // safety check
- if (use_active_cells)
- for (const auto &cell_level : cell_levels)
- {
- typename dealii::Triangulation<dim>::cell_iterator dcell(
- &triangulation, cell_level.first, cell_level.second);
- Assert(dcell->is_active(), ExcInternalError());
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // safety check
+ if (use_active_cells)
+ for (const auto &cell_level : cell_levels)
+ {
+ typename dealii::Triangulation<dim>::cell_iterator dcell(
+ &triangulation, cell_level.first, cell_level.second);
+ Assert(dcell->is_active(), ExcInternalError());
+ }
+ }
// step 1: add ghost cells for those cells that we identify as
// interesting
inner_face.second.shared_faces[i]);
}
-# ifdef DEBUG
- // check consistency of faces on both sides
- std::vector<std::pair<CellId, CellId>> check_faces;
- check_faces.insert(check_faces.end(),
- owned_faces_lower.begin(),
- owned_faces_lower.end());
- check_faces.insert(check_faces.end(),
- owned_faces_higher.begin(),
- owned_faces_higher.end());
- std::sort(check_faces.begin(), check_faces.end());
- AssertDimension(check_faces.size(),
- inner_face.second.shared_faces.size());
- for (unsigned int i = 0; i < check_faces.size(); ++i)
- Assert(check_faces[i] == inner_face.second.shared_faces[i],
- ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check consistency of faces on both sides
+ std::vector<std::pair<CellId, CellId>> check_faces;
+ check_faces.insert(check_faces.end(),
+ owned_faces_lower.begin(),
+ owned_faces_lower.end());
+ check_faces.insert(check_faces.end(),
+ owned_faces_higher.begin(),
+ owned_faces_higher.end());
+ std::sort(check_faces.begin(), check_faces.end());
+ AssertDimension(check_faces.size(),
+ inner_face.second.shared_faces.size());
+ for (unsigned int i = 0; i < check_faces.size(); ++i)
+ Assert(check_faces[i] == inner_face.second.shared_faces[i],
+ ExcInternalError());
+ }
// now only set half of the faces as the ones to keep
if (my_domain < inner_face.first)
faces_type = std::move(new_faces_type);
}
-# ifdef DEBUG
- // final safety checks
- for (const auto &face_type : faces_type)
- AssertDimension(face_type.size(), 0U);
-
- AssertDimension(faces_out.size(), face_partition_data.back());
- unsigned int nfaces = 0;
- for (unsigned int i = face_partition_data[0];
- i < face_partition_data.back();
- ++i)
- for (unsigned int v = 0; v < vectorization_width; ++v)
- nfaces +=
- (faces_out[i].cells_interior[v] != numbers::invalid_unsigned_int);
- AssertDimension(nfaces, faces_in.size());
-
- std::vector<std::pair<unsigned int, unsigned int>> in_faces, out_faces;
- for (const auto &face_in : faces_in)
- in_faces.emplace_back(face_in.cells_interior[0],
- face_in.cells_exterior[0]);
- for (unsigned int i = face_partition_data[0];
- i < face_partition_data.back();
- ++i)
- for (unsigned int v = 0;
- v < vectorization_width &&
- faces_out[i].cells_interior[v] != numbers::invalid_unsigned_int;
- ++v)
- out_faces.emplace_back(faces_out[i].cells_interior[v],
- faces_out[i].cells_exterior[v]);
- std::sort(in_faces.begin(), in_faces.end());
- std::sort(out_faces.begin(), out_faces.end());
- AssertDimension(in_faces.size(), out_faces.size());
- for (unsigned int i = 0; i < in_faces.size(); ++i)
+ if constexpr (running_in_debug_mode())
{
- AssertDimension(in_faces[i].first, out_faces[i].first);
- AssertDimension(in_faces[i].second, out_faces[i].second);
+ // final safety checks
+ for (const auto &face_type : faces_type)
+ AssertDimension(face_type.size(), 0U);
+
+ AssertDimension(faces_out.size(), face_partition_data.back());
+ unsigned int nfaces = 0;
+ for (unsigned int i = face_partition_data[0];
+ i < face_partition_data.back();
+ ++i)
+ for (unsigned int v = 0; v < vectorization_width; ++v)
+ nfaces += (faces_out[i].cells_interior[v] !=
+ numbers::invalid_unsigned_int);
+ AssertDimension(nfaces, faces_in.size());
+
+ std::vector<std::pair<unsigned int, unsigned int>> in_faces,
+ out_faces;
+ for (const auto &face_in : faces_in)
+ in_faces.emplace_back(face_in.cells_interior[0],
+ face_in.cells_exterior[0]);
+ for (unsigned int i = face_partition_data[0];
+ i < face_partition_data.back();
+ ++i)
+ for (unsigned int v = 0;
+ v < vectorization_width && faces_out[i].cells_interior[v] !=
+ numbers::invalid_unsigned_int;
+ ++v)
+ out_faces.emplace_back(faces_out[i].cells_interior[v],
+ faces_out[i].cells_exterior[v]);
+ std::sort(in_faces.begin(), in_faces.end());
+ std::sort(out_faces.begin(), out_faces.end());
+ AssertDimension(in_faces.size(), out_faces.size());
+ for (unsigned int i = 0; i < in_faces.size(); ++i)
+ {
+ AssertDimension(in_faces[i].first, out_faces[i].first);
+ AssertDimension(in_faces[i].second, out_faces[i].second);
+ }
}
-# endif
}
#endif // ifndef DOXYGEN
apply_hanging_node_constraints(false);
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
internal::VectorReader<Number, VectorizedArrayType> reader;
read_write_operation(reader, src_data.first, src_data.second, mask, false);
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
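The many FEEvaluation hunks that follow all convert the same debug-only bookkeeping: routines that fill internal storage set a flag such as dof_values_initialized, and routines that consume that storage assert on it. A stripped-down sketch of this producer/consumer pairing, again reusing the running_in_debug_mode() helper and with illustrative names only:

#include <cassert>

class EvaluatorSketch
{
public:
  void read_dof_values(/* global vector */)
  {
    // ... fill the local values ...
    if constexpr (running_in_debug_mode())
      {
        dof_values_initialized = true;
      }
  }

  void distribute_local_to_global(/* global vector */) const
  {
    if constexpr (running_in_debug_mode())
      {
        // Catches the common mistake of integrating without having
        // read or submitted any values first.
        assert(dof_values_initialized);
      }
    // ... add the local values into the global vector ...
  }

private:
  bool dof_values_initialized = false;
};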
const unsigned int first_index,
const std::bitset<n_lanes> &mask) const
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
apply_hanging_node_constraints(true);
const unsigned int first_index,
const std::bitset<n_lanes> &mask) const
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
const auto dst_data = internal::get_vector_data<n_components_>(
dst,
const unsigned int first_index,
const std::bitset<n_lanes> &mask) const
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
const auto dst_data = internal::get_vector_data<n_components_>(
dst,
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
get_value(const unsigned int q_point) const
{
-# ifdef DEBUG
- Assert(this->values_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->values_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
if constexpr (n_components == 1)
internal::MatrixFreeFunctions::ElementType::tensor_raviart_thomas)
{
// Piola transform is required
-# ifdef DEBUG
- Assert(this->values_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->values_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
get_gradient(const unsigned int q_point) const
{
-# ifdef DEBUG
- Assert(this->gradients_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->gradients_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->jacobian != nullptr,
internal::MatrixFreeFunctions::ElementType::tensor_raviart_thomas)
{
// Piola transform is required
-# ifdef DEBUG
- Assert(this->gradients_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->gradients_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->jacobian != nullptr,
get_normal_derivative(const unsigned int q_point) const
{
AssertIndexRange(q_point, this->n_quadrature_points);
-# ifdef DEBUG
- Assert(this->gradients_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->gradients_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
Assert(this->normal_x_jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
get_hessian(const unsigned int q_point) const
{
-# ifdef DEBUG
- Assert(this->hessians_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->hessians_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->jacobian != nullptr,
get_hessian_diagonal(const unsigned int q_point) const
{
Assert(!is_face, ExcNotImplemented());
-# ifdef DEBUG
- Assert(this->hessians_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->hessians_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->jacobian != nullptr, ExcNotImplemented());
get_laplacian(const unsigned int q_point) const
{
Assert(is_face == false, ExcNotImplemented());
-# ifdef DEBUG
- Assert(this->hessians_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->hessians_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
const gradient_type hess_diag = get_hessian_diagonal(q_point);
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
get_normal_hessian(const unsigned int q_point) const
{
-# ifdef DEBUG
- Assert(this->hessians_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->hessians_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->normal_x_jacobian != nullptr,
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
submit_dof_value(const value_type val_in, const unsigned int dof)
{
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
const std::size_t dofs = this->data->dofs_per_component_on_cell;
AssertIndexRange(dof, this->data->dofs_per_component_on_cell);
for (unsigned int comp = 0; comp < n_components; ++comp)
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
submit_value(const value_type val_in, const unsigned int q_point)
{
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_values"));
-# ifdef DEBUG
- this->values_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->values_quad_submitted = true;
+ }
const std::size_t nqp = this->n_quadrature_points;
VectorizedArrayType *values = this->values_quad + q_point;
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_value"));
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
- this->values_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ this->values_quad_submitted = true;
+ }
VectorizedArrayType *values = this->values_quad + q_point;
const std::size_t nqp = this->n_quadrature_points;
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
submit_gradient(const gradient_type grad_in, const unsigned int q_point)
{
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_gradients"));
-# ifdef DEBUG
- this->gradients_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->gradients_quad_submitted = true;
+ }
if constexpr (dim > 1 && n_components == dim)
{
{
// Piola transform is required
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_gradients"));
-# ifdef DEBUG
- this->gradients_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->gradients_quad_submitted = true;
+ }
VectorizedArrayType *gradients = this->gradients_quad + q_point * dim;
VectorizedArrayType *values =
Assert(this->normal_x_jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_gradients"));
-# ifdef DEBUG
- this->gradients_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->gradients_quad_submitted = true;
+ }
const std::size_t nqp_d = this->n_quadrature_points * dim;
VectorizedArrayType *gradients = this->gradients_quad + q_point * dim;
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
submit_hessian(const hessian_type hessian_in, const unsigned int q_point)
{
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_hessians"));
-# ifdef DEBUG
- this->hessians_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->hessians_quad_submitted = true;
+ }
// compute hessian_unit = J^T * hessian_in(u) * J
const std::size_t nqp = this->n_quadrature_points;
submit_normal_hessian(const value_type normal_hessian_in,
const unsigned int q_point)
{
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_hessians"));
-# ifdef DEBUG
- this->hessians_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->hessians_quad_submitted = true;
+ }
// compute hessian_unit = J^T * hessian_in(u) * J
const std::size_t nqp = this->n_quadrature_points;
FEEvaluationBase<dim, n_components_, Number, is_face, VectorizedArrayType>::
integrate_value() const
{
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
- Assert(this->values_quad_submitted == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ Assert(this->values_quad_submitted == true,
+ internal::ExcAccessToUninitializedField());
+ }
Tensor<1, n_components, VectorizedArrayType> return_value;
const std::size_t nqp = this->n_quadrature_points;
"Do not try to modify the default template parameters used for"
" selectively enabling this function via std::enable_if!");
-# ifdef DEBUG
- Assert(this->gradients_quad_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->gradients_quad_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"Do not try to modify the default template parameters used for"
" selectively enabling this function via std::enable_if!");
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_gradients"));
-# ifdef DEBUG
- this->gradients_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->gradients_quad_submitted = true;
+ }
const std::size_t nqp_d = this->n_quadrature_points * dim;
VectorizedArrayType *gradients = this->gradients_quad + q_point * dim;
// could have used base class operator, but that involves some overhead
// which is inefficient. it is nice to have the symmetric tensor because
// that saves some operations
-# ifdef DEBUG
- Assert(this->is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->is_reinitialized, ExcNotInitialized());
+ }
AssertIndexRange(q_point, this->n_quadrature_points);
Assert(this->J_value != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
Assert(this->jacobian != nullptr,
internal::ExcMatrixFreeAccessToUninitializedMappingField(
"update_gradients"));
-# ifdef DEBUG
- this->gradients_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->gradients_quad_submitted = true;
+ }
const std::size_t nqp_d = this->n_quadrature_points * dim;
VectorizedArrayType *gradients = this->gradients_quad + dim * q_point;
"is the index of the active FE, which you can use to exclude "
"FE_Nothing."));
-# ifdef DEBUG
- // print error message when the dimensions do not match. Propose a possible
- // fix
- if ((static_cast<unsigned int>(fe_degree) != numbers::invalid_unsigned_int &&
- static_cast<unsigned int>(fe_degree) !=
- this->data->data.front().fe_degree) ||
- n_q_points != this->n_quadrature_points)
- {
- std::string message =
- "-------------------------------------------------------\n";
- message += "Illegal arguments in constructor/wrong template arguments!\n";
- message += " Called --> FEEvaluation<dim,";
- message += Utilities::int_to_string(fe_degree) + ",";
- message += Utilities::int_to_string(n_q_points_1d);
- message += "," + Utilities::int_to_string(n_components);
- message += ",Number>(data";
- if (first_selected_component != numbers::invalid_unsigned_int)
+ if constexpr (running_in_debug_mode())
+ {
+ // print error message when the dimensions do not match. Propose a
+ // possible fix
+ if ((static_cast<unsigned int>(fe_degree) !=
+ numbers::invalid_unsigned_int &&
+ static_cast<unsigned int>(fe_degree) !=
+ this->data->data.front().fe_degree) ||
+ n_q_points != this->n_quadrature_points)
{
- message += ", " + Utilities::int_to_string(dof_no) + ", ";
- message += Utilities::int_to_string(this->quad_no) + ", ";
- message += Utilities::int_to_string(first_selected_component);
- }
- message += ")\n";
+ std::string message =
+ "-------------------------------------------------------\n";
+ message +=
+ "Illegal arguments in constructor/wrong template arguments!\n";
+ message += " Called --> FEEvaluation<dim,";
+ message += Utilities::int_to_string(fe_degree) + ",";
+ message += Utilities::int_to_string(n_q_points_1d);
+ message += "," + Utilities::int_to_string(n_components);
+ message += ",Number>(data";
+ if (first_selected_component != numbers::invalid_unsigned_int)
+ {
+ message += ", " + Utilities::int_to_string(dof_no) + ", ";
+ message += Utilities::int_to_string(this->quad_no) + ", ";
+ message += Utilities::int_to_string(first_selected_component);
+ }
+ message += ")\n";
- // check whether some other vector component has the correct number of
- // points
- unsigned int proposed_dof_comp = numbers::invalid_unsigned_int,
- proposed_fe_comp = numbers::invalid_unsigned_int,
- proposed_quad_comp = numbers::invalid_unsigned_int;
- if (dof_no != numbers::invalid_unsigned_int)
- {
- if (static_cast<unsigned int>(fe_degree) ==
- this->data->data.front().fe_degree)
+ // check whether some other vector component has the correct number of
+ // points
+ unsigned int proposed_dof_comp = numbers::invalid_unsigned_int,
+ proposed_fe_comp = numbers::invalid_unsigned_int,
+ proposed_quad_comp = numbers::invalid_unsigned_int;
+ if (dof_no != numbers::invalid_unsigned_int)
{
- proposed_dof_comp = dof_no;
- proposed_fe_comp = first_selected_component;
+ if (static_cast<unsigned int>(fe_degree) ==
+ this->data->data.front().fe_degree)
+ {
+ proposed_dof_comp = dof_no;
+ proposed_fe_comp = first_selected_component;
+ }
+ else
+ for (unsigned int no = 0;
+ no < this->matrix_free->n_components();
+ ++no)
+ for (unsigned int nf = 0;
+ nf < this->matrix_free->n_base_elements(no);
+ ++nf)
+ if (this->matrix_free
+ ->get_shape_info(no, 0, nf, this->active_fe_index, 0)
+ .data.front()
+ .fe_degree == static_cast<unsigned int>(fe_degree))
+ {
+ proposed_dof_comp = no;
+ proposed_fe_comp = nf;
+ break;
+ }
+ if (n_q_points ==
+ this->mapping_data->descriptor[this->active_quad_index]
+ .n_q_points)
+ proposed_quad_comp = this->quad_no;
+ else
+ for (unsigned int no = 0;
+ no <
+ this->matrix_free->get_mapping_info().cell_data.size();
+ ++no)
+ if (this->matrix_free->get_mapping_info()
+ .cell_data[no]
+ .descriptor[this->active_quad_index]
+ .n_q_points == n_q_points)
+ {
+ proposed_quad_comp = no;
+ break;
+ }
}
- else
- for (unsigned int no = 0; no < this->matrix_free->n_components();
- ++no)
- for (unsigned int nf = 0;
- nf < this->matrix_free->n_base_elements(no);
- ++nf)
- if (this->matrix_free
- ->get_shape_info(no, 0, nf, this->active_fe_index, 0)
- .data.front()
- .fe_degree == static_cast<unsigned int>(fe_degree))
- {
- proposed_dof_comp = no;
- proposed_fe_comp = nf;
- break;
- }
- if (n_q_points ==
- this->mapping_data->descriptor[this->active_quad_index]
- .n_q_points)
- proposed_quad_comp = this->quad_no;
- else
- for (unsigned int no = 0;
- no < this->matrix_free->get_mapping_info().cell_data.size();
- ++no)
- if (this->matrix_free->get_mapping_info()
- .cell_data[no]
- .descriptor[this->active_quad_index]
- .n_q_points == n_q_points)
+ if (proposed_dof_comp != numbers::invalid_unsigned_int &&
+ proposed_quad_comp != numbers::invalid_unsigned_int)
+ {
+ if (proposed_dof_comp != first_selected_component)
+ message += "Wrong vector component selection:\n";
+ else
+ message += "Wrong quadrature formula selection:\n";
+ message += " Did you mean FEEvaluation<dim,";
+ message += Utilities::int_to_string(fe_degree) + ",";
+ message += Utilities::int_to_string(n_q_points_1d);
+ message += "," + Utilities::int_to_string(n_components);
+ message += ",Number>(data";
+ if (dof_no != numbers::invalid_unsigned_int)
{
- proposed_quad_comp = no;
- break;
+ message +=
+ ", " + Utilities::int_to_string(proposed_dof_comp) + ", ";
+ message +=
+ Utilities::int_to_string(proposed_quad_comp) + ", ";
+ message += Utilities::int_to_string(proposed_fe_comp);
}
- }
- if (proposed_dof_comp != numbers::invalid_unsigned_int &&
- proposed_quad_comp != numbers::invalid_unsigned_int)
- {
- if (proposed_dof_comp != first_selected_component)
- message += "Wrong vector component selection:\n";
- else
- message += "Wrong quadrature formula selection:\n";
+ message += ")?\n";
+ std::string correct_pos;
+ if (proposed_dof_comp != dof_no)
+ correct_pos = " ^ ";
+ else
+ correct_pos = " ";
+ if (proposed_quad_comp != this->quad_no)
+ correct_pos += " ^ ";
+ else
+ correct_pos += " ";
+ if (proposed_fe_comp != first_selected_component)
+ correct_pos += " ^\n";
+ else
+ correct_pos += " \n";
+ message +=
+ " " +
+ correct_pos;
+ }
+ // ok, did not find the numbers specified by the template arguments in
+ // the given list. Suggest correct template arguments
+ const unsigned int proposed_n_q_points_1d = static_cast<unsigned int>(
+ std::pow(1.001 * this->n_quadrature_points, 1. / dim));
+ message += "Wrong template arguments:\n";
message += " Did you mean FEEvaluation<dim,";
- message += Utilities::int_to_string(fe_degree) + ",";
- message += Utilities::int_to_string(n_q_points_1d);
+ message +=
+ Utilities::int_to_string(this->data->data.front().fe_degree) + ",";
+ message += Utilities::int_to_string(proposed_n_q_points_1d);
message += "," + Utilities::int_to_string(n_components);
message += ",Number>(data";
if (dof_no != numbers::invalid_unsigned_int)
{
+ message += ", " + Utilities::int_to_string(dof_no) + ", ";
+ message += Utilities::int_to_string(this->quad_no);
message +=
- ", " + Utilities::int_to_string(proposed_dof_comp) + ", ";
- message += Utilities::int_to_string(proposed_quad_comp) + ", ";
- message += Utilities::int_to_string(proposed_fe_comp);
+ ", " + Utilities::int_to_string(first_selected_component);
}
message += ")?\n";
std::string correct_pos;
- if (proposed_dof_comp != dof_no)
- correct_pos = " ^ ";
- else
- correct_pos = " ";
- if (proposed_quad_comp != this->quad_no)
- correct_pos += " ^ ";
+ if (this->data->data.front().fe_degree !=
+ static_cast<unsigned int>(fe_degree))
+ correct_pos = " ^";
else
- correct_pos += " ";
- if (proposed_fe_comp != first_selected_component)
+ correct_pos = " ";
+ if (proposed_n_q_points_1d != n_q_points_1d)
correct_pos += " ^\n";
else
correct_pos += " \n";
- message += " " +
- correct_pos;
+ message += " " + correct_pos;
+
+ Assert(static_cast<unsigned int>(fe_degree) ==
+ this->data->data.front().fe_degree &&
+ n_q_points == this->n_quadrature_points,
+ ExcMessage(message));
}
- // ok, did not find the numbers specified by the template arguments in
- // the given list. Suggest correct template arguments
- const unsigned int proposed_n_q_points_1d = static_cast<unsigned int>(
- std::pow(1.001 * this->n_quadrature_points, 1. / dim));
- message += "Wrong template arguments:\n";
- message += " Did you mean FEEvaluation<dim,";
- message +=
- Utilities::int_to_string(this->data->data.front().fe_degree) + ",";
- message += Utilities::int_to_string(proposed_n_q_points_1d);
- message += "," + Utilities::int_to_string(n_components);
- message += ",Number>(data";
if (dof_no != numbers::invalid_unsigned_int)
- {
- message += ", " + Utilities::int_to_string(dof_no) + ", ";
- message += Utilities::int_to_string(this->quad_no);
- message += ", " + Utilities::int_to_string(first_selected_component);
- }
- message += ")?\n";
- std::string correct_pos;
- if (this->data->data.front().fe_degree !=
- static_cast<unsigned int>(fe_degree))
- correct_pos = " ^";
- else
- correct_pos = " ";
- if (proposed_n_q_points_1d != n_q_points_1d)
- correct_pos += " ^\n";
- else
- correct_pos += " \n";
- message += " " + correct_pos;
-
- Assert(static_cast<unsigned int>(fe_degree) ==
- this->data->data.front().fe_degree &&
- n_q_points == this->n_quadrature_points,
- ExcMessage(message));
- }
- if (dof_no != numbers::invalid_unsigned_int)
- AssertDimension(
- n_q_points,
- this->mapping_data->descriptor[this->active_quad_index].n_q_points);
-# endif
+ AssertDimension(
+ n_q_points,
+ this->mapping_data->descriptor[this->active_quad_index].n_q_points);
+ }
}
&this->mapping_data->quadrature_points
[this->mapping_data->quadrature_point_offsets[this->cell]];
-# ifdef DEBUG
- this->is_reinitialized = true;
- this->dof_values_initialized = false;
- this->values_quad_initialized = false;
- this->gradients_quad_initialized = false;
- this->hessians_quad_initialized = false;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ this->dof_values_initialized = false;
+ this->values_quad_initialized = false;
+ this->gradients_quad_initialized = false;
+ this->hessians_quad_initialized = false;
+ }
}
}
}
-# ifdef DEBUG
- this->is_reinitialized = true;
- this->dof_values_initialized = false;
- this->values_quad_initialized = false;
- this->gradients_quad_initialized = false;
- this->hessians_quad_initialized = false;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ this->dof_values_initialized = false;
+ this->values_quad_initialized = false;
+ this->gradients_quad_initialized = false;
+ this->hessians_quad_initialized = false;
+ }
}
else
cell->get_dof_indices(this->local_dof_indices);
-# ifdef DEBUG
- this->is_reinitialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ }
}
Assert(this->mapped_geometry.get() != 0, ExcNotInitialized());
this->mapped_geometry->reinit(cell);
-# ifdef DEBUG
- this->is_reinitialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ }
}
VectorizedArrayType>::
evaluate(const EvaluationFlags::EvaluationFlags evaluation_flags)
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized == true,
+ internal::ExcAccessToUninitializedField());
+ }
evaluate(this->values_dofs, evaluation_flags);
}
*this);
}
-# ifdef DEBUG
- this->values_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::values;
- this->gradients_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::gradients;
- this->hessians_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::hessians;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->values_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::values;
+ this->gradients_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::gradients;
+ this->hessians_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::hessians;
+ }
}
{
integrate(integration_flag, this->values_dofs);
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
VectorizedArrayType *values_array,
const bool sum_into_values_array)
{
-# ifdef DEBUG
- if (integration_flag & EvaluationFlags::values)
- Assert(this->values_quad_submitted == true,
- internal::ExcAccessToUninitializedField());
- if (integration_flag & EvaluationFlags::gradients)
- Assert(this->gradients_quad_submitted == true,
- internal::ExcAccessToUninitializedField());
- if ((integration_flag & EvaluationFlags::hessians) != 0u)
- Assert(this->hessians_quad_submitted == true,
- internal::ExcAccessToUninitializedField());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (integration_flag & EvaluationFlags::values)
+ Assert(this->values_quad_submitted == true,
+ internal::ExcAccessToUninitializedField());
+ if (integration_flag & EvaluationFlags::gradients)
+ Assert(this->gradients_quad_submitted == true,
+ internal::ExcAccessToUninitializedField());
+ if ((integration_flag & EvaluationFlags::hessians) != 0u)
+ Assert(this->hessians_quad_submitted == true,
+ internal::ExcAccessToUninitializedField());
+ }
Assert(this->matrix_free != nullptr ||
this->mapped_geometry->is_initialized(),
ExcNotInitialized());
sum_into_values_array);
}
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
this->mapping_data->quadrature_point_offsets[this->cell];
}
-# ifdef DEBUG
- this->is_reinitialized = true;
- this->dof_values_initialized = false;
- this->values_quad_initialized = false;
- this->gradients_quad_initialized = false;
- this->hessians_quad_initialized = false;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ this->dof_values_initialized = false;
+ this->values_quad_initialized = false;
+ this->gradients_quad_initialized = false;
+ this->hessians_quad_initialized = false;
+ }
}
.quadrature_point_offsets[index];
}
-# ifdef DEBUG
- this->is_reinitialized = true;
- this->dof_values_initialized = false;
- this->values_quad_initialized = false;
- this->gradients_quad_initialized = false;
- this->hessians_quad_initialized = false;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->is_reinitialized = true;
+ this->dof_values_initialized = false;
+ this->values_quad_initialized = false;
+ this->gradients_quad_initialized = false;
+ this->hessians_quad_initialized = false;
+ }
}
VectorizedArrayType>::
evaluate(const EvaluationFlags::EvaluationFlags evaluation_flag)
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized, ExcNotInitialized());
+ }
evaluate(this->values_dofs, evaluation_flag);
}
internal::FEFaceEvaluationFactory<dim, VectorizedArrayType>::evaluate(
n_components, evaluation_flag_actual, values_array, *this);
-# ifdef DEBUG
- this->values_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::values;
- this->gradients_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::gradients;
- this->hessians_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::hessians;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->values_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::values;
+ this->gradients_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::gradients;
+ this->hessians_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::hessians;
+ }
}
VectorizedArrayType>::
project_to_face(const EvaluationFlags::EvaluationFlags evaluation_flag)
{
-# ifdef DEBUG
- Assert(this->dof_values_initialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(this->dof_values_initialized, ExcNotInitialized());
+ }
project_to_face(this->values_dofs, evaluation_flag);
}
internal::FEFaceEvaluationFactory<dim, VectorizedArrayType>::
evaluate_in_face(n_components, evaluation_flag_actual, *this);
-# ifdef DEBUG
- this->values_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::values;
- this->gradients_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::gradients;
- this->hessians_quad_initialized =
- evaluation_flag_actual & EvaluationFlags::hessians;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->values_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::values;
+ this->gradients_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::gradients;
+ this->hessians_quad_initialized =
+ evaluation_flag_actual & EvaluationFlags::hessians;
+ }
}
{
integrate(integration_flag, this->values_dofs, sum_into_values);
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
{
collect_from_face(integration_flag, this->values_dofs, sum_into_values);
-# ifdef DEBUG
- this->dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->dof_values_initialized = true;
+ }
}
this->evaluate(evaluation_flag);
}
-# ifdef DEBUG
- this->values_quad_initialized = evaluation_flag & EvaluationFlags::values;
- this->gradients_quad_initialized =
- evaluation_flag & EvaluationFlags::gradients;
- this->hessians_quad_initialized = evaluation_flag & EvaluationFlags::hessians;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ this->values_quad_initialized = evaluation_flag & EvaluationFlags::values;
+ this->gradients_quad_initialized =
+ evaluation_flag & EvaluationFlags::gradients;
+ this->hessians_quad_initialized =
+ evaluation_flag & EvaluationFlags::hessians;
+ }
}
const std::array<unsigned int, n_lanes> &
get_cell_ids() const
{
-// implemented inline to avoid compilation problems on Windows
-#ifdef DEBUG
- Assert(is_reinitialized, ExcNotInitialized());
-#endif
+ // implemented inline to avoid compilation problems on Windows
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(is_reinitialized, ExcNotInitialized());
+ }
return cell_ids;
}
const std::array<unsigned int, n_lanes> &
get_face_ids() const
{
-// implemented inline to avoid compilation problems on Windows
-#ifdef DEBUG
- Assert(is_reinitialized && is_face, ExcNotInitialized());
-#endif
+ // implemented inline to avoid compilation problems on Windows
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(is_reinitialized && is_face, ExcNotInitialized());
+ }
return face_ids;
}
unsigned int
get_cell_or_face_batch_id() const
{
-// implemented inline to avoid compilation problems on Windows
-#ifdef DEBUG
- Assert(is_reinitialized, ExcNotInitialized());
-#endif
+ // implemented inline to avoid compilation problems on Windows
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(is_reinitialized, ExcNotInitialized());
+ }
return cell;
}
const std::array<unsigned int, n_lanes> &
get_cell_or_face_ids() const
{
-// implemented inline to avoid compilation problems on Windows
-#ifdef DEBUG
- Assert(is_reinitialized, ExcNotInitialized());
-#endif
+ // implemented inline to avoid compilation problems on Windows
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(is_reinitialized, ExcNotInitialized());
+ }
if (!is_face || dof_access_index ==
internal::MatrixFreeFunctions::DoFInfo::dof_access_cell)
quadrature_points = nullptr;
quadrature_weights = other.quadrature_weights;
-# ifdef DEBUG
- is_reinitialized = false;
- dof_values_initialized = false;
- values_quad_initialized = false;
- gradients_quad_initialized = false;
- hessians_quad_initialized = false;
- values_quad_submitted = false;
- gradients_quad_submitted = false;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ is_reinitialized = false;
+ dof_values_initialized = false;
+ values_quad_initialized = false;
+ gradients_quad_initialized = false;
+ hessians_quad_initialized = false;
+ values_quad_submitted = false;
+ gradients_quad_submitted = false;
+ }
cell = numbers::invalid_unsigned_int;
interior_face = other.is_interior_face();
// include 12 extra fields to insert some padding between values, gradients
// and hessians, which helps to reduce the probability of cache conflicts
const unsigned int allocated_size = size_scratch_data + size_data_arrays + 12;
-# ifdef DEBUG
- scratch_data_array->clear();
- scratch_data_array->resize(allocated_size,
- Number(numbers::signaling_nan<ScalarNumber>()));
-# else
- scratch_data_array->resize_fast(allocated_size);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ scratch_data_array->clear();
+ scratch_data_array->resize(
+ allocated_size, Number(numbers::signaling_nan<ScalarNumber>()));
+ }
+ else
+ {
+ scratch_data_array->resize_fast(allocated_size);
+ }
scratch_data.reinit(scratch_data_array->begin() + size_data_arrays + 12,
size_scratch_data);
inline Number *
FEEvaluationData<dim, Number, is_face>::begin_dof_values()
{
-# ifdef DEBUG
- dof_values_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ dof_values_initialized = true;
+ }
return values_dofs;
}
inline const Number *
FEEvaluationData<dim, Number, is_face>::begin_values() const
{
-# ifdef DEBUG
- Assert(values_quad_initialized || values_quad_submitted, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(values_quad_initialized || values_quad_submitted,
+ ExcNotInitialized());
+ }
return values_quad;
}
inline Number *
FEEvaluationData<dim, Number, is_face>::begin_values()
{
-# ifdef DEBUG
- values_quad_initialized = true;
- values_quad_submitted = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ values_quad_initialized = true;
+ values_quad_submitted = true;
+ }
return values_quad;
}
inline const Number *
FEEvaluationData<dim, Number, is_face>::begin_gradients() const
{
-# ifdef DEBUG
- Assert(gradients_quad_initialized || gradients_quad_submitted,
- ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(gradients_quad_initialized || gradients_quad_submitted,
+ ExcNotInitialized());
+ }
return gradients_quad;
}
inline Number *
FEEvaluationData<dim, Number, is_face>::begin_gradients()
{
-# ifdef DEBUG
- gradients_quad_submitted = true;
- gradients_quad_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ gradients_quad_submitted = true;
+ gradients_quad_initialized = true;
+ }
return gradients_quad;
}
inline const Number *
FEEvaluationData<dim, Number, is_face>::begin_hessians() const
{
-# ifdef DEBUG
- Assert(hessians_quad_initialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(hessians_quad_initialized, ExcNotInitialized());
+ }
return hessians_quad;
}
inline Number *
FEEvaluationData<dim, Number, is_face>::begin_hessians()
{
-# ifdef DEBUG
- hessians_quad_initialized = true;
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ hessians_quad_initialized = true;
+ }
return hessians_quad;
}
inline internal::MatrixFreeFunctions::GeometryType
FEEvaluationData<dim, Number, is_face>::get_cell_type() const
{
-# ifdef DEBUG
- Assert(is_reinitialized, ExcNotInitialized());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(is_reinitialized, ExcNotInitialized());
+ }
return cell_type;
}
mapping_info->compute_data_index_offset(geometry_index);
const unsigned int compressed_data_offset =
mapping_info->compute_compressed_data_index_offset(geometry_index);
-#ifdef DEBUG
- const UpdateFlags update_flags_mapping =
- mapping_info->get_update_flags_mapping();
- if (update_flags_mapping & UpdateFlags::update_quadrature_points)
- real_point_ptr = mapping_info->get_real_point(data_offset);
- if (update_flags_mapping & UpdateFlags::update_jacobians)
- jacobian_ptr =
- mapping_info->get_jacobian(compressed_data_offset, is_interior);
- if (update_flags_mapping & UpdateFlags::update_inverse_jacobians)
- inverse_jacobian_ptr =
- mapping_info->get_inverse_jacobian(compressed_data_offset, is_interior);
- if (update_flags_mapping & UpdateFlags::update_normal_vectors)
- normal_ptr = mapping_info->get_normal_vector(data_offset);
- if (update_flags_mapping & UpdateFlags::update_JxW_values)
- JxW_ptr = mapping_info->get_JxW(data_offset);
-#else
- real_point_ptr = mapping_info->get_real_point(data_offset);
- jacobian_ptr =
- mapping_info->get_jacobian(compressed_data_offset, is_interior);
- inverse_jacobian_ptr =
- mapping_info->get_inverse_jacobian(compressed_data_offset, is_interior);
- normal_ptr = mapping_info->get_normal_vector(data_offset);
- JxW_ptr = mapping_info->get_JxW(data_offset);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const UpdateFlags update_flags_mapping =
+ mapping_info->get_update_flags_mapping();
+ if (update_flags_mapping & UpdateFlags::update_quadrature_points)
+ real_point_ptr = mapping_info->get_real_point(data_offset);
+ if (update_flags_mapping & UpdateFlags::update_jacobians)
+ jacobian_ptr =
+ mapping_info->get_jacobian(compressed_data_offset, is_interior);
+ if (update_flags_mapping & UpdateFlags::update_inverse_jacobians)
+ inverse_jacobian_ptr =
+ mapping_info->get_inverse_jacobian(compressed_data_offset,
+ is_interior);
+ if (update_flags_mapping & UpdateFlags::update_normal_vectors)
+ normal_ptr = mapping_info->get_normal_vector(data_offset);
+ if (update_flags_mapping & UpdateFlags::update_JxW_values)
+ JxW_ptr = mapping_info->get_JxW(data_offset);
+ }
+ else
+ {
+ real_point_ptr = mapping_info->get_real_point(data_offset);
+ jacobian_ptr =
+ mapping_info->get_jacobian(compressed_data_offset, is_interior);
+ inverse_jacobian_ptr =
+ mapping_info->get_inverse_jacobian(compressed_data_offset, is_interior);
+ normal_ptr = mapping_info->get_normal_vector(data_offset);
+ JxW_ptr = mapping_info->get_JxW(data_offset);
+ }
if (!is_linear && fast_path)
{
const VectorizedDouble jac_det = determinant(jac);
-#ifdef DEBUG
- for (unsigned int v = 0; v < n_lanes_d; ++v)
+ if constexpr (running_in_debug_mode())
{
- const typename Triangulation<dim>::cell_iterator
- cell_iterator(
- &tria,
- cell_array[cell * n_lanes + vv + v].first,
- cell_array[cell * n_lanes + vv + v].second);
-
- Assert(jac_det[v] >
- 1e-12 * Utilities::fixed_power<dim>(
- cell_iterator->diameter() /
- std::sqrt(double(dim))),
- (typename Mapping<dim>::ExcDistortedMappedCell(
- cell_iterator->center(), jac_det[v], q)));
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ {
+ const typename Triangulation<dim>::cell_iterator
+ cell_iterator(
+ &tria,
+ cell_array[cell * n_lanes + vv + v].first,
+ cell_array[cell * n_lanes + vv + v].second);
+
+ Assert(
+ jac_det[v] > 1e-12 * Utilities::fixed_power<dim>(
+ cell_iterator->diameter() /
+ std::sqrt(double(dim))),
+ (typename Mapping<dim>::ExcDistortedMappedCell(
+ cell_iterator->center(), jac_det[v], q)));
+ }
+ }
+ else
+ {
+ (void)tria;
+ (void)cell_array;
}
-#else
- (void)tria;
- (void)cell_array;
-#endif
const Tensor<2, dim, VectorizedDouble> inv_jac =
transpose(invert(jac));
1. :
my_data.descriptor[0].quadrature.weight(q));
-#ifdef DEBUG
- for (unsigned int v = 0; v < n_lanes_d; ++v)
- Assert(JxW[v] > 0.0, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int v = 0; v < n_lanes_d; ++v)
+ Assert(JxW[v] > 0.0, ExcInternalError());
+ }
store_vectorized_array(JxW,
vv,
{
// the range over which we are searching must be ordered, otherwise we
// got a range that spans over too many cells
-#ifdef DEBUG
- for (unsigned int i = range.first + 1; i < range.second; ++i)
- Assert(
- fe_indices[i] >= fe_indices[i - 1],
- ExcMessage(
- "Cell range must be over sorted range of FE indices in hp-case!"));
- AssertIndexRange(range.first, fe_indices.size() + 1);
- AssertIndexRange(range.second, fe_indices.size() + 1);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = range.first + 1; i < range.second; ++i)
+ Assert(
+ fe_indices[i] >= fe_indices[i - 1],
+ ExcMessage(
+ "Cell range must be over sorted range of FE indices in hp-case!"));
+ AssertIndexRange(range.first, fe_indices.size() + 1);
+ AssertIndexRange(range.second, fe_indices.size() + 1);
+ }
std::pair<unsigned int, unsigned int> return_range;
return_range.first = std::lower_bound(fe_indices.begin() + range.first,
fe_indices.begin() + range.second,
task_info.n_procs =
Utilities::MPI::n_mpi_processes(task_info.communicator);
-#ifdef DEBUG
- for (const auto &constraint : constraints)
- Assert(
- constraint->is_closed(task_info.communicator),
- ExcMessage(
- "You have provided a non-empty AffineConstraints object that has not "
- "been closed. Please call AffineConstraints::close() before "
- "calling MatrixFree::reinit()!"));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &constraint : constraints)
+ Assert(
+ constraint->is_closed(task_info.communicator),
+ ExcMessage(
+ "You have provided a non-empty AffineConstraints object that has not "
+ "been closed. Please call AffineConstraints::close() before "
+ "calling MatrixFree::reinit()!"));
+ }
initialize_dof_handlers(dof_handler, additional_data);
for (unsigned int no = 0; no < dof_handler.size(); ++no)
irregular_cells.back() = task_info.n_ghost_cells % n_lanes;
}
-#ifdef DEBUG
- {
- unsigned int n_cells = 0;
- for (unsigned int i = 0; i < task_info.cell_partition_data.back();
- ++i)
- n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes;
- AssertDimension(n_cells, task_info.n_active_cells);
- n_cells = 0;
- for (unsigned int i = task_info.cell_partition_data.back();
- i < n_ghost_slots + task_info.cell_partition_data.back();
- ++i)
- n_cells += irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes;
- AssertDimension(n_cells, task_info.n_ghost_cells);
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ unsigned int n_cells = 0;
+ for (unsigned int i = 0; i < task_info.cell_partition_data.back();
+ ++i)
+ n_cells +=
+ irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes;
+ AssertDimension(n_cells, task_info.n_active_cells);
+ n_cells = 0;
+ for (unsigned int i = task_info.cell_partition_data.back();
+ i < n_ghost_slots + task_info.cell_partition_data.back();
+ ++i)
+ n_cells +=
+ irregular_cells[i] > 0 ? irregular_cells[i] : n_lanes;
+ AssertDimension(n_cells, task_info.n_ghost_cells);
+ }
+ }
task_info.cell_partition_data.push_back(
task_info.cell_partition_data.back() + n_ghost_slots);
}
- // Finally perform the renumbering. We also want to group several cells
- // together to a batch of cells for SIMD (vectorized) execution (where the
- // arithmetic operations of several cells will then be done
- // simultaneously).
-#ifdef DEBUG
- {
- AssertDimension(renumbering.size(),
- task_info.n_active_cells + task_info.n_ghost_cells);
- std::vector<unsigned int> sorted_renumbering(renumbering);
- std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
- for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
- Assert(sorted_renumbering[i] == i, ExcInternalError());
- }
-#endif
+ // Finally perform the renumbering. We also want to group several cells
+ // together to a batch of cells for SIMD (vectorized) execution (where the
+ // arithmetic operations of several cells will then be done
+ // simultaneously).
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ AssertDimension(renumbering.size(),
+ task_info.n_active_cells + task_info.n_ghost_cells);
+ std::vector<unsigned int> sorted_renumbering(renumbering);
+ std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+ for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
+ Assert(sorted_renumbering[i] == i, ExcInternalError());
+ }
+ }
{
std::vector<std::pair<unsigned int, unsigned int>> cell_level_index_old;
cell_level_index.swap(cell_level_index_old);
for (unsigned int i = 0; i < inverse_diagonal_vector.locally_owned_size();
++i)
{
-#ifdef DEBUG
- // only define the type alias in debug mode to avoid a warning
- using Number =
- typename Base<dim, VectorType, VectorizedArrayType>::value_type;
- Assert(diagonal_vector.local_element(i) > Number(0),
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // only define the type alias in debug mode to avoid a warning
+ using Number =
+ typename Base<dim, VectorType, VectorizedArrayType>::value_type;
+ Assert(diagonal_vector.local_element(i) > Number(0),
+ ExcInternalError());
+ }
inverse_diagonal_vector.local_element(i) =
1. / inverse_diagonal_vector.local_element(i);
}
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/type_traits.h>
-#ifdef DEBUG
-# include <boost/algorithm/string/join.hpp>
-#endif
+#include <boost/algorithm/string/join.hpp>
DEAL_II_NAMESPACE_OPEN
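// The Boost header can no longer sit behind a DEBUG guard: unlike code
// removed by the preprocessor, a discarded "if constexpr" branch must
// still parse and type-check, so boost::algorithm::join() has to be
// declared in release builds as well. A minimal illustration (the helper
// f() is hypothetical, for exposition only):
//
// @code
//   #include <boost/algorithm/string/join.hpp>
//
//   #include <string>
//   #include <vector>
//
//   void f(const std::vector<std::string> &v)
//   {
//     if constexpr (running_in_debug_mode())
//       {
//         // needs a declaration of join() even when this branch is
//         // discarded in release builds
//         const std::string joined = boost::algorithm::join(v, ", ");
//         (void)joined;
//       }
//   }
// @endcode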
(void)matrix_free;
(void)dof_info;
-#ifdef DEBUG
- if (vec.partitioners_are_compatible(*dof_info.vector_partitioner) == false)
+ if constexpr (running_in_debug_mode())
{
- unsigned int dof_index = numbers::invalid_unsigned_int;
-
- for (unsigned int i = 0; i < matrix_free.n_components(); ++i)
- if (&matrix_free.get_dof_info(i) == &dof_info)
- {
- dof_index = i;
- break;
- }
-
- Assert(dof_index != numbers::invalid_unsigned_int, ExcInternalError());
-
- std::vector<std::string> dof_indices_with_compatible_partitioners;
-
- for (unsigned int i = 0; i < matrix_free.n_components(); ++i)
- if (vec.partitioners_are_compatible(
- *matrix_free.get_dof_info(i).vector_partitioner))
- dof_indices_with_compatible_partitioners.push_back(
- std::to_string(i));
-
- if (dof_indices_with_compatible_partitioners.empty())
+ if (vec.partitioners_are_compatible(*dof_info.vector_partitioner) ==
+ false)
{
- Assert(false,
- ExcMessage("The parallel layout of the given vector is "
- "compatible neither with the Partitioner of the "
- "current FEEvaluation with dof_handler_index=" +
- std::to_string(dof_index) +
- " nor with any Partitioner in MatrixFree. A "
- "potential reason is that you did not use "
- "MatrixFree::initialize_dof_vector() to get a "
- "compatible vector."));
- }
- else
- {
- Assert(
- false,
- ExcMessage(
- "The parallel layout of the given vector is "
- "not compatible with the Partitioner of the "
- "current FEEvaluation with dof_handler_index=" +
- std::to_string(dof_index) +
- ". However, the underlying "
- "MatrixFree contains Partitioner objects that are compatible. "
- "They have the following dof_handler_index values: " +
- boost::algorithm::join(dof_indices_with_compatible_partitioners,
- ", ") +
- ". Did you want to pass any of these values to the "
- "constructor of the current FEEvaluation object or "
- "did you not use MatrixFree::initialize_dof_vector() "
- "with dof_handler_index=" +
- std::to_string(dof_index) +
- " to get a "
- "compatible vector?"));
+ unsigned int dof_index = numbers::invalid_unsigned_int;
+
+ for (unsigned int i = 0; i < matrix_free.n_components(); ++i)
+ if (&matrix_free.get_dof_info(i) == &dof_info)
+ {
+ dof_index = i;
+ break;
+ }
+
+ Assert(dof_index != numbers::invalid_unsigned_int,
+ ExcInternalError());
+
+ std::vector<std::string> dof_indices_with_compatible_partitioners;
+
+ for (unsigned int i = 0; i < matrix_free.n_components(); ++i)
+ if (vec.partitioners_are_compatible(
+ *matrix_free.get_dof_info(i).vector_partitioner))
+ dof_indices_with_compatible_partitioners.push_back(
+ std::to_string(i));
+
+ if (dof_indices_with_compatible_partitioners.empty())
+ {
+ Assert(false,
+ ExcMessage(
+ "The parallel layout of the given vector is "
+ "compatible neither with the Partitioner of the "
+ "current FEEvaluation with dof_handler_index=" +
+ std::to_string(dof_index) +
+ " nor with any Partitioner in MatrixFree. A "
+ "potential reason is that you did not use "
+ "MatrixFree::initialize_dof_vector() to get a "
+ "compatible vector."));
+ }
+ else
+ {
+ Assert(
+ false,
+ ExcMessage(
+ "The parallel layout of the given vector is "
+ "not compatible with the Partitioner of the "
+ "current FEEvaluation with dof_handler_index=" +
+ std::to_string(dof_index) +
+ ". However, the underlying "
+ "MatrixFree contains Partitioner objects that are compatible. "
+ "They have the following dof_handler_index values: " +
+ boost::algorithm::join(
+ dof_indices_with_compatible_partitioners, ", ") +
+ ". Did you want to pass any of these values to the "
+ "constructor of the current FEEvaluation object or "
+ "did you not use MatrixFree::initialize_dof_vector() "
+ "with dof_handler_index=" +
+ std::to_string(dof_index) +
+ " to get a "
+ "compatible vector?"));
+ }
}
}
-#endif
}
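// The remedy suggested by the two messages above is to let MatrixFree
// build the vector so that its parallel layout matches one of the stored
// Partitioner objects. A short usage sketch (assuming a
// LinearAlgebra::distributed::Vector and the default dof_handler_index 0):
//
// @code
//   LinearAlgebra::distributed::Vector<double> vec;
//   matrix_free.initialize_dof_vector(vec, /*dof_handler_index=*/0);
//   // 'vec' is now compatible with FEEvaluation objects constructed
//   // with the same dof_handler_index.
// @endcode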
VectorizedArrayType *dof_values,
std::bool_constant<true>) const
{
-#ifdef DEBUG
- // in debug mode, run non-vectorized version because this path
- // has additional checks (e.g., regarding ghosting)
- process_dofs_vectorized(
- dofs_per_cell, dof_index, vec, dof_values, std::bool_constant<false>());
-#else
- const Number *vec_ptr = vec.begin() + dof_index;
- for (unsigned int i = 0; i < dofs_per_cell;
- ++i, vec_ptr += VectorizedArrayType::size())
- dof_values[i].load(vec_ptr);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // in debug mode, run non-vectorized version because this path
+ // has additional checks (e.g., regarding ghosting)
+ process_dofs_vectorized(dofs_per_cell,
+ dof_index,
+ vec,
+ dof_values,
+ std::bool_constant<false>());
+ }
+ else
+ {
+ const Number *vec_ptr = vec.begin() + dof_index;
+ for (unsigned int i = 0; i < dofs_per_cell;
+ ++i, vec_ptr += VectorizedArrayType::size())
+ dof_values[i].load(vec_ptr);
+ }
}
(void)constant_offset;
(void)vec;
-#ifdef DEBUG
- // in debug mode, run non-vectorized version because this path
- // has additional checks (e.g., regarding ghosting)
- Assert(vec_ptr == vec.begin() + constant_offset, ExcInternalError());
- process_dof_gather(indices,
- vec,
- constant_offset,
- vec_ptr,
- res,
- std::bool_constant<false>());
-#else
- res.gather(vec_ptr, indices);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // in debug mode, run non-vectorized version because this path
+ // has additional checks (e.g., regarding ghosting)
+ Assert(vec_ptr == vec.begin() + constant_offset, ExcInternalError());
+ process_dof_gather(indices,
+ vec,
+ constant_offset,
+ vec_ptr,
+ res,
+ std::bool_constant<false>());
+ }
+ else
+ {
+ res.gather(vec_ptr, indices);
+ }
}
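// In the release branch, res.gather(vec_ptr, indices) performs an indexed
// load across the SIMD lanes, whereas the debug branch routes through the
// scalar code path so that every access can be checked individually. A
// scalar sketch of what the gather computes (illustrative only):
//
// @code
//   for (unsigned int v = 0; v < VectorizedArrayType::size(); ++v)
//     res[v] = vec_ptr[indices[v]];
// @endcode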
fe.n_dofs_per_vertex() > 0;
});
-#ifdef DEBUG
- const bool fine_element_is_discontinuous =
- std::all_of(dof_handler_fine.get_fe_collection().begin(),
- dof_handler_fine.get_fe_collection().end(),
- [](const auto &fe) {
- return fe.n_dofs_per_cell() == 0 ||
- fe.n_dofs_per_vertex() == 0;
- });
+ if constexpr (running_in_debug_mode())
+ {
+ const bool fine_element_is_discontinuous =
+ std::all_of(dof_handler_fine.get_fe_collection().begin(),
+ dof_handler_fine.get_fe_collection().end(),
+ [](const auto &fe) {
+ return fe.n_dofs_per_cell() == 0 ||
+ fe.n_dofs_per_vertex() == 0;
+ });
- Assert(transfer.fine_element_is_continuous !=
- fine_element_is_discontinuous,
- ExcNotImplemented());
-#endif
+ Assert(transfer.fine_element_is_continuous !=
+ fine_element_is_discontinuous,
+ ExcNotImplemented());
+ }
const bool is_feq =
std::all_of(dof_handler_fine.get_fe_collection().begin(),
dof_handler_fine.get_fe_collection().end(),
}
}
-#ifdef DEBUG
- n_active_cells_pre = triangulation->n_active_cells();
-#else
- (void)n_active_cells_pre;
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ n_active_cells_pre = triangulation->n_active_cells();
+ }
+ else
+ {
+ (void)n_active_cells_pre;
+ }
}
CellDataTransfer<dim, spacedim, VectorType>::unpack(const VectorType &in,
VectorType &out)
{
-#ifdef DEBUG
- Assert(in.size() == n_active_cells_pre,
- ExcDimensionMismatch(in.size(), n_active_cells_pre));
- Assert(out.size() == triangulation->n_active_cells(),
- ExcDimensionMismatch(out.size(), triangulation->n_active_cells()));
-#else
- (void)n_active_cells_pre;
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(in.size() == n_active_cells_pre,
+ ExcDimensionMismatch(in.size(), n_active_cells_pre));
+ Assert(out.size() == triangulation->n_active_cells(),
+ ExcDimensionMismatch(out.size(), triangulation->n_active_cells()));
+ }
+ else
+ {
+ (void)n_active_cells_pre;
+ }
// Transfer data of persisting cells.
for (const auto &persisting : persisting_cells_active_index)
copy_data.cell->face(face)->boundary_id()) !=
boundary_functions.end())
{
-#ifdef DEBUG
- // in debug mode: compute an element in the matrix which is
- // guaranteed to belong to a boundary dof. We do this to check
- // that the entries in the cell matrix are guaranteed to be zero
- // if the respective dof is not on the boundary. Since because of
- // round-off, the actual value of the matrix entry may be
- // only close to zero, we assert that it is small relative to an
- // element which is guaranteed to be nonzero. (absolute smallness
- // does not suffice since the size of the domain scales in here)
- //
- // for this purpose we seek the diagonal of the matrix, where
- // there must be an element belonging to the boundary. we take the
- // maximum diagonal entry.
- types::global_dof_index max_element = 0;
- for (const auto index : dof_to_boundary_mapping)
- if ((index != numbers::invalid_dof_index) &&
- (index > max_element))
- max_element = index;
- Assert(max_element == matrix.n() - 1, ExcInternalError());
-
- double max_diag_entry = 0;
- for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
- if (std::abs(copy_data.cell_matrix[pos](i, i)) > max_diag_entry)
- max_diag_entry = std::abs(copy_data.cell_matrix[pos](i, i));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // in debug mode: compute an element in the matrix which is
+ // guaranteed to belong to a boundary dof. We do this to check
+ // that the entries in the cell matrix are guaranteed to be
+ // zero if the respective dof is not on the boundary. Since,
+ // because of round-off, the actual value of the matrix entry
+ // may only be close to zero, we assert that it is small
+ // relative to an element which is guaranteed to be nonzero.
+ // (absolute smallness does not suffice since the size of the
+ // domain scales in here)
+ //
+ // for this purpose we seek the diagonal of the matrix, where
+ // there must be an element belonging to the boundary. we take
+ // the maximum diagonal entry.
+ types::global_dof_index max_element = 0;
+ for (const auto index : dof_to_boundary_mapping)
+ if ((index != numbers::invalid_dof_index) &&
+ (index > max_element))
+ max_element = index;
+ Assert(max_element == matrix.n() - 1, ExcInternalError());
+
+ double max_diag_entry = 0;
+ for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
+ if (std::abs(copy_data.cell_matrix[pos](i, i)) >
+ max_diag_entry)
+ max_diag_entry =
+ std::abs(copy_data.cell_matrix[pos](i, i));
+ }
for (unsigned int i = 0; i < copy_data.dofs_per_cell; ++i)
{
// but it needs to be implemented
if (dim >= 3)
{
-#ifdef DEBUG
- // Assert that there are no hanging nodes at the boundary
- int level = -1;
- for (const auto &cell : dof.active_cell_iterators())
- for (auto f : cell->face_indices())
- {
- if (cell->at_boundary(f))
+ if constexpr (running_in_debug_mode())
+ {
+ // Assert that there are no hanging nodes at the boundary
+ int level = -1;
+ for (const auto &cell : dof.active_cell_iterators())
+ for (auto f : cell->face_indices())
{
- if (level == -1)
- level = cell->level();
- else
+ if (cell->at_boundary(f))
{
- Assert(
- level == cell->level(),
- ExcMessage(
- "The mesh you use in projecting boundary values "
- "has hanging nodes at the boundary. This would require "
- "dealing with hanging node constraints when solving "
- "the linear system on the boundary, but this is not "
- "currently implemented."));
+ if (level == -1)
+ level = cell->level();
+ else
+ {
+ Assert(
+ level == cell->level(),
+ ExcMessage(
+ "The mesh you use in projecting boundary values "
+ "has hanging nodes at the boundary. This would require "
+ "dealing with hanging node constraints when solving "
+ "the linear system on the boundary, but this is not "
+ "currently implemented."));
+ }
}
}
- }
-#endif
+ }
}
sparsity.compress();
{
Assert(cellwise_error.size() == tria.n_active_cells(),
ExcMessage("input vector cell_error has invalid size!"));
-#ifdef DEBUG
- {
- // check that off-processor entries are zero. Otherwise we will compute
- // wrong results below!
- typename InVector::size_type i = 0;
- typename Triangulation<dim, spacedim>::active_cell_iterator it =
- tria.begin_active();
- for (; i < cellwise_error.size(); ++i, ++it)
- if (!it->is_locally_owned())
- Assert(
- std::fabs(cellwise_error[i]) < 1e-20,
- ExcMessage(
- "cellwise_error of cells that are not locally owned need to be zero!"));
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ // check that off-processor entries are zero. Otherwise we will
+ // compute wrong results below!
+ typename InVector::size_type i = 0;
+ typename Triangulation<dim, spacedim>::active_cell_iterator it =
+ tria.begin_active();
+ for (; i < cellwise_error.size(); ++i, ++it)
+ if (!it->is_locally_owned())
+ Assert(
+ std::fabs(cellwise_error[i]) < 1e-20,
+ ExcMessage(
+ "cellwise_error of cells that are not locally owned need to be zero!"));
+ }
+ }
const MPI_Comm comm = tria.get_mpi_communicator();
if (selected)
{
-#ifdef DEBUG
- // make sure that all selected base elements are indeed
- // interpolatory
-
- if (const auto fe_system =
- dynamic_cast<const FESystem<dim> *>(&fe[fe_index]))
+ if constexpr (running_in_debug_mode())
{
- const auto index =
- fe_system->system_to_base_index(i).first.first;
- Assert(fe_system->base_element(index)
- .has_generalized_support_points(),
- ExcMessage("The component mask supplied to "
- "VectorTools::interpolate selects a "
- "non-interpolatory element."));
+ // make sure that all selected base elements are indeed
+ // interpolatory
+
+ if (const auto fe_system =
+ dynamic_cast<const FESystem<dim> *>(&fe[fe_index]))
+ {
+ const auto index =
+ fe_system->system_to_base_index(i).first.first;
+ Assert(fe_system->base_element(index)
+ .has_generalized_support_points(),
+ ExcMessage("The component mask supplied to "
+ "VectorTools::interpolate selects a "
+ "non-interpolatory element."));
+ }
}
-#endif
// Add local values to the global vectors
if (needs_expensive_algorithm[fe_index])
const DiagonalMatrix<decltype(rhs)> &preconditioner =
use_lumped ? *mass_matrix.get_matrix_lumped_diagonal_inverse() :
*mass_matrix.get_matrix_diagonal_inverse();
-#ifdef DEBUG
- // Make sure we picked a valid preconditioner
- const auto &diagonal = preconditioner.get_vector();
- for (const Number &v : diagonal)
- Assert(v > 0.0, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Make sure we picked a valid preconditioner
+ const auto &diagonal = preconditioner.get_vector();
+ for (const Number &v : diagonal)
+ Assert(v > 0.0, ExcInternalError());
+ }
cg.solve(mass_matrix, work_result, rhs, preconditioner);
work_result += inhomogeneities;
Assert(patches.size() > 0, DataOutBase::ExcNoPatches());
// We currently don't support writing mixed meshes:
-# ifdef DEBUG
- for (const auto &patch : patches)
- Assert(patch.reference_cell == patches[0].reference_cell,
- ExcNotImplemented());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &patch : patches)
+ Assert(patch.reference_cell == patches[0].reference_cell,
+ ExcNotImplemented());
+ }
XDMFEntry entry(h5_mesh_filename,
h5_solution_filename,
void
DataOutInterface<dim, spacedim>::validate_dataset_names() const
{
-#ifdef DEBUG
- {
- // Check that names for datasets are only used once. This is somewhat
- // complicated, because vector ranges might have a name or not.
- std::set<std::string> all_names;
-
- const std::vector<
- std::tuple<unsigned int,
- unsigned int,
- std::string,
- DataComponentInterpretation::DataComponentInterpretation>>
- ranges = this->get_nonscalar_data_ranges();
- const std::vector<std::string> data_names = this->get_dataset_names();
- const unsigned int n_data_sets = data_names.size();
- std::vector<bool> data_set_written(n_data_sets, false);
-
- for (const auto &range : ranges)
+ if constexpr (running_in_debug_mode())
+ {
{
- const std::string &name = std::get<2>(range);
- if (!name.empty())
+ // Check that names for datasets are only used once. This is somewhat
+ // complicated, because vector ranges might have a name or not.
+ std::set<std::string> all_names;
+
+ const std::vector<
+ std::tuple<unsigned int,
+ unsigned int,
+ std::string,
+ DataComponentInterpretation::DataComponentInterpretation>>
+ ranges = this->get_nonscalar_data_ranges();
+ const std::vector<std::string> data_names = this->get_dataset_names();
+ const unsigned int n_data_sets = data_names.size();
+ std::vector<bool> data_set_written(n_data_sets, false);
+
+ for (const auto &range : ranges)
{
- Assert(all_names.find(name) == all_names.end(),
- ExcMessage(
- "Error: names of fields in DataOut need to be unique, "
- "but '" +
- name + "' is used more than once."));
- all_names.insert(name);
- for (unsigned int i = std::get<0>(range); i <= std::get<1>(range);
- ++i)
- data_set_written[i] = true;
+ const std::string &name = std::get<2>(range);
+ if (!name.empty())
+ {
+ Assert(all_names.find(name) == all_names.end(),
+ ExcMessage(
+ "Error: names of fields in DataOut need to be unique, "
+ "but '" +
+ name + "' is used more than once."));
+ all_names.insert(name);
+ for (unsigned int i = std::get<0>(range);
+ i <= std::get<1>(range);
+ ++i)
+ data_set_written[i] = true;
+ }
}
- }
- for (unsigned int data_set = 0; data_set < n_data_sets; ++data_set)
- if (data_set_written[data_set] == false)
- {
- const std::string &name = data_names[data_set];
- Assert(all_names.find(name) == all_names.end(),
- ExcMessage(
- "Error: names of fields in DataOut need to be unique, "
- "but '" +
- name + "' is used more than once."));
- all_names.insert(name);
- }
- }
-#endif
+ for (unsigned int data_set = 0; data_set < n_data_sets; ++data_set)
+ if (data_set_written[data_set] == false)
+ {
+ const std::string &name = data_names[data_set];
+ Assert(all_names.find(name) == all_names.end(),
+ ExcMessage(
+ "Error: names of fields in DataOut need to be unique, "
+ "but '" +
+ name + "' is used more than once."));
+ all_names.insert(name);
+ }
+ }
+ }
}
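// A situation the check above is meant to catch (sketch; the vector and
// handler names are hypothetical):
//
// @code
//   DataOut<dim> data_out;
//   data_out.attach_dof_handler(dof_handler);
//   data_out.add_data_vector(solution, "solution");
//   data_out.add_data_vector(old_solution, "solution"); // duplicate name
//   // the duplicate is diagnosed by validate_dataset_names() before the
//   // data is written
// @endcode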
// derived class. Note that the name may be mangled, so it need not be the
// clear-text class name. However, you can obtain the latter by running the
// c++filt program over the output.
-#ifdef DEBUG
-
- // If there are still active pointers, show a message and kill the program.
- // However, under some circumstances, this is not so desirable. For example,
- // in code like this:
- //
- // Triangulation tria;
- // DoFHandler *dh = new DoFHandler(tria);
- // ...some function that throws an exception
- //
- // the exception will lead to the destruction of the triangulation, but since
- // the dof_handler is on the heap it will not be destroyed. This will trigger
- // an assertion in the triangulation. If we kill the program at this point, we
- // will never be able to learn what caused the problem. In this situation,
- // just display a message and continue the program.
- if (counter != 0)
+ if constexpr (running_in_debug_mode())
{
- if (std::uncaught_exceptions() == 0)
+ // If there are still active pointers, show a message and kill the
+ // program. However, under some circumstances, this is not so desirable.
+ // For example, in code like this:
+ //
+ // Triangulation tria;
+ // DoFHandler *dh = new DoFHandler(tria);
+ // ...some function that throws an exception
+ //
+ // the exception will lead to the destruction of the triangulation, but
+ // since the dof_handler is on the heap it will not be destroyed. This
+ // will trigger an assertion in the triangulation. If we kill the program
+ // at this point, we will never be able to learn what caused the problem.
+ // In this situation, just display a message and continue the program.
+ if (counter != 0)
{
- std::string infostring;
- for (const auto &map_entry : counter_map)
+ if (std::uncaught_exceptions() == 0)
{
- if (map_entry.second > 0)
- infostring +=
- "\n from Subscriber " + std::string(map_entry.first);
+ std::string infostring;
+ for (const auto &map_entry : counter_map)
+ {
+ if (map_entry.second > 0)
+ infostring +=
+ "\n from Subscriber " + std::string(map_entry.first);
+ }
+
+ if (infostring.empty())
+ infostring = "<none>";
+
+ AssertNothrow(counter == 0,
+ ExcInUse(counter.load(),
+ object_info->name(),
+ infostring));
+ }
+ else
+ {
+ std::cerr
+ << "---------------------------------------------------------"
+ << std::endl
+ << "An object pointed to by a ObserverPointer is being destroyed."
+ << std::endl
+ << "Under normal circumstances, this would abort the program."
+ << std::endl
+ << "However, another exception is being processed at the"
+ << std::endl
+ << "moment, so the program will continue to run to allow"
+ << std::endl
+ << "this exception to be processed." << std::endl
+ << "---------------------------------------------------------"
+ << std::endl;
}
-
- if (infostring.empty())
- infostring = "<none>";
-
- AssertNothrow(counter == 0,
- ExcInUse(counter.load(),
- object_info->name(),
- infostring));
- }
- else
- {
- std::cerr
- << "---------------------------------------------------------"
- << std::endl
- << "An object pointed to by a ObserverPointer is being destroyed."
- << std::endl
- << "Under normal circumstances, this would abort the program."
- << std::endl
- << "However, another exception is being processed at the"
- << std::endl
- << "moment, so the program will continue to run to allow"
- << std::endl
- << "this exception to be processed." << std::endl
- << "---------------------------------------------------------"
- << std::endl;
}
}
-#endif
}
Assert(next_index == n_elements(), ExcInternalError());
}
-#ifdef DEBUG
- // A consistency check: We should only ever have added indices
- // that are within the range of the index set. Instead of doing
- // this in every one of the many functions that add indices,
- // do this in the current, central location
- for (const auto &range : ranges)
- Assert((range.begin < index_space_size) && (range.end <= index_space_size),
- ExcMessage("In the process of creating the current IndexSet "
- "object, you added indices beyond the size of the index "
- "space. Specifically, you added elements that form the "
- "range [" +
- std::to_string(range.begin) + "," +
- std::to_string(range.end) +
- "), but the size of the index space is only " +
- std::to_string(index_space_size) + "."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // A consistency check: We should only ever have added indices
+ // that are within the range of the index set. Instead of doing
+ // this in every one of the many functions that add indices,
+ // do this in the current, central location
+ for (const auto &range : ranges)
+ Assert((range.begin < index_space_size) &&
+ (range.end <= index_space_size),
+ ExcMessage(
+ "In the process of creating the current IndexSet "
+ "object, you added indices beyond the size of the index "
+ "space. Specifically, you added elements that form the "
+ "range [" +
+ std::to_string(range.begin) + "," + std::to_string(range.end) +
+ "), but the size of the index space is only " +
+ std::to_string(index_space_size) + "."));
+ }
}
start += n_block_indices;
}
-#ifdef DEBUG
- types::global_dof_index sum = 0;
- for (const auto &partition : partitioned)
+ if constexpr (running_in_debug_mode())
{
- sum += partition.size();
+ types::global_dof_index sum = 0;
+ for (const auto &partition : partitioned)
+ {
+ sum += partition.size();
+ }
+ AssertDimension(sum, this->size());
}
- AssertDimension(sum, this->size());
-#endif
return partitioned;
}
compress();
(void)communicator;
-# ifdef DEBUG
- if (!overlapping)
+ if constexpr (running_in_debug_mode())
{
- const size_type n_global_elements =
- Utilities::MPI::sum(n_elements(), communicator);
- Assert(n_global_elements == size(),
- ExcMessage("You are trying to create an Tpetra::Map object "
- "that partitions elements of an index set "
- "between processors. However, the union of the "
- "index sets on different processors does not "
- "contain all indices exactly once: the sum of "
- "the number of entries the various processors "
- "want to store locally is " +
- std::to_string(n_global_elements) +
- " whereas the total size of the object to be "
- "allocated is " +
- std::to_string(size()) +
- ". In other words, there are "
- "either indices that are not spoken for "
- "by any processor, or there are indices that are "
- "claimed by multiple processors."));
+ if (!overlapping)
+ {
+ const size_type n_global_elements =
+ Utilities::MPI::sum(n_elements(), communicator);
+ Assert(n_global_elements == size(),
+ ExcMessage("You are trying to create an Tpetra::Map object "
+ "that partitions elements of an index set "
+ "between processors. However, the union of the "
+ "index sets on different processors does not "
+ "contain all indices exactly once: the sum of "
+ "the number of entries the various processors "
+ "want to store locally is " +
+ std::to_string(n_global_elements) +
+ " whereas the total size of the object to be "
+ "allocated is " +
+ std::to_string(size()) +
+ ". In other words, there are "
+ "either indices that are not spoken for "
+ "by any processor, or there are indices that are "
+ "claimed by multiple processors."));
+ }
}
-# endif
// Find out if the IndexSet is ascending and 1:1. This corresponds to a
// linear Tpetra::Map. Overlapping IndexSets are never 1:1.
compress();
(void)communicator;
-# ifdef DEBUG
- if (!overlapping)
+ if constexpr (running_in_debug_mode())
{
- const size_type n_global_elements =
- Utilities::MPI::sum(n_elements(), communicator);
- Assert(n_global_elements == size(),
- ExcMessage("You are trying to create an Epetra_Map object "
- "that partitions elements of an index set "
- "between processors. However, the union of the "
- "index sets on different processors does not "
- "contain all indices exactly once: the sum of "
- "the number of entries the various processors "
- "want to store locally is " +
- std::to_string(n_global_elements) +
- " whereas the total size of the object to be "
- "allocated is " +
- std::to_string(size()) +
- ". In other words, there are "
- "either indices that are not spoken for "
- "by any processor, or there are indices that are "
- "claimed by multiple processors."));
+ if (!overlapping)
+ {
+ const size_type n_global_elements =
+ Utilities::MPI::sum(n_elements(), communicator);
+ Assert(n_global_elements == size(),
+ ExcMessage("You are trying to create an Epetra_Map object "
+ "that partitions elements of an index set "
+ "between processors. However, the union of the "
+ "index sets on different processors does not "
+ "contain all indices exactly once: the sum of "
+ "the number of entries the various processors "
+ "want to store locally is " +
+ std::to_string(n_global_elements) +
+ " whereas the total size of the object to be "
+ "allocated is " +
+ std::to_string(size()) +
+ ". In other words, there are "
+ "either indices that are not spoken for "
+ "by any processor, or there are indices that are "
+ "claimed by multiple processors."));
+ }
}
-# endif
// Find out if the IndexSet is ascending and 1:1. This corresponds to a
// linear EpetraMap. Overlapping IndexSets are never 1:1.
ierr = MPI_Type_commit(&result);
AssertThrowMPI(ierr);
-# ifdef DEBUG
- MPI_Count size64;
- ierr = MPI_Type_size_x(result, &size64);
- AssertThrowMPI(ierr);
+ if constexpr (running_in_debug_mode())
+ {
+ MPI_Count size64;
+ ierr = MPI_Type_size_x(result, &size64);
+ AssertThrowMPI(ierr);
- Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
-# endif
+ Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
+ }
// Now put the new data type into a std::unique_ptr with a custom
// deleter. We call the std::unique_ptr constructor that as first
const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
-# ifdef DEBUG
- for (const unsigned int destination : destinations)
- AssertIndexRange(destination, n_procs);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const unsigned int destination : destinations)
+ AssertIndexRange(destination, n_procs);
+ }
// Have a little function that checks if destinations provided
// to the current process are unique. The way it does this is
const unsigned int n_procs =
Utilities::MPI::n_mpi_processes(mpi_comm);
-# ifdef DEBUG
- for (const unsigned int destination : destinations)
+ if constexpr (running_in_debug_mode())
{
- AssertIndexRange(destination, n_procs);
- Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
- ExcMessage(
- "There is no point in communicating with ourselves."));
+ for (const unsigned int destination : destinations)
+ {
+ AssertIndexRange(destination, n_procs);
+ Assert(
+ destination != Utilities::MPI::this_mpi_process(mpi_comm),
+ ExcMessage(
+ "There is no point in communicating with ourselves."));
+ }
}
-# endif
// Calculate the number of messages to send to each process
std::vector<unsigned int> dest_vector(n_procs);
// this function in release mode to avoid touching data
// unnecessarily (and overwrite the smaller pieces), as the
// locally owned part comes first
-#ifdef DEBUG
- data.resize(size, invalid_index_value);
- std::fill(data.begin() + start, data.begin() + end, value);
-#else
- data.resize(size, value);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ data.resize(size, invalid_index_value);
+ std::fill(data.begin() + start,
+ data.begin() + end,
+ value);
+ }
+ else
+ {
+ data.resize(size, value);
+ }
}
else
{
Assert(next_index > index_range.first, ExcInternalError());
-#ifdef DEBUG
- // make sure that the owner is the same on the current
- // interval
- for (types::global_dof_index i = index_range.first + 1;
- i < next_index;
- ++i)
- AssertDimension(owner, dof_to_dict_rank(i));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // make sure that the owner is the same on the current
+ // interval
+ for (types::global_dof_index i = index_range.first + 1;
+ i < next_index;
+ ++i)
+ AssertDimension(owner, dof_to_dict_rank(i));
+ }
// add the interval, either to the local range or into a
// buffer to be sent to another processor
// process message: loop over all intervals
for (auto interval : buffer)
{
-# ifdef DEBUG
- for (types::global_dof_index i = interval.first;
- i < interval.second;
- i++)
- Assert(actually_owning_ranks.entry_has_been_set(
- i - local_range.first) == false,
- ExcInternalError());
- Assert(interval.first >= local_range.first &&
- interval.first < local_range.second,
- ExcInternalError());
- Assert(interval.second > local_range.first &&
- interval.second <= local_range.second,
- ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (types::global_dof_index i = interval.first;
+ i < interval.second;
+ i++)
+ Assert(actually_owning_ranks.entry_has_been_set(
+ i - local_range.first) == false,
+ ExcInternalError());
+ Assert(interval.first >= local_range.first &&
+ interval.first < local_range.second,
+ ExcInternalError());
+ Assert(interval.second > local_range.first &&
+ interval.second <= local_range.second,
+ ExcInternalError());
+ }
actually_owning_ranks.fill(interval.first -
local_range.first,
// process message: loop over all intervals
for (auto interval : request)
{
-# ifdef DEBUG
- for (types::global_dof_index i = interval.first;
- i < interval.second;
- i++)
- Assert(
- actually_owning_ranks.entry_has_been_set(
- i - local_range.first) == false,
- ExcMessage(
- "Multiple processes seem to own the same global index. "
- "A possible reason is that the sets of locally owned "
- "indices are not distinct."));
- Assert(interval.first < interval.second,
- ExcInternalError());
- Assert(
- local_range.first <= interval.first &&
- interval.second <= local_range.second,
- ExcMessage(
- "The specified interval is not handled by the current process."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (types::global_dof_index i = interval.first;
+ i < interval.second;
+ i++)
+ Assert(
+ actually_owning_ranks.entry_has_been_set(
+ i - local_range.first) == false,
+ ExcMessage(
+ "Multiple processes seem to own the same global index. "
+ "A possible reason is that the sets of locally owned "
+ "indices are not distinct."));
+ Assert(interval.first < interval.second,
+ ExcInternalError());
+ Assert(
+ local_range.first <= interval.first &&
+ interval.second <= local_range.second,
+ ExcMessage(
+ "The specified interval is not handled by the current process."));
+ }
actually_owning_ranks.fill(interval.first -
local_range.first,
interval.second -
}
-# ifdef DEBUG
- for (const auto &it : requested_indices)
+ if constexpr (running_in_debug_mode())
{
- IndexSet copy_set = it.second;
- copy_set.subtract_set(owned_indices);
- Assert(copy_set.n_elements() == 0,
- ExcInternalError(
- "The indices requested from the current "
- "MPI rank should be locally owned here!"));
+ for (const auto &it : requested_indices)
+ {
+ IndexSet copy_set = it.second;
+ copy_set.subtract_set(owned_indices);
+ Assert(copy_set.n_elements() == 0,
+ ExcInternalError(
+ "The indices requested from the current "
+ "MPI rank should be locally owned here!"));
+ }
}
-# endif
#endif // DEAL_II_WITH_MPI
local_range_data.first);
}
-# ifdef DEBUG
-
- // simple check: the number of processors to which we want to send
- // ghosts and the processors to which ghosts reference should be the
- // same
- AssertDimension(
- Utilities::MPI::sum(import_targets_data.size(), communicator),
- Utilities::MPI::sum(ghost_targets_data.size(), communicator));
-
- // simple check: the number of indices to exchange should match from the
- // ghost indices side and the import indices side
- AssertDimension(Utilities::MPI::sum(n_import_indices_data, communicator),
- Utilities::MPI::sum(n_ghost_indices_data, communicator));
-
- // expensive check that the communication channel is sane -> do a ghost
- // exchange step and see whether the ghost indices sent to us by other
- // processes (ghost_indices) are the same as we hold locally
- // (ghost_indices_ref).
- const std::vector<types::global_dof_index> ghost_indices_ref =
- ghost_indices_data.get_index_vector();
- AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
- std::vector<types::global_dof_index> indices_to_send(n_import_indices());
- std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
-
- const std::vector<types::global_dof_index> my_indices =
- locally_owned_range_data.get_index_vector();
- std::vector<MPI_Request> requests;
- n_ghost_indices_in_larger_set = n_ghost_indices_data;
- export_to_ghosted_array_start(127,
- ArrayView<const types::global_dof_index>(
- my_indices.data(), my_indices.size()),
- make_array_view(indices_to_send),
- make_array_view(ghost_indices),
- requests);
- export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);
- int flag = 0;
- const int ierr = MPI_Testall(requests.size(),
- requests.data(),
- &flag,
- MPI_STATUSES_IGNORE);
- AssertThrowMPI(ierr);
- Assert(flag == 1,
- ExcMessage(
- "MPI found unfinished requests. Check communication setup"));
-
- for (unsigned int i = 0; i < ghost_indices.size(); ++i)
- AssertDimension(ghost_indices[i], ghost_indices_ref[i]);
+ if constexpr (running_in_debug_mode())
+ {
+ // simple check: the number of processors to which we want to send
+ // ghosts and the processors to which ghosts reference should be the
+ // same
+ AssertDimension(
+ Utilities::MPI::sum(import_targets_data.size(), communicator),
+ Utilities::MPI::sum(ghost_targets_data.size(), communicator));
+
+ // simple check: the number of indices to exchange should match from
+ // the ghost indices side and the import indices side
+ AssertDimension(
+ Utilities::MPI::sum(n_import_indices_data, communicator),
+ Utilities::MPI::sum(n_ghost_indices_data, communicator));
+
+ // expensive check that the communication channel is sane -> do a
+ // ghost exchange step and see whether the ghost indices sent to us by
+ // other processes (ghost_indices) are the same as we hold locally
+ // (ghost_indices_ref).
+ const std::vector<types::global_dof_index> ghost_indices_ref =
+ ghost_indices_data.get_index_vector();
+ AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
+ std::vector<types::global_dof_index> indices_to_send(
+ n_import_indices());
+ std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
+
+ const std::vector<types::global_dof_index> my_indices =
+ locally_owned_range_data.get_index_vector();
+ std::vector<MPI_Request> requests;
+ n_ghost_indices_in_larger_set = n_ghost_indices_data;
+ export_to_ghosted_array_start(
+ 127,
+ ArrayView<const types::global_dof_index>(my_indices.data(),
+ my_indices.size()),
+ make_array_view(indices_to_send),
+ make_array_view(ghost_indices),
+ requests);
+ export_to_ghosted_array_finish(make_array_view(ghost_indices),
+ requests);
+ int flag = 0;
+ const int ierr = MPI_Testall(requests.size(),
+ requests.data(),
+ &flag,
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ Assert(flag == 1,
+ ExcMessage(
+ "MPI found unfinished requests. Check communication setup"));
-# endif
+ for (unsigned int i = 0; i < ghost_indices.size(); ++i)
+ AssertDimension(ghost_indices[i], ghost_indices_ref[i]);
+ }
# endif // #ifdef DEAL_II_WITH_MPI
AssertThrowMPI(ierr);
// Double check that the process with rank 0 in subgroup is active:
-# ifdef DEBUG
- if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
- Utilities::MPI::this_mpi_process(
- mpi_communicator_inactive_with_root) == 0)
- Assert(mpi_process_is_active, ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
+ Utilities::MPI::this_mpi_process(
+ mpi_communicator_inactive_with_root) == 0)
+ Assert(mpi_process_is_active, ExcInternalError());
+ }
}
++present_index;
}
-#ifdef DEBUG
- if (size() > 0)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0;
- for (unsigned int i = 0; i < size(); ++i)
- sum += weights[i];
- // we cannot guarantee the sum of weights to be exactly one, but it should
- // be near that.
- Assert((sum > 0.999999) && (sum < 1.000001), ExcInternalError());
+ if (size() > 0)
+ {
+ double sum = 0;
+ for (unsigned int i = 0; i < size(); ++i)
+ sum += weights[i];
+ // we cannot guarantee the sum of weights to be exactly one, but it
+ // should be near that.
+ Assert((sum > 0.999999) && (sum < 1.000001), ExcInternalError());
+ }
}
-#endif
if (is_tensor_product_flag)
{
++present_index;
}
-# ifdef DEBUG
- if (size() > 0)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0;
- for (unsigned int i = 0; i < size(); ++i)
- sum += weights[i];
- // we cannot guarantee the sum of weights to be exactly one, but it should
- // be near that.
- Assert((sum > 0.999999) && (sum < 1.000001), ExcInternalError());
+ if (size() > 0)
+ {
+ double sum = 0;
+ for (unsigned int i = 0; i < size(); ++i)
+ sum += weights[i];
+ // we cannot guarantee the sum of weights to be exactly one, but it
+ // should be near that.
+ Assert((sum > 0.999999) && (sum < 1.000001), ExcInternalError());
+ }
}
-# endif
}
else if (std::abs(i[0] - 1.0) < 1e-12)
i[0] = 1.0;
-#ifdef DEBUG
- double sum_of_weights = 0;
- for (unsigned int i = 0; i < this->size(); ++i)
- sum_of_weights += this->weight(i);
- Assert(std::fabs(sum_of_weights - 1) < 1e-13, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ double sum_of_weights = 0;
+ for (unsigned int i = 0; i < this->size(); ++i)
+ sum_of_weights += this->weight(i);
+ Assert(std::fabs(sum_of_weights - 1) < 1e-13, ExcInternalError());
+ }
}
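// The weights of a quadrature rule on the unit reference cell sum to its
// measure, i.e. one, which is what the assertions above verify up to
// round-off. A small example (using QGauss, not part of the patch):
//
// @code
//   QGauss<1> quadrature(2); // two-point Gauss rule on [0,1]
//   double sum = 0;
//   for (unsigned int q = 0; q < quadrature.size(); ++q)
//     sum += quadrature.weight(q);
//   // sum is 0.5 + 0.5 == 1 up to floating-point round-off
// @endcode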
std::map<std::string, Column>::const_iterator col_iter = columns.begin();
unsigned int n = col_iter->second.entries.size();
-#ifdef DEBUG
- std::string first_name = col_iter->first;
- for (++col_iter; col_iter != columns.end(); ++col_iter)
- Assert(col_iter->second.entries.size() == n,
- ExcWrongNumberOfDataEntries(
- col_iter->first, col_iter->second.entries.size(), first_name, n));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ std::string first_name = col_iter->first;
+ for (++col_iter; col_iter != columns.end(); ++col_iter)
+ Assert(col_iter->second.entries.size() == n,
+ ExcWrongNumberOfDataEntries(col_iter->first,
+ col_iter->second.entries.size(),
+ first_name,
+ n));
+ }
return n;
}
}
else
{
-#ifdef DEBUG
- unsigned int n_poly = 1;
- for (unsigned int d = 0; d < dim; ++d)
- n_poly *= polynomials[d].size();
- Assert(i < n_poly, ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ unsigned int n_poly = 1;
+ for (unsigned int d = 0; d < dim; ++d)
+ n_poly *= polynomials[d].size();
+ Assert(i < n_poly, ExcInternalError());
+ }
if (dim == 0)
{
ExcMessage(
"Cannot register symbols once the optimizer is finalized."));
-# ifdef DEBUG
- // Ensure that all of the keys in the map are actually symbolic
- // in nature
- for (const auto &entry : substitution_map)
+ if constexpr (running_in_debug_mode())
{
- const SD::Expression &symbol = entry.first;
- Assert(SymEngine::is_a<SymEngine::Symbol>(*(symbol.get_RCP())),
- ExcMessage("Key entry in map is not a symbol."));
+ // Ensure that all of the keys in the map are actually symbolic
+ // in nature
+ for (const auto &entry : substitution_map)
+ {
+ const SD::Expression &symbol = entry.first;
+ Assert(SymEngine::is_a<SymEngine::Symbol>(*(symbol.get_RCP())),
+ ExcMessage("Key entry in map is not a symbol."));
+ }
}
-# endif
// Merge the two maps, in the process ensuring that there is no
// duplication of symbols
independent_variables_symbols.insert(substitution_map.begin(),
// Check that the registered symbol map and the input map are compatible
// with one another
-# ifdef DEBUG
- const SD::types::symbol_vector symbol_sub_vec =
- Utilities::extract_symbols(substitution_map);
- const SD::types::symbol_vector symbol_vec =
- Utilities::extract_symbols(independent_variables_symbols);
- Assert(symbol_sub_vec.size() == symbol_vec.size(),
- ExcDimensionMismatch(symbol_sub_vec.size(), symbol_vec.size()));
- for (unsigned int i = 0; i < symbol_sub_vec.size(); ++i)
+ if constexpr (running_in_debug_mode())
{
- Assert(numbers::values_are_equal(symbol_sub_vec[i], symbol_vec[i]),
- ExcMessage(
- "The input substitution map is either incomplete, or does "
- "not match that used in the register_symbols() call."));
+ const SD::types::symbol_vector symbol_sub_vec =
+ Utilities::extract_symbols(substitution_map);
+ const SD::types::symbol_vector symbol_vec =
+ Utilities::extract_symbols(independent_variables_symbols);
+ Assert(symbol_sub_vec.size() == symbol_vec.size(),
+ ExcDimensionMismatch(symbol_sub_vec.size(),
+ symbol_vec.size()));
+ for (unsigned int i = 0; i < symbol_sub_vec.size(); ++i)
+ {
+ Assert(
+ numbers::values_are_equal(symbol_sub_vec[i], symbol_vec[i]),
+ ExcMessage(
+ "The input substitution map is either incomplete, or does "
+ "not match that used in the register_symbols() call."));
+ }
}
-# endif
// Extract the values from the substitution map, and use the other
// function
dependent_variables_output.reserve(n_dependent_variables() + 1);
const bool entry_registered =
(map_dep_expr_vec_entry.find(func) != map_dep_expr_vec_entry.end());
-# ifdef DEBUG
- if (entry_registered == true &&
- is_valid_nonunique_dependent_variable(func) == false)
- Assert(entry_registered,
- ExcMessage("Function has already been registered."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (entry_registered == true &&
+ is_valid_nonunique_dependent_variable(func) == false)
+ Assert(entry_registered,
+ ExcMessage("Function has already been registered."));
+ }
if (entry_registered == false)
{
dependent_variables_functions.push_back(func);
{
const bool entry_registered =
(map_dep_expr_vec_entry.find(func) != map_dep_expr_vec_entry.end());
-# ifdef DEBUG
- if (entry_registered == true &&
- is_valid_nonunique_dependent_variable(func) == false)
- Assert(entry_registered,
- ExcMessage("Function has already been registered."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (entry_registered == true &&
+ is_valid_nonunique_dependent_variable(func) == false)
+ Assert(entry_registered,
+ ExcMessage("Function has already been registered."));
+ }
if (entry_registered == false)
{
dependent_variables_functions.push_back(func);
break;
case CellStatus::children_will_be_coarsened:
-#ifdef DEBUG
- for (const auto &child : cell->child_iterators())
- Assert(child->is_active() && child->coarsen_flag_set(),
- typename dealii::Triangulation<
- dim>::ExcInconsistentCoarseningFlags());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &child : cell->child_iterators())
+ Assert(child->is_active() && child->coarsen_flag_set(),
+ typename dealii::Triangulation<
+ dim>::ExcInconsistentCoarseningFlags());
+ }
fe_index = dealii::internal::hp::DoFHandlerImplementation::
dominated_future_fe_on_children<dim, spacedim>(cell);
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
void Triangulation<dim, spacedim>::partition()
{
-# ifdef DEBUG
- // Check that all meshes are the same (or at least have the same
- // total number of active cells):
- const unsigned int max_active_cells =
- Utilities::MPI::max(this->n_active_cells(),
- this->get_mpi_communicator());
- Assert(
- max_active_cells == this->n_active_cells(),
- ExcMessage(
- "A parallel::shared::Triangulation needs to be refined in the same "
- "way on all processors, but the participating processors don't "
- "agree on the number of active cells."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Check that all meshes are the same (or at least have the same
+ // total number of active cells):
+ const unsigned int max_active_cells =
+ Utilities::MPI::max(this->n_active_cells(),
+ this->get_mpi_communicator());
+ Assert(
+ max_active_cells == this->n_active_cells(),
+ ExcMessage(
+ "A parallel::shared::Triangulation needs to be refined in the same "
+ "way on all processors, but the participating processors don't "
+ "agree on the number of active cells."));
+ }
auto partition_settings = (partition_zoltan | partition_metis |
partition_zorder | partition_custom_signal) &
true_subdomain_ids_of_cells[index] = cell->subdomain_id();
}
-# ifdef DEBUG
- {
- // Assert that each cell is owned by a processor
- const unsigned int n_my_cells = std::count_if(
- this->begin_active(),
- typename Triangulation<dim, spacedim>::active_cell_iterator(
- this->end()),
- [](const auto &i) { return (i.is_locally_owned()); });
-
- const unsigned int total_cells =
- Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
- Assert(total_cells == this->n_active_cells(),
- ExcMessage("Not all cells are assigned to a processor."));
- }
-
- // If running with multigrid, assert that each level
- // cell is owned by a processor
- if (settings & construct_multigrid_hierarchy)
+ if constexpr (running_in_debug_mode())
{
- const unsigned int n_my_cells =
- std::count_if(this->begin(), this->end(), [](const auto &i) {
- return (i.is_locally_owned_on_level());
- });
+ {
+ // Assert that each cell is owned by a processor
+ const unsigned int n_my_cells = std::count_if(
+ this->begin_active(),
+ typename Triangulation<dim, spacedim>::active_cell_iterator(
+ this->end()),
+ [](const auto &i) { return (i.is_locally_owned()); });
+
+ const unsigned int total_cells =
+ Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
+ Assert(total_cells == this->n_active_cells(),
+ ExcMessage("Not all cells are assigned to a processor."));
+ }
+
+ // If running with multigrid, assert that each level
+ // cell is owned by a processor
+ if (settings & construct_multigrid_hierarchy)
+ {
+ const unsigned int n_my_cells =
+ std::count_if(this->begin(), this->end(), [](const auto &i) {
+ return (i.is_locally_owned_on_level());
+ });
- const unsigned int total_cells =
- Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
- Assert(total_cells == this->n_cells(),
- ExcMessage("Not all cells are assigned to a processor."));
+ const unsigned int total_cells =
+ Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
+ Assert(total_cells == this->n_cells(),
+ ExcMessage("Not all cells are assigned to a processor."));
+ }
}
-# endif
}
}
}
-# ifdef DEBUG
- // There must not be any chains!
- for (unsigned int i = 0; i < topological_vertex_numbering.size(); ++i)
+ if constexpr (running_in_debug_mode())
{
- const unsigned int j = topological_vertex_numbering[i];
- Assert(j == i || topological_vertex_numbering[j] == j,
- ExcMessage("Got inconclusive constraints with chain: " +
- std::to_string(i) + " vs " + std::to_string(j) +
- " which should be equal to " +
- std::to_string(topological_vertex_numbering[j])));
+ // There must not be any chains!
+ for (unsigned int i = 0; i < topological_vertex_numbering.size();
+ ++i)
+ {
+ const unsigned int j = topological_vertex_numbering[i];
+ Assert(j == i || topological_vertex_numbering[j] == j,
+ ExcMessage(
+ "Got inconclusive constraints with chain: " +
+ std::to_string(i) + " vs " + std::to_string(j) +
+ " which should be equal to " +
+ std::to_string(topological_vertex_numbering[j])));
+ }
}
-# endif
// this code is replicated from grid/tria.cc but using an indirection
}
while (mesh_changed);
-# ifdef DEBUG
- // check if correct number of ghosts is created
- unsigned int num_ghosts = 0;
-
- for (const auto &cell : this->active_cell_iterators())
+ if constexpr (running_in_debug_mode())
{
- if (cell->subdomain_id() != this->my_subdomain &&
- cell->subdomain_id() != numbers::artificial_subdomain_id)
- ++num_ghosts;
- }
+ // check if correct number of ghosts is created
+ unsigned int num_ghosts = 0;
- Assert(num_ghosts == parallel_ghost->ghosts.elem_count,
- ExcInternalError());
-# endif
+ for (const auto &cell : this->active_cell_iterators())
+ {
+ if (cell->subdomain_id() != this->my_subdomain &&
+ cell->subdomain_id() != numbers::artificial_subdomain_id)
+ ++num_ghosts;
+ }
+
+ Assert(num_ghosts == parallel_ghost->ghosts.elem_count,
+ ExcInternalError());
+ }
-# ifdef DEBUG
- // check that our local copy has exactly as many cells as the p4est
- // original (at least if we are on only one processor); for parallel
- // computations, we want to check that we have at least as many as p4est
- // stores locally (in the future we should check that we have exactly as
- // many non-artificial cells as parallel_forest->local_num_quadrants)
- {
- const unsigned int total_local_cells = this->n_active_cells();
+ if constexpr (running_in_debug_mode())
+ {
+ // check that our local copy has exactly as many cells as the p4est
+ // original (at least if we are on only one processor); for parallel
+ // computations, we want to check that we have at least as many as
+ // p4est stores locally (in the future we should check that we have
+ // exactly as many non-artificial cells as
+ // parallel_forest->local_num_quadrants)
+ {
+ const unsigned int total_local_cells = this->n_active_cells();
- if (Utilities::MPI::n_mpi_processes(this->mpi_communicator) == 1)
- {
- Assert(static_cast<unsigned int>(
- parallel_forest->local_num_quadrants) == total_local_cells,
- ExcInternalError());
- }
- else
- {
+ if (Utilities::MPI::n_mpi_processes(this->mpi_communicator) == 1)
+ {
+ Assert(static_cast<unsigned int>(
+ parallel_forest->local_num_quadrants) ==
+ total_local_cells,
+ ExcInternalError());
+ }
+ else
+ {
+ Assert(static_cast<unsigned int>(
+ parallel_forest->local_num_quadrants) <=
+ total_local_cells,
+ ExcInternalError());
+ }
+
+ // count the number of owned, active cells and compare with p4est.
+ unsigned int n_owned = 0;
+ for (const auto &cell : this->active_cell_iterators())
+ {
+ if (cell->subdomain_id() == this->my_subdomain)
+ ++n_owned;
+ }
+
Assert(static_cast<unsigned int>(
- parallel_forest->local_num_quadrants) <= total_local_cells,
+ parallel_forest->local_num_quadrants) == n_owned,
ExcInternalError());
}
-
- // count the number of owned, active cells and compare with p4est.
- unsigned int n_owned = 0;
- for (const auto &cell : this->active_cell_iterators())
- {
- if (cell->subdomain_id() == this->my_subdomain)
- ++n_owned;
- }
-
- Assert(static_cast<unsigned int>(
- parallel_forest->local_num_quadrants) == n_owned,
- ExcInternalError());
- }
-# endif
+ }
this->smooth_grid = save_smooth;
void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
{
// do not allow anisotropic refinement
-# ifdef DEBUG
- for (const auto &cell : this->active_cell_iterators())
- if (cell->is_locally_owned() && cell->refine_flag_set())
- Assert(cell->refine_flag_set() ==
- RefinementPossibilities<dim>::isotropic_refinement,
- ExcMessage(
- "This class does not support anisotropic refinement"));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &cell : this->active_cell_iterators())
+ if (cell->is_locally_owned() && cell->refine_flag_set())
+ Assert(cell->refine_flag_set() ==
+ RefinementPossibilities<dim>::isotropic_refinement,
+ ExcMessage(
+ "This class does not support anisotropic refinement"));
+ }
// safety check: p4est has an upper limit on the level of a cell
this->data_serializer.unpack_cell_status(this->local_cell_relations);
}
-# ifdef DEBUG
- // Check that we know the level subdomain ids of all our neighbors. This
- // also involves coarser cells that share a vertex if they are active.
- //
- // Example (M= my, O=other):
- // *------*
- // | |
- // | O |
- // | |
- // *---*---*------*
- // | M | M |
- // *---*---*
- // | | M |
- // *---*---*
- // ^- the parent can be owned by somebody else, so O is not a neighbor
- // one level coarser
- if (settings & construct_multigrid_hierarchy)
+ if constexpr (running_in_debug_mode())
{
- for (unsigned int lvl = 0; lvl < this->n_global_levels(); ++lvl)
+ // Check that we know the level subdomain ids of all our neighbors.
+ // This also involves coarser cells that share a vertex if they are
+ // active.
+ //
+ // Example (M= my, O=other):
+ // *------*
+ // | |
+ // | O |
+ // | |
+ // *---*---*------*
+ // | M | M |
+ // *---*---*
+ // | | M |
+ // *---*---*
+ // ^- the parent can be owned by somebody else, so O is not a
+ // neighbor one level coarser
+ if (settings & construct_multigrid_hierarchy)
{
- std::vector<bool> active_verts =
- this->mark_locally_active_vertices_on_level(lvl);
-
- const unsigned int maybe_coarser_lvl =
- (lvl > 0) ? (lvl - 1) : lvl;
- typename Triangulation<dim, spacedim>::cell_iterator
- cell = this->begin(maybe_coarser_lvl),
- endc = this->end(lvl);
- for (; cell != endc; ++cell)
- if (cell->level() == static_cast<int>(lvl) || cell->is_active())
- {
- const bool is_level_artificial =
- (cell->level_subdomain_id() ==
- numbers::artificial_subdomain_id);
- bool need_to_know = false;
- for (const unsigned int vertex :
- GeometryInfo<dim>::vertex_indices())
- if (active_verts[cell->vertex_index(vertex)])
- {
- need_to_know = true;
- break;
- }
+ for (unsigned int lvl = 0; lvl < this->n_global_levels(); ++lvl)
+ {
+ std::vector<bool> active_verts =
+ this->mark_locally_active_vertices_on_level(lvl);
+
+ const unsigned int maybe_coarser_lvl =
+ (lvl > 0) ? (lvl - 1) : lvl;
+ typename Triangulation<dim, spacedim>::cell_iterator
+ cell = this->begin(maybe_coarser_lvl),
+ endc = this->end(lvl);
+ for (; cell != endc; ++cell)
+ if (cell->level() == static_cast<int>(lvl) ||
+ cell->is_active())
+ {
+ const bool is_level_artificial =
+ (cell->level_subdomain_id() ==
+ numbers::artificial_subdomain_id);
+ bool need_to_know = false;
+ for (const unsigned int vertex :
+ GeometryInfo<dim>::vertex_indices())
+ if (active_verts[cell->vertex_index(vertex)])
+ {
+ need_to_know = true;
+ break;
+ }
- Assert(
- !need_to_know || !is_level_artificial,
- ExcMessage(
- "Internal error: the owner of cell" +
- cell->id().to_string() +
- " is unknown even though it is needed for geometric multigrid."));
- }
+ Assert(
+ !need_to_know || !is_level_artificial,
+ ExcMessage(
+ "Internal error: the owner of cell" +
+ cell->id().to_string() +
+ " is unknown even though it is needed for geometric multigrid."));
+ }
+ }
}
}
-# endif
this->update_periodic_face_map();
this->update_number_cache();
DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
void Triangulation<dim, spacedim>::repartition()
{
-# ifdef DEBUG
- for (const auto &cell : this->active_cell_iterators())
- if (cell->is_locally_owned())
- Assert(
- !cell->refine_flag_set() && !cell->coarsen_flag_set(),
- ExcMessage(
- "Error: There shouldn't be any cells flagged for coarsening/refinement when calling repartition()."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &cell : this->active_cell_iterators())
+ if (cell->is_locally_owned())
+ Assert(
+ !cell->refine_flag_set() && !cell->coarsen_flag_set(),
+ ExcMessage(
+ "Error: There shouldn't be any cells flagged for coarsening/refinement when calling repartition()."));
+ }
// signal that repartitioning is going to happen
this->signals.pre_distributed_repartition();
this->number_cache.level_ghost_owners.insert(
cell->level_subdomain_id());
-# ifdef DEBUG
- // Check that level_ghost_owners is symmetric by sending a message
- // to everyone
- {
- int ierr = MPI_Barrier(this->mpi_communicator);
- AssertThrowMPI(ierr);
-
- const int mpi_tag = Utilities::MPI::internal::Tags::
- triangulation_base_fill_level_ghost_owners;
-
- // important: preallocate to avoid (re)allocation:
- std::vector<MPI_Request> requests(
- this->number_cache.level_ghost_owners.size());
- unsigned int dummy = 0;
- unsigned int req_counter = 0;
-
- for (const auto &it : this->number_cache.level_ghost_owners)
- {
- ierr = MPI_Isend(&dummy,
- 1,
- MPI_UNSIGNED,
- it,
- mpi_tag,
- this->mpi_communicator,
- &requests[req_counter]);
- AssertThrowMPI(ierr);
- ++req_counter;
- }
-
- for (const auto &it : this->number_cache.level_ghost_owners)
+ if constexpr (running_in_debug_mode())
+ {
+ // Check that level_ghost_owners is symmetric by sending a message
+ // to everyone
{
- unsigned int dummy;
- ierr = MPI_Recv(&dummy,
- 1,
- MPI_UNSIGNED,
- it,
- mpi_tag,
- this->mpi_communicator,
- MPI_STATUS_IGNORE);
+ int ierr = MPI_Barrier(this->mpi_communicator);
AssertThrowMPI(ierr);
- }
- if (requests.size() > 0)
- {
- ierr = MPI_Waitall(requests.size(),
- requests.data(),
- MPI_STATUSES_IGNORE);
+ const int mpi_tag = Utilities::MPI::internal::Tags::
+ triangulation_base_fill_level_ghost_owners;
+
+ // important: preallocate to avoid (re)allocation:
+ std::vector<MPI_Request> requests(
+ this->number_cache.level_ghost_owners.size());
+ unsigned int dummy = 0;
+ unsigned int req_counter = 0;
+
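+          // Send a dummy message to every process we list as a level ghost
+          // owner. If the relation is symmetric, exactly one such message
+          // will also arrive from each of them below.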
+ for (const auto &it : this->number_cache.level_ghost_owners)
+ {
+ ierr = MPI_Isend(&dummy,
+ 1,
+ MPI_UNSIGNED,
+ it,
+ mpi_tag,
+ this->mpi_communicator,
+ &requests[req_counter]);
+ AssertThrowMPI(ierr);
+ ++req_counter;
+ }
+
+ for (const auto &it : this->number_cache.level_ghost_owners)
+ {
+ unsigned int dummy;
+ ierr = MPI_Recv(&dummy,
+ 1,
+ MPI_UNSIGNED,
+ it,
+ mpi_tag,
+ this->mpi_communicator,
+ MPI_STATUS_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+
+ if (requests.size() > 0)
+ {
+ ierr = MPI_Waitall(requests.size(),
+ requests.data(),
+ MPI_STATUSES_IGNORE);
+ AssertThrowMPI(ierr);
+ }
+
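+          // Make sure all processes have finished this debug-only exchange
+          // before anyone continues.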
+ ierr = MPI_Barrier(this->mpi_communicator);
AssertThrowMPI(ierr);
}
-
- ierr = MPI_Barrier(this->mpi_communicator);
- AssertThrowMPI(ierr);
- }
-# endif
+ }
Assert(this->number_cache.level_ghost_owners.size() <
Utilities::MPI::n_mpi_processes(this->mpi_communicator),
const std::vector<bool> &vertex_locally_moved)
{
AssertDimension(vertex_locally_moved.size(), this->n_vertices());
-#ifdef DEBUG
- {
- const std::vector<bool> locally_owned_vertices =
- dealii::GridTools::get_locally_owned_vertices(*this);
- for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
- Assert((vertex_locally_moved[i] == false) ||
- (locally_owned_vertices[i] == true),
- ExcMessage("The vertex_locally_moved argument must not "
- "contain vertices that are not locally owned"));
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ const std::vector<bool> locally_owned_vertices =
+ dealii::GridTools::get_locally_owned_vertices(*this);
+ for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
+ Assert((vertex_locally_moved[i] == false) ||
+ (locally_owned_vertices[i] == true),
+ ExcMessage("The vertex_locally_moved argument must not "
+ "contain vertices that are not locally owned"));
+ }
+ }
Point<spacedim> invalid_point;
for (unsigned int d = 0; d < spacedim; ++d)
return a;
}
- /**
- * Check if a vector is a deal.II vector.
- */
- template <typename VectorType>
- constexpr bool is_dealii_vector =
- std::is_same_v<VectorType,
- dealii::Vector<typename VectorType::value_type>> ||
- std::is_same_v<VectorType,
- dealii::BlockVector<typename VectorType::value_type>> ||
- std::is_same_v<VectorType,
- dealii::LinearAlgebra::distributed::Vector<
- typename VectorType::value_type>> ||
- std::is_same_v<VectorType,
- dealii::LinearAlgebra::distributed::BlockVector<
- typename VectorType::value_type>>;
/**
* Helper functions that call set_ghost_state() if the vector supports this
}
#endif
+ namespace
+ {
+ // Test whether a vector is a deal.II vector
+ template <typename VectorType>
+ constexpr bool is_dealii_vector =
+ std::is_same_v<VectorType,
+ dealii::Vector<typename VectorType::value_type>> ||
+ std::is_same_v<VectorType,
+ dealii::BlockVector<typename VectorType::value_type>> ||
+ std::is_same_v<VectorType,
+ dealii::LinearAlgebra::distributed::Vector<
+ typename VectorType::value_type>> ||
+ std::is_same_v<VectorType,
+ dealii::LinearAlgebra::distributed::BlockVector<
+ typename VectorType::value_type>>;
+ } // namespace
+
+
/**
* Helper function that sets the values on a cell, but also checks if the
* new values are similar to the old values.
{
(void)perform_check;
-#ifdef DEBUG
- if (perform_check && is_dealii_vector<OutputVector>)
+ if constexpr (running_in_debug_mode())
{
- const bool old_ghost_state = values.has_ghost_elements();
- set_ghost_state(values, true);
-
- Vector<number> local_values_old(cell.get_fe().n_dofs_per_cell());
- cell.get_dof_values(values, local_values_old);
-
- for (unsigned int i = 0; i < cell.get_fe().n_dofs_per_cell(); ++i)
+ if (perform_check && is_dealii_vector<OutputVector>)
{
- // a check consistent with the one in
- // Utilities::MPI::Partitioner::import_from_ghosted_array_finish()
- Assert(local_values_old[i] == number() ||
- get_abs(local_values_old[i] - local_values[i]) <=
- get_abs(local_values_old[i] + local_values[i]) *
- 100000. *
- std::numeric_limits<typename numbers::NumberTraits<
- number>::real_type>::epsilon(),
- ExcNonMatchingElementsSetDofValuesByInterpolation<number>(
- local_values[i], local_values_old[i]));
+ const bool old_ghost_state = values.has_ghost_elements();
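+                  // Temporarily flag the vector as ghosted so that its current
+                  // values on this cell can be read; the original ghost state
+                  // is restored below.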
+ set_ghost_state(values, true);
+
+ Vector<number> local_values_old(cell.get_fe().n_dofs_per_cell());
+ cell.get_dof_values(values, local_values_old);
+
+ for (unsigned int i = 0; i < cell.get_fe().n_dofs_per_cell(); ++i)
+ {
+ // a check consistent with the one in
+ // Utilities::MPI::Partitioner::import_from_ghosted_array_finish()
+ Assert(
+ local_values_old[i] == number() ||
+ get_abs(local_values_old[i] - local_values[i]) <=
+ get_abs(local_values_old[i] + local_values[i]) * 100000. *
+ std::numeric_limits<typename numbers::NumberTraits<
+ number>::real_type>::epsilon(),
+ ExcNonMatchingElementsSetDofValuesByInterpolation<number>(
+ local_values[i], local_values_old[i]));
+ }
+
+ set_ghost_state(values, old_ghost_state);
}
-
- set_ghost_state(values, old_ghost_state);
}
-#endif
cell.set_dof_values(local_values, values);
}
vertex_fe_association[cell->active_fe_index()]
[cell->vertex_index(v)] = true;
- // in debug mode, make sure that each vertex is associated
- // with at least one FE (note that except for unused
- // vertices, all vertices are actually active). this is of
- // course only true for vertices that are part of either
- // ghost or locally owned cells
-#ifdef DEBUG
- for (unsigned int v = 0; v < dof_handler.tria->n_vertices(); ++v)
- if (locally_used_vertices[v] == true)
- if (dof_handler.tria->vertex_used(v) == true)
- {
- unsigned int fe = 0;
- for (; fe < dof_handler.fe_collection.size(); ++fe)
- if (vertex_fe_association[fe][v] == true)
- break;
- Assert(fe != dof_handler.fe_collection.size(),
- ExcInternalError());
- }
-#endif
+ // in debug mode, make sure that each vertex is associated
+ // with at least one FE (note that except for unused
+ // vertices, all vertices are actually active). this is of
+ // course only true for vertices that are part of either
+ // ghost or locally owned cells
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int v = 0; v < dof_handler.tria->n_vertices(); ++v)
+ if (locally_used_vertices[v] == true)
+ if (dof_handler.tria->vertex_used(v) == true)
+ {
+ unsigned int fe = 0;
+ for (; fe < dof_handler.fe_collection.size(); ++fe)
+ if (vertex_fe_association[fe][v] == true)
+ break;
+ Assert(fe != dof_handler.fe_collection.size(),
+ ExcInternalError());
+ }
+ }
const unsigned int d = 0;
const unsigned int l = 0;
// based on the 'least dominant finite element' of its
// children. Consider the childrens' hypothetical future
// index when they have been flagged for p-refinement.
-#ifdef DEBUG
- for (const auto &child : parent->child_iterators())
- Assert(child->is_active() &&
- child->coarsen_flag_set(),
- typename dealii::Triangulation<
- dim>::ExcInconsistentCoarseningFlags());
-#endif
+                          if constexpr (running_in_debug_mode())
+ {
+ for (const auto &child : parent->child_iterators())
+ Assert(child->is_active() &&
+ child->coarsen_flag_set(),
+ typename dealii::Triangulation<
+ dim>::ExcInconsistentCoarseningFlags());
+ }
const types::fe_index fe_index = dealii::internal::hp::
DoFHandlerImplementation::Implementation::
ExcMessage("The given hp::FECollection contains more finite elements "
"than the DoFHandler can cover with active FE indices."));
-# ifdef DEBUG
- // make sure that the provided FE collection is large enough to
- // cover all FE indices presently in use on the mesh
- if ((hp_cell_active_fe_indices.size() > 0) &&
- (hp_cell_future_fe_indices.size() > 0))
+ if constexpr (running_in_debug_mode())
{
- Assert(hp_capability_enabled, ExcInternalError());
-
- for (const auto &cell :
- this->active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
+ // make sure that the provided FE collection is large enough to
+ // cover all FE indices presently in use on the mesh
+ if ((hp_cell_active_fe_indices.size() > 0) &&
+ (hp_cell_future_fe_indices.size() > 0))
{
- Assert(cell->active_fe_index() < ff.size(),
- ExcInvalidFEIndex(cell->active_fe_index(), ff.size()));
- Assert(cell->future_fe_index() < ff.size(),
- ExcInvalidFEIndex(cell->future_fe_index(), ff.size()));
+ Assert(hp_capability_enabled, ExcInternalError());
+
+ for (const auto &cell : this->active_cell_iterators() |
+ IteratorFilters::LocallyOwnedCell())
+ {
+ Assert(cell->active_fe_index() < ff.size(),
+ ExcInvalidFEIndex(cell->active_fe_index(), ff.size()));
+ Assert(cell->future_fe_index() < ff.size(),
+ ExcInvalidFEIndex(cell->future_fe_index(), ff.size()));
+ }
}
}
-# endif
//
// register the new finite element collection
AssertDimension(new_numbers.size(), this->n_locally_owned_dofs());
-# ifdef DEBUG
- // assert that the new indices are consecutively numbered if we are
- // working on a single processor. this doesn't need to
- // hold in the case of a parallel mesh since we map the interval
- // [0...n_dofs()) into itself but only globally, not on each processor
- if (this->n_locally_owned_dofs() == this->n_dofs())
+ if constexpr (running_in_debug_mode())
{
- std::vector<types::global_dof_index> tmp(new_numbers);
- std::sort(tmp.begin(), tmp.end());
- std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
- types::global_dof_index i = 0;
- for (; p != tmp.end(); ++p, ++i)
- Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ // assert that the new indices are consecutively numbered if we are
+ // working on a single processor. this doesn't need to
+ // hold in the case of a parallel mesh since we map the interval
+ // [0...n_dofs()) into itself but only globally, not on each processor
+ if (this->n_locally_owned_dofs() == this->n_dofs())
+ {
+ std::vector<types::global_dof_index> tmp(new_numbers);
+ std::sort(tmp.begin(), tmp.end());
+ std::vector<types::global_dof_index>::const_iterator p =
+ tmp.begin();
+ types::global_dof_index i = 0;
+ for (; p != tmp.end(); ++p, ++i)
+ Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ }
+ else
+ for (const auto new_number : new_numbers)
+ Assert(
+ new_number < this->n_dofs(),
+ ExcMessage(
+ "New DoF index is not less than the total number of dofs."));
}
- else
- for (const auto new_number : new_numbers)
- Assert(new_number < this->n_dofs(),
- ExcMessage(
- "New DoF index is not less than the total number of dofs."));
-# endif
// uncompress the internal storage scheme of dofs on cells so that
// we can access dofs in turns. uncompress in parallel, starting
ExcMessage(
"You need to distribute DoFs before you can renumber them."));
-# ifdef DEBUG
- if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
- &*this->tria) != nullptr)
- {
- Assert(new_numbers.size() == this->n_dofs() ||
- new_numbers.size() == this->n_locally_owned_dofs(),
- ExcMessage("Incorrect size of the input array."));
- }
- else if (dynamic_cast<
- const parallel::DistributedTriangulationBase<dim, spacedim> *>(
- &*this->tria) != nullptr)
- {
- AssertDimension(new_numbers.size(), this->n_locally_owned_dofs());
- }
- else
+ if constexpr (running_in_debug_mode())
{
- AssertDimension(new_numbers.size(), this->n_dofs());
- }
+ if (dynamic_cast<const parallel::shared::Triangulation<dim, spacedim>
+ *>(&*this->tria) != nullptr)
+ {
+ Assert(new_numbers.size() == this->n_dofs() ||
+ new_numbers.size() == this->n_locally_owned_dofs(),
+ ExcMessage("Incorrect size of the input array."));
+ }
+ else if (dynamic_cast<
+ const parallel::DistributedTriangulationBase<dim, spacedim>
+ *>(&*this->tria) != nullptr)
+ {
+ AssertDimension(new_numbers.size(), this->n_locally_owned_dofs());
+ }
+ else
+ {
+ AssertDimension(new_numbers.size(), this->n_dofs());
+ }
- // assert that the new indices are consecutively numbered if we are
- // working on a single processor. this doesn't need to
- // hold in the case of a parallel mesh since we map the interval
- // [0...n_dofs()) into itself but only globally, not on each processor
- if (this->n_locally_owned_dofs() == this->n_dofs())
- {
- std::vector<types::global_dof_index> tmp(new_numbers);
- std::sort(tmp.begin(), tmp.end());
- std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
- types::global_dof_index i = 0;
- for (; p != tmp.end(); ++p, ++i)
- Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ // assert that the new indices are consecutively numbered if we are
+ // working on a single processor. this doesn't need to
+ // hold in the case of a parallel mesh since we map the interval
+ // [0...n_dofs()) into itself but only globally, not on each processor
+ if (this->n_locally_owned_dofs() == this->n_dofs())
+ {
+ std::vector<types::global_dof_index> tmp(new_numbers);
+ std::sort(tmp.begin(), tmp.end());
+ std::vector<types::global_dof_index>::const_iterator p =
+ tmp.begin();
+ types::global_dof_index i = 0;
+ for (; p != tmp.end(); ++p, ++i)
+ Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ }
+ else
+ for (const auto new_number : new_numbers)
+ Assert(
+ new_number < this->n_dofs(),
+ ExcMessage(
+ "New DoF index is not less than the total number of dofs."));
}
- else
- for (const auto new_number : new_numbers)
- Assert(new_number < this->n_dofs(),
- ExcMessage(
- "New DoF index is not less than the total number of dofs."));
-# endif
this->number_cache = this->policy->renumber_dofs(new_numbers);
}
AssertDimension(new_numbers.size(),
this->locally_owned_mg_dofs(level).n_elements());
-# ifdef DEBUG
- // assert that the new indices are consecutively numbered if we are working
- // on a single processor. this doesn't need to hold in the case of a
- // parallel mesh since we map the interval [0...n_dofs(level)) into itself
- // but only globally, not on each processor
- if (this->n_locally_owned_dofs() == this->n_dofs())
+ if constexpr (running_in_debug_mode())
{
- std::vector<types::global_dof_index> tmp(new_numbers);
- std::sort(tmp.begin(), tmp.end());
- std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
- types::global_dof_index i = 0;
- for (; p != tmp.end(); ++p, ++i)
- Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ // assert that the new indices are consecutively numbered if we are
+ // working on a single processor. this doesn't need to hold in the case of
+ // a parallel mesh since we map the interval [0...n_dofs(level)) into
+ // itself but only globally, not on each processor
+ if (this->n_locally_owned_dofs() == this->n_dofs())
+ {
+ std::vector<types::global_dof_index> tmp(new_numbers);
+ std::sort(tmp.begin(), tmp.end());
+ std::vector<types::global_dof_index>::const_iterator p = tmp.begin();
+ types::global_dof_index i = 0;
+ for (; p != tmp.end(); ++p, ++i)
+ Assert(*p == i, ExcNewNumbersNotConsecutive(i));
+ }
+ else
+ for (const auto new_number : new_numbers)
+ Assert(new_number < this->n_dofs(level),
+ ExcMessage(
+ "New DoF index is not less than the total number of dofs."));
}
- else
- for (const auto new_number : new_numbers)
- Assert(new_number < this->n_dofs(level),
- ExcMessage(
- "New DoF index is not less than the total number of dofs."));
-# endif
this->mg_number_cache[level] =
this->policy->renumber_mg_dofs(level, new_numbers);
DEAL_II_NOT_IMPLEMENTED();
}
-#ifdef DEBUG
- // Each entry of 'complete_identities' contains a set of
- // pairs (fe_index,dof_index). Because we put in exactly
- // two fe indices, we know that each entry of the outer
- // vector needs to contain a set of exactly two such
- // pairs. Check this. While there, also check that
- // the two entries actually reference fe_index_1 and
- // fe_index_2:
- for (const auto &complete_identity : complete_identities)
+ if constexpr (running_in_debug_mode())
{
- Assert(complete_identity.size() == 2, ExcInternalError());
- Assert(complete_identity.find(fe_index_1) !=
- complete_identity.end(),
- ExcInternalError());
- Assert(complete_identity.find(fe_index_2) !=
- complete_identity.end(),
- ExcInternalError());
+ // Each entry of 'complete_identities' contains a set of
+ // pairs (fe_index,dof_index). Because we put in exactly
+ // two fe indices, we know that each entry of the outer
+ // vector needs to contain a set of exactly two such
+ // pairs. Check this. While there, also check that
+ // the two entries actually reference fe_index_1 and
+ // fe_index_2:
+ for (const auto &complete_identity : complete_identities)
+ {
+ Assert(complete_identity.size() == 2, ExcInternalError());
+ Assert(complete_identity.find(fe_index_1) !=
+ complete_identity.end(),
+ ExcInternalError());
+ Assert(complete_identity.find(fe_index_2) !=
+ complete_identity.end(),
+ ExcInternalError());
+ }
}
-#endif
// Next reduce these sets of two pairs by removing the
// fe_index parts: We know which indices we have. But we
reduced_identities.emplace_back(dof_index_1, dof_index_2);
}
-#ifdef DEBUG
- // double check whether the newly created entries make
- // any sense at all
- for (const auto &identity : reduced_identities)
+ if constexpr (running_in_debug_mode())
{
- Assert(identity.first <
- fes[fe_index_1]
- .template n_dofs_per_object<structdim>(face_no),
- ExcInternalError());
- Assert(identity.second <
- fes[fe_index_2]
- .template n_dofs_per_object<structdim>(face_no),
- ExcInternalError());
+ // double check whether the newly created entries make
+ // any sense at all
+ for (const auto &identity : reduced_identities)
+ {
+ Assert(
+ identity.first <
+ fes[fe_index_1].template n_dofs_per_object<structdim>(
+ face_no),
+ ExcInternalError());
+ Assert(
+ identity.second <
+ fes[fe_index_2].template n_dofs_per_object<structdim>(
+ face_no),
+ ExcInternalError());
+ }
}
-#endif
identities =
std::make_unique<DoFIdentities>(std::move(reduced_identities));
// at this point, we must have taken care of the data transfer
// on all cells we had previously marked. verify this
-# ifdef DEBUG
- for (const auto &cell : dof_handler->active_cell_iterators())
- Assert(cell_marked[cell->active_cell_index()] == false,
- ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &cell : dof_handler->active_cell_iterators())
+ Assert(cell_marked[cell->active_cell_index()] == false,
+ ExcInternalError());
+ }
}
-# ifdef DEBUG
- // check that we are really done
- {
- std::vector<dealii::types::global_dof_index> local_dof_indices;
+ if constexpr (running_in_debug_mode())
+ {
+ // check that we are really done
+ {
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
- for (const auto &cell : dof_handler->active_cell_iterators())
- if (!cell->is_artificial())
- {
- local_dof_indices.resize(cell->get_fe().n_dofs_per_cell());
- cell->get_dof_indices(local_dof_indices);
- if (local_dof_indices.end() !=
- std::find(local_dof_indices.begin(),
- local_dof_indices.end(),
- numbers::invalid_dof_index))
+ for (const auto &cell : dof_handler->active_cell_iterators())
+ if (!cell->is_artificial())
{
- if (cell->is_ghost())
+ local_dof_indices.resize(cell->get_fe().n_dofs_per_cell());
+ cell->get_dof_indices(local_dof_indices);
+ if (local_dof_indices.end() !=
+ std::find(local_dof_indices.begin(),
+ local_dof_indices.end(),
+ numbers::invalid_dof_index))
{
- Assert(false,
- ExcMessage(
- "A ghost cell ended up with incomplete "
- "DoF index information. This should not "
- "have happened!"));
- }
- else
- {
- Assert(
- false,
- ExcMessage(
- "A locally owned cell ended up with incomplete "
- "DoF index information. This should not "
- "have happened!"));
+ if (cell->is_ghost())
+ {
+ Assert(false,
+ ExcMessage(
+ "A ghost cell ended up with incomplete "
+ "DoF index information. This should not "
+ "have happened!"));
+ }
+ else
+ {
+ Assert(
+ false,
+ ExcMessage(
+ "A locally owned cell ended up with incomplete "
+ "DoF index information. This should not "
+ "have happened!"));
+ }
}
}
- }
- }
-# endif // DEBUG
+ }
+ } // DEBUG
return number_cache;
-#endif // DEAL_II_WITH_MPI
+#endif // DEAL_II_WITH_MPI
}
// in Phase 1.
communicate_mg_ghost_cells(*dof_handler, cell_marked);
-# ifdef DEBUG
- // make sure we have finished all cells:
- for (const auto &cell : dof_handler->cell_iterators())
- Assert(cell_marked[cell->level()][cell->index()] == false,
- ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // make sure we have finished all cells:
+ for (const auto &cell : dof_handler->cell_iterators())
+ Assert(cell_marked[cell->level()][cell->index()] == false,
+ ExcInternalError());
+ }
}
-# ifdef DEBUG
- // check that we are really done
- {
- std::vector<dealii::types::global_dof_index> local_dof_indices;
- for (const auto &cell : dof_handler->cell_iterators())
- if (cell->level_subdomain_id() !=
- dealii::numbers::artificial_subdomain_id)
- {
- local_dof_indices.resize(cell->get_fe().n_dofs_per_cell());
- cell->get_mg_dof_indices(local_dof_indices);
- if (local_dof_indices.end() !=
- std::find(local_dof_indices.begin(),
- local_dof_indices.end(),
- numbers::invalid_dof_index))
+ if constexpr (running_in_debug_mode())
+ {
+ // check that we are really done
+ {
+ std::vector<dealii::types::global_dof_index> local_dof_indices;
+ for (const auto &cell : dof_handler->cell_iterators())
+ if (cell->level_subdomain_id() !=
+ dealii::numbers::artificial_subdomain_id)
{
- Assert(false, ExcMessage("not all DoFs got distributed!"));
+ local_dof_indices.resize(cell->get_fe().n_dofs_per_cell());
+ cell->get_mg_dof_indices(local_dof_indices);
+ if (local_dof_indices.end() !=
+ std::find(local_dof_indices.begin(),
+ local_dof_indices.end(),
+ numbers::invalid_dof_index))
+ {
+ Assert(false,
+ ExcMessage("not all DoFs got distributed!"));
+ }
}
- }
- }
-# endif // DEBUG
+ }
+ } // DEBUG
return number_caches;
std::vector<unsigned int>(),
false);
-#ifdef DEBUG
- {
- const std::vector<types::global_dof_index> dofs_per_component =
- DoFTools::count_dofs_per_fe_component(dof_handler, true);
- for (const auto &dpc : dofs_per_component)
- Assert(dofs_per_component[0] == dpc, ExcNotImplemented());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ const std::vector<types::global_dof_index> dofs_per_component =
+ DoFTools::count_dofs_per_fe_component(dof_handler, true);
+ for (const auto &dpc : dofs_per_component)
+ Assert(dofs_per_component[0] == dpc, ExcNotImplemented());
+ }
+ }
const unsigned int n_components =
dof_handler.get_fe_collection().n_components();
Assert(dof_handler.n_dofs() % n_components == 0, ExcInternalError());
component_dofs.end());
}
component_renumbered_dofs.compress();
-#ifdef DEBUG
- {
- IndexSet component_renumbered_dofs2(dof_handler.n_dofs());
- component_renumbered_dofs2.add_indices(component_renumbering.begin(),
- component_renumbering.end());
- Assert(component_renumbered_dofs2 == component_renumbered_dofs,
- ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ {
+ IndexSet component_renumbered_dofs2(dof_handler.n_dofs());
+ component_renumbered_dofs2.add_indices(component_renumbering.begin(),
+ component_renumbering.end());
+ Assert(component_renumbered_dofs2 == component_renumbered_dofs,
+ ExcInternalError());
+ }
+ }
for (const FiniteElement<dim, spacedim> &fe :
dof_handler.get_fe_collection())
{
cell->active_fe_index()) == true,
ExcInternalError());
-#ifdef DEBUG
- for (unsigned int c = 0; c < cell->face(f)->n_children(); ++c)
+ if constexpr (running_in_debug_mode())
{
- if (cell->neighbor_child_on_subface(f, c)->is_artificial())
- continue;
+ for (unsigned int c = 0; c < cell->face(f)->n_children(); ++c)
+ {
+ if (cell->neighbor_child_on_subface(f, c)
+ ->is_artificial())
+ continue;
- Assert(cell->face(f)->child(c)->n_active_fe_indices() == 1,
- ExcInternalError());
+ Assert(cell->face(f)->child(c)->n_active_fe_indices() ==
+ 1,
+ ExcInternalError());
- Assert(cell->face(f)->child(c)->fe_index_is_active(
- cell->active_fe_index()) == true,
- ExcNotImplemented());
- }
-#endif // DEBUG
+ Assert(cell->face(f)->child(c)->fe_index_is_active(
+ cell->active_fe_index()) == true,
+ ExcNotImplemented());
+ }
+ } // DEBUG
// Ok, start up the work:
const FiniteElement<dim, spacedim> &fe = cell->get_fe();
cell->active_fe_index()) == true,
ExcInternalError());
-#ifdef DEBUG
-
- for (unsigned int c = 0; c < cell->face(face)->n_children(); ++c)
+ if constexpr (running_in_debug_mode())
{
- if (cell->neighbor_child_on_subface(face, c)->is_artificial())
- continue;
+ for (unsigned int c = 0; c < cell->face(face)->n_children();
+ ++c)
+ {
+ if (cell->neighbor_child_on_subface(face, c)
+ ->is_artificial())
+ continue;
- AssertDimension(
- cell->face(face)->child(c)->n_active_fe_indices(), 1);
+ AssertDimension(
+ cell->face(face)->child(c)->n_active_fe_indices(), 1);
- Assert(cell->face(face)->child(c)->fe_index_is_active(
- cell->active_fe_index()) == true,
- ExcNotImplemented());
+ Assert(cell->face(face)->child(c)->fe_index_is_active(
+ cell->active_fe_index()) == true,
+ ExcNotImplemented());
+
+ for (unsigned int e = 0;
+ e < GeometryInfo<dim>::vertices_per_face;
+ ++e)
+ {
+ Assert(cell->face(face)
+ ->child(c)
+ ->line(e)
+ ->n_active_fe_indices() == 1,
+ ExcNotImplemented());
+
+ Assert(cell->face(face)
+ ->child(c)
+ ->line(e)
+ ->fe_index_is_active(
+ cell->active_fe_index()) == true,
+ ExcNotImplemented());
+ }
+ }
for (unsigned int e = 0;
e < GeometryInfo<dim>::vertices_per_face;
++e)
{
- Assert(cell->face(face)
- ->child(c)
- ->line(e)
- ->n_active_fe_indices() == 1,
+ Assert(cell->face(face)->line(e)->n_active_fe_indices() ==
+ 1,
ExcNotImplemented());
- Assert(
- cell->face(face)->child(c)->line(e)->fe_index_is_active(
- cell->active_fe_index()) == true,
- ExcNotImplemented());
+ Assert(cell->face(face)->line(e)->fe_index_is_active(
+ cell->active_fe_index()) == true,
+ ExcNotImplemented());
}
- }
-
- for (unsigned int e = 0; e < GeometryInfo<dim>::vertices_per_face;
- ++e)
- {
- Assert(cell->face(face)->line(e)->n_active_fe_indices() == 1,
- ExcNotImplemented());
-
- Assert(cell->face(face)->line(e)->fe_index_is_active(
- cell->active_fe_index()) == true,
- ExcNotImplemented());
- }
-#endif // DEBUG
+ } // DEBUG
// Ok, start up the work
const FiniteElement<dim, spacedim> &fe = cell->get_fe();
static const int dim = FaceIterator::AccessorType::dimension;
static const int spacedim = FaceIterator::AccessorType::space_dimension;
-#ifdef DEBUG
- const auto [orientation, rotation, flip] =
- ::dealii::internal::split_face_orientation(combined_orientation);
-
- Assert((dim != 1) ||
- (orientation == true && flip == false && rotation == false),
- ExcMessage("The supplied orientation (orientation, rotation, flip) "
- "is invalid for 1d"));
-
- Assert((dim != 2) || (flip == false && rotation == false),
- ExcMessage("The supplied orientation (orientation, rotation, flip) "
- "is invalid for 2d"));
-
- Assert(face_1 != face_2,
- ExcMessage("face_1 and face_2 are equal! Cannot constrain DoFs "
- "on the very same face"));
-
- Assert(face_1->at_boundary() && face_2->at_boundary(),
- ExcMessage("Faces for periodicity constraints must be on the "
- "boundary"));
+ if constexpr (running_in_debug_mode())
+ {
+ const auto [orientation, rotation, flip] =
+ ::dealii::internal::split_face_orientation(combined_orientation);
- Assert(matrix.m() == matrix.n(),
- ExcMessage("The supplied (rotation or interpolation) matrix must "
- "be a square matrix"));
+ Assert((dim != 1) ||
+ (orientation == true && flip == false && rotation == false),
+ ExcMessage(
+ "The supplied orientation (orientation, rotation, flip) "
+ "is invalid for 1d"));
- Assert(first_vector_components.empty() || matrix.m() == spacedim,
- ExcMessage("first_vector_components is nonempty, so matrix must "
- "be a rotation matrix exactly of size spacedim"));
+ Assert((dim != 2) || (flip == false && rotation == false),
+ ExcMessage(
+ "The supplied orientation (orientation, rotation, flip) "
+ "is invalid for 2d"));
- if (!face_1->has_children())
- {
- // TODO: the implementation makes the assumption that all faces have the
- // same number of dofs
- AssertDimension(
- face_1->get_fe(face_1->nth_active_fe_index(0)).n_unique_faces(), 1);
- const unsigned int face_no = 0;
+ Assert(face_1 != face_2,
+ ExcMessage("face_1 and face_2 are equal! Cannot constrain DoFs "
+ "on the very same face"));
- Assert(face_1->n_active_fe_indices() == 1, ExcInternalError());
- const unsigned int n_dofs_per_face =
- face_1->get_fe(face_1->nth_active_fe_index(0))
- .n_dofs_per_face(face_no);
+ Assert(face_1->at_boundary() && face_2->at_boundary(),
+ ExcMessage("Faces for periodicity constraints must be on the "
+ "boundary"));
- Assert(matrix.m() == 0 ||
- (first_vector_components.empty() &&
- matrix.m() == n_dofs_per_face) ||
- (!first_vector_components.empty() && matrix.m() == spacedim),
+ Assert(matrix.m() == matrix.n(),
ExcMessage(
- "The matrix must have either size 0 or spacedim "
- "(if first_vector_components is nonempty) "
- "or the size must be equal to the # of DoFs on the face "
- "(if first_vector_components is empty)."));
- }
+ "The supplied (rotation or interpolation) matrix must "
+ "be a square matrix"));
- if (!face_2->has_children())
- {
- // TODO: the implementation makes the assumption that all faces have the
- // same number of dofs
- AssertDimension(
- face_2->get_fe(face_2->nth_active_fe_index(0)).n_unique_faces(), 1);
- const unsigned int face_no = 0;
+ Assert(first_vector_components.empty() || matrix.m() == spacedim,
+ ExcMessage("first_vector_components is nonempty, so matrix must "
+ "be a rotation matrix exactly of size spacedim"));
- Assert(face_2->n_active_fe_indices() == 1, ExcInternalError());
- const unsigned int n_dofs_per_face =
- face_2->get_fe(face_2->nth_active_fe_index(0))
- .n_dofs_per_face(face_no);
+ if (!face_1->has_children())
+ {
+ // TODO: the implementation makes the assumption that all faces have
+ // the same number of dofs
+ AssertDimension(
+ face_1->get_fe(face_1->nth_active_fe_index(0)).n_unique_faces(),
+ 1);
+ const unsigned int face_no = 0;
+
+ Assert(face_1->n_active_fe_indices() == 1, ExcInternalError());
+ const unsigned int n_dofs_per_face =
+ face_1->get_fe(face_1->nth_active_fe_index(0))
+ .n_dofs_per_face(face_no);
+
+ Assert(matrix.m() == 0 ||
+ (first_vector_components.empty() &&
+ matrix.m() == n_dofs_per_face) ||
+ (!first_vector_components.empty() &&
+ matrix.m() == spacedim),
+ ExcMessage(
+ "The matrix must have either size 0 or spacedim "
+ "(if first_vector_components is nonempty) "
+ "or the size must be equal to the # of DoFs on the face "
+ "(if first_vector_components is empty)."));
+ }
- Assert(matrix.m() == 0 ||
- (first_vector_components.empty() &&
- matrix.m() == n_dofs_per_face) ||
- (!first_vector_components.empty() && matrix.m() == spacedim),
- ExcMessage(
- "The matrix must have either size 0 or spacedim "
- "(if first_vector_components is nonempty) "
- "or the size must be equal to the # of DoFs on the face "
- "(if first_vector_components is empty)."));
+ if (!face_2->has_children())
+ {
+ // TODO: the implementation makes the assumption that all faces have
+ // the same number of dofs
+ AssertDimension(
+ face_2->get_fe(face_2->nth_active_fe_index(0)).n_unique_faces(),
+ 1);
+ const unsigned int face_no = 0;
+
+ Assert(face_2->n_active_fe_indices() == 1, ExcInternalError());
+ const unsigned int n_dofs_per_face =
+ face_2->get_fe(face_2->nth_active_fe_index(0))
+ .n_dofs_per_face(face_no);
+
+ Assert(matrix.m() == 0 ||
+ (first_vector_components.empty() &&
+ matrix.m() == n_dofs_per_face) ||
+ (!first_vector_components.empty() &&
+ matrix.m() == spacedim),
+ ExcMessage(
+ "The matrix must have either size 0 or spacedim "
+ "(if first_vector_components is nonempty) "
+ "or the size must be equal to the # of DoFs on the face "
+ "(if first_vector_components is empty)."));
+ }
}
-#endif
if (face_1->has_children() && face_2->has_children())
{
fine_fe.component_to_base_index(fine_component).first),
ExcFiniteElementsDontMatch());
-#ifdef DEBUG
- // if in debug mode, check whether the coarse grid is indeed coarser
- // everywhere than the fine grid
- for (const auto &cell : coarse_grid.active_cell_iterators())
- Assert(cell->level() <= coarse_to_fine_grid_map[cell]->level(),
- ExcGridNotCoarser());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // if in debug mode, check whether the coarse grid is indeed coarser
+ // everywhere than the fine grid
+ for (const auto &cell : coarse_grid.active_cell_iterators())
+ Assert(cell->level() <= coarse_to_fine_grid_map[cell]->level(),
+ ExcGridNotCoarser());
+ }
/*
* From here on: the term `parameter' refers to the selected component
// respective dofs of the other components have sum of weights zero, of
// course. we do not explicitly ask which component a dof belongs to,
// but this at least tests some errors
-#ifdef DEBUG
- for (unsigned int col = 0; col < n_parameters_on_fine_grid; ++col)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0;
- for (types::global_dof_index row = 0; row < n_coarse_dofs; ++row)
- if (weights[row].find(col) != weights[row].end())
- sum += weights[row][col];
- Assert((std::fabs(sum - 1) < 1.e-12) ||
- ((coarse_fe.n_components() > 1) && (sum == 0)),
- ExcInternalError());
+ for (unsigned int col = 0; col < n_parameters_on_fine_grid; ++col)
+ {
+ double sum = 0;
+ for (types::global_dof_index row = 0; row < n_coarse_dofs;
+ ++row)
+ if (weights[row].find(col) != weights[row].end())
+ sum += weights[row][col];
+ Assert((std::fabs(sum - 1) < 1.e-12) ||
+ ((coarse_fe.n_components() > 1) && (sum == 0)),
+ ExcInternalError());
+ }
}
-#endif
return n_parameters_on_fine_grid;
AssertDimension(dof_to_boundary_mapping.size(), n_dofs);
AssertDimension(sparsity.n_rows(), dof.n_boundary_dofs());
AssertDimension(sparsity.n_cols(), dof.n_boundary_dofs());
-#ifdef DEBUG
- if (sparsity.n_rows() != 0)
+ if constexpr (running_in_debug_mode())
{
- types::global_dof_index max_element = 0;
- for (const types::global_dof_index index : dof_to_boundary_mapping)
- if ((index != numbers::invalid_dof_index) && (index > max_element))
- max_element = index;
- AssertDimension(max_element, sparsity.n_rows() - 1);
+ if (sparsity.n_rows() != 0)
+ {
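+          // The largest index used in dof_to_boundary_mapping has to match
+          // the last row of the boundary sparsity pattern.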
+ types::global_dof_index max_element = 0;
+ for (const types::global_dof_index index : dof_to_boundary_mapping)
+ if ((index != numbers::invalid_dof_index) &&
+ (index > max_element))
+ max_element = index;
+ AssertDimension(max_element, sparsity.n_rows() - 1);
+ }
}
-#endif
std::vector<types::global_dof_index> dofs_on_this_face;
dofs_on_this_face.reserve(dof.get_fe_collection().max_dofs_per_face());
dof.n_boundary_dofs(boundary_ids)));
(void)fe_is_hermite;
-#ifdef DEBUG
- if (sparsity.n_rows() != 0)
+ if constexpr (running_in_debug_mode())
{
- types::global_dof_index max_element = 0;
- for (const types::global_dof_index index : dof_to_boundary_mapping)
- if ((index != numbers::invalid_dof_index) && (index > max_element))
- max_element = index;
- AssertDimension(max_element, sparsity.n_rows() - 1);
+ if (sparsity.n_rows() != 0)
+ {
+ types::global_dof_index max_element = 0;
+ for (const types::global_dof_index index : dof_to_boundary_mapping)
+ if ((index != numbers::invalid_dof_index) &&
+ (index > max_element))
+ max_element = index;
+ AssertDimension(max_element, sparsity.n_rows() - 1);
+ }
}
-#endif
std::vector<types::global_dof_index> dofs_on_this_face;
dofs_on_this_face.reserve(dof.get_fe_collection().max_dofs_per_face());
const unsigned int first_selected =
mask.first_selected_component(n_total_components);
-# ifdef DEBUG
- // check that it is contiguous:
- for (unsigned int c = 0; c < n_total_components; ++c)
- Assert((c < first_selected && (!mask[c])) ||
- (c >= first_selected && c < first_selected + n_selected &&
- mask[c]) ||
- (c >= first_selected + n_selected && !mask[c]),
- ExcMessage("Error: the given ComponentMask is not contiguous!"));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check that it is contiguous:
+ for (unsigned int c = 0; c < n_total_components; ++c)
+ Assert((c < first_selected && (!mask[c])) ||
+ (c >= first_selected && c < first_selected + n_selected &&
+ mask[c]) ||
+ (c >= first_selected + n_selected && !mask[c]),
+ ExcMessage("Error: the given ComponentMask is not contiguous!"));
+ }
return get_sub_fe(first_selected, n_selected);
}
}
}
-#ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int j = 0; j < source_fe->n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int j = 0; j < source_fe->n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-#endif
}
else
{
if (std::fabs(interpolation_matrix(i, j)) < 1e-15)
interpolation_matrix(i, j) = 0.;
-#ifdef DEBUG
- // make sure that the row sum of
- // each of the matrices is 1 at
- // this point. this must be so
- // since the shape functions sum up
- // to 1
- for (unsigned int i = 0; i < this->n_dofs_per_cell(); ++i)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
- for (unsigned int j = 0; j < source_fe->n_dofs_per_cell(); ++j)
- sum += interpolation_matrix(i, j);
+ // make sure that the row sum of
+ // each of the matrices is 1 at
+ // this point. this must be so
+ // since the shape functions sum up
+ // to 1
+ for (unsigned int i = 0; i < this->n_dofs_per_cell(); ++i)
+ {
+ double sum = 0.;
+ for (unsigned int j = 0; j < source_fe->n_dofs_per_cell(); ++j)
+ sum += interpolation_matrix(i, j);
- Assert(std::fabs(sum - 1) < 5e-14 * std::max(this->degree, 1U) * dim,
- ExcInternalError());
+ Assert(std::fabs(sum - 1) <
+ 5e-14 * std::max(this->degree, 1U) * dim,
+ ExcInternalError());
+ }
}
-#endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&x_source_fe))
{
}
}
-#ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int j = 0; j < source_fe->n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int j = 0; j < source_fe->n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-#endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&x_source_fe) != nullptr)
{
if (std::fabs(interpolation_matrix(i, j)) < eps)
interpolation_matrix(i, j) = 0.;
-# ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int i = 0; i < this->n_dofs_per_cell(); ++i)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
- for (unsigned int j = 0; j < source_fe->n_dofs_per_cell(); ++j)
- sum += interpolation_matrix(i, j);
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int i = 0; i < this->n_dofs_per_cell(); ++i)
+ {
+ double sum = 0.;
+ for (unsigned int j = 0; j < source_fe->n_dofs_per_cell(); ++j)
+ sum += interpolation_matrix(i, j);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-# endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&x_source_fe))
{
}
}
-# ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-# endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&source_fe) != nullptr)
{
// evaluations of the Lagrange polynomials are zero or one.
const double eps = 1e-15 * q_degree * dim;
-# ifdef DEBUG
- // in DEBUG mode, check that the evaluation of support points in the
- // current numbering gives the identity operation
- for (unsigned int i = 0; i < q_dofs_per_cell; ++i)
+ if constexpr (running_in_debug_mode())
{
- Assert(std::fabs(1. - this->poly_space->compute_value(
- i, this->unit_support_points[i])) < eps,
- ExcInternalError("The Lagrange polynomial does not evaluate "
- "to one or zero in a nodal point. "
- "This typically indicates that the "
- "polynomial interpolation is "
- "ill-conditioned such that round-off "
- "prevents the sum to be one."));
- for (unsigned int j = 0; j < q_dofs_per_cell; ++j)
- if (j != i)
- Assert(std::fabs(this->poly_space->compute_value(
- i, this->unit_support_points[j])) < eps,
+ // in DEBUG mode, check that the evaluation of support points in the
+ // current numbering gives the identity operation
+ for (unsigned int i = 0; i < q_dofs_per_cell; ++i)
+ {
+ Assert(std::fabs(1. - this->poly_space->compute_value(
+ i, this->unit_support_points[i])) < eps,
ExcInternalError(
"The Lagrange polynomial does not evaluate "
"to one or zero in a nodal point. "
"polynomial interpolation is "
"ill-conditioned such that round-off "
"prevents the sum to be one."));
+ for (unsigned int j = 0; j < q_dofs_per_cell; ++j)
+ if (j != i)
+ Assert(std::fabs(this->poly_space->compute_value(
+ i, this->unit_support_points[j])) < eps,
+ ExcInternalError(
+ "The Lagrange polynomial does not evaluate "
+ "to one or zero in a nodal point. "
+ "This typically indicates that the "
+ "polynomial interpolation is "
+ "ill-conditioned such that round-off "
+ "prevents the sum to be one."));
+ }
}
-# endif
// to efficiently evaluate the polynomial at the subcell, make use of
// the tensor product structure of this element and only evaluate 1d
if (q_dofs_per_cell < this->n_dofs_per_cell())
prolongate(q_dofs_per_cell, q_dofs_per_cell) = 1.;
- // and make sure that the row sum is 1. this must be so since for this
- // element, the shape functions add up to one
-# ifdef DEBUG
- for (unsigned int row = 0; row < this->n_dofs_per_cell(); ++row)
+ // and make sure that the row sum is 1. this must be so since for this
+ // element, the shape functions add up to one
+ if constexpr (running_in_debug_mode())
{
- double sum = 0;
- for (unsigned int col = 0; col < this->n_dofs_per_cell(); ++col)
- sum += prolongate(row, col);
- Assert(std::fabs(sum - 1.) <
- std::max(eps, 5e-16 * std::sqrt(this->n_dofs_per_cell())),
- ExcInternalError("The entries in a row of the local "
- "prolongation matrix do not add to one. "
- "This typically indicates that the "
- "polynomial interpolation is "
- "ill-conditioned such that round-off "
- "prevents the sum to be one."));
+ for (unsigned int row = 0; row < this->n_dofs_per_cell(); ++row)
+ {
+ double sum = 0;
+ for (unsigned int col = 0; col < this->n_dofs_per_cell(); ++col)
+ sum += prolongate(row, col);
+ Assert(std::fabs(sum - 1.) <
+ std::max(eps,
+ 5e-16 * std::sqrt(this->n_dofs_per_cell())),
+ ExcInternalError("The entries in a row of the local "
+ "prolongation matrix do not add to one. "
+ "This typically indicates that the "
+ "polynomial interpolation is "
+ "ill-conditioned such that round-off "
+ "prevents the sum to be one."));
+ }
}
-# endif
// move result into place
const_cast<FullMatrix<double> &>(
}
}
-#ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < 2e-13 * this->degree * (dim - 1),
- ExcInternalError());
+ Assert(std::fabs(sum - 1) < 2e-13 * this->degree * (dim - 1),
+ ExcInternalError());
+ }
}
-#endif
}
}
}
-#ifdef DEBUG
- // make sure that the row sum of each of the matrices is 1 at this
- // point. this must be so since the shape functions sum up to 1
- for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ // make sure that the row sum of each of the matrices is 1 at this
+ // point. this must be so since the shape functions sum up to 1
+ for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < 2e-13 * this->degree * (dim - 1),
- ExcInternalError());
+ Assert(std::fabs(sum - 1) < 2e-13 * this->degree * (dim - 1),
+ ExcInternalError());
+ }
}
-#endif
}
restriction_mat[i][j] =
this->shape_value(j, transformed_point[0]);
}
-#ifdef DEBUG
- for (unsigned int i = 0; i < this->n_dofs_per_cell(); i++)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
-
- for (unsigned int j = 0; j < this->n_dofs_per_cell(); j++)
- sum += restriction_mat[i][j];
-
- Assert(std::fabs(sum - 1) < eps || std::fabs(sum) < eps,
- ExcInternalError(
- "The entries in a row of the local "
- "restriction matrix do not add to zero or one. "
- "This typically indicates that the "
- "polynomial interpolation is "
- "ill-conditioned such that round-off "
- "prevents the sum to be one."));
+ for (unsigned int i = 0; i < this->n_dofs_per_cell(); i++)
+ {
+ double sum = 0.;
+
+ for (unsigned int j = 0; j < this->n_dofs_per_cell(); j++)
+ sum += restriction_mat[i][j];
+
+ Assert(std::fabs(sum - 1) < eps || std::fabs(sum) < eps,
+ ExcInternalError(
+ "The entries in a row of the local "
+ "restriction matrix do not add to zero or one. "
+ "This typically indicates that the "
+ "polynomial interpolation is "
+ "ill-conditioned such that round-off "
+ "prevents the sum to be one."));
+ }
}
-#endif
// Remove small entries from the matrix
for (unsigned int i = 0; i < restriction_mat.m(); ++i)
interpolation_matrix(i, j) = matrix_entry;
}
-#ifdef DEBUG
- for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-#endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&source_fe) != nullptr)
{
interpolation_matrix(i, j) = matrix_entry;
}
-#ifdef DEBUG
- for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ if constexpr (running_in_debug_mode())
{
- double sum = 0.;
+ for (unsigned int j = 0; j < source_fe.n_dofs_per_face(face_no); ++j)
+ {
+ double sum = 0.;
- for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
- sum += interpolation_matrix(j, i);
+ for (unsigned int i = 0; i < this->n_dofs_per_face(face_no); ++i)
+ sum += interpolation_matrix(j, i);
- Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ Assert(std::fabs(sum - 1) < eps, ExcInternalError());
+ }
}
-#endif
}
else if (dynamic_cast<const FE_Nothing<dim> *>(&source_fe) != nullptr)
{
for (auto &p : bubble_functions)
lump_polys.push_back(std::move(p));
- // Sanity check:
-#ifdef DEBUG
- BarycentricPolynomial<dim> unity;
- for (const auto &p : lump_polys)
- unity = unity + p;
-
- Point<dim> test;
- for (unsigned int d = 0; d < dim; ++d)
- test[d] = 2.0;
- Assert(std::abs(unity.value(test) - 1.0) < 1e-10,
- ExcInternalError());
-#endif
+ // Sanity check:
+ if constexpr (running_in_debug_mode())
+ {
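+          // The lumped shape functions must still form a partition of unity:
+          // their sum, evaluated at an arbitrary point, has to be one.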
+ BarycentricPolynomial<dim> unity;
+ for (const auto &p : lump_polys)
+ unity = unity + p;
+
+ Point<dim> test;
+ for (unsigned int d = 0; d < dim; ++d)
+ test[d] = 2.0;
+ Assert(std::abs(unity.value(test) - 1.0) < 1e-10,
+ ExcInternalError());
+ }
return BarycentricPolynomials<dim>(lump_polys);
}
}
}
-# ifdef DEBUG
- // check generalized_support_points_index_table for consistency
- for (unsigned int i = 0; i < base_elements.size(); ++i)
+ if constexpr (running_in_debug_mode())
{
- if (!base_element(i).has_generalized_support_points())
- continue;
-
- const auto &points =
- base_elements[i].first->get_generalized_support_points();
- for (unsigned int j = 0; j < points.size(); ++j)
+ // check generalized_support_points_index_table for consistency
+ for (unsigned int i = 0; i < base_elements.size(); ++i)
{
- const auto n = generalized_support_points_index_table[i][j];
- Assert(this->generalized_support_points[n] == points[j],
- ExcInternalError());
+ if (!base_element(i).has_generalized_support_points())
+ continue;
+
+ const auto &points =
+ base_elements[i].first->get_generalized_support_points();
+ for (unsigned int j = 0; j < points.size(); ++j)
+ {
+ const auto n = generalized_support_points_index_table[i][j];
+ Assert(this->generalized_support_points[n] == points[j],
+ ExcInternalError());
+ }
}
- }
-# endif /* DEBUG */
+ } /* DEBUG */
});
// initialize quad dof index permutation in 3d and higher
{
const Triangulation<3>::face_iterator face = cell->face(face_no);
-#ifdef DEBUG
- const bool face_orientation = cell->face_orientation(face_no),
- face_flip = cell->face_flip(face_no),
- face_rotation = cell->face_rotation(face_no);
- const unsigned int vertices_per_face = GeometryInfo<3>::vertices_per_face,
- lines_per_face = GeometryInfo<3>::lines_per_face;
-
- // some sanity checks up front
- for (unsigned int i = 0; i < vertices_per_face; ++i)
- Assert(face->vertex_index(i) ==
- cell->vertex_index(GeometryInfo<3>::face_to_cell_vertices(
- face_no, i, face_orientation, face_flip, face_rotation)),
- ExcInternalError());
-
- // indices of the lines that bound a face are given by GeometryInfo<3>::
- // face_to_cell_lines
- for (unsigned int i = 0; i < lines_per_face; ++i)
- Assert(face->line(i) ==
- cell->line(GeometryInfo<3>::face_to_cell_lines(
- face_no, i, face_orientation, face_flip, face_rotation)),
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const bool face_orientation = cell->face_orientation(face_no),
+ face_flip = cell->face_flip(face_no),
+ face_rotation = cell->face_rotation(face_no);
+ const unsigned int vertices_per_face =
+ GeometryInfo<3>::vertices_per_face,
+ lines_per_face = GeometryInfo<3>::lines_per_face;
+
+ // some sanity checks up front
+ for (unsigned int i = 0; i < vertices_per_face; ++i)
+ Assert(face->vertex_index(i) ==
+ cell->vertex_index(GeometryInfo<3>::face_to_cell_vertices(
+ face_no, i, face_orientation, face_flip, face_rotation)),
+ ExcInternalError());
+
+ // indices of the lines that bound a face are given by
+          // GeometryInfo<3>::face_to_cell_lines
+ for (unsigned int i = 0; i < lines_per_face; ++i)
+ Assert(face->line(i) ==
+ cell->line(GeometryInfo<3>::face_to_cell_lines(
+ face_no, i, face_orientation, face_flip, face_rotation)),
+ ExcInternalError());
+ }
// extract the points surrounding a quad from the points
// already computed. First get the 4 vertices and then the points on
// the four lines
Assert(dim > 1, ExcNotImplemented());
Assert(dim < 4, ExcNotImplemented());
-# ifdef DEBUG
- Tensor<2, dim> vector_matrix;
- for (unsigned int d = 0; d < dim; ++d)
- for (unsigned int c = 1; c <= dim; ++c)
- vector_matrix[c - 1][d] = vertices[c][d] - vertices[0][d];
- Assert(determinant(vector_matrix) > 0.,
- ExcMessage("Vertices of simplex must form a right handed system"));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
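+      // The rows of this matrix are the edge vectors from vertex 0 to the
+      // other vertices; a positive determinant means the vertices form a
+      // right-handed system.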
+ Tensor<2, dim> vector_matrix;
+ for (unsigned int d = 0; d < dim; ++d)
+ for (unsigned int c = 1; c <= dim; ++c)
+ vector_matrix[c - 1][d] = vertices[c][d] - vertices[0][d];
+ Assert(determinant(vector_matrix) > 0.,
+ ExcMessage(
+ "Vertices of simplex must form a right handed system"));
+ }
// Set up the vertices by first copying into points.
std::vector<Point<dim>> points = vertices;
std::reverse(step_sizes[i].begin(), step_sizes[i].end());
}
-# ifdef DEBUG
- double x = 0;
- for (unsigned int j = 0; j < step_sizes.at(i).size(); ++j)
- x += step_sizes[i][j];
- Assert(std::fabs(x - (p2[i] - p1[i])) <= 1e-12 * std::fabs(x),
- ExcMessage(
- "The sequence of step sizes in coordinate direction " +
- Utilities::int_to_string(i) +
- " must be equal to the distance of the two given "
- "points in this coordinate direction."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ double x = 0;
+ for (unsigned int j = 0; j < step_sizes.at(i).size(); ++j)
+ x += step_sizes[i][j];
+ Assert(std::fabs(x - (p2[i] - p1[i])) <= 1e-12 * std::fabs(x),
+ ExcMessage(
+ "The sequence of step sizes in coordinate direction " +
+ Utilities::int_to_string(i) +
+ " must be equal to the distance of the two given "
+ "points in this coordinate direction."));
+ }
}
Triangulation<dim, spacedim> &result)
{
AssertDimension(dim, extents.size());
-# ifdef DEBUG
- for (const auto &extent : extents)
- Assert(0 < extent,
- ExcMessage("The Triangulation must be copied at least one time in "
- "each coordinate dimension."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &extent : extents)
+ Assert(0 < extent,
+ ExcMessage(
+ "The Triangulation must be copied at least one time in "
+ "each coordinate dimension."));
+ }
const BoundingBox<spacedim> bbox(input.get_vertices());
const auto &min = bbox.get_boundary_points().first;
const auto &max = bbox.get_boundary_points().second;
// mode)
if (0 < manifold_priorities.size())
{
-# ifdef DEBUG
- // check that the provided manifold_priorities is valid
- std::vector<types::manifold_id> sorted_manifold_priorities =
- manifold_priorities;
- std::sort(sorted_manifold_priorities.begin(),
- sorted_manifold_priorities.end());
- Assert(std::unique(sorted_manifold_priorities.begin(),
- sorted_manifold_priorities.end()) ==
- sorted_manifold_priorities.end(),
- ExcMessage(
- "The given vector of manifold ids may not contain any "
- "duplicated entries."));
- std::vector<types::manifold_id> sorted_manifold_ids =
- input.get_manifold_ids();
- std::sort(sorted_manifold_ids.begin(), sorted_manifold_ids.end());
- if (sorted_manifold_priorities != sorted_manifold_ids)
+ if constexpr (running_in_debug_mode())
{
- std::ostringstream message;
- message << "The given triangulation has manifold ids {";
- for (const types::manifold_id manifold_id : sorted_manifold_ids)
- if (manifold_id != sorted_manifold_ids.back())
- message << manifold_id << ", ";
- message << sorted_manifold_ids.back() << "}, but \n"
- << " the given vector of manifold ids is {";
- for (const types::manifold_id manifold_id : manifold_priorities)
- if (manifold_id != manifold_priorities.back())
- message << manifold_id << ", ";
- message
- << manifold_priorities.back() << "}.\n"
- << " These vectors should contain the same elements.\n";
- const std::string m = message.str();
- Assert(false, ExcMessage(m));
+ // check that the provided manifold_priorities is valid
+ std::vector<types::manifold_id> sorted_manifold_priorities =
+ manifold_priorities;
+ std::sort(sorted_manifold_priorities.begin(),
+ sorted_manifold_priorities.end());
+ Assert(std::unique(sorted_manifold_priorities.begin(),
+ sorted_manifold_priorities.end()) ==
+ sorted_manifold_priorities.end(),
+ ExcMessage(
+ "The given vector of manifold ids may not contain any "
+ "duplicated entries."));
+ std::vector<types::manifold_id> sorted_manifold_ids =
+ input.get_manifold_ids();
+ std::sort(sorted_manifold_ids.begin(), sorted_manifold_ids.end());
+ if (sorted_manifold_priorities != sorted_manifold_ids)
+ {
+ std::ostringstream message;
+ message << "The given triangulation has manifold ids {";
+ for (const types::manifold_id manifold_id :
+ sorted_manifold_ids)
+ if (manifold_id != sorted_manifold_ids.back())
+ message << manifold_id << ", ";
+ message << sorted_manifold_ids.back() << "}, but \n"
+ << " the given vector of manifold ids is {";
+ for (const types::manifold_id manifold_id :
+ manifold_priorities)
+ if (manifold_id != manifold_priorities.back())
+ message << manifold_id << ", ";
+ message
+ << manifold_priorities.back() << "}.\n"
+ << " These vectors should contain the same elements.\n";
+ const std::string m = message.str();
+ Assert(false, ExcMessage(m));
+ }
}
-# endif
return manifold_priorities;
}
// otherwise use the default ranking: ascending order, but TFI manifolds
constexpr unsigned int n_pipes = 3;
constexpr double tolerance = 1.e-12;
-# ifdef DEBUG
- // Verify user input.
- Assert(bifurcation.second > 0,
- ExcMessage("Invalid input: negative radius."));
- Assert(openings.size() == n_pipes,
- ExcMessage("Invalid input: only 3 openings allowed."));
- for (const auto &opening : openings)
- Assert(opening.second > 0, ExcMessage("Invalid input: negative radius."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Verify user input.
+ Assert(bifurcation.second > 0,
+ ExcMessage("Invalid input: negative radius."));
+ Assert(openings.size() == n_pipes,
+ ExcMessage("Invalid input: only 3 openings allowed."));
+ for (const auto &opening : openings)
+ Assert(opening.second > 0,
+ ExcMessage("Invalid input: negative radius."));
+ }
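These checks validate the input of the pipe-junction generator. A hedged usage sketch, assuming the GridGenerator::pipe_junction() interface that takes (center, radius) pairs for the three openings and for the bifurcation (coordinates and radii below are arbitrary illustration values):

  Triangulation<3> tria;
  const std::vector<std::pair<Point<3>, double>> openings = {
    {Point<3>(-2., 0., 0.), 1.},
    {Point<3>(0., 2., 0.), 1.},
    {Point<3>(2., 0., 0.), 1.}};
  const std::pair<Point<3>, double> bifurcation(Point<3>(0., 0., 0.), 1.);
  GridGenerator::pipe_junction(tria, openings, bifurcation);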
// Each pipe segment will be identified by the index of its opening in the
// parameter array. To determine the next and previous entry in the array
AssertDimension(node_tags[i], i + 1);
for (unsigned int d = 0; d < spacedim; ++d)
vertices[i][d] = coord[i * 3 + d];
-# ifdef DEBUG
- // Make sure the embedded dimension is right
- for (unsigned int d = spacedim; d < 3; ++d)
- Assert(std::abs(coord[i * 3 + d]) < 1e-10,
- ExcMessage("The grid you are reading contains nodes that are "
- "nonzero in the coordinate with index " +
- std::to_string(d) +
- ", but you are trying to save "
- "it on a grid embedded in a " +
- std::to_string(spacedim) + " dimensional space."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Make sure the embedded dimension is right
+ for (unsigned int d = spacedim; d < 3; ++d)
+ Assert(std::abs(coord[i * 3 + d]) < 1e-10,
+ ExcMessage(
+ "The grid you are reading contains nodes that are "
+ "nonzero in the coordinate with index " +
+ std::to_string(d) +
+ ", but you are trying to save "
+ "it on a grid embedded in a " +
+ std::to_string(spacedim) + " dimensional space."));
+ }
}
}
vertex_to_point = p - mesh.get_vertices()[closest_vertex_index];
}
-#ifdef DEBUG
- {
- // Double-check if found index is at marked cell
- Assert(any_cell_marked(vertex_to_cells[closest_vertex_index]),
- dealii::ExcMessage("Found non-marked vertex"));
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Double-check if found index is at marked cell
+ Assert(any_cell_marked(vertex_to_cells[closest_vertex_index]),
+ dealii::ExcMessage("Found non-marked vertex"));
+ }
const double vertex_point_norm = vertex_to_point.norm();
if (vertex_point_norm > 0)
AssertDimension(cells_out.size(), maps_out.size());
AssertDimension(cells_out.size(), qpoints_out.size());
-#ifdef DEBUG
- unsigned int c = cells_out.size();
- unsigned int qps = 0;
- // The number of points in all
- // the cells must be the same as
- // the number of points we
- // started off from,
- // plus the points which were ignored
- for (unsigned int n = 0; n < c; ++n)
+ if constexpr (running_in_debug_mode())
{
- AssertDimension(qpoints_out[n].size(), maps_out[n].size());
- qps += qpoints_out[n].size();
- }
+ unsigned int c = cells_out.size();
+ unsigned int qps = 0;
+ // The number of points in all the cells, plus
+ // the points which were ignored, must be the
+ // same as the number of points we started off from
+ for (unsigned int n = 0; n < c; ++n)
+ {
+ AssertDimension(qpoints_out[n].size(), maps_out[n].size());
+ qps += qpoints_out[n].size();
+ }
- Assert(qps + missing_points_out.size() == np,
- ExcDimensionMismatch(qps + missing_points_out.size(), np));
-#endif
+ Assert(qps + missing_points_out.size() == np,
+ ExcDimensionMismatch(qps + missing_points_out.size(), np));
+ }
return std::make_tuple(std::move(cells_out),
std::move(qpoints_out),
(void)space_dim;
AssertIndexRange(direction, space_dim);
-#ifdef DEBUG
- {
- constexpr int dim = CellIterator::AccessorType::dimension;
- constexpr int spacedim = CellIterator::AccessorType::space_dimension;
- // For parallel::fullydistributed::Triangulation there might be unmatched
- // faces on periodic boundaries on the coarse grid. As a result
- // this assert is not fulfilled (which is not a bug!). See also the
- // discussion in the method collect_periodic_faces.
- if (!(((pairs1.size() > 0) &&
- (dynamic_cast<const parallel::fullydistributed::
- Triangulation<dim, spacedim> *>(
- &pairs1.begin()->first->get_triangulation()) != nullptr)) ||
- ((pairs2.size() > 0) &&
- (dynamic_cast<
- const parallel::fullydistributed::Triangulation<dim, spacedim>
- *>(&pairs2.begin()->first->get_triangulation()) != nullptr))))
- Assert(pairs1.size() == pairs2.size(),
- ExcMessage("Unmatched faces on periodic boundaries"));
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ constexpr int dim = CellIterator::AccessorType::dimension;
+ constexpr int spacedim = CellIterator::AccessorType::space_dimension;
+ // For parallel::fullydistributed::Triangulation there might be
+ // unmatched faces on periodic boundaries on the coarse grid. As a
+ // result this assert is not fulfilled (which is not a bug!). See also
+ // the discussion in the method collect_periodic_faces.
+ if (!(((pairs1.size() > 0) &&
+ (dynamic_cast<const parallel::fullydistributed::
+ Triangulation<dim, spacedim> *>(
+ &pairs1.begin()->first->get_triangulation()) != nullptr)) ||
+ ((pairs2.size() > 0) &&
+ (dynamic_cast<const parallel::fullydistributed::
+ Triangulation<dim, spacedim> *>(
+ &pairs2.begin()->first->get_triangulation()) != nullptr))))
+ Assert(pairs1.size() == pairs2.size(),
+ ExcMessage("Unmatched faces on periodic boundaries"));
+ }
unsigned int n_matches = 0;
"Are you sure that you've selected the correct boundary "
"id's and that the coarsest level mesh is colorized?"));
-#ifdef DEBUG
- const unsigned int size_old = matched_pairs.size();
-#endif
+ [[maybe_unused]] const unsigned int size_old = matched_pairs.size();
// and call match_periodic_face_pairs that does the actual matching:
match_periodic_face_pairs(
pairs1, pairs2, direction, matched_pairs, offset, matrix);
-#ifdef DEBUG
- // check for standard orientation
- const unsigned int size_new = matched_pairs.size();
- for (unsigned int i = size_old; i < size_new; ++i)
+ if constexpr (running_in_debug_mode())
{
- Assert(matched_pairs[i].orientation ==
- numbers::default_geometric_orientation,
- ExcMessage(
- "Found a face match with non standard orientation. "
- "This function is only suitable for meshes with cells "
- "in default orientation"));
+ // check for standard orientation
+ const unsigned int size_new = matched_pairs.size();
+ for (unsigned int i = size_old; i < size_new; ++i)
+ {
+ Assert(matched_pairs[i].orientation ==
+ numbers::default_geometric_orientation,
+ ExcMessage(
+ "Found a face match with non standard orientation. "
+ "This function is only suitable for meshes with cells "
+ "in default orientation"));
+ }
}
-#endif
}
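Note the pattern used for size_old above: the value is needed only by the debug check, but that check runs after match_periodic_face_pairs(), so the variable cannot live inside the if constexpr block. It is therefore computed unconditionally and tagged [[maybe_unused]] so that release builds stay warning-free. A minimal sketch of the same idiom (container names are hypothetical):

  [[maybe_unused]] const std::size_t old_size = results.size();
  // ... work that appends entries to 'results' ...
  if constexpr (running_in_debug_mode())
    {
      // only the entries appended above need to be checked
      Assert(results.size() >= old_size, ExcInternalError());
    }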
// tria.n_levels()==1, since this is something that can happen on one
// processor without being true on all. however, we can ask for the
// global number of active cells and use that
-#ifdef DEBUG
- if (const auto *p_tria = dynamic_cast<
- const parallel::DistributedTriangulationBase<dim, spacedim> *>(&tria))
- Assert(p_tria->n_global_active_cells() == tria.n_cells(0),
- ExcNotImplemented());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (const auto *p_tria = dynamic_cast<
+ const parallel::DistributedTriangulationBase<dim, spacedim> *>(
+ &tria))
+ Assert(p_tria->n_global_active_cells() == tria.n_cells(0),
+ ExcNotImplemented());
+ }
// the algorithm used simply traverses all cells and picks out the
// boundary vertices. it may or may not be faster to simply get all
std::begin(b.vertices),
std::end(b.vertices)))
return true;
- // it should never be necessary to check the material or manifold
- // ids as a 'tiebreaker' (since they must be equal if the vertex
- // indices are equal). Assert it anyway:
-#ifdef DEBUG
- if (std::equal(std::begin(a.vertices),
- std::end(a.vertices),
- std::begin(b.vertices)))
+ // it should never be necessary to check the material or manifold
+ // ids as a 'tiebreaker' (since they must be equal if the vertex
+ // indices are equal). Assert it anyway:
+ if constexpr (running_in_debug_mode())
{
- Assert(a.material_id == b.material_id &&
- a.manifold_id == b.manifold_id,
- ExcMessage(
- "Two CellData objects with equal vertices must "
- "have the same material/boundary ids and manifold "
- "ids."));
+ if (std::equal(std::begin(a.vertices),
+ std::end(a.vertices),
+ std::begin(b.vertices)))
+ {
+ Assert(a.material_id == b.material_id &&
+ a.manifold_id == b.manifold_id,
+ ExcMessage(
+ "Two CellData objects with equal vertices must "
+ "have the same material/boundary ids and manifold "
+ "ids."));
+ }
}
-#endif
return false;
}
};
for (unsigned int i = 0; i < spacedim; ++i)
result[i] = pf[i];
-#ifdef DEBUG
- Vector<double> pb(chartdim);
- pull_back_function->vector_value(result, pb);
- for (unsigned int i = 0; i < chartdim; ++i)
- Assert(
- (chart_point.norm() > tolerance &&
- (std::abs(pb[i] - chart_point[i]) < tolerance * chart_point.norm())) ||
- (std::abs(pb[i] - chart_point[i]) < tolerance),
- ExcMessage(
- "The push forward is not the inverse of the pull back! Bailing out."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ Vector<double> pb(chartdim);
+ pull_back_function->vector_value(result, pb);
+ for (unsigned int i = 0; i < chartdim; ++i)
+ Assert(
+ (chart_point.norm() > tolerance &&
+ (std::abs(pb[i] - chart_point[i]) <
+ tolerance * chart_point.norm())) ||
+ (std::abs(pb[i] - chart_point[i]) < tolerance),
+ ExcMessage(
+ "The push forward is not the inverse of the pull back! Bailing out."));
+ }
return result;
}
const auto inverse_orientation =
face_1->reference_cell().get_inverse_combined_orientation(orientation);
-#ifdef DEBUG
- const auto [face_orientation, face_rotation, face_flip] =
- internal::split_face_orientation(orientation);
-
- Assert((dim != 1) || (face_orientation == true && face_flip == false &&
- face_rotation == false),
- ExcMessage("The supplied orientation "
- "(face_orientation, face_flip, face_rotation) "
- "is invalid for 1d"));
-
- Assert((dim != 2) || (face_flip == false && face_rotation == false),
- ExcMessage("The supplied orientation "
- "(face_orientation, face_flip, face_rotation) "
- "is invalid for 2d"));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ const auto [face_orientation, face_rotation, face_flip] =
+ internal::split_face_orientation(orientation);
+
+ Assert((dim != 1) || (face_orientation == true && face_flip == false &&
+ face_rotation == false),
+ ExcMessage("The supplied orientation "
+ "(face_orientation, face_flip, face_rotation) "
+ "is invalid for 1d"));
+
+ Assert((dim != 2) || (face_flip == false && face_rotation == false),
+ ExcMessage("The supplied orientation "
+ "(face_orientation, face_flip, face_rotation) "
+ "is invalid for 2d"));
+ }
Assert(face_1 != face_2, ExcMessage("face_1 and face_2 are equal!"));
triangulation.vertices_used.resize(needed_vertices, false);
}
- //-----------------------------------------
- // Before we start with the actual refinement, we do some
- // sanity checks if in debug mode. especially, we try to catch
- // the notorious problem with lines being twice refined,
- // i.e. there are cells adjacent at one line ("around the
- // edge", but not at a face), with two cells differing by more
- // than one refinement level
- //
- // this check is very simple to implement here, since we have
- // all lines flagged if they shall be refined
-#ifdef DEBUG
- for (const auto &cell : triangulation.active_cell_iterators())
- if (!cell->refine_flag_set())
- for (unsigned int line_n = 0; line_n < cell->n_lines(); ++line_n)
- if (cell->line(line_n)->has_children())
- for (unsigned int c = 0; c < 2; ++c)
- Assert(cell->line(line_n)->child(c)->user_flag_set() == false,
- ExcInternalError());
-#endif
+ //-----------------------------------------
+ // Before we start with the actual refinement, we do some
+ // sanity checks if in debug mode. In particular, we try to catch
+ // the notorious problem with lines being twice refined,
+ // i.e. there are cells adjacent at one line ("around the
+ // edge", but not at a face), with two cells differing by more
+ // than one refinement level
+ //
+ // this check is very simple to implement here, since we have
+ // all lines flagged if they shall be refined
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &cell : triangulation.active_cell_iterators())
+ if (!cell->refine_flag_set())
+ for (unsigned int line_n = 0; line_n < cell->n_lines();
+ ++line_n)
+ if (cell->line(line_n)->has_children())
+ for (unsigned int c = 0; c < 2; ++c)
+ Assert(cell->line(line_n)->child(c)->user_flag_set() ==
+ false,
+ ExcInternalError());
+ }
unsigned int current_vertex = 0;
DEAL_II_NOT_IMPLEMENTED();
}
-#ifdef DEBUG
- for (const unsigned int line : quad->line_indices())
- AssertIsNotUsed(new_lines[line]);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const unsigned int line : quad->line_indices())
+ AssertIsNotUsed(new_lines[line]);
+ }
// 2) create new quads (properties are set below). Both triangles
// and quads are divided in four.
}
quad->set_refinement_case(RefinementCase<2>::cut_xy);
-#ifdef DEBUG
- for (const auto &quad : new_quads)
- AssertIsNotUsed(quad);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &quad : new_quads)
+ AssertIsNotUsed(quad);
+ }
// 3) set vertex indices and set new vertex
new_quad->set_boundary_id_internal(quad->boundary_id());
new_quad->set_manifold_id(quad->manifold_id());
-#ifdef DEBUG
- std::set<unsigned int> s;
-#endif
+ [[maybe_unused]] std::set<unsigned int> s;
// ... and fix orientation of lines of face for triangles,
// using an expensive algorithm, quadrilaterals are treated
make_array_view(vertices_0),
make_array_view(vertices_1));
-#ifdef DEBUG
- for (const auto i : vertices_0)
- s.insert(i);
- for (const auto i : vertices_1)
- s.insert(i);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto i : vertices_0)
+ s.insert(i);
+ for (const auto i : vertices_1)
+ s.insert(i);
+ }
new_quad->set_line_orientation(f, orientation);
}
-#ifdef DEBUG
- AssertDimension(s.size(), 3);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ AssertDimension(s.size(), 3);
+ }
}
}
}
- //-----------------------------------------
- // Before we start with the actual refinement, we do some
- // sanity checks if in debug mode. especially, we try to catch
- // the notorious problem with lines being twice refined,
- // i.e. there are cells adjacent at one line ("around the
- // edge", but not at a face), with two cells differing by more
- // than one refinement level
- //
- // this check is very simple to implement here, since we have
- // all lines flagged if they shall be refined
-#ifdef DEBUG
- for (const auto &cell : triangulation.active_cell_iterators())
- if (!cell->refine_flag_set())
- for (unsigned int line = 0;
- line < GeometryInfo<dim>::lines_per_cell;
- ++line)
- if (cell->line(line)->has_children())
- for (unsigned int c = 0; c < 2; ++c)
- Assert(cell->line(line)->child(c)->user_flag_set() == false,
- ExcInternalError());
-#endif
+ //-----------------------------------------
+ // Before we start with the actual refinement, we do some
+ // sanity checks if in debug mode. In particular, we try to catch
+ // the notorious problem with lines being twice refined,
+ // i.e. there are cells adjacent at one line ("around the
+ // edge", but not at a face), with two cells differing by more
+ // than one refinement level
+ //
+ // this check is very simple to implement here, since we have
+ // all lines flagged if they shall be refined
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &cell : triangulation.active_cell_iterators())
+ if (!cell->refine_flag_set())
+ for (unsigned int line = 0;
+ line < GeometryInfo<dim>::lines_per_cell;
+ ++line)
+ if (cell->line(line)->has_children())
+ for (unsigned int c = 0; c < 2; ++c)
+ Assert(cell->line(line)->child(c)->user_flag_set() ==
+ false,
+ ExcInternalError());
+ }
//-----------------------------------------
// Do refinement on every level
if (this->cell_attached_data.n_attached_data_sets == 0)
this->update_cell_relations();
-# ifdef DEBUG
-
- // In debug mode, we want to check for some consistency of the
- // result of this function. Because there are multiple exit
- // paths, put this check into a ScopeExit object that is
- // executed on each of the exit paths.
- //
- // Specifically, check on exit of this function that if a quad
- // cell has been refined, all of its children have neighbors
- // in all directions in which the parent cell has neighbors as
- // well. The children's neighbors are either the parent
- // neighbor or the parent neighbor's children, or simply one of
- // the other children of the current cell. This check is
- // useful because if one creates a triangulation with an
- // inconsistently ordered set of cells (e.g., because one has
- // forgotten to call GridTools::consistently_order_cells()),
- // then this relatively simple invariant is violated -- so the
- // check here can be used to catch that case, at least
- // sometimes.
- //
- // In 1d, this situation cannot happen. In 3d, we have explicit
- // orientation flags to ensure that it is not necessary to re-orient
- // cells at the beginning. But in both cases, the invariant should
- // still hold as long as the cell is a hypercube.
- for (const auto &cell : cell_iterators())
+ if constexpr (running_in_debug_mode())
{
- if (cell->has_children() && cell->reference_cell().is_hyper_cube())
- for (const unsigned int f : cell->face_indices())
- if (cell->at_boundary(f) == false)
- {
- for (const auto &child : cell->child_iterators())
+ // In debug mode, we want to check for some consistency of the
+ // result of this function. Because there are multiple exit
+ // paths, put this check into a ScopeExit object that is
+ // executed on each of the exit paths.
+ //
+ // Specifically, check on exit of this function that if a quad
+ // cell has been refined, all of its children have neighbors
+ // in all directions in which the parent cell has neighbors as
+ // well. The children's neighbors are either the parent
+ // neighbor or the parent neighbor's children, or simply one of
+ // the other children of the current cell. This check is
+ // useful because if one creates a triangulation with an
+ // inconsistently ordered set of cells (e.g., because one has
+ // forgotten to call GridTools::consistently_order_cells()),
+ // then this relatively simple invariant is violated -- so the
+ // check here can be used to catch that case, at least
+ // sometimes.
+ //
+ // In 1d, this situation cannot happen. In 3d, we have explicit
+ // orientation flags to ensure that it is not necessary to re-orient
+ // cells at the beginning. But in both cases, the invariant should
+ // still hold as long as the cell is a hypercube.
+ for (const auto &cell : cell_iterators())
+ {
+ if (cell->has_children() && cell->reference_cell().is_hyper_cube())
+ for (const unsigned int f : cell->face_indices())
+ if (cell->at_boundary(f) == false)
{
- Assert(
- child->at_boundary(f) == false,
- ExcMessage(
- "We ended up with a triangulation whose child cells "
- "are not connected to their neighbors as expected. "
- "When you created the triangulation, did you forget "
- "to call GridTools::consistently_order_cells() "
- "before calling Triangulation::create_triangulation()?"));
+ for (const auto &child : cell->child_iterators())
+ {
+ Assert(
+ child->at_boundary(f) == false,
+ ExcMessage(
+ "We ended up with a triangulation whose child cells "
+ "are not connected to their neighbors as expected. "
+ "When you created the triangulation, did you forget "
+ "to call GridTools::consistently_order_cells() "
+ "before calling Triangulation::create_triangulation()?"));
+ }
}
- }
+ }
}
-# endif
}
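The error message above names the usual remedy when this invariant is violated. A short usage sketch, with vertices and cells assumed to be filled from whatever mesh source is being read:

  std::vector<Point<3>>    vertices;  // filled elsewhere
  std::vector<CellData<3>> cells;     // filled elsewhere
  Triangulation<3>         tria;

  // Bring the cells' vertex ordering into a consistent state before the
  // triangulation is built, so that children connect to their neighbors
  // as the check above expects.
  GridTools::consistently_order_cells(cells);
  tria.create_triangulation(vertices, cells, SubCellData());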
this->data_serializer.unpack_cell_status(this->local_cell_relations);
-# ifdef DEBUG
- // the CellStatus of all stored cells should always be
- // CellStatus::cell_will_persist.
- for (const auto &cell_rel : this->local_cell_relations)
+ if constexpr (running_in_debug_mode())
{
- Assert((cell_rel.second == // cell_status
- ::dealii::CellStatus::cell_will_persist),
- ExcInternalError());
+ // the CellStatus of all stored cells should always be
+ // CellStatus::cell_will_persist.
+ for (const auto &cell_rel : this->local_cell_relations)
+ {
+ Assert((cell_rel.second == // cell_status
+ ::dealii::CellStatus::cell_will_persist),
+ ExcInternalError());
+ }
}
-# endif
}
}
internal::TriangulationImplementation::Implementation::compute_number_cache(
*this, levels.size(), number_cache);
-# ifdef DEBUG
- for (const auto &level : levels)
- monitor_memory(level->cells, dim);
-
- // check whether really all refinement flags are reset (also of
- // previously non-active cells which we may not have touched. If the
- // refinement flag of a non-active cell is set, something went wrong
- // since the cell-accessors should have caught this)
- for (const auto &cell : this->cell_iterators())
- Assert(!cell->refine_flag_set(), ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &level : levels)
+ monitor_memory(level->cells, dim);
+
+ // check whether really all refinement flags are reset (also of
+ // previously non-active cells which we may not have touched. If the
+ // refinement flag of a non-active cell is set, something went wrong
+ // since the cell-accessors should have caught this)
+ for (const auto &cell : this->cell_iterators())
+ Assert(!cell->refine_flag_set(), ExcInternalError());
+ }
return cells_with_distorted_children;
}
{
Assert(cell->is_active(), ExcInternalError());
-#ifdef DEBUG
- // If this is not a parallel::distributed::Triangulation, then we really
- // should only get here if the cell is marked for refinement:
- if (dynamic_cast<const parallel::distributed::Triangulation<dim, spacedim>
- *>(&cell->get_triangulation()) == nullptr)
- Assert(cell->refine_flag_set() == false, ExcInternalError());
- else
- // But if this is a p::d::Triangulation, then we don't have that
- // much control and we can get here because mesh smoothing is
- // requested but can not be honored because p4est controls
- // what gets refined. In that case, we can at least provide
- // a better error message.
- Assert(cell->refine_flag_set() == false,
- ExcMessage(
- "The triangulation is trying to avoid unrefined islands "
- "during mesh refinement/coarsening, as you had requested "
- " by passing the appropriate 'smoothing flags' to the "
- "constructor of the triangulation. However, for objects "
- "of type parallel::distributed::Triangulation, control "
- "over which cells get refined rests with p4est, not the "
- "deal.II triangulation, and consequently it is not "
- "always possible to avoid unrefined islands in the mesh. "
- "Please remove the constructor argument to the triangulation "
- "object that requests mesh smoothing."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // If this is not a parallel::distributed::Triangulation, then we really
+ // should only get here if the cell is marked for refinement:
+ if (dynamic_cast<
+ const parallel::distributed::Triangulation<dim, spacedim> *>(
+ &cell->get_triangulation()) == nullptr)
+ Assert(cell->refine_flag_set() == false, ExcInternalError());
+ else
+ // But if this is a p::d::Triangulation, then we don't have that
+ // much control and we can get here because mesh smoothing is
+ // requested but can not be honored because p4est controls
+ // what gets refined. In that case, we can at least provide
+ // a better error message.
+ Assert(
+ cell->refine_flag_set() == false,
+ ExcMessage(
+ "The triangulation is trying to avoid unrefined islands "
+ "during mesh refinement/coarsening, as you had requested "
+ " by passing the appropriate 'smoothing flags' to the "
+ "constructor of the triangulation. However, for objects "
+ "of type parallel::distributed::Triangulation, control "
+ "over which cells get refined rests with p4est, not the "
+ "deal.II triangulation, and consequently it is not "
+ "always possible to avoid unrefined islands in the mesh. "
+ "Please remove the constructor argument to the triangulation "
+ "object that requests mesh smoothing."));
+ }
// now we provide two algorithms. the first one is the standard
// one, coming from the time, where only isotropic refinement was
neighbor_child->child(GeometryInfo<dim>::child_cell_on_face(
neighbor_child->refinement_case(), neighbor_neighbor, 0));
-#ifdef DEBUG
- // check, whether the face neighbor_child matches the requested
- // subface.
- typename Triangulation<dim, spacedim>::face_iterator requested;
- switch (this->subface_case(face))
+ if constexpr (running_in_debug_mode())
{
- case internal::SubfaceCase<3>::case_x:
- case internal::SubfaceCase<3>::case_y:
- case internal::SubfaceCase<3>::case_xy:
- requested = mother_face->child(subface);
- break;
- case internal::SubfaceCase<3>::case_x1y2y:
- case internal::SubfaceCase<3>::case_y1x2x:
- requested =
- mother_face->child(subface / 2)->child(subface % 2);
- break;
-
- case internal::SubfaceCase<3>::case_x1y:
- case internal::SubfaceCase<3>::case_y1x:
- switch (subface)
- {
- case 0:
- case 1:
- requested = mother_face->child(0)->child(subface);
- break;
- case 2:
- requested = mother_face->child(1);
- break;
- default:
- DEAL_II_ASSERT_UNREACHABLE();
- }
- break;
- case internal::SubfaceCase<3>::case_x2y:
- case internal::SubfaceCase<3>::case_y2x:
- switch (subface)
- {
- case 0:
- requested = mother_face->child(0);
- break;
- case 1:
- case 2:
- requested = mother_face->child(1)->child(subface - 1);
- break;
- default:
- DEAL_II_ASSERT_UNREACHABLE();
- }
- break;
- default:
- DEAL_II_ASSERT_UNREACHABLE();
- break;
+ // check, whether the face neighbor_child matches the
+ // requested subface.
+ typename Triangulation<dim, spacedim>::face_iterator
+ requested;
+ switch (this->subface_case(face))
+ {
+ case internal::SubfaceCase<3>::case_x:
+ case internal::SubfaceCase<3>::case_y:
+ case internal::SubfaceCase<3>::case_xy:
+ requested = mother_face->child(subface);
+ break;
+ case internal::SubfaceCase<3>::case_x1y2y:
+ case internal::SubfaceCase<3>::case_y1x2x:
+ requested =
+ mother_face->child(subface / 2)->child(subface % 2);
+ break;
+
+ case internal::SubfaceCase<3>::case_x1y:
+ case internal::SubfaceCase<3>::case_y1x:
+ switch (subface)
+ {
+ case 0:
+ case 1:
+ requested = mother_face->child(0)->child(subface);
+ break;
+ case 2:
+ requested = mother_face->child(1);
+ break;
+ default:
+ DEAL_II_ASSERT_UNREACHABLE();
+ }
+ break;
+ case internal::SubfaceCase<3>::case_x2y:
+ case internal::SubfaceCase<3>::case_y2x:
+ switch (subface)
+ {
+ case 0:
+ requested = mother_face->child(0);
+ break;
+ case 1:
+ case 2:
+ requested =
+ mother_face->child(1)->child(subface - 1);
+ break;
+ default:
+ DEAL_II_ASSERT_UNREACHABLE();
+ }
+ break;
+ default:
+ DEAL_II_ASSERT_UNREACHABLE();
+ break;
+ }
+ Assert(requested == neighbor_child->face(neighbor_neighbor),
+ ExcInternalError());
}
- Assert(requested == neighbor_child->face(neighbor_neighbor),
- ExcInternalError());
-#endif
return neighbor_child;
}
"in the given communicator."));
}
-#ifdef DEBUG
- // If we are dealing with a sequential triangulation, then someone
- // will have needed to set the subdomain_ids by hand. Make sure that
- // all ids we see are less than the number of processes we are
- // supposed to split the triangulation into.
- if (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
- &tria) == nullptr)
+ if constexpr (running_in_debug_mode())
{
- const unsigned int n_mpi_processes =
- dealii::Utilities::MPI::n_mpi_processes(comm);
- for (const auto &cell : tria.active_cell_iterators())
- Assert(cell->subdomain_id() < n_mpi_processes,
- ExcMessage("You can't have a cell with subdomain_id of " +
- std::to_string(cell->subdomain_id()) +
- " when splitting the triangulation using an MPI "
- " communicator with only " +
- std::to_string(n_mpi_processes) + " processes."));
+ // If we are dealing with a sequential triangulation, then someone
+ // will have needed to set the subdomain_ids by hand. Make sure that
+ // all ids we see are less than the number of processes we are
+ // supposed to split the triangulation into.
+ if (dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
+ &tria) == nullptr)
+ {
+ const unsigned int n_mpi_processes =
+ dealii::Utilities::MPI::n_mpi_processes(comm);
+ for (const auto &cell : tria.active_cell_iterators())
+ Assert(cell->subdomain_id() < n_mpi_processes,
+ ExcMessage(
+ "You can't have a cell with subdomain_id of " +
+ std::to_string(cell->subdomain_id()) +
+ " when splitting the triangulation using an MPI "
+ " communicator with only " +
+ std::to_string(n_mpi_processes) + " processes."));
+ }
}
-#endif
// First, figure out for what rank we are supposed to build the
// TriangulationDescription::Description object
const std::set<unsigned int> &fes,
const unsigned int codim) const
{
-#ifdef DEBUG
- // Validate user inputs.
- Assert(codim <= dim, ExcImpossibleInDim(dim));
- Assert(this->size() > 0, ExcEmptyObject());
- for (const auto &fe : fes)
- AssertIndexRange(fe, this->size());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Validate user inputs.
+ Assert(codim <= dim, ExcImpossibleInDim(dim));
+ Assert(this->size() > 0, ExcEmptyObject());
+ for (const auto &fe : fes)
+ AssertIndexRange(fe, this->size());
+ }
// Check if any element of this FECollection is able to dominate all
// elements of @p fes. If one was found, we add it to the set of
const std::set<unsigned int> &fes,
const unsigned int codim) const
{
-#ifdef DEBUG
- // Validate user inputs.
- Assert(codim <= dim, ExcImpossibleInDim(dim));
- Assert(this->size() > 0, ExcEmptyObject());
- for (const auto &fe : fes)
- AssertIndexRange(fe, this->size());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Validate user inputs.
+ Assert(codim <= dim, ExcImpossibleInDim(dim));
+ Assert(this->size() > 0, ExcEmptyObject());
+ for (const auto &fe : fes)
+ AssertIndexRange(fe, this->size());
+ }
// Check if any element of this FECollection is dominated by all
// elements of @p fes. If one was found, we add it to the set of
if (fes.size() == 1)
return *fes.begin();
-#ifdef DEBUG
- // Validate user inputs.
- Assert(codim <= dim, ExcImpossibleInDim(dim));
- Assert(this->size() > 0, ExcEmptyObject());
- for (const auto &fe : fes)
- AssertIndexRange(fe, this->size());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Validate user inputs.
+ Assert(codim <= dim, ExcImpossibleInDim(dim));
+ Assert(this->size() > 0, ExcEmptyObject());
+ for (const auto &fe : fes)
+ AssertIndexRange(fe, this->size());
+ }
// There may also be others, in which case we'll check if any of these
// elements is able to dominate all others. If one was found, we stop
if (fes.size() == 1)
return *fes.begin();
-#ifdef DEBUG
- // Validate user inputs.
- Assert(codim <= dim, ExcImpossibleInDim(dim));
- Assert(this->size() > 0, ExcEmptyObject());
- for (const auto &fe : fes)
- AssertIndexRange(fe, this->size());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Validate user inputs.
+ Assert(codim <= dim, ExcImpossibleInDim(dim));
+ Assert(this->size() > 0, ExcEmptyObject());
+ for (const auto &fe : fes)
+ AssertIndexRange(fe, this->size());
+ }
// There may also be others, in which case we'll check if any of these
// elements is dominated by all others. If one was found, we stop
identities_graph.emplace(Node(fe_index_1, identity.first),
Node(fe_index_2, identity.second));
-#ifdef DEBUG
- // Now verify that indeed the graph is symmetric: If one element
- // declares that certain ones of its DoFs are to be unified with those
- // of the other, then the other one should agree with this. As a
- // consequence of this test succeeding, we know that the graph is actually
- // undirected.
- for (const auto &edge : identities_graph)
- Assert(identities_graph.find({edge.second, edge.first}) !=
- identities_graph.end(),
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Now verify that indeed the graph is symmetric: If one element
+ // declares that certain ones of its DoFs are to be unified with those
+ // of the other, then the other one should agree with this. As a
+ // consequence of this test succeeding, we know that the graph is
+ // actually undirected.
+ for (const auto &edge : identities_graph)
+ Assert(identities_graph.find({edge.second, edge.first}) !=
+ identities_graph.end(),
+ ExcInternalError());
+ }
// The next step is that we ought to verify that if there is an identity
// between (fe1,dof1) and (fe2,dof2), as well as with (fe2,dof2) and
for (const Edge &e : sub_graph)
identities_graph.erase(e);
-#ifdef DEBUG
- // There are three checks we ought to perform:
- // - That the sub-graph is undirected, i.e. that every edge appears
- // in both directions
- for (const auto &edge : sub_graph)
- Assert(sub_graph.find({edge.second, edge.first}) != sub_graph.end(),
- ExcInternalError());
-
- // - None of the nodes in the sub-graph should have appeared in
- // any of the other sub-graphs. If they did, then we have a bug
- // in extracting sub-graphs. This is actually more easily checked
- // the other way around: none of the nodes of the sub-graph we
- // just extracted should be in any of the edges of the *remaining*
- // graph
- for (const Node &n : sub_graph_nodes)
- for (const Edge &e : identities_graph)
- Assert((n != e.first) && (n != e.second), ExcInternalError());
- // - Second, the sub-graph we just extracted needs to be complete,
- // i.e.,
- // be a "clique". We check this by counting how many edges it has.
- // for 'n' nodes in 'N', we need to have n*(n-1) edges (we store
- // both directed edges).
- Assert(sub_graph.size() ==
- sub_graph_nodes.size() * (sub_graph_nodes.size() - 1),
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // There are three checks we ought to perform:
+ // - That the sub-graph is undirected, i.e. that every edge
+ // appears
+ // in both directions
+ for (const auto &edge : sub_graph)
+ Assert(sub_graph.find({edge.second, edge.first}) !=
+ sub_graph.end(),
+ ExcInternalError());
+
+ // - None of the nodes in the sub-graph should have appeared in
+ // any of the other sub-graphs. If they did, then we have a bug
+ // in extracting sub-graphs. This is actually more easily
+ // checked the other way around: none of the nodes of the
+ // sub-graph we just extracted should be in any of the edges of
+ // the *remaining* graph
+ for (const Node &n : sub_graph_nodes)
+ for (const Edge &e : identities_graph)
+ Assert((n != e.first) && (n != e.second), ExcInternalError());
+ // - Third, the sub-graph we just extracted needs to be complete,
+ // i.e.,
+ // be a "clique". We check this by counting how many edges it
+ // has. for 'n' nodes in 'N', we need to have n*(n-1) edges (we
+ // store both directed edges).
+ Assert(sub_graph.size() ==
+ sub_graph_nodes.size() * (sub_graph_nodes.size() - 1),
+ ExcInternalError());
+ }
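As a quick check of the edge-count formula in the last assertion: a clique of n = 3 identified DoFs stores every connection in both directions, i.e. 3 * (3 - 1) = 6 directed edges, which is exactly the value sub_graph.size() has to report.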
// At this point we're sure that we have extracted a complete
// sub-graph ("clique"). The DoFs involved are all identical then, and
if (future_fe_indices_on_coarsened_cells.find(parent) ==
future_fe_indices_on_coarsened_cells.end())
{
-#ifdef DEBUG
- for (const auto &child : parent->child_iterators())
- Assert(child->is_active() && child->coarsen_flag_set(),
- typename Triangulation<
- dim>::ExcInconsistentCoarseningFlags());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &child : parent->child_iterators())
+ Assert(child->is_active() && child->coarsen_flag_set(),
+ typename Triangulation<
+ dim>::ExcInconsistentCoarseningFlags());
+ }
parent_future_fe_index =
internal::hp::DoFHandlerImplementation::
{
// in debug mode, check whether the
// indices really are sorted.
-#ifdef DEBUG
- {
- ForwardIterator test = begin, test1 = begin;
- ++test1;
- for (; test1 != end; ++test, ++test1)
- Assert(*test1 > *test, ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ ForwardIterator test = begin, test1 = begin;
+ ++test1;
+ for (; test1 != end; ++test, ++test1)
+ Assert(*test1 > *test, ExcInternalError());
+ }
if (entries.empty() || entries.back() < *begin)
{
// iterator for an empty line (what
// would it point to?)
Assert(ncols != 0, ExcInternalError());
-# ifdef DEBUG
- for (PetscInt j = 0; j < ncols; ++j)
+ if constexpr (running_in_debug_mode())
{
- const auto column = static_cast<PetscInt>(colnums[j]);
- AssertIntegerConversion(column, colnums[j]);
+ for (PetscInt j = 0; j < ncols; ++j)
+ {
+ const auto column = static_cast<PetscInt>(colnums[j]);
+ AssertIntegerConversion(column, colnums[j]);
+ }
}
-# endif
colnum_cache =
std::make_shared<std::vector<size_type>>(colnums, colnums + ncols);
value_cache =
assert_is_compressed();
// now set all the entries of these rows to zero
-# ifdef DEBUG
- for (const auto &row : rows)
- AssertIntegerConversion(static_cast<PetscInt>(row), row);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &row : rows)
+ AssertIntegerConversion(static_cast<PetscInt>(row), row);
+ }
const std::vector<PetscInt> petsc_rows(rows.begin(), rows.end());
// call the functions. note that we have
assert_is_compressed();
// now set all the entries of these rows to zero
-# ifdef DEBUG
- for (const auto &row : rows)
- AssertIntegerConversion(static_cast<PetscInt>(row), row);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &row : rows)
+ AssertIntegerConversion(static_cast<PetscInt>(row), row);
+ }
const std::vector<PetscInt> petsc_rows(rows.begin(), rows.end());
// call the functions. note that we have
MatrixBase::compress(const VectorOperation::values operation)
{
{
-# ifdef DEBUG
- // Check that all processors agree that last_action is the same (or none!)
-
- int my_int_last_action = last_action;
- int all_int_last_action;
-
- const int ierr = MPI_Allreduce(&my_int_last_action,
- &all_int_last_action,
- 1,
- MPI_INT,
- MPI_BOR,
- get_mpi_communicator());
- AssertThrowMPI(ierr);
-
- AssertThrow(all_int_last_action !=
- (VectorOperation::add | VectorOperation::insert),
- ExcMessage("Error: not all processors agree on the last "
- "VectorOperation before this compress() call."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Check that all processors agree that last_action is the same (or
+ // none!)
+
+ int my_int_last_action = last_action;
+ int all_int_last_action;
+
+ const int ierr = MPI_Allreduce(&my_int_last_action,
+ &all_int_last_action,
+ 1,
+ MPI_INT,
+ MPI_BOR,
+ get_mpi_communicator());
+ AssertThrowMPI(ierr);
+
+ AssertThrow(all_int_last_action !=
+ (VectorOperation::add | VectorOperation::insert),
+ ExcMessage(
+ "Error: not all processors agree on the last "
+ "VectorOperation before this compress() call."));
+ }
}
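The bitwise-OR reduction above works because the two operations occupy distinct bit patterns; assuming, for illustration, insert == 1 and add == 2, then add | insert == 3. If every rank passed the same value, the reduction simply reproduces that value, while a mix of adds and inserts yields the combined pattern 3, which is exactly what the AssertThrow rejects. Only the distinctness of the bit patterns matters for the argument, not the concrete enum values.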
AssertThrow(
Assert(local_rows.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
-# ifdef DEBUG
- {
- // check indexsets
- types::global_dof_index row_owners =
- Utilities::MPI::sum(local_rows.n_elements(), communicator);
- types::global_dof_index col_owners =
- Utilities::MPI::sum(local_columns.n_elements(), communicator);
- Assert(row_owners == sparsity_pattern.n_rows(),
- ExcMessage(
- std::string(
- "Each row has to be owned by exactly one owner (n_rows()=") +
- std::to_string(sparsity_pattern.n_rows()) +
- " but sum(local_rows.n_elements())=" +
- std::to_string(row_owners) + ")"));
- Assert(
- col_owners == sparsity_pattern.n_cols(),
- ExcMessage(
- std::string(
- "Each column has to be owned by exactly one owner (n_cols()=") +
- std::to_string(sparsity_pattern.n_cols()) +
- " but sum(local_columns.n_elements())=" +
- std::to_string(col_owners) + ")"));
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check indexsets
+ types::global_dof_index row_owners =
+ Utilities::MPI::sum(local_rows.n_elements(), communicator);
+ types::global_dof_index col_owners =
+ Utilities::MPI::sum(local_columns.n_elements(), communicator);
+ Assert(
+ row_owners == sparsity_pattern.n_rows(),
+ ExcMessage(
+ std::string(
+ "Each row has to be owned by exactly one owner (n_rows()=") +
+ std::to_string(sparsity_pattern.n_rows()) +
+ " but sum(local_rows.n_elements())=" +
+ std::to_string(row_owners) + ")"));
+ Assert(
+ col_owners == sparsity_pattern.n_cols(),
+ ExcMessage(
+ std::string(
+ "Each column has to be owned by exactly one owner (n_cols()=") +
+ std::to_string(sparsity_pattern.n_cols()) +
+ " but sum(local_columns.n_elements())=" +
+ std::to_string(col_owners) + ")"));
+ }
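A small worked example of the ownership check: for a 100-row sparsity pattern distributed over two ranks owning rows [0,50) and [50,100), Utilities::MPI::sum() of the local counts gives 50 + 50 = 100 == n_rows(); any overlap or gap between the locally owned index sets would change the sum and trip the assertion.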
// create the matrix. We do not set row length but set the
Assert(local_rows.is_ascending_and_one_to_one(communicator),
ExcNotImplemented());
-# ifdef DEBUG
- {
- // check indexsets
- const types::global_dof_index row_owners =
- Utilities::MPI::sum(local_rows.n_elements(), communicator);
- const types::global_dof_index col_owners =
- Utilities::MPI::sum(local_columns.n_elements(), communicator);
- Assert(row_owners == sparsity_pattern.n_rows(),
- ExcMessage(
- std::string(
- "Each row has to be owned by exactly one owner (n_rows()=") +
- std::to_string(sparsity_pattern.n_rows()) +
- " but sum(local_rows.n_elements())=" +
- std::to_string(row_owners) + ")"));
- Assert(
- col_owners == sparsity_pattern.n_cols(),
- ExcMessage(
- std::string(
- "Each column has to be owned by exactly one owner (n_cols()=") +
- std::to_string(sparsity_pattern.n_cols()) +
- " but sum(local_columns.n_elements())=" +
- std::to_string(col_owners) + ")"));
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check indexsets
+ const types::global_dof_index row_owners =
+ Utilities::MPI::sum(local_rows.n_elements(), communicator);
+ const types::global_dof_index col_owners =
+ Utilities::MPI::sum(local_columns.n_elements(), communicator);
+ Assert(
+ row_owners == sparsity_pattern.n_rows(),
+ ExcMessage(
+ std::string(
+ "Each row has to be owned by exactly one owner (n_rows()=") +
+ std::to_string(sparsity_pattern.n_rows()) +
+ " but sum(local_rows.n_elements())=" +
+ std::to_string(row_owners) + ")"));
+ Assert(
+ col_owners == sparsity_pattern.n_cols(),
+ ExcMessage(
+ std::string(
+ "Each column has to be owned by exactly one owner (n_cols()=") +
+ std::to_string(sparsity_pattern.n_cols()) +
+ " but sum(local_columns.n_elements())=" +
+ std::to_string(col_owners) + ")"));
+ }
PetscErrorCode ierr;
// create the local to global mappings as arrays.
Assert(size() == n, ExcDimensionMismatch(size(), n));
-# ifdef DEBUG
- {
- // test ghost allocation in debug mode
- PetscInt begin, end;
+ if constexpr (running_in_debug_mode())
+ {
+ // test ghost allocation in debug mode
+ PetscInt begin, end;
- ierr = VecGetOwnershipRange(vector, &begin, &end);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
+ ierr = VecGetOwnershipRange(vector, &begin, &end);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
- AssertDimension(locally_owned_size,
- static_cast<size_type>(end - begin));
+ AssertDimension(locally_owned_size,
+ static_cast<size_type>(end - begin));
- Vec l;
- ierr = VecGhostGetLocalForm(vector, &l);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
+ Vec l;
+ ierr = VecGhostGetLocalForm(vector, &l);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
- PetscInt lsize;
- ierr = VecGetSize(l, &lsize);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
+ PetscInt lsize;
+ ierr = VecGetSize(l, &lsize);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
- ierr = VecGhostRestoreLocalForm(vector, &l);
- AssertThrow(ierr == 0, ExcPETScError(ierr));
+ ierr = VecGhostRestoreLocalForm(vector, &l);
+ AssertThrow(ierr == 0, ExcPETScError(ierr));
- AssertDimension(lsize,
- end - begin +
- static_cast<PetscInt>(ghost_indices.n_elements()));
- }
-# endif
+ AssertDimension(lsize,
+ end - begin +
+ static_cast<PetscInt>(ghost_indices.n_elements()));
+ }
}
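The quantities compared above satisfy a simple bookkeeping identity: if a process owns end - begin = 100 entries and the ghost index set contributes 8 elements, the ghosted local form obtained from VecGhostGetLocalForm() must report lsize = 100 + 8 = 108. A mismatch means the ghost layout requested in the constructor was not actually applied to the PETSc vector.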
"vectors."));
{
-# ifdef DEBUG
- // Check that all processors agree that last_action is the same (or none!)
-
- int my_int_last_action = last_action;
- int all_int_last_action;
-
- const int ierr = MPI_Allreduce(&my_int_last_action,
- &all_int_last_action,
- 1,
- MPI_INT,
- MPI_BOR,
- get_mpi_communicator());
- AssertThrowMPI(ierr);
-
- AssertThrow(all_int_last_action !=
- (VectorOperation::add | VectorOperation::insert),
- ExcMessage("Error: not all processors agree on the last "
- "VectorOperation before this compress() call."));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // Check that all processors agree that last_action is the same (or
+ // none!)
+
+ int my_int_last_action = last_action;
+ int all_int_last_action;
+
+ const int ierr = MPI_Allreduce(&my_int_last_action,
+ &all_int_last_action,
+ 1,
+ MPI_INT,
+ MPI_BOR,
+ get_mpi_communicator());
+ AssertThrowMPI(ierr);
+
+ AssertThrow(all_int_last_action !=
+ (VectorOperation::add | VectorOperation::insert),
+ ExcMessage(
+ "Error: not all processors agree on the last "
+ "VectorOperation before this compress() call."));
+ }
}
AssertThrow(
const unsigned int this_mpi_process(
Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
-# ifdef DEBUG
- Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
- ExcMessage("All processes have to call routine with identical rank"));
- Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
- ExcMessage("All processes have to call routine with identical rank"));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
+ ExcMessage(
+ "All processes have to call routine with identical rank"));
+ Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
+ ExcMessage(
+ "All processes have to call routine with identical rank"));
+ }
// root process has to be active in the grid of A
if (this_mpi_process == rank)
const unsigned int this_mpi_process(
Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
-# ifdef DEBUG
- Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
- ExcMessage("All processes have to call routine with identical rank"));
- Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
- ExcMessage("All processes have to call routine with identical rank"));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
+ ExcMessage(
+ "All processes have to call routine with identical rank"));
+ Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
+ ExcMessage(
+ "All processes have to call routine with identical rank"));
+ }
if (this_mpi_process == rank)
{
}
current_neighbors.resize(write_index);
-#ifdef DEBUG
- for (const types::global_dof_index node : current_neighbors)
- Assert(touched_nodes[node] == available_node,
- ExcInternalError());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const types::global_dof_index node : current_neighbors)
+ Assert(touched_nodes[node] == available_node,
+ ExcInternalError());
+ }
// No more neighbors left -> terminate loop
if (current_neighbors.empty())
{
Assert(m() == n(), ExcNotQuadratic());
-# ifdef DEBUG
- // use operator() in debug mode because
- // it checks if this is a valid element
- // (in parallel)
- return operator()(i, i);
-# else
- // Trilinos doesn't seem to have a
- // more efficient way to access the
- // diagonal than by just using the
- // standard el(i,j) function.
- return el(i, i);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // use operator() in debug mode because
+ // it checks if this is a valid element
+ // (in parallel)
+ return operator()(i, i);
+ }
+ else
+ {
+ // Trilinos doesn't seem to have a
+ // more efficient way to access the
+ // diagonal than by just using the
+ // standard el(i,j) function.
+ return el(i, i);
+ }
}
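This is one of the few conversions that replaces a genuine either-or choice rather than a debug-only block. It works because both accessors exist in every build mode: unlike a preprocessor guard, an if constexpr keeps both branches in the program text, so both must remain valid code. A minimal sketch of that constraint, with a hypothetical helper name:

  // Inside templates the discarded branch is merely not instantiated; in an
  // ordinary function like diag_element() above it is compiled in full and
  // must therefore be well-formed.
  template <typename MatrixType>
  double diagonal_of(const MatrixType &m, const unsigned int i)
  {
    if constexpr (running_in_debug_mode())
      return m(i, i);    // checked element access
    else
      return m.el(i, i); // unchecked element access
  }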
col_indices);
col_value_ptr = values;
n_columns = n_cols;
-# ifdef DEBUG
- for (size_type j = 0; j < n_cols; ++j)
- AssertIsFinite(values[j]);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (size_type j = 0; j < n_cols; ++j)
+ AssertIsFinite(values[j]);
+ }
}
else
{
AssertThrow(ierr == 0, ExcTrilinosError(ierr));
}
-# ifdef DEBUG
- if (ierr > 0)
+ if constexpr (running_in_debug_mode())
{
- std::cout << "------------------------------------------" << std::endl;
- std::cout << "Got error " << ierr << " in row " << row << " of proc "
- << matrix->RowMap().Comm().MyPID()
- << " when trying to add the columns:" << std::endl;
- for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
- std::cout << col_index_ptr[i] << " ";
- std::cout << std::endl << std::endl;
- std::cout << "Matrix row "
- << (matrix->RowMap().MyGID(
- static_cast<TrilinosWrappers::types::int_type>(row)) ==
- false ?
- "(nonlocal part)" :
- "")
- << " has the following indices:" << std::endl;
- std::vector<TrilinosWrappers::types::int_type> indices;
- const Epetra_CrsGraph *graph =
- (nonlocal_matrix.get() != nullptr &&
- matrix->RowMap().MyGID(
- static_cast<TrilinosWrappers::types::int_type>(row)) == false) ?
- &nonlocal_matrix->Graph() :
- &matrix->Graph();
-
- indices.resize(graph->NumGlobalIndices(row));
- int n_indices = 0;
- graph->ExtractGlobalRowCopy(row,
- indices.size(),
- n_indices,
- indices.data());
- AssertDimension(n_indices, indices.size());
-
- for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
- std::cout << indices[i] << " ";
- std::cout << std::endl << std::endl;
- Assert(ierr <= 0, ExcAccessToNonPresentElement(row, col_index_ptr[0]));
+ if (ierr > 0)
+ {
+ std::cout << "------------------------------------------"
+ << std::endl;
+ std::cout << "Got error " << ierr << " in row " << row
+ << " of proc " << matrix->RowMap().Comm().MyPID()
+ << " when trying to add the columns:" << std::endl;
+ for (TrilinosWrappers::types::int_type i = 0; i < n_columns; ++i)
+ std::cout << col_index_ptr[i] << " ";
+ std::cout << std::endl << std::endl;
+ std::cout << "Matrix row "
+ << (matrix->RowMap().MyGID(
+ static_cast<TrilinosWrappers::types::int_type>(
+ row)) == false ?
+ "(nonlocal part)" :
+ "")
+ << " has the following indices:" << std::endl;
+ std::vector<TrilinosWrappers::types::int_type> indices;
+ const Epetra_CrsGraph *graph =
+ (nonlocal_matrix.get() != nullptr &&
+ matrix->RowMap().MyGID(
+ static_cast<TrilinosWrappers::types::int_type>(row)) ==
+ false) ?
+ &nonlocal_matrix->Graph() :
+ &matrix->Graph();
+
+ indices.resize(graph->NumGlobalIndices(row));
+ int n_indices = 0;
+ graph->ExtractGlobalRowCopy(row,
+ indices.size(),
+ n_indices,
+ indices.data());
+ AssertDimension(n_indices, indices.size());
+
+ for (TrilinosWrappers::types::int_type i = 0; i < n_indices; ++i)
+ std::cout << indices[i] << " ";
+ std::cout << std::endl << std::endl;
+ Assert(ierr <= 0,
+ ExcAccessToNonPresentElement(row, col_index_ptr[0]));
+ }
}
-# endif
Assert(ierr >= 0, ExcTrilinosError(ierr));
}
IndexSet nonlocal_partitioner = writable_rows;
AssertDimension(nonlocal_partitioner.size(),
row_parallel_partitioning.size());
-# ifdef DEBUG
- {
- IndexSet tmp = writable_rows & row_parallel_partitioning;
- Assert(tmp == row_parallel_partitioning,
- ExcMessage(
- "The set of writable rows passed to this method does not "
- "contain the locally owned rows, which is not allowed."));
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ IndexSet tmp = writable_rows & row_parallel_partitioning;
+ Assert(tmp == row_parallel_partitioning,
+ ExcMessage(
+ "The set of writable rows passed to this method does not "
+ "contain the locally owned rows, which is not allowed."));
+ }
nonlocal_partitioner.subtract_set(row_parallel_partitioning);
if (Utilities::MPI::n_mpi_processes(communicator) > 1)
{
IndexSet nonlocal_partitioner = writable_rows;
AssertDimension(nonlocal_partitioner.size(),
row_parallel_partitioning.size());
-# ifdef DEBUG
- {
- IndexSet tmp = writable_rows & row_parallel_partitioning;
- Assert(tmp == row_parallel_partitioning,
- ExcMessage(
- "The set of writable rows passed to this method does not "
- "contain the locally owned rows, which is not allowed."));
- }
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ IndexSet tmp = writable_rows & row_parallel_partitioning;
+ Assert(tmp == row_parallel_partitioning,
+ ExcMessage(
+ "The set of writable rows passed to this method does not "
+ "contain the locally owned rows, which is not allowed."));
+ }
nonlocal_partitioner.subtract_set(row_parallel_partitioning);
if (Utilities::MPI::n_mpi_processes(communicator) > 1)
{
else
owned_elements = parallel_partitioner;
-# ifdef DEBUG
- const size_type n_elements_global =
- Utilities::MPI::sum(owned_elements.n_elements(), communicator);
+ if constexpr (running_in_debug_mode())
+ {
+ const size_type n_elements_global =
+ Utilities::MPI::sum(owned_elements.n_elements(), communicator);
- Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
-# endif
+ Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
+ }
last_action = Zero;
}
last_action = Insert;
}
-# ifdef DEBUG
- const Epetra_MpiComm *comm_ptr =
- dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
- Assert(comm_ptr != nullptr, ExcInternalError());
- const size_type n_elements_global =
- Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
- Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ const Epetra_MpiComm *comm_ptr =
+ dynamic_cast<const Epetra_MpiComm *>(&(v.vector->Comm()));
+ Assert(comm_ptr != nullptr, ExcInternalError());
+ const size_type n_elements_global =
+ Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
+ Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
+ }
}
}
else
vector = std::move(actual_vec);
-# ifdef DEBUG
- const Epetra_MpiComm *comm_ptr =
- dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
- Assert(comm_ptr != nullptr, ExcInternalError());
- const size_type n_elements_global =
- Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
-
- Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ const Epetra_MpiComm *comm_ptr =
+ dynamic_cast<const Epetra_MpiComm *>(&(vector->Comm()));
+ Assert(comm_ptr != nullptr, ExcInternalError());
+ const size_type n_elements_global =
+ Utilities::MPI::sum(owned_elements.n_elements(), comm_ptr->Comm());
+
+ Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
+ }
}
last_action = Zero;
-# ifdef DEBUG
- const size_type n_elements_global =
- Utilities::MPI::sum(owned_elements.n_elements(), communicator);
+ if constexpr (running_in_debug_mode())
+ {
+ const size_type n_elements_global =
+ Utilities::MPI::sum(owned_elements.n_elements(), communicator);
- Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
-# endif
+ Assert(has_ghosts || n_elements_global == size(), ExcInternalError());
+ }
}
}
-# ifdef DEBUG
- // check that every process has decided to use the same mode. This will
- // otherwise result in undefined behavior in the call to
- // GlobalAssemble().
- const double double_mode = mode;
- const Epetra_MpiComm *comm_ptr =
- dynamic_cast<const Epetra_MpiComm *>(&(trilinos_partitioner().Comm()));
- Assert(comm_ptr != nullptr, ExcInternalError());
-
- const Utilities::MPI::MinMaxAvg result =
- Utilities::MPI::min_max_avg(double_mode, comm_ptr->GetMpiComm());
- Assert(result.max == result.min,
- ExcMessage(
- "Not all processors agree whether the last operation on "
- "this vector was an addition or a set operation. This will "
- "prevent the compress() operation from succeeding."));
-
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check that every process has decided to use the same mode. This
+ // will otherwise result in undefined behavior in the call to
+ // GlobalAssemble().
+ const double double_mode = mode;
+ const Epetra_MpiComm *comm_ptr = dynamic_cast<const Epetra_MpiComm *>(
+ &(trilinos_partitioner().Comm()));
+ Assert(comm_ptr != nullptr, ExcInternalError());
+
+ const Utilities::MPI::MinMaxAvg result =
+ Utilities::MPI::min_max_avg(double_mode, comm_ptr->GetMpiComm());
+ Assert(result.max == result.min,
+ ExcMessage(
+ "Not all processors agree whether the last operation on "
+ "this vector was an addition or a set operation. This will "
+ "prevent the compress() operation from succeeding."));
+ }
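The consistency check above exploits a simple fact: if the minimum and the maximum of a value over all MPI ranks coincide, every rank holds the same value. A self-contained sketch of that idiom using raw MPI calls (function and variable names are made up for illustration):

#include <mpi.h>
#include <cassert>

// Sketch: assert that all ranks of 'comm' pass the same value of 'mode'.
void assert_all_ranks_agree(const int mode, const MPI_Comm comm)
{
  int global_min = 0;
  int global_max = 0;
  MPI_Allreduce(&mode, &global_min, 1, MPI_INT, MPI_MIN, comm);
  MPI_Allreduce(&mode, &global_max, 1, MPI_INT, MPI_MAX, comm);

  // min == max over all ranks implies every rank contributed the same value.
  assert(global_min == global_max);
}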
// Now pass over the information about what we did last to the vector.
if (nonlocal_vector.get() == nullptr || mode != Add)
const unsigned int n_owned = (vector_partitioner->local_range().second -
vector_partitioner->local_range().first);
const std::size_t n_ghosts = ghost_dofs.size();
-#ifdef DEBUG
- for (const auto dof_index : dof_indices)
- AssertIndexRange(dof_index, n_owned + n_ghosts);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto dof_index : dof_indices)
+ AssertIndexRange(dof_index, n_owned + n_ghosts);
+ }
const unsigned int n_components = start_components.back();
std::vector<unsigned int> ghost_numbering(n_ghosts);
new_rowstart_plain.swap(row_starts_plain_indices);
new_hanging_node_constraint_masks.swap(hanging_node_constraint_masks);
-#ifdef DEBUG
- // sanity check 1: all indices should be smaller than the number of dofs
- // locally owned plus the number of ghosts
- const unsigned int index_range =
- (vector_partitioner->local_range().second -
- vector_partitioner->local_range().first) +
- vector_partitioner->ghost_indices().n_elements();
- for (const auto dof_index : dof_indices)
- AssertIndexRange(dof_index, index_range);
-
- // sanity check 2: for the constraint indicators, the first index should
- // be smaller than the number of indices in the row, and the second
- // index should be smaller than the number of constraints in the
- // constraint pool.
- for (unsigned int row = 0; row < task_info.cell_partition_data.back();
- ++row)
+ if constexpr (running_in_debug_mode())
{
- const unsigned int row_length_ind =
- row_starts[(row * vectorization_length + 1) * n_components].first -
- row_starts[row * vectorization_length * n_components].first;
- AssertIndexRange(
- row_starts[(row * vectorization_length + 1) * n_components].second,
- constraint_indicator.size() + 1);
- const std::pair<unsigned short, unsigned short> *
- con_it =
- constraint_indicator.data() +
- row_starts[row * vectorization_length * n_components].second,
- *end_con =
- constraint_indicator.data() +
- row_starts[(row * vectorization_length + 1) * n_components].second;
- for (; con_it != end_con; ++con_it)
+ // sanity check 1: all indices should be smaller than the number of
+ // dofs locally owned plus the number of ghosts
+ const unsigned int index_range =
+ (vector_partitioner->local_range().second -
+ vector_partitioner->local_range().first) +
+ vector_partitioner->ghost_indices().n_elements();
+ for (const auto dof_index : dof_indices)
+ AssertIndexRange(dof_index, index_range);
+
+ // sanity check 2: for the constraint indicators, the first index
+ // should be smaller than the number of indices in the row, and the
+ // second index should be smaller than the number of constraints in
+ // the constraint pool.
+ for (unsigned int row = 0; row < task_info.cell_partition_data.back();
+ ++row)
{
- AssertIndexRange(con_it->first, row_length_ind + 1);
- AssertIndexRange(con_it->second,
- constraint_pool_row_index.size() - 1);
+ const unsigned int row_length_ind =
+ row_starts[(row * vectorization_length + 1) * n_components]
+ .first -
+ row_starts[row * vectorization_length * n_components].first;
+ AssertIndexRange(
+ row_starts[(row * vectorization_length + 1) * n_components]
+ .second,
+ constraint_indicator.size() + 1);
+ const std::pair<unsigned short, unsigned short>
+ *con_it =
+ constraint_indicator.data() +
+ row_starts[row * vectorization_length * n_components].second,
+ *end_con =
+ constraint_indicator.data() +
+ row_starts[(row * vectorization_length + 1) * n_components]
+ .second;
+ for (; con_it != end_con; ++con_it)
+ {
+ AssertIndexRange(con_it->first, row_length_ind + 1);
+ AssertIndexRange(con_it->second,
+ constraint_pool_row_index.size() - 1);
+ }
}
- }
- // sanity check 3: check the number of cells once again
- unsigned int n_active_cells = 0;
- for (unsigned int c = 0; c < *(task_info.cell_partition_data.end() - 2);
- ++c)
- if (irregular_cells[c] > 0)
- n_active_cells += irregular_cells[c];
- else
- n_active_cells += vectorization_length;
- AssertDimension(n_active_cells, task_info.n_active_cells);
-#endif
+ // sanity check 3: check the number of cells once again
+ unsigned int n_active_cells = 0;
+ for (unsigned int c = 0;
+ c < *(task_info.cell_partition_data.end() - 2);
+ ++c)
+ if (irregular_cells[c] > 0)
+ n_active_cells += irregular_cells[c];
+ else
+ n_active_cells += vectorization_length;
+ AssertDimension(n_active_cells, task_info.n_active_cells);
+ }
compute_cell_index_compression(irregular_cells);
}
cell_partition_data.push_back(n_cell_batches + n_ghost_batches);
partition_row_index.back() = cell_partition_data.size() - 1;
-#ifdef DEBUG
- std::vector<unsigned int> renumber_cpy(renumbering);
- std::sort(renumber_cpy.begin(), renumber_cpy.end());
- for (unsigned int i = 0; i < renumber_cpy.size(); ++i)
- AssertDimension(i, renumber_cpy[i]);
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ std::vector<unsigned int> renumber_cpy(renumbering);
+ std::sort(renumber_cpy.begin(), renumber_cpy.end());
+ for (unsigned int i = 0; i < renumber_cpy.size(); ++i)
+ AssertDimension(i, renumber_cpy[i]);
+ }
}
partition_list = renumbering;
-#ifdef DEBUG
- // in debug mode, check that the partition color list is one-to-one
- {
- std::vector<unsigned int> sorted_pc_list(partition_color_list);
- std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
- for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
- Assert(sorted_pc_list[i] == i, ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ // check that the partition color list is one-to-one
+ std::vector<unsigned int> sorted_pc_list(partition_color_list);
+ std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+ for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
+ Assert(sorted_pc_list[i] == i, ExcInternalError());
+ }
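This and the following one-to-one checks all rely on the same idiom: sort a copy of the index list and require that position i holds value i afterwards, which is exactly the condition for the list being a permutation of 0, ..., N-1. A self-contained sketch (the function name is made up):

#include <algorithm>
#include <vector>

// Sketch: returns true if 'numbering' contains each of 0..N-1 exactly once.
bool is_one_to_one(const std::vector<unsigned int> &numbering)
{
  std::vector<unsigned int> sorted_copy(numbering);
  std::sort(sorted_copy.begin(), sorted_copy.end());
  for (unsigned int i = 0; i < sorted_copy.size(); ++i)
    if (sorted_copy[i] != i)
      return false;
  return true;
}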
// set the start list for each block and compute the renumbering of
// cells
AssertDimension(counter_macro, n_cell_batches);
// check that the renumbering is one-to-one
-#ifdef DEBUG
- {
- std::vector<unsigned int> sorted_renumbering(renumbering);
- std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
- for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
- Assert(sorted_renumbering[i] == i, ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ std::vector<unsigned int> sorted_renumbering(renumbering);
+ std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+ for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
+ Assert(sorted_renumbering[i] == i, ExcInternalError());
+ }
update_task_info(
partition_2layers_list);
}
- // in debug mode, check that the partition_2layers_list is one-to-one
-#ifdef DEBUG
- {
- std::vector<unsigned int> sorted_pc_list(partition_2layers_list);
- std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
- for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
- Assert(sorted_pc_list[i] == i, ExcInternalError());
- }
-#endif
+ // in debug mode, check that the partition_2layers_list is one-to-one
+ if constexpr (running_in_debug_mode())
+ {
+ std::vector<unsigned int> sorted_pc_list(partition_2layers_list);
+ std::sort(sorted_pc_list.begin(), sorted_pc_list.end());
+ for (unsigned int i = 0; i < sorted_pc_list.size(); ++i)
+ Assert(sorted_pc_list[i] == i, ExcInternalError());
+ }
// Set the new renumbering
std::vector<unsigned int> renumbering_in(n_active_cells, 0);
AssertDimension(counter, n_active_cells);
AssertDimension(counter_macro, n_cell_batches);
// check that the renumbering is one-to-one
-#ifdef DEBUG
- {
- std::vector<unsigned int> sorted_renumbering(renumbering);
- std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
- for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
- Assert(sorted_renumbering[i] == i, ExcInternalError());
- }
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ std::vector<unsigned int> sorted_renumbering(renumbering);
+ std::sort(sorted_renumbering.begin(), sorted_renumbering.end());
+ for (unsigned int i = 0; i < sorted_renumbering.size(); ++i)
+ Assert(sorted_renumbering[i] == i, ExcInternalError());
+ }
}
// Update the task_info with the more information for the thread graph.
AssertThrowMPI(ierr);
requests.clear();
}
-# ifdef DEBUG
- // Make sure in debug mode, that everybody sent/received all packages
- // on this level. If a deadlock occurs here, the list of expected
- // senders is not computed correctly.
- const int ierr = MPI_Barrier(tria->get_mpi_communicator());
- AssertThrowMPI(ierr);
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ // In debug mode, make sure that everybody sent/received all
+ // packages on this level. If a deadlock occurs here, the list of
+ // expected senders is not computed correctly.
+ const int ierr = MPI_Barrier(tria->get_mpi_communicator());
+ AssertThrowMPI(ierr);
+ }
}
#endif
dirichlet_indices.clear();
weights_on_refined.clear();
-#ifdef DEBUG
- if (mg_constrained_dofs)
+ if constexpr (running_in_debug_mode())
{
- const unsigned int n_levels =
- dof_handler.get_triangulation().n_global_levels();
-
- for (unsigned int l = 0; l < n_levels; ++l)
+ if (mg_constrained_dofs)
{
- const auto &constraints =
- mg_constrained_dofs->get_user_constraint_matrix(l);
-
- // no inhomogeneities are supported
- AssertDimension(constraints.n_inhomogeneities(), 0);
+ const unsigned int n_levels =
+ dof_handler.get_triangulation().n_global_levels();
- for (const auto dof : constraints.get_local_lines())
+ for (unsigned int l = 0; l < n_levels; ++l)
{
- const auto *entries_ptr =
- constraints.get_constraint_entries(dof);
+ const auto &constraints =
+ mg_constrained_dofs->get_user_constraint_matrix(l);
- if (entries_ptr == nullptr)
- continue;
+ // no inhomogeneities are supported
+ AssertDimension(constraints.n_inhomogeneities(), 0);
- // only homogeneous or identity constraints are supported
- Assert((entries_ptr->size() == 0) ||
- ((entries_ptr->size() == 1) &&
- (std::abs((*entries_ptr)[0].second - 1.) <
- 100 * std::numeric_limits<double>::epsilon())),
- ExcNotImplemented());
+ for (const auto dof : constraints.get_local_lines())
+ {
+ const auto *entries_ptr =
+ constraints.get_constraint_entries(dof);
+
+ if (entries_ptr == nullptr)
+ continue;
+
+ // only homogeneous or identity constraints are supported
+ Assert((entries_ptr->size() == 0) ||
+ ((entries_ptr->size() == 1) &&
+ (std::abs((*entries_ptr)[0].second - 1.) <
+ 100 * std::numeric_limits<double>::epsilon())),
+ ExcNotImplemented());
+ }
}
}
}
-#endif
// we collect all child DoFs of a mother cell together. For faster
// tensorized operations, we align the degrees of freedom
Assert(dst.n_blocks() == this->n_mg_blocks,
ExcDimensionMismatch(dst.n_blocks(), this->n_mg_blocks));
-#ifdef DEBUG
- if (this->mg_constrained_dofs != nullptr)
- Assert(this->mg_constrained_dofs->get_user_constraint_matrix(to_level - 1)
- .get_local_lines()
- .size() == 0,
- ExcNotImplemented());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (this->mg_constrained_dofs != nullptr)
+ Assert(this->mg_constrained_dofs
+ ->get_user_constraint_matrix(to_level - 1)
+ .get_local_lines()
+ .size() == 0,
+ ExcNotImplemented());
+ }
// Multiplicate with prolongation
// matrix, but only those blocks
Assert((to_level >= 1) && (to_level <= prolongation_matrices.size()),
ExcIndexRange(to_level, 1, prolongation_matrices.size() + 1));
-#ifdef DEBUG
- if (this->mg_constrained_dofs != nullptr)
- Assert(this->mg_constrained_dofs->get_user_constraint_matrix(to_level - 1)
- .get_local_lines()
- .size() == 0,
- ExcNotImplemented());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ if (this->mg_constrained_dofs != nullptr)
+ Assert(this->mg_constrained_dofs
+ ->get_user_constraint_matrix(to_level - 1)
+ .get_local_lines()
+ .size() == 0,
+ ExcNotImplemented());
+ }
prolongation_matrices[to_level - 1]
->block(selected_block, selected_block)
const auto &dh = *data_ptr->dof_handler;
-#ifdef DEBUG
- for (const auto &fe : dh.get_fe_collection())
- Assert(
- fe.n_base_elements() == 1,
- ExcMessage(
- "This class currently only supports scalar elements and elements "
- "with a single base element."));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &fe : dh.get_fe_collection())
+ Assert(
+ fe.n_base_elements() == 1,
+ ExcMessage(
+ "This class currently only supports scalar elements and elements "
+ "with a single base element."));
+ }
for (unsigned int comp = 0; comp < dh.get_fe_collection().n_components();
++comp)
const std::vector<std::string> &names,
const VectorType vector_type)
{
-#ifdef DEBUG
- // make sure this function is
- // not called after some parameter
- // values have already been
- // processed
- Assert(patches.empty(), ExcDataAlreadyAdded());
-
- // also make sure that no name is
- // used twice
- for (const auto &name : names)
+ if constexpr (running_in_debug_mode())
{
- for (const auto &data_set : dof_data)
- for (const auto &data_set_name : data_set.names)
- Assert(name != data_set_name, ExcNameAlreadyUsed(name));
+ // make sure this function is not called after some parameter
+ // values have already been processed
+ Assert(patches.empty(), ExcDataAlreadyAdded());
+
+ // also make sure that no name is used twice
+ for (const auto &name : names)
+ {
+ for (const auto &data_set : dof_data)
+ for (const auto &data_set_name : data_set.names)
+ Assert(name != data_set_name, ExcNameAlreadyUsed(name));
- for (const auto &data_set : cell_data)
- for (const auto &data_set_name : data_set.names)
- Assert(name != data_set_name, ExcNameAlreadyUsed(name));
+ for (const auto &data_set : cell_data)
+ for (const auto &data_set_name : data_set.names)
+ Assert(name != data_set_name, ExcNameAlreadyUsed(name));
+ }
}
-#endif
switch (vector_type)
{
// In case of coarsening, we need to find a suitable FE index
// for the parent cell. We choose the 'least dominant fe'
// on all children from the associated FECollection.
-#ifdef DEBUG
- for (const auto &child : cell->child_iterators())
- Assert(child->is_active() && child->coarsen_flag_set(),
- typename dealii::Triangulation<
- dim>::ExcInconsistentCoarseningFlags());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &child : cell->child_iterators())
+ Assert(child->is_active() && child->coarsen_flag_set(),
+ typename dealii::Triangulation<
+ dim>::ExcInconsistentCoarseningFlags());
+ }
fe_index = dealii::internal::hp::DoFHandlerImplementation::
dominated_future_fe_on_children<dim, spacedim>(cell);
n_dofs_old = dof_handler->n_dofs();
const unsigned int in_size = all_in.size();
-#ifdef DEBUG
- Assert(in_size != 0,
- ExcMessage("The array of input vectors you pass to this "
- "function has no elements. This is not useful."));
- for (unsigned int i = 0; i < in_size; ++i)
+ if constexpr (running_in_debug_mode())
{
- Assert(all_in[i].size() == n_dofs_old,
- ExcDimensionMismatch(all_in[i].size(), n_dofs_old));
+ Assert(in_size != 0,
+ ExcMessage("The array of input vectors you pass to this "
+ "function has no elements. This is not useful."));
+ for (unsigned int i = 0; i < in_size; ++i)
+ {
+ Assert(all_in[i].size() == n_dofs_old,
+ ExcDimensionMismatch(all_in[i].size(), n_dofs_old));
+ }
}
-#endif
// We need to access dof indices on the entire domain. For
// parallel::shared::Triangulations, ownership of cells might change. If
std::vector<VectorType> &all_out) const
{
const unsigned int size = all_in.size();
-#ifdef DEBUG
- Assert(prepared_for == coarsening_and_refinement, ExcNotPrepared());
- Assert(all_out.size() == size, ExcDimensionMismatch(all_out.size(), size));
- for (unsigned int i = 0; i < size; ++i)
- Assert(all_in[i].size() == n_dofs_old,
- ExcDimensionMismatch(all_in[i].size(), n_dofs_old));
- for (unsigned int i = 0; i < all_out.size(); ++i)
- Assert(all_out[i].size() == dof_handler->n_dofs(),
- ExcDimensionMismatch(all_out[i].size(), dof_handler->n_dofs()));
- for (unsigned int i = 0; i < size; ++i)
- for (unsigned int j = 0; j < size; ++j)
- Assert(&all_in[i] != &all_out[j],
- ExcMessage("Vectors cannot be used as input and output"
- " at the same time!"));
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ Assert(prepared_for == coarsening_and_refinement, ExcNotPrepared());
+ Assert(all_out.size() == size,
+ ExcDimensionMismatch(all_out.size(), size));
+ for (unsigned int i = 0; i < size; ++i)
+ Assert(all_in[i].size() == n_dofs_old,
+ ExcDimensionMismatch(all_in[i].size(), n_dofs_old));
+ for (unsigned int i = 0; i < all_out.size(); ++i)
+ Assert(all_out[i].size() == dof_handler->n_dofs(),
+ ExcDimensionMismatch(all_out[i].size(),
+ dof_handler->n_dofs()));
+ for (unsigned int i = 0; i < size; ++i)
+ for (unsigned int j = 0; j < size; ++j)
+ Assert(&all_in[i] != &all_out[j],
+ ExcMessage("Vectors cannot be used as input and output"
+ " at the same time!"));
+ }
// We need to access dof indices on the entire domain. For
// parallel::shared::Triangulations, ownership of cells might change. If
const Point<spacedim> &candidate) const
{
(void)surrounding_points;
-# ifdef DEBUG
- for (unsigned int i = 0; i < surrounding_points.size(); ++i)
- Assert(closest_point(sh, surrounding_points[i], tolerance)
- .distance(surrounding_points[i]) <
- std::max(tolerance * surrounding_points[i].norm(), tolerance),
- ExcPointNotOnManifold<spacedim>(surrounding_points[i]));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = 0; i < surrounding_points.size(); ++i)
+ Assert(closest_point(sh, surrounding_points[i], tolerance)
+ .distance(surrounding_points[i]) <
+ std::max(tolerance * surrounding_points[i].norm(),
+ tolerance),
+ ExcPointNotOnManifold<spacedim>(surrounding_points[i]));
+ }
return closest_point(sh, candidate, tolerance);
}
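The point-on-manifold assertions above compare the distance to the projected point against std::max(tolerance * point.norm(), tolerance): the tolerance scales with the magnitude of the point but never drops below the absolute value 'tolerance', so points near the origin are not held to an impossibly tight bound. A small sketch of that mixed relative/absolute test (the helper name is made up):

#include <algorithm>

// Sketch: relative tolerance for points far from the origin, with an
// absolute floor of 'tolerance' for points close to it.
bool is_within_tolerance(const double distance,
                         const double point_norm,
                         const double tolerance)
{
  return distance < std::max(tolerance * point_norm, tolerance);
}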
const Point<spacedim> &candidate) const
{
(void)surrounding_points;
-# ifdef DEBUG
- for (unsigned int i = 0; i < surrounding_points.size(); ++i)
- Assert(closest_point(sh, surrounding_points[i], tolerance)
- .distance(surrounding_points[i]) <
- std::max(tolerance * surrounding_points[i].norm(), tolerance),
- ExcPointNotOnManifold<spacedim>(surrounding_points[i]));
-# endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (unsigned int i = 0; i < surrounding_points.size(); ++i)
+ Assert(closest_point(sh, surrounding_points[i], tolerance)
+ .distance(surrounding_points[i]) <
+ std::max(tolerance * surrounding_points[i].norm(),
+ tolerance),
+ ExcPointNotOnManifold<spacedim>(surrounding_points[i]));
+ }
return line_intersection(sh, candidate, direction, tolerance);
}
constexpr int spacedim = 3;
TopoDS_Shape out_shape;
Tensor<1, spacedim> average_normal;
-# ifdef DEBUG
- for (const auto &point : surrounding_points)
+ if constexpr (running_in_debug_mode())
{
- Assert(closest_point(sh, point, tolerance).distance(point) <
- std::max(tolerance * point.norm(), tolerance),
- ExcPointNotOnManifold<spacedim>(point));
+ for (const auto &point : surrounding_points)
+ {
+ Assert(closest_point(sh, point, tolerance).distance(point) <
+ std::max(tolerance * point.norm(), tolerance),
+ ExcPointNotOnManifold<spacedim>(point));
+ }
}
-# endif
switch (surrounding_points.size())
{
owned_particles_end->particles.empty(),
ExcInternalError());
-#ifdef DEBUG
- // check that no cache element hits the three anchor states in the list of
- // particles
- for (const auto &it : cells_to_particle_cache)
- Assert(it != particles.begin() && it != owned_particles_end &&
- it != --(particles.end()),
- ExcInternalError());
-
- // check that we only have locally owned particles in the first region of
- // cells; note that we skip the very first anchor element
- for (auto it = particle_container_owned_begin();
- it != particle_container_owned_end();
- ++it)
- Assert(it->cell->is_locally_owned(), ExcInternalError());
-
- // check that the cache is consistent with the iterators
- std::vector<typename particle_container::iterator> verify_cache(
- triangulation->n_active_cells(), particles.end());
- for (auto it = particles.begin(); it != particles.end(); ++it)
- if (!it->particles.empty())
- verify_cache[it->cell->active_cell_index()] = it;
+ if constexpr (running_in_debug_mode())
+ {
+ // check that no cache element hits the three anchor states in the list
+ // of particles
+ for (const auto &it : cells_to_particle_cache)
+ Assert(it != particles.begin() && it != owned_particles_end &&
+ it != --(particles.end()),
+ ExcInternalError());
- for (unsigned int i = 0; i < verify_cache.size(); ++i)
- Assert(verify_cache[i] == cells_to_particle_cache[i], ExcInternalError());
-#endif
+ // check that we only have locally owned particles in the first region
+ // of cells; note that we skip the very first anchor element
+ for (auto it = particle_container_owned_begin();
+ it != particle_container_owned_end();
+ ++it)
+ Assert(it->cell->is_locally_owned(), ExcInternalError());
+
+ // check that the cache is consistent with the iterators
+ std::vector<typename particle_container::iterator> verify_cache(
+ triangulation->n_active_cells(), particles.end());
+ for (auto it = particles.begin(); it != particles.end(); ++it)
+ if (!it->particles.empty())
+ verify_cache[it->cell->active_cell_index()] = it;
+
+ for (unsigned int i = 0; i < verify_cache.size(); ++i)
+ Assert(verify_cache[i] == cells_to_particle_cache[i],
+ ExcInternalError());
+ }
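The cache check above uses a rebuild-and-compare strategy: recompute the cell-to-particles lookup from scratch and require that it matches the incrementally maintained cache entry by entry. A simplified, self-contained sketch of the same strategy on a generic owner/items mapping (data layout and names are illustrative, not the library's):

#include <cassert>
#include <cstddef>
#include <vector>

// Sketch: cross-check an incrementally maintained lookup table against one
// rebuilt from the ground-truth data.
void verify_cache(const std::vector<std::size_t> &owner_of_item,
                  const std::vector<std::vector<std::size_t>> &cached_items_of_owner)
{
  // Rebuild the lookup table from scratch ...
  std::vector<std::vector<std::size_t>> rebuilt(cached_items_of_owner.size());
  for (std::size_t item = 0; item < owner_of_item.size(); ++item)
    rebuilt[owner_of_item[item]].push_back(item);

  // ... and require that the maintained cache agrees with it exactly.
  assert(rebuilt == cached_items_of_owner);
}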
// now compute local result with the function above and then compute the
// collective results
if (!properties.empty())
{
AssertDimension(properties.size(), positions.size());
-#ifdef DEBUG
- for (const auto &p : properties)
- AssertDimension(p.size(), n_properties_per_particle());
-#endif
+ if constexpr (running_in_debug_mode())
+ {
+ for (const auto &p : properties)
+ AssertDimension(p.size(), n_properties_per_particle());
+ }
}
if (!ids.empty())