* std::map<std::string,double> constants;
* constants["pi"] = numbers::PI;
*
- * // TensorFunctionParser with 2+1 variables (space + time) in 2D of rank 2.
+ * // TensorFunctionParser with 2+1 variables (space + time) in 2d of rank 2.
* // It is necessary to tell the parser that there is an additional variable
* // to be taken into account (t).
* TensorFunctionParser<2,2> tfp;
template <int>
struct types;
- // these struct mimics p4est for 1D
+ // this struct mimics p4est for 1d
template <>
struct types<1>
{
if (polynomial_degree <= 1)
return output;
- // fill the 1D interior weights
+ // fill the 1d interior weights
QGaussLobatto<1> quadrature(polynomial_degree + 1);
output[0].reinit(polynomial_degree - 1,
GeometryInfo<1>::vertices_per_cell);
{
Connectivity<T> connectivity(dim, cell_t_id);
- CRS<T> temp1; // needed for 3D
+ CRS<T> temp1; // needed for 3d
if (dim == 1)
connectivity.entity_to_entities(1, 0) = con_cv;
// This specialization is defined here so that the general template in the
- // source file doesn't need to have further 1D overloads for the internal
+ // source file doesn't need to have further 1d overloads for the internal
// functions it calls.
template <>
inline Triangulation<1, 1>::DistortedCellList
{
using namespace internal::ProjectToObject;
// Try to use the special flat algorithm for quads (this is better
- // than the general algorithm in 3D). This does not take into account
+ // than the general algorithm in 3d). This does not take into account
// whether projected_point is outside the quad, but we optimize along
// lines below anyway:
const int dim = Iterator::AccessorType::dimension;
// function we can use gradient descent to minimize it.
//
// Of course, this is much simpler in the structdim = 1 case (we
- // could rewrite the projection as a 1D optimization problem), but
+ // could rewrite the projection as a 1d optimization problem), but
// to reduce the potential for bugs we use the same code in both
// cases.
const double step_size = object->diameter() / 64.0;
static std::array<unsigned int, 4>
get_line_indices_of_cell(const TriaAccessor<2, dim, spacedim> &cell)
{
- // For 2D cells the access cell->line_orientation() is already
+ // For 2d cells, accessing cell->line_orientation() is already
// efficient
std::array<unsigned int, 4> line_indices = {};
for (unsigned int line : cell.line_indices())
static std::array<bool, 4>
get_line_orientations_of_cell(const TriaAccessor<2, dim, spacedim> &cell)
{
- // For 2D cells the access cell->line_orientation() is already
+ // For 2d cells, accessing cell->line_orientation() is already
// efficient
std::array<bool, 4> line_orientations = {};
for (unsigned int line : cell.line_indices())
switch (structdim)
{
case 1:
- // no anisotropic refinement in 1D
+ // no anisotropic refinement in 1d
return child(i);
case 2:
const unsigned int n_dofs = fe.dofs_per_cell;
const unsigned int t_dofs = fetest.dofs_per_cell;
AssertDimension(fe.get_fe().n_components(), dim);
- // There should be the right number of components (3 in 3D, otherwise 1)
+ // There should be the right number of components (3 in 3d, otherwise 1)
// for the curl.
AssertDimension(fetest.get_fe().n_components(), (dim == 3) ? dim : 1);
AssertDimension(M.m(), t_dofs);
/* clang-format off */
// We are limiting the number of threads according to the
// following formulas:
- // - in 2D: `threads = cells * (k+1)^d <= 4*CUDAWrappers::warp_size`
- // - in 3D: `threads = cells * (k+1)^d <= 2*CUDAWrappers::warp_size`
+ // - in 2d: `threads = cells * (k+1)^d <= 4*CUDAWrappers::warp_size`
+ // - in 3d: `threads = cells * (k+1)^d <= 2*CUDAWrappers::warp_size`
return dim==2 ? (fe_degree==1 ? CUDAWrappers::warp_size : // 128
fe_degree==2 ? CUDAWrappers::warp_size/4 : // 72
fe_degree==3 ? CUDAWrappers::warp_size/8 : // 64
eval0.template hessians<0, true, false>(values_dofs,
hessians_quad);
- // advance the next component in 1D array
+ // advance the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += n_q_points;
if (evaluation_flag & EvaluationFlags::values)
eval1.template values<1, true, false>(temp1, values_quad);
- // advance to the next component in 1D array
+ // advance to the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += 2 * n_q_points;
if (evaluation_flag & EvaluationFlags::values)
eval2.template values<2, true, false>(temp2, values_quad);
- // advance to the next component in 1D array
+ // advance to the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += 3 * n_q_points;
values_dofs);
}
- // advance to the next component in 1D array
+ // advance to the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += n_q_points;
eval0.template gradients<0, false, true>(temp1, values_dofs);
}
- // advance to the next component in 1D array
+ // advance to the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += 2 * n_q_points;
eval0.template values<0, false, true>(temp2, values_dofs);
}
- // advance to the next component in 1D array
+ // advance to the next component in 1d array
values_dofs += dofs_per_comp;
values_quad += n_q_points;
gradients_quad += 3 * n_q_points;
constraint_mask_sorted,
v);
- if (dim == 2) // 2D: only faces
+ if (dim == 2) // 2d: only faces
{
const bool subcell_x = (mask >> 0) & 1;
const bool subcell_y = (mask >> 1) & 1;
values); // face 1
}
}
- else if (dim == 3) // 3D faces and edges
+ else if (dim == 3) // 3d faces and edges
{
const bool type_x = (mask >> 0) & 1;
const bool type_y = (mask >> 1) & 1;
if (dim == 2)
{
if (edge > 0)
- return false; // in 2D there are no edge constraints
+ return false; // in 2d there are no edge constraints
if (subcell == 0 && face == 0)
return true; // no constraints
const unsigned int n_raw_lines = triangulation.n_raw_lines();
this->line_to_cells.resize(n_raw_lines);
- // In 3D, we can have DoFs on only an edge being constrained (e.g. in a
+ // In 3d, we can have DoFs on only an edge being constrained (e.g. in a
// cartesian 2x2x2 grid, where only the upper left 2 cells are refined).
// This sets up a helper data structure in the form of a mapping from
// edges (i.e. lines) to neighboring cells.
// the gradient of the inverse is given by (multidimensional calculus) -
// J * (J * L) * J (the third J is because we need to transform the
// gradient L from the unit to the real cell, and then apply the inverse
- // Jacobian). Compare this with 1D with j(x) = 1/k(phi(x)), where j =
+ // Jacobian). Compare this with 1d with j(x) = 1/k(phi(x)), where j =
// phi' is the inverse of the jacobian and k is the derivative of the
// jacobian on the unit cell. Then j' = phi' k'/k^2 = j k' j^2.
template <int dim, typename Number>
q_point,
0)[direction][direction];
- // evaluate basis functions on the two 1D subfaces (i.e.,
+ // evaluate basis functions on the two 1d subfaces (i.e.,
// at the positions divided by one half and shifted by one
// half, respectively) for hanging nodes
q_point[direction] *= 0.5;
q_point,
0)[direction][direction];
}
- // evaluate basis functions on the 1D faces, i.e., in zero and
+ // evaluate basis functions on the 1d faces, i.e., in zero and
// one
Point<dim> q_point = unit_point;
q_point[direction] = 0;
// note: we cannot write `univariate_shape_data.quadrature = quad`,
// since the quadrature rule within UnivariateShapeData expects
- // a 1D quadrature rule. However, in this case we are not able to
+ // a 1d quadrature rule. However, in this case we are not able to
// define that rule anyway so other code cannot use this information.
univariate_shape_data.fe_degree = fe.degree;
scalar_lexicographic,
lexicographic_numbering);
- // to evaluate 1D polynomials, evaluate along the line with the first
+ // to evaluate 1d polynomials, evaluate along the line with the first
// unit support point, assuming that fe.shape_value(0,unit_point) ==
- // 1. otherwise, need other entry point (e.g. generating a 1D element
+ // 1. otherwise, need other entry point (e.g. generating a 1d element
// by reading the name, as done before r29356)
if (fe.has_support_points())
unit_point = fe.get_unit_support_points()[scalar_lexicographic[0]];
shape_hessians[i * n_q_points_1d + q] =
fe.shape_grad_grad(my_i, q_point)[0][0];
- // evaluate basis functions on the two 1D subfaces (i.e., at the
+ // evaluate basis functions on the two 1d subfaces (i.e., at the
// positions divided by one half and shifted by one half,
// respectively)
q_point[0] *= 0.5;
fe.shape_grad_grad(my_i, q_point)[0][0];
}
- // evaluate basis functions on the 1D faces, i.e., in zero and one
+ // evaluate basis functions on the 1d faces, i.e., in zero and one
Point<dim> q_point = unit_point;
q_point[0] = 0;
shape_data_on_face[0][i] = fe.shape_value(my_i, q_point);
}
}
- // face orientation for faces in 3D
+ // face orientation for faces in 3d
// (similar to MappingInfoStorage::QuadratureDescriptor::initialize)
if (dim == 3)
{
case 1:
++in;
++out;
- // faces 2 and 3 in 3D use local coordinate system zx, which
+ // faces 2 and 3 in 3d use local coordinate system zx, which
// is the other way around compared to the tensor
// product. Need to take that into account.
if (dim == 3)
case 1:
++in;
++out;
- // faces 2 and 3 in 3D use local coordinate system zx, which
+ // faces 2 and 3 in 3d use local coordinate system zx, which
// is the other way around compared to the tensor
// product. Need to take that into account.
if (dim == 3)
- // In this case, the 1D shape values read (sorted lexicographically, rows
- // run over 1D dofs, columns over quadrature points):
+ // In this case, the 1d shape values read (sorted lexicographically, rows
+ // run over 1d dofs, columns over quadrature points):
// Q2 --> [ 0.687 0 -0.087 ]
// [ 0.4 1 0.4 ]
// [-0.087 0 0.687 ]
// For the specialized loop used for the gradient computation in
- // here, the 1D shape values read (sorted lexicographically, rows
- // run over 1D dofs, columns over quadrature points):
+ // here, the 1d shape values read (sorted lexicographically, rows
+ // run over 1d dofs, columns over quadrature points):
// Q2 --> [-2.549 -1 0.549 ]
// [ 3.098 0 -3.098 ]
// [-0.549 1 2.549 ]
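+ // (For reference: the first matrix lists the values of the three Q2 shape
+ // functions at the three Gauss quadrature points, the second matrix their
+ // first derivatives at the same points.)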
case 1:
++in;
++out;
- // faces 2 and 3 in 3D use local coordinate system zx, which
+ // faces 2 and 3 in 3d use local coordinate system zx, which
// is the other way around compared to the tensor
// product. Need to take that into account.
if (dim == 3)
AssertIndexRange(n_shapes, 200);
dealii::ndarray<Number2, 200, 2, dim> shapes;
- // Evaluate 1D polynomials and their derivatives
+ // Evaluate 1d polynomials and their derivatives
std::array<Number2, dim> point;
for (unsigned int d = 0; d < dim; ++d)
point[d] = p[d];
AssertIndexRange(n_shapes, 200);
dealii::ndarray<Number2, 200, 3, dim> shapes;
- // Evaluate 1D polynomials and their derivatives
+ // Evaluate 1d polynomials and their derivatives
std::array<Number2, dim> point;
for (unsigned int d = 0; d < dim; ++d)
point[d] = p[d];
AssertIndexRange(n_shapes, 200);
dealii::ndarray<Number, 200, 2, dim> shapes;
- // Evaluate 1D polynomials and their derivatives
+ // Evaluate 1d polynomials and their derivatives
std::array<Number, dim> point;
for (unsigned int d = 0; d < dim; ++d)
point[d] = p[d];
const unsigned int patch_dim =
(info.face_number == numbers::invalid_unsigned_int) ? dim : (dim - 1);
const unsigned int row_length = n_points;
- // If patches are 1D, end the
+ // If patches are 1d, end the
// patch after a row, else end
// it after a square
const unsigned int row_length2 =
AssertIndexRange(0, n_overlap);
AssertThrow(is_dg == false, ExcNotImplemented());
- // 2) loop over all dimensions and create 1D mass and stiffness
+ // 2) loop over all dimensions and create 1d mass and stiffness
// matrices so that boundary conditions and overlap are considered
const unsigned int n_dofs_1D = M_ref.n();
std::vector<bool> & dofs_processed)
{
// This function computes the L2-projection of the given
- // boundary function on 3D edges and returns the constraints
+ // boundary function on 3d edges and returns the constraints
// associated with the edge functions for the given cell.
//
// In the context of this function, by associated DoFs we mean:
std::vector<bool> & dofs_processed)
{
// This function computes the L2-projection of the boundary
- // function on the interior of faces only. In 3D, this should only be
+ // function on the interior of faces only. In 3d, this should only be
// called after first calling compute_edge_projection_l2, as it relies on
// edge constraints which are found.
{
case 2:
// NOTE: This is very similar to compute_edge_projection as used in
- // 3D,
+ // 3d,
// and contains a lot of overlap with that function.
{
// Find the DoFs we want to constrain. There are degree+1 in
// \int_{edge} (tangential* boundary_value) * (tangential *
// edge_shape_function_i) dS.
//
- // In 2D, tangential*vector is equivalent to
+ // In 2d, tangential*vector is equivalent to
// cross_product_3d(normal, vector), so we use this instead.
// This avoids possible issues with the computation of the
// tangent.
// Storage for the linear system.
// There are 2*degree*(degree+1) DoFs associated with a face in
- // 3D. Note this doesn't include the DoFs associated with edges on
+ // 3d. Note this doesn't include the DoFs associated with edges on
// that face.
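+ // (For example, degree = 2 gives 2 * 2 * 3 = 12 DoFs per face, so the
+ // matrices below are 12x12.)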
FullMatrix<number> face_matrix(2 * degree * (degree + 1));
FullMatrix<number> face_matrix_inv(2 * degree * (degree + 1));
AffineConstraints<number> & constraints,
const hp::MappingCollection<dim, dim> &mapping_collection)
{
- // L2-projection based interpolation formed in one (in 2D) or two (in 3D)
+ // L2-projection based interpolation formed in one (in 2d) or two (in 3d)
// steps.
//
- // In 2D we only need to constrain edge DoFs.
+ // In 2d we only need to constrain edge DoFs.
//
- // In 3D we need to constrain both edge and face DoFs. This is done in two
+ // In 3d we need to constrain both edge and face DoFs. This is done in two
// parts.
//
// For edges, since the face shape functions are zero here ("bubble
// functions"), we project the tangential component of the boundary
// function and compute the L2-projection. This returns the values for the
- // DoFs associated with each edge shape function. In 3D, this is computed
- // by internals::compute_edge_projection_l2, in 2D, it is handled by
+ // DoFs associated with each edge shape function. In 3d, this is computed
+ // by internals::compute_edge_projection_l2, in 2d, it is handled by
// compute_face_projection_curl_conforming_l2.
//
// For faces we compute the residual of the boundary function which is
}
// Compute the projection of the boundary function
- // on the edge. In 2D this is all that's required.
+ // on the edge. In 2d this is all that's required.
compute_face_projection_curl_conforming_l2(
cell,
face,
Assert(a1 > 0., ExcInternalError());
f_prev = f;
- // 1D line-search function
+ // 1d line-search function
const auto line_func =
[&](const Number &x_line) -> std::pair<Number, Number> {
x = x0;
for (unsigned int i = 0; i < size; ++i)
{
// ArborX assumes that the center coordinates and the radius use float
- // and the sphere is 3D
+ // and the sphere is 3d
spheres.emplace_back(std::make_pair(
dealii::Point<3, float>(
static_cast<float>(dim_spheres[i].first[0]),
std::size_t i)
{
// ArborX assumes that the point coordinates use float and that the point
- // is 3D
+ // is 3d
return {static_cast<float>(v[i][0]),
static_cast<float>(v[i][1]),
dim == 2 ? 0 : static_cast<float>(v[i][2])};
const dealii::Point<dim, Number> min_corner = boundary_points.first;
const dealii::Point<dim, Number> max_corner = boundary_points.second;
// ArborX assumes that the bounding box coordinates use float and that the
- // bounding box is 3D
+ // bounding box is 3d
return {{static_cast<float>(min_corner[0]),
static_cast<float>(min_corner[1]),
dim == 2 ? 0.f : static_cast<float>(min_corner[2])},
std::size_t i)
{
// ArborX assumes that the center coordinates and the radius use float and
- // the sphere is 3D
+ // the sphere is 3d
return {{static_cast<float>(v[i].first[0]),
static_cast<float>(v[i].first[1]),
dim == 2 ? 0 : static_cast<float>(v[i].first[2])},
{
unsigned int new_dim;
- // HDF5/XDMF output only supports 1D or 3D output, so force rearrangement if
+ // HDF5/XDMF output only supports 1d or 3d output, so force rearrangement if
// needed
if (flags.xdmf_hdf5_output && dimension != 1)
new_dim = 3;
#else
- // Tecplot binary output only good for 2D & 3D
+ // Tecplot binary output only good for 2d & 3d
if (dim == 1)
{
write_tecplot(patches, data_names, nonscalar_data_ranges, flags, out);
const unsigned int size = last_component - first_component + 1;
if (size == 1)
- // 1D, 1 element
+ // 1d, 1 element
{
vtk_data[0][0] = data_vectors(first_component, n);
}
else if (size == 4)
- // 2D, 4 elements
+ // 2d, 4 elements
{
for (unsigned int c = 0; c < size; ++c)
{
}
}
else if (size == 9)
- // 3D 9 elements
+ // 3d, 9 elements
{
for (unsigned int c = 0; c < size; ++c)
{
for (i = 0; i < data_filter.n_data_sets(); ++i)
{
// Allocate space for the point data
- // Must be either 1D or 3D
+ // Must be either 1d or 3d
const unsigned int pt_data_vector_dim = data_filter.get_data_set_dim(i);
vector_name = data_filter.get_data_set_name(i);
<< attribute_dim.first << "\" AttributeType=\""
<< (attribute_dim.second > 1 ? "Vector" : "Scalar")
<< "\" Center=\"Node\">\n";
- // Vectors must have 3 elements even for 2D models
+ // Vectors must have 3 elements even for 2d models
ss << indent(indent_level + 2) << "<DataItem Dimensions=\"" << num_nodes
<< " " << (attribute_dim.second > 1 ? 3 : 1)
<< "\" NumberType=\"Float\" Precision=\"8\" Format=\"HDF\">\n";
const SymmetricTensor<2, dim> hessian = function.hessian(center, component);
// Deviation from function value at the center, based on the
- // Taylor-expansion: |f'| * dx + 1/2 * |f''| * dx^2, (in 1D). dx is half
+ // Taylor-expansion: |f'| * dx + 1/2 * |f''| * dx^2, (in 1d). dx is half
// the side-length of the box.
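+ // (In dim dimensions the analogous bound sums |df/dx_i| * dx_i over i and
+ // 1/2 * |d^2f/dx_i dx_j| * dx_i * dx_j over all i, j.)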
double taylor_bound_f = 0;
taylor_bound_f += std::abs(gradient[i]) * dx_i;
// Deviation from value of df/dx_i at the center,
- // |f''| * dx, (in 1D).
+ // |f''| * dx, (in 1d).
double taylor_bound_dfdxi = 0;
for (unsigned int j = 0; j < dim; ++j)
// we add ranges in a consecutive way, so fast), otherwise, we work with
// add_range(). the number 9 is chosen heuristically given the fact that
// there are typically up to 8 independent ranges when adding the degrees of
- // freedom on a 3D cell or 9 when adding degrees of freedom of faces. if
+ // freedom on a 3d cell or 9 when adding degrees of freedom of faces. if
// doing cell-by-cell additions, we want to avoid repeated calls to
// IndexSet::compress() which gets called upon merging two index sets, so we
// want to be in the other branch then.
get_degree(
const std::vector<typename BarycentricPolynomials<dim>::PolyType> &polys)
{
- // Since the first variable in a simplex polynomial is, e.g., in 2D,
+ // Since the first variable in a simplex polynomial is, e.g., in 2d,
//
// t0 = 1 - x - y
//
for (unsigned int d = 0; d < dim; ++d)
pols.push_back(bubble_shapes);
- // In 2D, the only q_ij polynomials we will use are 31,32,13,23
+ // In 2d, the only q_ij polynomials we will use are 31,32,13,23
// where ij corresponds to index (i-1)+3*(j-1) (2,5,6,7)
- // In 3D, the only q_ijk polynomials we will use are 331,332,313,323,133,233
+ // In 3d, the only q_ijk polynomials we will use are 331,332,313,323,133,233
// where ijk corresponds to index (i-1)+3*(j-1)+9*(k-1) (8,17,20,23,24,25)
return pols;
}
if (dim == 2 || dim == 3)
return dim * GeometryInfo<dim>::vertices_per_cell +
GeometryInfo<dim>::faces_per_cell;
- // 2*4+4=12 polynomials in 2D and 3*8+6=30 polynomials in 3D
+ // 2*4+4=12 polynomials in 2d and 3*8+6=30 polynomials in 3d
Assert(false, ExcNotImplemented());
return 0;
// optimal for Cholesky factorization. LQ and RQ factorizations take
// advantage of “tall” grids (Pr > Pc )
- // Below we always try to create 2D processor grids:
+ // Below we always try to create 2d processor grids:
const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);
: QSimplex<dim>(Quadrature<dim>())
{
Assert(1 <= dim && dim <= 3, ExcNotImplemented());
- // Just use Gauss in 1D: this is a high-order open rule so this is a
+ // Just use Gauss in 1d: this is a high-order open rule so this is a
// reasonable equivalent for generic programming.
if (dim == 1)
{
b_point_permutations.push_back(all_permutations(b_point));
};
- // Apply a Barycentric permutation where two points (in 3D) are different.
+ // Apply a Barycentric permutation where two points (in 3d) are different.
// Equivalent to s22 in quadpy.
auto process_point_2 = [&](const double a, const double w) {
Assert(dim == 3, ExcInternalError());
case 2:
if (use_odd_order)
{
- // WV-1, 2D
+ // WV-1, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(1.0000000000000000e+00);
}
else
{
- // WV-2, 2D
+ // WV-2, 2d
process_point_1(1.6666666666666669e-01,
3.3333333333333331e-01);
}
case 3:
if (use_odd_order)
{
- // WV-1, 3D
+ // WV-1, 3d
b_point_permutations.push_back({centroid});
b_weights.push_back(1.0000000000000000e+00);
}
else
{
- // WV-2, 3D
+ // WV-2, 3d
process_point_1(1.3819660112501050e-01,
2.5000000000000000e-01);
}
switch (dim)
{
case 2:
- // WV-4 in both cases (no WV-3 in 2D)
+ // WV-4 in both cases (no WV-3 in 2d)
process_point_1(9.1576213509770743e-02, 1.0995174365532187e-01);
process_point_1(4.4594849091596489e-01, 2.2338158967801147e-01);
break;
case 3:
if (use_odd_order)
{
- // WV-3, 3D
+ // WV-3, 3d
process_point_1(3.2816330251638171e-01,
1.3621784253708741e-01);
process_point_1(1.0804724989842859e-01,
}
else
{
- // WV-5 (no WV-4 in 3D)
+ // WV-5 (no WV-4 in 3d)
Quadrature<dim>::operator=(QWitherdenVincentSimplex<dim>(3));
}
break;
case 2:
if (use_odd_order)
{
- // WV-5, 2D
+ // WV-5, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(2.2500000000000001e-01);
process_point_1(1.0128650732345634e-01,
}
else
{
- // WV-6, 2D
+ // WV-6, 2d
process_point_1(6.3089014491502227e-02,
5.0844906370206819e-02);
process_point_1(2.4928674517091043e-01,
case 3:
if (use_odd_order)
{
- // WV-5, 3D
+ // WV-5, 3d
process_point_1(3.1088591926330061e-01,
1.1268792571801590e-01);
process_point_1(9.2735250310891248e-02,
}
else
{
- // WV-6, 3D
+ // WV-6, 3d
process_point_1(4.0673958534611372e-02,
1.0077211055320640e-02);
process_point_1(3.2233789014227548e-01,
case 2:
if (use_odd_order)
{
- // WV-7, 2D
+ // WV-7, 2d
process_point_1(3.3730648554587850e-02,
1.6545050110792131e-02);
process_point_1(4.7430969250471822e-01,
}
else
{
- // WV-8, 2D
+ // WV-8, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(1.4431560767778717e-01);
process_point_1(5.0547228317030957e-02,
case 3:
if (use_odd_order)
{
- // WV-7, 3D
+ // WV-7, 3d
b_point_permutations.push_back({centroid});
b_weights.push_back(9.5485289464130846e-02);
process_point_1(3.1570114977820279e-01,
}
else
{
- // WV-8, 3D
+ // WV-8, 3d
process_point_1(1.0795272496221089e-01,
2.6426650908408830e-02);
process_point_1(1.8510948778258660e-01,
case 2:
if (use_odd_order)
{
- // WV-9, 2D
+ // WV-9, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(9.7135796282798836e-02);
process_point_1(4.4729513394452691e-02,
}
else
{
- // WV-10, 2D
+ // WV-10, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(8.1743329146285973e-02);
process_point_1(3.2055373216943517e-02,
case 3:
if (use_odd_order)
{
- // WV-9, 3D
+ // WV-9, 3d
b_point_permutations.push_back({centroid});
b_weights.push_back(5.8010548912480253e-02);
process_point_1(6.1981697552226933e-10,
}
else
{
- // WV-10, 3D
+ // WV-10, 3d
b_point_permutations.push_back({centroid});
b_weights.push_back(4.7399773556020743e-02);
process_point_1(3.1225006869518868e-01,
}
break;
case 6:
- // There is no WV-11 rule in 3D yet
+ // There is no WV-11 rule in 3d yet
Assert(dim == 2, ExcNotImplemented());
if (use_odd_order)
{
- // WV-11, 2D
+ // WV-11, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(8.5761179732224219e-02);
process_point_1(2.8485417614371900e-02, 1.0431870512894697e-02);
}
else
{
- // WV-12, 2D
+ // WV-12, 2d
process_point_1(2.4646363436335583e-02, 7.9316425099736389e-03);
process_point_1(4.8820375094554153e-01, 2.4266838081452032e-02);
process_point_1(1.0925782765935427e-01, 2.8486052068877544e-02);
}
break;
case 7:
- // There is no WV-13 rule in 3D yet
+ // There is no WV-13 rule in 3d yet
Assert(dim == 2, ExcNotImplemented());
if (use_odd_order)
{
- // WV-13, 2D
+ // WV-13, 2d
b_point_permutations.push_back({centroid});
b_weights.push_back(6.7960036586831640e-02);
process_point_1(2.1509681108843159e-02, 6.0523371035391717e-03);
}
else
{
- // WV-14, 2D
+ // WV-14, 2d
process_point_1(1.9390961248701044e-02, 4.9234036024000819e-03);
process_point_1(6.1799883090872587e-02, 1.4433699669776668e-02);
process_point_1(4.8896391036217862e-01, 2.1883581369428889e-02);
if (update_4th_derivatives)
n_values_and_derivatives = 5;
- // Compute the values (and derivatives, if necessary) of all 1D polynomials
+ // Compute the values (and derivatives, if necessary) of all 1d polynomials
// at this evaluation point. We need to compute dim*n_polynomials
// evaluations, involving an evaluation of each polynomial for each
// coordinate direction. Once we have those values, we perform the
// (Zoltan_HSFC_InvHilbertXd)
// https://github.com/aditi137/Hilbert/blob/master/Hilbert/hilbert.cpp
- // now we can map to 1D coordinate stored in Transpose format
+ // now we can map to 1d coordinate stored in Transpose format
// adopt AxestoTranspose function from the paper, that
// transforms in-place between geometrical axes and Hilbert transpose.
// Example: b=5 bits for each of n=3 coordinates.
// Collection of utilities that compute intersection between simplices
// identified by array of points. The return type is the one of
// CGAL::intersection(), i.e. a std_cxx17::optional<std_cxx17::variant<>>.
- // Intersection between 2D and 3D objects and 1D/3D objects are available
+ // Intersections between 2d and 3d objects and 1d/3d objects are available
// only with CGAL versions greater or equal than 5.5, hence the
// corresponding functions are guarded by #ifdef directives. All the
// signatures follow the convention that the first entity has an intrinsic
// periodicity, e.g. when finding the maximum cell level around a
// vertex.
//
- // Example: On a 3D cell with vertices numbered from 0 to 7 and
+ // Example: On a 3d cell with vertices numbered from 0 to 7 and
// periodic boundary conditions in x direction, the vector
// topological_vertex_numbering will contain the numbers
// {0,0,2,2,4,4,6,6} (because the vertex pairs {0,1}, {2,3}, {4,5},
// mentioned above)
// TODO: The check for 'dim==2' was inserted by intuition. It
// fixes
- // the previous problems with step-27 in 3D. But an
+ // the previous problems with step-27 in 3d. But an
// explanation for this is still required, and what we do here
// is not what we describe in the paper!
if ((unique_sets_of_dofs == 2) && (dim == 2))
// mentioned above)
// TODO: The check for 'dim==2' was inserted by intuition. It
// fixes
- // the previous problems with step-27 in 3D. But an
+ // the previous problems with step-27 in 3d. But an
// explanation for this is still required, and what we do here
// is not what we describe in the paper!
if ((unique_sets_of_dofs == 2) && (dim == 2))
{
const auto &r = dofs_on_objects[a];
if (a == 10 || a == 16)
- // switch order x-z for y faces in 3D to lexicographic
+ // switch order x-z for y faces in 3d to lexicographic
// layout
for (unsigned int i1 = 0; i1 < nn; ++i1)
for (unsigned int i0 = 0; i0 < nn; ++i0)
// now add the DoF on the adjacent ghost cells to the IndexSet
- // Note: For certain meshes (in particular in 3D and with many
+ // Note: For certain meshes (in particular in 3d and with many
// processors), it is really necessary to cache intermediate data. After
// trying several objects such as std::set, a vector that is always kept
// sorted, and a vector that is initially unsorted and sorted once at the
// add the DoF on the adjacent ghost cells to the IndexSet
- // Note: For certain meshes (in particular in 3D and with many
+ // Note: For certain meshes (in particular in 3d and with many
// processors), it is really necessary to cache intermediate data. After
// trying several objects such as std::set, a vector that is always kept
// sorted, and a vector that is initially unsorted and sorted once at the
Assert(!face_1->has_children(), ExcInternalError());
// Important note:
- // In 3D we have to take care of the fact that face_rotation gives
+ // In 3d we have to take care of the fact that face_rotation gives
// the relative rotation of face_1 to face_2, i.e. we have to invert
// the rotation when constraining face_2 to face_1. Therefore
// face_flip has to be toggled if face_rotation is true: In case of
cell->neighbor_face_no(face_n);
- // In 1D, go straight to the cell behind this
+ // In 1d, go straight to the cell behind this
// particular cell's most terminal cell. This makes us
// skip the if (neighbor->has_children()) section
// below. We need to do this since we otherwise
// iterate over the children of the face, which are
- // always 0 in 1D.
+ // always 0 in 1d.
if (dim == 1)
while (neighbor->has_children())
neighbor = neighbor->child(face_n == 0 ? 1 : 0);
neighbor->is_locally_owned())
continue; // (the neighbor is finer)
- // In 1D, go straight to the cell behind this
+ // In 1d, go straight to the cell behind this
// particular cell's most terminal cell. This makes us
// skip the if (neighbor->has_children()) section
// below. We need to do this since we otherwise
// iterate over the children of the face, which are
- // always 0 in 1D.
+ // always 0 in 1d.
if (dim == 1)
while (neighbor->has_children())
neighbor = neighbor->child(face == 0 ? 1 : 0);
const bool face_flip,
const bool face_rotation) const
{
- // general template for 1D and 2D: not
+ // general template for 1d and 2d: not
// implemented. in fact, the function
// shouldn't even be called unless we are
// in 3d, so throw an internal error
const unsigned int index,
const bool line_orientation) const
{
- // general template for 1D and 2D: do
+ // general template for 1d and 2d: do
// nothing. Do not throw an Assertion,
// however, in order to allow to call this
- // function in 2D as well
+ // function in 2d as well
if (dim < 3)
return index;
void
FE_ABF<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
// Additional functionality for the ABF elements
// TODO: Here the canonical extension of the principle
// behind the ABF elements is implemented. It is unclear,
- // if this really leads to the ABF spaces in 3D!
+ // if this really leads to the ABF spaces in 3d!
interior_weights_abf.reinit(TableIndices<3>(cell_quadrature.size(),
polynomials_abf[0]->n() * dim,
dim));
for (unsigned int fp = 0; fp < n_face_points; ++fp)
{
// TODO: Check what the face_orientation, face_flip and face_rotation
- // have to be in 3D
+ // have to be in 3d
unsigned int k = QProjector<dim>::DataSetDescriptor::face(
this->reference_cell(), face, false, false, false, n_face_points);
for (unsigned int i = 0; i < boundary_weights_abf.size(1); ++i)
void
FE_BDM<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
unsigned int pbase = 0;
for (auto f : GeometryInfo<dim>::face_indices())
{
- // Old version with no moments in 2D. See comment below in
+ // Old version with no moments in 2d. See comment below in
// initialize_support_points()
if (test_values_face.size() == 0)
{
// cell. First on the faces, we have to test polynomials of degree
// up to deg, which means we need dg+1 points in each direction. The
// fact that we do not have tensor product polynomials will be
- // considered later. In 2D, we can use point values.
+ // considered later. In 2d, we can use point values.
QGauss<dim - 1> face_points(deg + 1);
// TODO: the implementation makes the assumption that all faces have the
this->n_dofs_per_face(face_no)));
// Currently, for backward compatibility, we do not use moments, but
- // point values on faces in 2D. In 3D, this is impossible, since the
+ // point values on faces in 2d. In 3d, this is impossible, since the
// moments are only taken with respect to PolynomialsP.
if (dim > 2)
internal::FE_BDM::initialize_test_values(test_values_face,
void
FE_BernardiRaugel<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
// compute the number of unknowns per cell interior/face/edge
//
// there are <tt>dim</tt> degrees of freedom per vertex and there
- // is 1 degree of freedom per edge in 2D (face in 3D)
+ // is 1 degree of freedom per edge in 2d (face in 3d)
std::vector<unsigned int> dpo(dim + 1, 0u);
dpo[0] = dim;
dpo[dim - 1] = 1u;
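+ // e.g. this yields 2*4+4 = 12 DoFs per cell in 2d and 3*8+6 = 30 in 3d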
// support points.
// TODO: Verify that all faces are the same with respect to
// these support points. Furthermore, check if something has to
- // be done for the face orientation flag in 3D.
+ // be done for the face orientation flag in 3d.
const Quadrature<dim> subface_quadrature =
subface == numbers::invalid_unsigned_int ?
QProjector<dim>::project_to_face(this->reference_cell(),
void
FE_Nedelec<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
// For a given cell we have:
// n_line_dofs = dofs_per_line*lines_per_cell.
// n_face_dofs = dofs_per_face*faces_per_cell.
- // n_cell_dofs = dofs_per_quad (2D)
- // = dofs_per_hex (3D)
+ // n_cell_dofs = dofs_per_quad (2d)
+ // = dofs_per_hex (3d)
//
// i.e. For the local dof numbering:
// the first line dof is 0,
// This is simple enough as there is only 1 lowest order and
// degree higher orders DoFs per line.
//
- // On a 2D cell, we have 3 types: Type 1/2/3:
+ // On a 2d cell, we have 3 types: Type 1/2/3:
// - The ordering done by type:
// - Type 1: 0 <= i1,j1 < degree. degree^2 in total.
// Numbered: ij1 = i1 + j1*(degree). i.e. cell_dof_index
// sigma_imj_sign[i][j].
//
// Note that not every i,j combination is a valid edge (there are only
- // 12 valid edges in 3D), but we compute them all as it simplifies
+ // 12 valid edges in 3d), but we compute them all as it simplifies
// things.
// store the sign of each component x, y, z in the sigma list.
// This function handles the cell-dependent construction of the EDGE-based
// shape functions.
//
- // Note it will handle both 2D and 3D, in 2D, the edges are faces, but we
+ // Note that it handles both 2d and 3d: in 2d the edges are faces, but we
// handle them here.
//
// It will fill in the missing parts of fe_data which were not possible to
// This function handles the cell-dependent construction of the FACE-based
// shape functions.
//
- // Note that it should only be called in 3D.
+ // Note that it should only be called in 3d.
Assert(dim == 3, ExcDimensionMismatch(dim, 3));
//
// It will fill in the missing parts of fe_data which were not possible to
AssertDimension(quadrature.size(), 1);
// Note for future improvement:
- // We don't have the full quadrature - should use QProjector to create the 2D
+ // We don't have the full quadrature - should use QProjector to create the 2d
// quadrature.
//
// For now I am effectively generating all of the shape function vals/grads,
// where the objects inside the vector refer to:
// 0 = vertex
// 1 = edge
- // 2 = face (which is a cell in 2D)
+ // 2 = face (which is a cell in 2d)
// 3 = cell
std::vector<unsigned int> dpo;
const std::vector<MappingKind> &,
std::vector<double> &)
{
- // Nothing to do in 1D.
+ // Nothing to do in 1d.
}
// TODO: This function is not a consistent fix of the orientation issue
- // like in 3D. It is rather kept not to break legacy behavior in 2D but
+ // like in 3d. Rather, it is kept so as not to break legacy behavior in 2d but
// should be replaced. See also the implementation of
// FE_RaviartThomas<dim>::initialize_quad_dof_index_permutation_and_sign_change()
// or other H(div) conforming elements such as FE_ABF<dim> and
const std::vector<MappingKind> & /*mapping_kind*/,
std::vector<double> & /*face_sign*/)
{
- // Nothing to do. In 3D we take care of it through the
+ // Nothing to do. In 3d we take care of it through the
// adjust_quad_dof_sign_for_face_orientation_table
}
const bool face_flip,
const bool face_rotation) const
{
- // do nothing in 1D and 2D
+ // do nothing in 1d and 2d
if (dim < 3)
return false;
this->mapping_kind,
fe_data.dof_sign_change);
- // TODO: This, similarly to the Nedelec case, is just a legacy function in 2D
- // and affects only face_dofs of H(div) conformal FEs. It does nothing in 1D.
- // Also nothing in 3D since we take care of it by using the
+ // TODO: This, similarly to the Nedelec case, is just a legacy function in 2d
+ // and affects only face_dofs of H(div) conforming FEs. It does nothing in 1d.
+ // Also nothing in 3d since we take care of it by using the
// adjust_quad_dof_sign_for_face_orientation_table.
internal::FE_PolyTensor::get_dof_sign_change_h_div(cell,
*this,
this->mapping_kind,
fe_data.dof_sign_change);
- // TODO: This, similarly to the Nedelec case, is just a legacy function in 2D
- // and affects only face_dofs of H(div) conformal FEs. It does nothing in 1D.
- // Also nothing in 3D since we take care of it by using the
+ // TODO: This, similarly to the Nedelec case, is just a legacy function in 2d
+ // and affects only face_dofs of H(div) conforming FEs. It does nothing in 1d.
+ // Also nothing in 3d since we take care of it by using the
// adjust_quad_dof_sign_for_face_orientation_table.
internal::FE_PolyTensor::get_dof_sign_change_h_div(cell,
*this,
this->mapping_kind,
fe_data.dof_sign_change);
- // TODO: This, similarly to the Nedelec case, is just a legacy function in 2D
- // and affects only face_dofs of H(div) conformal FEs. It does nothing in 1D.
- // Also nothing in 3D since we take care of it by using the
+ // TODO: This, similarly to the Nedelec case, is just a legacy function in 2d
+ // and affects only face_dofs of H(div) conforming FEs. It does nothing in 1d.
+ // Also nothing in 3d since we take care of it by using the
// adjust_quad_dof_sign_for_face_orientation_table.
internal::FE_PolyTensor::get_dof_sign_change_h_div(cell,
*this,
// difference could be attributed to FP errors, as it was in the
// range of 1.0e-16. These errors originate in the loss of
// symmetry in the FP approximation of the shape-functions.
- // Considering a 3rd order shape function in 1D, we have
+ // Considering a 3rd order shape function in 1d, we have
// N0(x)=N3(1-x) and N1(x)=N2(1-x). For higher order polynomials
// the FP approximations of the shape functions do not satisfy
// these equations any more! Thus in the following code
// support points.
// TODO: Verify that all faces are the same with respect to
// these support points. Furthermore, check if something has to
- // be done for the face orientation flag in 3D.
+ // be done for the face orientation flag in 3d.
const Quadrature<dim> subface_quadrature =
subface == numbers::invalid_unsigned_int ?
QProjector<dim>::project_to_face(this->reference_cell(),
this->unit_face_support_points[face_no].resize(
Utilities::fixed_power<dim - 1>(q_degree + 1));
- // In 1D, there is only one 0-dimensional support point, so there is nothing
+ // In 1d, there is only one 0-dimensional support point, so there is nothing
// more to be done.
if (dim == 1)
return;
void
FE_Q_Base<dim, spacedim>::initialize_quad_dof_index_permutation()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
#endif
// to efficiently evaluate the polynomial at the subcell, make use of
- // the tensor product structure of this element and only evaluate 1D
+ // the tensor product structure of this element and only evaluate 1d
// information from the polynomial. This makes the cost of this function
// almost negligible also for high order elements
const unsigned int dofs1d = q_degree + 1;
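+ // (i.e. only the dofs1d one-dimensional values per direction are needed,
+ // rather than all dofs1d^dim tensor-product shape functions)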
}
}
- // now expand from 1D info. block innermost dimension (x_0) in order to
+ // now expand from 1d info. block innermost dimension (x_0) in order to
// avoid difficult checks at innermost loop
unsigned int j_indices[dim];
internal::FE_Q_Base::zero_indices<dim>(j_indices);
// same logic as in initialize_embedding to evaluate the
// polynomial faster than from the tensor product: since we
// evaluate all polynomials, it is much faster to just compute
- // the 1D values for all polynomials before and then get the
+ // the 1d values for all polynomials before and then get the
// dim-data.
for (unsigned int j = 0; j < dofs1d; ++j)
for (unsigned int d = 0; d < dim; ++d)
void
FE_RaviartThomas<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // For 1D do nothing.
+ // For 1d do nothing.
//
- // TODO: For 2D we simply keep the legacy behavior for now. This should be
- // changed in the future and can be taken care of by similar means as the 3D
+ // TODO: For 2d we simply keep the legacy behavior for now. This should be
+ // changed in the future and can be taken care of by similar means as the 3d
// case below. The legacy behavior can be found in fe_poly_tensor.cc in the
// function internal::FE_PolyTensor::get_dof_sign_change_h_div(...)
if (dim < 3)
this->n_dofs_per_quad(face_no),
ExcInternalError());
- // The 3D RaviartThomas space has tensor_degree*tensor_degree face dofs
+ // The 3d RaviartThomas space has tensor_degree*tensor_degree face dofs
const unsigned int n = this->tensor_degree();
Assert(n * n == this->n_dofs_per_quad(face_no), ExcInternalError());
// vector refer to:
// 0 = vertex
// 1 = edge
- // 2 = face (which is a cell in 2D)
+ // 2 = face (which is a cell in 2d)
// 3 = cell
std::vector<unsigned int>
get_rt_dpo_vector(const unsigned int dim, const unsigned int degree)
FE_RaviartThomasNodal<
dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
void
FE_RT_Bubbles<dim>::initialize_quad_dof_index_permutation_and_sign_change()
{
- // for 1D and 2D, do nothing
+ // for 1d and 2d, do nothing
if (dim < 3)
return;
if (conformity == FiniteElementData<dim>::Conformity::L2)
return {};
- // this concept doesn't exist in 1D so just return an empty vector
+ // this concept doesn't exist in 1d so just return an empty vector
if (dim == 1)
return {};
{
const auto fe_p =
BarycentricPolynomials<dim>::get_fe_p_basis(degree);
- // no further work is needed in 1D
+ // no further work is needed in 1d
if (dim == 1)
return fe_p;
- // in 2D and 3D we add a centroid bubble function
+ // in 2d and 3d we add a centroid bubble function
auto c_bubble = BarycentricPolynomial<dim>() + 1;
for (const auto &vertex : reference_cell.vertex_indices())
c_bubble = c_bubble * M(vertex);
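+ // (assuming M(vertex) denotes the barycentric coordinate associated with
+ // that vertex, c_bubble is now their product: it vanishes on every face of
+ // the cell and is positive at the centroid)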
AssertIndexRange(face_index, GeometryInfo<dim>::faces_per_cell);
// FE_TraceQ shares the numbering of elemental degrees of freedom with FE_Q
- // except for the missing interior ones (quad dofs in 2D and hex dofs in
- // 3D). Therefore, it is safe to ask fe_q for the corresponding
+ // except for the missing interior ones (quad dofs in 2d and hex dofs in
+ // 3d). Therefore, it is safe to ask fe_q for the corresponding
// information. The assertion 'shape_index < this->n_dofs_per_cell()' will
// make sure that we only access the trace dofs.
return fe_q.has_support_on_face(shape_index, face_index);
FE_TraceQ<dim, spacedim>::get_dpo_vector(const unsigned int deg)
{
// This constructs FE_TraceQ in exactly the same way as FE_Q except for the
- // interior degrees of freedom that are not present here (line in 1D, quad
- // in 2D, hex in 3D).
+ // interior degrees of freedom that are not present here (line in 1d, quad
+ // in 2d, hex in 3d).
AssertThrow(deg > 0, ExcMessage("FE_TraceQ needs to be of degree > 0."));
std::vector<unsigned int> dpo(dim + 1, 1U);
dpo[dim] = 0;
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det >
1e-12 * Utilities::fixed_power<dim>(
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det > 1e-12 * Utilities::fixed_power<dim>(
cell->diameter() / std::sqrt(double(dim))),
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det > 1e-12 * Utilities::fixed_power<dim>(
cell->diameter() / std::sqrt(double(dim))),
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det > 1e-12 * Utilities::fixed_power<dim>(
cell->diameter() / std::sqrt(double(dim))),
if (tensor_product_quadrature)
{
- // use a 1D FE_DGQ and adjust the hierarchic -> lexicographic
+ // use a 1d FE_DGQ and adjust the hierarchic -> lexicographic
// numbering manually (building an FE_Q<dim> is relatively
// expensive due to constraints)
const FE_DGQ<1> fe(polynomial_degree);
((dim == 1) || ((dim == 2) && (dim == spacedim))))
{
// The dimension-dependent algorithms are much faster (about 25-45x in
- // 2D) but fail most of the time when the given point (p) is not in the
+ // 2d) but fail most of the time when the given point (p) is not in the
// cell. The dimension-independent Newton algorithm given below is
// slower, but more robust (though it still sometimes fails). Therefore
// this function implements the following strategy based on the
// p's dimension:
//
- // * In 1D this mapping is linear, so the mapping is always invertible
+ // * In 1d this mapping is linear, so the mapping is always invertible
// (and the exact formula is known) as long as the cell has non-zero
// length.
- // * In 2D the exact (quadratic) formula is called first. If either the
+ // * In 2d the exact (quadratic) formula is called first. If either the
// exact formula does not succeed (negative discriminant in the
// quadratic formula) or succeeds but finds a solution outside of the
// unit cell, then the Newton solver is called. The rationale for the
// Newton solver (if it converges) will only return one answer.
// Otherwise the exact formula successfully found a point in the unit
// cell and that value is returned.
- // * In 3D there is no (known to the authors) exact formula, so the Newton
+ // * In 3d there is no (known to the authors) exact formula, so the Newton
// algorithm is used.
const auto vertices_ = this->get_vertices(cell);
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det >
1e-12 * Utilities::fixed_power<dim>(
// check for distorted cells.
- // TODO: this allows for anisotropies of up to 1e6 in 3D and
- // 1e12 in 2D. might want to find a finer
+ // TODO: this allows for anisotropies of up to 1e6 in 3d and
+ // 1e12 in 2d. might want to find a finer
// (dimension-independent) criterion
Assert(det > 1e-12 * Utilities::fixed_power<dim>(
cell->diameter() / std::sqrt(double(dim))),
// in block 4. end_b0_x_u = end_b0_x_l for symmetric airfoils
const double end_b0_x_l;
- // x-coordinate of first airfoil point in airfoil_1D[0] and
- // airfoil_1D[1]
+ // x-coordinate of first airfoil point in airfoil_1d[0] and
+ // airfoil_1d[1]
const double nose_x;
- // x-coordinate of last airfoil point in airfoil_1D[0] and airfoil_1D[1]
+ // x-coordinate of last airfoil point in airfoil_1d[0] and airfoil_1d[1]
const double tail_x;
- // y-coordinate of last airfoil point in airfoil_1D[0] and airfoil_1D[1]
+ // y-coordinate of last airfoil point in airfoil_1d[0] and airfoil_1d[1]
const double tail_y;
// x-coordinate of C,D,E,F indicating ending of blocks 1 and 4 or
}
if (dim > 2)
{
- // In 3D, we have some more edges to deal with
+ // In 3d, we have some more edges to deal with
for (unsigned int i = 1; i < dim; ++i)
points.push_back(0.5 * (points[i - 1] + points[i + 1]));
// And we need face midpoints
Assert(false, ExcNotImplemented());
}
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
parallelogram(Triangulation<2> &tria,
"The distance between corner points must be positive."))
// actual code is external since
+ // 1d is different from 2d/3d.
+ // 1-D is different from 2/3d.
colorize_subdivided_hyper_rectangle(tria, p1, p2, epsilon);
}
}
const double epsilon = 0.01 * min_size;
// actual code is external since
+ // 1d is different from 2d/3d.
+ // 1-D is different from 2/3d.
colorize_subdivided_hyper_rectangle(tria, p1, p2, epsilon);
}
}
namespace internal
{
- // helper function to check if point is in 2D box
+ // helper function to check if point is in 2d box
bool inline point_in_2d_box(const Point<2> &p,
const Point<2> &c,
const double radius)
n_slices,
colorize);
- // extrude to 3D
+ // extrude to 3d
extrude_triangulation(tria_2, n_slices, L, tria, true);
// shift in Z direction to match specified center
tria_2, shell_region_width, n_shells, skewness, colorize);
extrude_triangulation(tria_2, 5, 0.41, tria, true);
- // set up the new 3D manifolds
+ // set up the new 3d manifolds
const types::manifold_id cylindrical_manifold_id = 0;
const types::manifold_id tfi_manifold_id = 1;
const PolarManifold<2> *const m_ptr =
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
hyper_cube_slit(Triangulation<2> &tria,
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
hyper_L(Triangulation<2> &tria,
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
hyper_ball(Triangulation<2> &tria,
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
cylinder(Triangulation<2> &tria,
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
cylinder_shell(Triangulation<2> &,
- // Implementation for 2D only
+ // Implementation for 2d only
template <>
void
half_hyper_shell(Triangulation<2> & tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
hyper_cube_slit(Triangulation<3> &tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
enclosed_hyper_cube(Triangulation<3> &tria,
}
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
hyper_L(Triangulation<3> &tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
hyper_ball(Triangulation<3> &tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
subdivided_cylinder(Triangulation<3> & tria,
tria.set_manifold(0, CylindricalManifold<3>());
}
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
cylinder(Triangulation<3> &tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
half_hyper_ball(Triangulation<3> &tria,
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
half_hyper_shell(Triangulation<3> &tria,
}
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
quarter_hyper_shell(Triangulation<3> & tria,
}
- // Implementation for 3D only
+ // Implementation for 3d only
template <>
void
cylinder_shell(Triangulation<3> & tria,
const auto mid = cell.face(f)->manifold_id();
// process boundary-faces: set boundary and manifold ids
- if (dim == 2) // 2D boundary-faces
+ if (dim == 2) // 2d boundary-faces
{
for (const auto &face_vertices :
vertex_ids_for_boundary_faces_2d[f])
add_cell(1, face_vertices, bid, mid);
}
- else if (dim == 3) // 3D boundary-faces
+ else if (dim == 3) // 3d boundary-faces
{
// set manifold ids of tet-boundary-faces according to
// hex-boundary-faces
ExcMessage("Invalid input."));
}
- // Create a hyperball domain in 2D that will act as the reference cross
+ // Create a hyperball domain in 2d that will act as the reference cross
// section for each pipe segment.
Triangulation<dim - 1, spacedim - 1> tria_base;
GridGenerator::hyper_ball_balanced(tria_base,
in >> vertex[d];
for (unsigned int d = spacedim; d < 3; ++d)
{
- // file is always in 3D
+ // file is always in 3d
double dummy;
in >> dummy;
}
// of which we're not particularly interested in except
// whether they represent quads or hexahedrals.
// *ELEMENT, TYPE=S4R, ELSET=EB<material id>
// *ELEMENT, TYPE=C3D8R, ELSET=EB<material id>
// *ELEMENT, TYPE=C3D8
// Elements itself (n=4 or n=8):
// Index, i[0], ..., i[n]
{
using LineList = std::list<LineEntry>;
- // We should never get here in 1D since this function is overloaded for
+ // We should never get here in 1d since this function is overloaded for
// all dim == 1 cases.
Assert(dim == 2 || dim == 3, ExcInternalError());
- // Generic functions for appending face data in 2D or 3D. TODO: we can
+ // Generic functions for appending face data in 2d or 3d. TODO: we can
// remove these once we have 'if constexpr'.
namespace internal
{
internal::FaceDataHelper<dim> face_data;
std::set<CellData<1>, internal::CellDataComparator<1>>
- line_data; // only used in 3D
+ line_data; // only used in 3d
for (const auto &cell : tria.cell_iterators_on_level(0))
{
{
if (dim == 2)
{
- // flip the cell across the y = x line in 2D
+ // flip the cell across the y = x line in 2d
std::swap(cell.vertices[1], cell.vertices[2]);
}
else if (dim == 3)
{
- // swap the front and back faces in 3D
+ // swap the front and back faces in 3d
std::swap(cell.vertices[0], cell.vertices[2]);
std::swap(cell.vertices[1], cell.vertices[3]);
std::swap(cell.vertices[4], cell.vertices[6]);
};
- // Transformation to rotate around one of the cartesian z-axis in 2D.
+ // Transformation to rotate around the cartesian z-axis in 2d.
class Rotate2d
{
public:
namespace
{
// Split get_subdomain_association() for p::d::T since we want to compile
- // it in 1D but none of the p4est stuff is available in 1D.
+ // it in 1d but none of the p4est stuff is available in 1d.
template <int dim, int spacedim>
void
get_subdomain_association(
cells_to_add.push_back(cell);
}
}
- // point on line in 3D: We cannot simply take the intersection between
+ // point on line in 3d: We cannot simply take the intersection between
// the two vertices of cells because of hanging nodes. So instead we
// list the vertices around both points and then select the
// appropriate cells according to the result of real_to_unit_cell
static inline std::bitset<3>
lookup(const MATCH_T &)
{
- // The 1D case is trivial
+ // The 1d case is trivial
return 1; // [true ,false,false]
}
};
static inline std::bitset<3>
lookup(const MATCH_T &matching)
{
- // In 2D matching faces (=lines) results in two cases: Either
+ // In 2d matching faces (=lines) results in two cases: Either
// they are aligned or flipped. We store this "line_flip"
// property somewhat sloppy as "face_flip"
// (always: face_orientation = true, face_rotation = false)
static inline std::bitset<3>
lookup(const MATCH_T &matching)
{
- // The full fledged 3D case. *Yay*
+ // The full fledged 3d case. *Yay*
// See the documentation in include/deal.II/base/geometry_info.h
// as well as the actual implementation in source/grid/tria.cc
// for more details...
continue;
}
- // If not in 3D, just use the implementation from PolarManifold
+ // If not in 3d, just use the implementation from PolarManifold
// after we verified that the candidate is not the center.
if (spacedim < 3)
new_points[row] = polar_manifold.get_new_point(
namespace
{
- // version for 1D
+ // version for 1d
template <typename AccessorType>
Point<AccessorType::space_dimension>
compute_transfinite_interpolation(const AccessorType &cell,
cell.vertex(1) * chart_point[0];
}
- // version for 2D
+ // version for 2d
template <typename AccessorType>
Point<AccessorType::space_dimension>
compute_transfinite_interpolation(const AccessorType &cell,
// contribution of the vertices. If a line employs the same manifold
// as the cell, we can merge the weights of the line with the weights
// of the vertex with a negative sign while going through the faces
- // (this is a bit artificial in 2D but it becomes clear in 3D where we
+ // (this is a bit artificial in 2d but it becomes clear in 3d where we
// avoid looking at the faces' orientation and other complications).
// add the contribution from the lines around the cell (first line in
{0, 1, 2, 3},
{4, 5, 6, 7}};
- // version for 3D
+ // version for 3d
template <typename AccessorType>
Point<AccessorType::space_dimension>
compute_transfinite_interpolation(const AccessorType &cell,
const types::manifold_id my_manifold_id = cell.manifold_id();
const Triangulation<dim, spacedim> &tria = cell.get_triangulation();
- // Same approach as in 2D, but adding the faces, subtracting the edges, and
+ // Same approach as in 2d, but adding the faces, subtracting the edges, and
// adding the vertices
const std::array<Point<spacedim>, 8> vertices{{cell.vertex(0),
cell.vertex(1),
// method usually does not need more than 5-8 iterations, but sometimes
// we might have had a bad initial guess and then we can accelerate
// convergence considerably with getting the actual Jacobian rather than
- // using secant-like methods (one gradient calculation in 3D costs as
+ // using secant-like methods (one gradient calculation in 3d costs as
// much as 3 more iterations). this usually happens close to convergence
// and one more step with the finite-differenced Jacobian leads to
// convergence
// Function that can guess the location of a chart point by assuming that
// the eight surrounding points are points on a two-dimensional object
- // (either a cell in 2D or the face of a hexahedron in 3D), arranged like
+ // (either a cell in 2d or the face of a hexahedron in 3d), arranged like
//
// 2 - 7 - 3
// | |
use_structdim_2_guesses = true;
else if (spacedim == 3)
// otherwise these vectors are roughly orthogonal: enable the
- // structdim 3 optimization if we are in 3D
+ // structdim 3 optimization if we are in 3d
use_structdim_3_guesses = true;
}
// we should enable at most one of the optimizations
const auto &crs = connectivity.entity_to_entities(dim, dim - 1);
const auto &nei = connectivity.entity_to_entities(dim, dim);
- // in 2D optional: since in in pure QUAD meshes same line
+ // in 2d optional: since in pure QUAD meshes same line
// orientations can be guaranteed
bool orientation_needed = false;
if (dim == 3)
if (dim != 3)
continue;
- // ... and the lines of quads in 3D
+ // ... and the lines of quads in 3d
const auto &crs = connectivity.entity_to_entities(2, 1);
for (unsigned int i = crs.ptr[face]; i < crs.ptr[face + 1]; ++i)
tria.faces->lines.boundary_or_material_id[crs.col[i]]
.boundary_id = 0;
}
}
- else // 1D
+ else // 1d
{
static const unsigned int t_tba = static_cast<unsigned int>(-1);
static const unsigned int t_inner = static_cast<unsigned int>(-2);
subcells[i]->set_subdomain_id(subdomainid);
// TODO: here we assume that all children have the same reference
- // cell type as the parent! This is justified for 2D.
+ // cell type as the parent! This is justified for 2d.
triangulation.levels[subcells[i]->level()]
->reference_cell[subcells[i]->index()] = cell->reference_cell();
// the evaluation of the formulae
// is a bit tricky when done dimension
// independently, so we write this function
- // for 2D and 3D separately
+ // for 2d and 3d separately
/*
Get the computation of the barycenter by this little Maple script. We
use the bilinear mapping of the unit quad to the real quad. However,
void
ScaLAPACKMatrix<NumberType>::copy_to(FullMatrix<NumberType> &matrix) const
{
// FIXME: use PDGEMR2D for copying?
// PDGEMR2D copies a submatrix of A on a submatrix of B.
// A and B can have different distributions
// see http://icl.cs.utk.edu/lapack-forum/viewtopic.php?t=50
Assert(n_rows == int(matrix.m()), ExcDimensionMismatch(n_rows, matrix.m()));
namespace MGTools
{
- // specializations for 1D
+ // specializations for 1d
template <>
void
compute_row_length_vector(const DoFHandler<1, 1> &,
- // Template for 2D and 3D. For 1D see specialization above
+ // Template for 2d and 3d. For 1d see specialization above
template <int dim, int spacedim>
void
compute_row_length_vector(const DoFHandler<dim, spacedim> &dofs,
while (i < fe.get_first_quad_index(face_no))
row_lengths[cell_indices[i++]] += increment;
- // Now quads in 2D and 3D
+ // Now quads in 2d and 3d
increment =
(dim > 2) ?
fe.n_dofs_per_cell() - (dim - 2) * fe.n_dofs_per_face(face_no) :
GeometryInfo<dim>::faces_per_cell * fe.n_dofs_per_face(face_no);
while (i < fe.get_first_hex_index())
row_lengths[cell_indices[i++]] += increment;
- // Finally, cells in 3D
+ // Finally, cells in 3d
increment = fe.n_dofs_per_cell() - GeometryInfo<dim>::faces_per_cell *
fe.n_dofs_per_face(face_no);
while (i < fe.n_dofs_per_cell())
}
- // This is the template for 2D and 3D. See version for 1D above
+ // This is the template for 2d and 3d. See version for 1d above
template <int dim, int spacedim>
void
compute_row_length_vector(const DoFHandler<dim, spacedim> & dofs,
++i;
}
- // Now quads in 2D and 3D
+ // Now quads in 2d and 3d
while (i < fe.get_first_hex_index())
{
for (unsigned int base = 0; base < fe.n_base_elements(); ++base)
++i;
}
- // Finally, cells in 3D
+ // Finally, cells in 3d
while (i < fe.n_dofs_per_cell())
{
for (unsigned int base = 0; base < fe.n_base_elements(); ++base)
elem_info.element_is_continuous = fe.n_dofs_per_vertex() > 0;
Assert(fe.n_dofs_per_vertex() < 2, ExcNotImplemented());
- // step 1.2: get renumbering of 1D basis functions to lexicographic
+ // step 1.2: get renumbering of 1d basis functions to lexicographic
// numbers. The distinction according to fe.n_dofs_per_vertex() is to
// support both continuous and discontinuous bases.
std::vector<unsigned int> renumbering(fe.n_dofs_per_cell());
fe.n_dofs_per_vertex();
}
- // step 1.3: create a dummy 1D quadrature formula to extract the
+ // step 1.3: create a dummy 1d quadrature formula to extract the
// lexicographic numbering for the elements
Assert(fe.n_dofs_per_vertex() == 0 || fe.n_dofs_per_vertex() == 1,
ExcNotImplemented());
const dealii::Triangulation<dim> &tria = dof_handler.get_triangulation();
- // ---------------------------- 1. Extract 1D info about the finite
- // element step 1.1: create a 1D copy of the finite element from FETools
+ // ---------------------------- 1. Extract 1d info about the finite element
+ // step 1.1: create a 1d copy of the finite element from FETools
// where we substitute the template argument
AssertDimension(dof_handler.get_fe().n_base_elements(), 1);
std::string fe_name = dof_handler.get_fe().base_element(0).get_name();
level_set,
additional_data)
{
// Tensor products of each quadrature in q_collection_1D. Used on the
// non-intersected cells.
hp::QCollection<dim> q_collection;
for (unsigned int i = 0; i < q_collection_1D.size(); ++i)
level_set,
additional_data)
{
// Tensor products of each quadrature in q_collection_1D. Used on the
// non-intersected cells.
hp::QCollection<dim - 1> q_collection;
for (unsigned int i = 0; i < q_collection_1D.size(); ++i)
std::sort(side_lengths.begin(), side_lengths.end());
// Check if the two largest side lengths have the same length. This
- // function isn't called in 1D, so the (dim - 2)-element exists.
+ // function isn't called in 1d, so the (dim - 2)-element exists.
if (boost::math::epsilon_difference(side_lengths[dim - 1].first,
side_lengths[dim - 2].first) < 100)
return std_cxx17::optional<unsigned int>();
for (const double root : roots)
{
- // A surface integral in 1D is just a point evaluation,
+ // A surface integral in 1d is just a point evaluation,
// so the weight is always 1.
const double weight = 1;
const Point<1> point(root);
{
AssertIndexRange(face_index, GeometryInfo<1>::faces_per_cell);
- // The only vertex the 1D-face has.
+ // The only vertex the 1d-face has.
const Point<1> vertex =
box.vertex(GeometryInfo<1>::face_to_cell_vertices(face_index, 0));
// don't iterate over all cells and if cell data is requested. in that
// case, we need to calculate cell_number as in the DataOut class
-// Not implemented for 3D
+// Not implemented for 3d
namespace internal
// steps for the previous grid
//
// use a double value since for each
- // four cells (in 2D) that we flagged
+ // four cells (in 2d) that we flagged
// for coarsening we result in one
// new. but since we loop over flagged
// cells, we have to subtract 3/4 of
namespace VectorTools
{
- // separate implementation for 1D because otherwise we get linker errors since
+ // separate implementation for 1d because otherwise we get linker errors since
// (hp::)FEFaceValues<1> is not compiled
template <>
void