// This is the only include file that is new: It introduces the
// parallel::distributed::SolutionTransfer equivalent of the
-// dealii::SolutionTransfer class to take a solution from on mesh to the next
+// SolutionTransfer class to take a solution from one mesh to the next
// one upon mesh refinement, but in the case of parallel distributed
// triangulations:
#include <deal.II/distributed/solution_transfer.h>
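//
// A minimal usage sketch of this class (the names `VectorType`,
// `triangulation`, `dof_handler`, `fe`, `old_solution`, and `new_solution`
// are generic placeholders, not necessarily the ones used in this program):
// the solution is registered before the mesh is changed and interpolated
// onto the new mesh afterwards.
// @code
//   parallel::distributed::SolutionTransfer<dim, VectorType> solution_transfer(
//     dof_handler);
//   // refinement flags have already been set on the triangulation:
//   solution_transfer.prepare_for_coarsening_and_refinement(old_solution);
//   triangulation.execute_coarsening_and_refinement();
//   dof_handler.distribute_dofs(fe);
//   // new_solution must be reinitialized for the new DoF layout first:
//   solution_transfer.interpolate(new_solution);
// @endcode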
}
Utilities::MPI::MPI_InitFinalize mpi_initialization(
- argc, argv, dealii::numbers::invalid_unsigned_int);
+ argc, argv, numbers::invalid_unsigned_int);
ConservationLaw<2> cons(argv[1]);
cons.run();
// @sect3{Coefficients and helper classes}
// MatrixFree operators must use the
-// dealii::LinearAlgebra::distributed::Vector vector type. Here we define
+// LinearAlgebra::distributed::Vector vector type. Here we define
// operations which copy to and from Trilinos vectors for compatibility with
// the matrix-based code. Note that this functionality does not currently
// exist for PETSc vector types, so Trilinos must be installed to use the
namespace ChangeVectorTypes
{
template <typename number>
- void copy(LA::MPI::Vector & out,
- const dealii::LinearAlgebra::distributed::Vector<number> &in)
+ void copy(LA::MPI::Vector & out,
+ const LinearAlgebra::distributed::Vector<number> &in)
{
- dealii::LinearAlgebra::ReadWriteVector<double> rwv(
- out.locally_owned_elements());
+ LinearAlgebra::ReadWriteVector<double> rwv(out.locally_owned_elements());
rwv.import(in, VectorOperation::insert);
#ifdef USE_PETSC_LA
AssertThrow(false,
template <typename number>
- void copy(dealii::LinearAlgebra::distributed::Vector<number> &out,
- const LA::MPI::Vector & in)
+ void copy(LinearAlgebra::distributed::Vector<number> &out,
+ const LA::MPI::Vector & in)
{
- dealii::LinearAlgebra::ReadWriteVector<double> rwv;
+ LinearAlgebra::ReadWriteVector<double> rwv;
#ifdef USE_PETSC_LA
(void)in;
AssertThrow(false,
//
// Finally, the system_rhs vector is of type LA::MPI::Vector, but the
// MatrixFree class only works for
-// dealii::LinearAlgebra::distributed::Vector. Therefore we must
+// LinearAlgebra::distributed::Vector. Therefore we must
// compute the right-hand side using MatrixFree functionality and then
// use the functions in the `ChangeVectorTypes` namespace to copy it to
// the correct type.
preconditioner(dof_handler, mg, mg_transfer);
// Copy the solution vector and right-hand side from LA::MPI::Vector
- // to dealii::LinearAlgebra::distributed::Vector so that we can solve.
+ // to LinearAlgebra::distributed::Vector so that we can solve.
MatrixFreeActiveVector solution_copy;
MatrixFreeActiveVector right_hand_side_copy;
mf_system_matrix.initialize_dof_vector(solution_copy);
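// Once both copies have been set up with initialize_dof_vector(), the
// helpers from the `ChangeVectorTypes` namespace move the data across the
// two vector types. A sketch of the round trip (assuming LA::MPI::Vector
// objects named `solution` and `right_hand_side`; those names are
// placeholders):
// @code
//   ChangeVectorTypes::copy(solution_copy, solution);
//   ChangeVectorTypes::copy(right_hand_side_copy, right_hand_side);
//   // ... solve with the matrix-free operator on the copies ...
//   ChangeVectorTypes::copy(solution, solution_copy);
// @endcode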
// Since CUDAWrappers::MatrixFree::Data doesn't know about the size of its
// arrays, we need to store the number of quadrature points and the number
// of degrees of freedom in this class to do necessary index conversions.
- static const unsigned int n_dofs_1d = fe_degree + 1;
- static const unsigned int n_local_dofs =
- dealii::Utilities::pow(n_dofs_1d, dim);
- static const unsigned int n_q_points =
- dealii::Utilities::pow(n_dofs_1d, dim);
+ static const unsigned int n_dofs_1d = fe_degree + 1;
+ static const unsigned int n_local_dofs = Utilities::pow(n_dofs_1d, dim);
+ static const unsigned int n_q_points = Utilities::pow(n_dofs_1d, dim);
private:
double *coef;
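// As a concrete example of the sizes involved (not tied to any particular
// run of this program): for fe_degree = 2 in dim = 3 space dimensions,
// n_dofs_1d = 3 and n_local_dofs = n_q_points = 3^3 = 27.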
// <code>distributed/tria.h</code> and
// <code>lac/la_parallel_vector.h</code>. Instead of a Trilinos, or PETSc
// specific matrix class, we will use a non-distributed
-// dealii::SparseMatrix (<code>lac/sparse_matrix.h</code>) to store the local
+// SparseMatrix (<code>lac/sparse_matrix.h</code>) to store the local
// part of the $\mathbf{c}_{ij}$, $\mathbf{n}_{ij}$ and $d_{ij}$ matrices.
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/parallel.h>
// First column-loop: we compute and store the entries of the
// matrix norm_matrix and write normalized entries into the
// matrix nij_matrix:
- std::for_each(
- sparsity_pattern.begin(row_index),
- sparsity_pattern.end(row_index),
- [&](const dealii::SparsityPatternIterators::Accessor &jt) {
- const auto c_ij = gather_get_entry(cij_matrix, &jt);
- const double norm = c_ij.norm();
-
- set_entry(norm_matrix, &jt, norm);
- for (unsigned int j = 0; j < dim; ++j)
- set_entry(nij_matrix[j], &jt, c_ij[j] / norm);
- });
+ std::for_each(sparsity_pattern.begin(row_index),
+ sparsity_pattern.end(row_index),
+ [&](const SparsityPatternIterators::Accessor &jt) {
+ const auto c_ij =
+ gather_get_entry(cij_matrix, &jt);
+ const double norm = c_ij.norm();
+
+ set_entry(norm_matrix, &jt, norm);
+ for (unsigned int j = 0; j < dim; ++j)
+ set_entry(nij_matrix[j], &jt, c_ij[j] / norm);
+ });
}
};
// for VectorTools::interpolate(). We work around this issue by, first,
// creating a lambda function that for a given position <code>x</code>
// returns just the value of the <code>i</code>th component. This
- // lambda in turn is converted to a dealii::Function with the help of
+ // lambda in turn is converted to a Function<dim> object with the help of
// the ScalarFunctionFromFunctionObject wrapper.
-
for (unsigned int i = 0; i < problem_dimension; ++i)
VectorTools::interpolate(offline_data.dof_handler,
ScalarFunctionFromFunctionObject<dim, double>(
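// A self-contained sketch of this wrapper in isolation (the callable
// `full_state` and the component index `i` are illustrative placeholders):
// any callable that returns a vector-valued quantity can have one of its
// components turned into a scalar Function<dim> this way.
// @code
//   const ScalarFunctionFromFunctionObject<dim, double> component_i(
//     [&](const Point<dim> &x) { return full_state(x)[i]; });
// @endcode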
discretization.triangulation.save(name + "-checkpoint.mesh");
- if (dealii::Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
+ if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
{
std::ofstream file(name + "-checkpoint.metadata", std::ios::binary);
boost::archive::binary_oarchive oa(file);
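// The corresponding restart path (a sketch only; the actual resume logic
// may differ in detail) mirrors the two steps above: after the coarse mesh
// has been re-created, the triangulation is loaded and the metadata is
// read back through the matching input archive.
// @code
//   discretization.triangulation.load(name + "-checkpoint.mesh");
//   std::ifstream file(name + "-checkpoint.metadata", std::ios::binary);
//   boost::archive::binary_iarchive ia(file);
//   // read back exactly the quantities the oarchive serialized above
// @endcode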
std::vector<types::global_dof_index> fluid_dof_indices(
fluid_fe->n_dofs_per_cell());
- FullMatrix<double> local_matrix(fluid_fe->n_dofs_per_cell(),
+ FullMatrix<double> local_matrix(fluid_fe->n_dofs_per_cell(),
fluid_fe->n_dofs_per_cell());
- dealii::Vector<double> local_rhs(fluid_fe->n_dofs_per_cell());
+ Vector<double> local_rhs(fluid_fe->n_dofs_per_cell());
const auto penalty_parameter =
1.0 / GridTools::minimal_cell_diameter(fluid_tria);
const auto invS = inverse_operator(S, cg, amgS);
const auto P = block_diagonal_operator<2, LA::MPI::BlockVector>(
- std::array<
- dealii::LinearOperator<typename LA::MPI::BlockVector::BlockType>,
- 2>{{amgA, amgS}});
+ std::array<LinearOperator<typename LA::MPI::BlockVector::BlockType>, 2>{
+ {amgA, amgS}});
SolverControl solver_control(system_matrix.m(),
1e-10 * system_rhs.l2_norm());
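// With the block-diagonal operator P in hand, the usual next step is to
// pass it to a Krylov solver as the preconditioner. A sketch (the choice of
// SolverMinRes and the name `solution` are illustrative, not necessarily
// what this program does):
// @code
//   SolverMinRes<LA::MPI::BlockVector> solver(solver_control);
//   solver.solve(system_matrix, solution, system_rhs, P);
// @endcode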
evaluate_function<dim, VectorizedArrayType, dim>(
*constant_function, Point<dim, VectorizedArrayType>());
- const dealii::internal::EvaluatorTensorProduct<
- dealii::internal::EvaluatorVariant::evaluate_evenodd,
+ const internal::EvaluatorTensorProduct<
+ internal::EvaluatorVariant::evaluate_evenodd,
dim,
n_points_1d,
n_points_1d,
// Transform values from collocation space to the original
// Gauss-Lobatto space:
internal::FEEvaluationImplBasisChange<
- dealii::internal::EvaluatorVariant::evaluate_evenodd,
+ internal::EvaluatorVariant::evaluate_evenodd,
internal::EvaluatorQuantity::hessian,
dim,
degree + 1,
std::complex<double> mu_inv(const Point<dim> & x,
types::material_id material);
- rank2_type sigma(const dealii::Point<dim> &x,
- types::material_id left,
- types::material_id right);
+ rank2_type sigma(const Point<dim> & x,
+ types::material_id left,
+ types::material_id right);
- rank1_type J_a(const dealii::Point<dim> &point, types::material_id id);
+ rank1_type J_a(const Point<dim> &point, types::material_id id);
private:
rank2_type epsilon_1;
template <int dim>
typename Parameters<dim>::rank2_type
- Parameters<dim>::sigma(const dealii::Point<dim> & /*x*/,
+ Parameters<dim>::sigma(const Point<dim> & /*x*/,
types::material_id left,
types::material_id right)
{
template <int dim>
typename Parameters<dim>::rank1_type
- Parameters<dim>::J_a(const dealii::Point<dim> &point,
- types::material_id /*id*/)
+ Parameters<dim>::J_a(const Point<dim> &point, types::material_id /*id*/)
{
rank1_type J_a;
const auto distance = (dipole_position - point).norm() / dipole_radius;
// This is a helper function that takes the tangential component of a tensor.
template <int dim>
DEAL_II_ALWAYS_INLINE inline Tensor<1, dim, std::complex<double>>
- tangential_part(const dealii::Tensor<1, dim, std::complex<double>> &tensor,
- const Tensor<1, dim> & normal)
+ tangential_part(const Tensor<1, dim, std::complex<double>> &tensor,
+ const Tensor<1, dim> & normal)
{
auto result = tensor;
result[0] = normal[1] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
// immersed quadrature rules.
non_matching_fe_values.reinit(cell);
- // After calling reinit, we can retrieve a dealii::FEValues object with
+// After calling reinit, we can retrieve an FEValues object with
// quadrature points that corresponds to integrating over the inside
// region of the cell. This is the object we use to do the local
- // assembly. This is similar to how hp::FEValues builds dealii::FEValues
- // objects. However, one difference here is that the dealii::FEValues
+ // assembly. This is similar to how hp::FEValues builds FEValues
+ // objects. However, one difference here is that the FEValues
// object is returned as an optional. This is a type that wraps an
// object that may or may not be present. This requires us to add an
// if-statement to check if the returned optional contains a value,
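// A sketch of that check (the local name `inside_fe_values` is chosen here
// for illustration; get_inside_fe_values() is the accessor provided by
// NonMatching::FEValues):
// @code
//   const auto &inside_fe_values =
//     non_matching_fe_values.get_inside_fe_values();
//   if (inside_fe_values)
//     {
//       // use (*inside_fe_values) for the local assembly
//     }
// @endcode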