#include <deal.II/base/subscriptor.h>
#include <deal.II/base/vectorization.h>
+#include <deal.II/lac/block_vector_base.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/householder.h>
#include <deal.II/lac/lapack_full_matrix.h>
namespace internal
{
namespace SolverGMRESImplementation
{
- template <class VectorType>
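+ // Type trait that detects whether VectorType is a
+ // LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>, or a
+ // block vector thereof; only for these types can the optimized kernels
+ // below access the locally owned array directly.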
+ template <typename VectorType, typename Enable = void>
+ struct is_dealii_compatible_distributed_vector;
+
+ template <typename VectorType>
+ struct is_dealii_compatible_distributed_vector<
+ VectorType,
+ typename std::enable_if<!internal::is_block_vector<VectorType>>::type>
+ {
+ static constexpr bool value = std::is_same<
+ VectorType,
+ LinearAlgebra::distributed::Vector<typename VectorType::value_type,
+ MemorySpace::Host>>::value;
+ };
+
+
+
+ template <typename VectorType>
+ struct is_dealii_compatible_distributed_vector<
+ VectorType,
+ typename std::enable_if<internal::is_block_vector<VectorType>>::type>
+ {
+ static constexpr bool value = std::is_same<
+ typename VectorType::BlockType,
+ LinearAlgebra::distributed::Vector<typename VectorType::value_type,
+ MemorySpace::Host>>::value;
+ };
+
+
+
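+ // Uniform block access, part 1: a non-block vector is treated as a block
+ // vector with a single block.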
+ template <typename VectorType,
+ std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+ * = nullptr>
+ unsigned int
+ n_blocks(const VectorType &)
+ {
+ return 1;
+ }
+
+
+
+ template <typename VectorType,
+ std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+ nullptr>
+ unsigned int
+ n_blocks(const VectorType &vector)
+ {
+ return vector.n_blocks();
+ }
+
+
+
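+ // Uniform block access, part 2: block(v, b) returns the vector itself for
+ // non-block vectors (only b == 0 is valid) and forwards to v.block(b) for
+ // block vectors.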
+ template <typename VectorType,
+ std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+ * = nullptr>
+ VectorType &
+ block(VectorType &vector, const unsigned int b)
+ {
+ AssertDimension(b, 0);
+ (void)b;
+ return vector;
+ }
+
+
+
+ template <typename VectorType,
+ std::enable_if_t<!IsBlockVector<VectorType>::value, VectorType>
+ * = nullptr>
+ const VectorType &
+ block(const VectorType &vector, const unsigned int b)
+ {
+ AssertDimension(b, 0);
+ (void)b;
+ return vector;
+ }
+
+
+
+ template <typename VectorType,
+ std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+ nullptr>
+ typename VectorType::BlockType &
+ block(VectorType &vector, const unsigned int b)
+ {
+ return vector.block(b);
+ }
+
+
+
+ template <typename VectorType,
+ std::enable_if_t<IsBlockVector<VectorType>::value, VectorType> * =
+ nullptr>
+ const typename VectorType::BlockType &
+ block(const VectorType &vector, const unsigned int b)
+ {
+ return vector.block(b);
+ }
+
+
+
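+ // Fallback path of Tvmult_add for vector types that are not deal.II
+ // distributed host vectors and hence do not expose their locally owned
+ // elements directly.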
+ template <class VectorType,
+ std::enable_if_t<
+ !is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
void
Tvmult_add(const unsigned int dim,
const VectorType & vv,
- template <class Number>
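+ // Optimized path of Tvmult_add: accumulates the local inner products
+ // h(i) += orthogonal_vectors[i] * vv on the locally owned arrays, block by
+ // block, using VectorizedArray with 4-way unrolling, and sums the result
+ // over all MPI ranks at the end.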
+ template <class VectorType,
+ std::enable_if_t<
+ is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
void
- Tvmult_add(
- const unsigned int dim,
- const LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &vv,
- const internal::SolverGMRESImplementation::TmpVectors<
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
- & orthogonal_vectors,
- Vector<double> &h)
+ Tvmult_add(const unsigned int dim,
+ const VectorType & vv,
+ const internal::SolverGMRESImplementation::TmpVectors<VectorType>
+ & orthogonal_vectors,
+ Vector<double> &h)
{
- unsigned int j = 0;
-
- if (dim <= 128)
+ for (unsigned int b = 0; b < n_blocks(vv); ++b)
{
- // optimized path
- static constexpr unsigned int n_lanes =
- VectorizedArray<double>::size();
+ unsigned int j = 0;
- VectorizedArray<double> hs[128];
- for (unsigned int d = 0; d < dim; ++d)
- hs[d] = 0.0;
+ if (dim <= 128)
+ {
+ // optimized path
+ static constexpr unsigned int n_lanes =
+ VectorizedArray<double>::size();
- unsigned int c = 0;
+ VectorizedArray<double> hs[128];
+ for (unsigned int d = 0; d < dim; ++d)
+ hs[d] = 0.0;
- for (; c < vv.locally_owned_size() / n_lanes / 4;
- ++c, j += n_lanes * 4)
- for (unsigned int i = 0; i < dim; ++i)
- {
- VectorizedArray<double> vvec[4];
- for (unsigned int k = 0; k < 4; ++k)
- vvec[k].load(vv.begin() + j + k * n_lanes);
+ unsigned int c = 0;
- for (unsigned int k = 0; k < 4; ++k)
+ for (; c < block(vv, b).locally_owned_size() / n_lanes / 4;
+ ++c, j += n_lanes * 4)
+ for (unsigned int i = 0; i < dim; ++i)
{
- VectorizedArray<double> temp;
- temp.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
- hs[i] += temp * vvec[k];
+ VectorizedArray<double> vvec[4];
+ for (unsigned int k = 0; k < 4; ++k)
+ vvec[k].load(block(vv, b).begin() + j + k * n_lanes);
+
+ for (unsigned int k = 0; k < 4; ++k)
+ {
+ VectorizedArray<double> temp;
+ temp.load(block(orthogonal_vectors[i], b).begin() + j +
+ k * n_lanes);
+ hs[i] += temp * vvec[k];
+ }
}
- }
- c *= 4;
- for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
- for (unsigned int i = 0; i < dim; ++i)
- {
- VectorizedArray<double> vvec, temp;
- vvec.load(vv.begin() + j);
- temp.load(orthogonal_vectors[i].begin() + j);
- hs[i] += temp * vvec;
- }
+ c *= 4;
+ for (; c < block(vv, b).locally_owned_size() / n_lanes;
+ ++c, j += n_lanes)
+ for (unsigned int i = 0; i < dim; ++i)
+ {
+ VectorizedArray<double> vvec, temp;
+ vvec.load(block(vv, b).begin() + j);
+ temp.load(block(orthogonal_vectors[i], b).begin() + j);
+ hs[i] += temp * vvec;
+ }
- for (unsigned int i = 0; i < dim; ++i)
- for (unsigned int v = 0; v < n_lanes; ++v)
- h(i) += hs[i][v];
- }
+ for (unsigned int i = 0; i < dim; ++i)
+ for (unsigned int v = 0; v < n_lanes; ++v)
+ h(i) += hs[i][v];
+ }
- // remainder loop of optimized path or non-optimized path (if
- // dim>128)
- for (; j < vv.locally_owned_size(); ++j)
- for (unsigned int i = 0; i < dim; ++i)
- h(i) += orthogonal_vectors[i].local_element(j) * vv.local_element(j);
+ // remainder loop of optimized path or non-optimized path (if
+ // dim>128)
+ for (; j < block(vv, b).locally_owned_size(); ++j)
+ for (unsigned int i = 0; i < dim; ++i)
+ h(i) += block(orthogonal_vectors[i], b).local_element(j) *
+ block(vv, b).local_element(j);
+ }
Utilities::MPI::sum(h, MPI_COMM_WORLD, h);
}
- template <class VectorType>
+ template <class VectorType,
+ std::enable_if_t<
+ !is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
double
substract_and_norm(
const unsigned int dim,
- template <class Number>
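+ // Optimized path: computes vv -= sum_i h(i) * orthogonal_vectors[i] block
+ // by block on the locally owned arrays and returns the l2 norm of the
+ // updated vv.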
+ template <class VectorType,
+ std::enable_if_t<
+ is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
double
substract_and_norm(
const unsigned int dim,
- const internal::SolverGMRESImplementation::TmpVectors<
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
+ const internal::SolverGMRESImplementation::TmpVectors<VectorType>
& orthogonal_vectors,
const Vector<double> &h,
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &vv)
+ VectorType & vv)
{
static constexpr unsigned int n_lanes = VectorizedArray<double>::size();
- double norm_vv_temp = 0.0;
- VectorizedArray<double> norm_vv_temp_vectorized = 0.0;
+ double norm_vv_temp = 0.0;
- unsigned int j = 0;
- unsigned int c = 0;
- for (; c < vv.locally_owned_size() / n_lanes / 4; ++c, j += n_lanes * 4)
+ for (unsigned int b = 0; b < n_blocks(vv); ++b)
{
- VectorizedArray<double> temp[4];
+ VectorizedArray<double> norm_vv_temp_vectorized = 0.0;
- for (unsigned int k = 0; k < 4; ++k)
- temp[k].load(vv.begin() + j + k * n_lanes);
-
- for (unsigned int i = 0; i < dim; ++i)
+ unsigned int j = 0;
+ unsigned int c = 0;
+ for (; c < block(vv, b).locally_owned_size() / n_lanes / 4;
+ ++c, j += n_lanes * 4)
{
- const double factor = h(i);
+ VectorizedArray<double> temp[4];
+
for (unsigned int k = 0; k < 4; ++k)
+ temp[k].load(block(vv, b).begin() + j + k * n_lanes);
+
+ for (unsigned int i = 0; i < dim; ++i)
{
- VectorizedArray<double> vec;
- vec.load(orthogonal_vectors[i].begin() + j + k * n_lanes);
- temp[k] -= factor * vec;
+ const double factor = h(i);
+ for (unsigned int k = 0; k < 4; ++k)
+ {
+ VectorizedArray<double> vec;
+ vec.load(block(orthogonal_vectors[i], b).begin() + j +
+ k * n_lanes);
+ temp[k] -= factor * vec;
+ }
}
- }
-
- for (unsigned int k = 0; k < 4; ++k)
- temp[k].store(vv.begin() + j + k * n_lanes);
- norm_vv_temp_vectorized += (temp[0] * temp[0] + temp[1] * temp[1]) +
- (temp[2] * temp[2] + temp[3] * temp[3]);
- }
+ for (unsigned int k = 0; k < 4; ++k)
+ temp[k].store(block(vv, b).begin() + j + k * n_lanes);
- c *= 4;
- for (; c < vv.locally_owned_size() / n_lanes; ++c, j += n_lanes)
- {
- VectorizedArray<double> temp;
- temp.load(vv.begin() + j);
+ norm_vv_temp_vectorized +=
+ (temp[0] * temp[0] + temp[1] * temp[1]) +
+ (temp[2] * temp[2] + temp[3] * temp[3]);
+ }
- for (unsigned int i = 0; i < dim; ++i)
+ c *= 4;
+ for (; c < block(vv, b).locally_owned_size() / n_lanes;
+ ++c, j += n_lanes)
{
- VectorizedArray<double> vec;
- vec.load(orthogonal_vectors[i].begin() + j);
- temp -= h(i) * vec;
- }
+ VectorizedArray<double> temp;
+ temp.load(block(vv, b).begin() + j);
- temp.store(vv.begin() + j);
+ for (unsigned int i = 0; i < dim; ++i)
+ {
+ VectorizedArray<double> vec;
+ vec.load(block(orthogonal_vectors[i], b).begin() + j);
+ temp -= h(i) * vec;
+ }
- norm_vv_temp_vectorized += temp * temp;
- }
+ temp.store(block(vv, b).begin() + j);
+
+ norm_vv_temp_vectorized += temp * temp;
+ }
- for (unsigned int v = 0; v < n_lanes; ++v)
- norm_vv_temp += norm_vv_temp_vectorized[v];
+ for (unsigned int v = 0; v < n_lanes; ++v)
+ norm_vv_temp += norm_vv_temp_vectorized[v];
- for (; j < vv.locally_owned_size(); ++j)
- {
- double temp = vv.local_element(j);
- for (unsigned int i = 0; i < dim; ++i)
- temp -= h(i) * orthogonal_vectors[i].local_element(j);
- vv.local_element(j) = temp;
+ for (; j < block(vv, b).locally_owned_size(); ++j)
+ {
+ double temp = block(vv, b).local_element(j);
+ for (unsigned int i = 0; i < dim; ++i)
+ temp -= h(i) * block(orthogonal_vectors[i], b).local_element(j);
+ block(vv, b).local_element(j) = temp;
- norm_vv_temp += temp * temp;
+ norm_vv_temp += temp * temp;
+ }
}
return std::sqrt(Utilities::MPI::sum(norm_vv_temp, MPI_COMM_WORLD));
}
- template <class VectorType>
+ template <class VectorType,
+ std::enable_if_t<
+ !is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
double
sadd_and_norm(VectorType & v,
const double factor_a,
}
- template <class Number>
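+ // Optimized path: computes v = factor_a * v + factor_b * w element-wise,
+ // block by block, and returns the l2 norm of the result.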
+ template <class VectorType,
+ std::enable_if_t<
+ is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
double
- sadd_and_norm(
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &v,
- const double factor_a,
- const LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &b,
- const double factor_b)
+ sadd_and_norm(VectorType & v,
+ const double factor_a,
+ const VectorType &w,
+ const double factor_b)
{
double norm = 0;
- for (unsigned int j = 0; j < v.locally_owned_size(); ++j)
- {
- const double temp =
- v.local_element(j) * factor_a + b.local_element(j) * factor_b;
+ for (unsigned int b = 0; b < n_blocks(v); ++b)
+ for (unsigned int j = 0; j < block(v, b).locally_owned_size(); ++j)
+ {
+ const double temp = block(v, b).local_element(j) * factor_a +
+ block(w, b).local_element(j) * factor_b;
- v.local_element(j) = temp;
+ block(v, b).local_element(j) = temp;
- norm += temp * temp;
- }
+ norm += temp * temp;
+ }
return std::sqrt(Utilities::MPI::sum(norm, MPI_COMM_WORLD));
}
- template <class VectorType>
+ template <class VectorType,
+ std::enable_if_t<
+ !is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
void
add(VectorType & p,
const unsigned int dim,
- template <class Number>
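+ // Optimized path: adds (or assigns, if zero_out is set) the linear
+ // combination sum_i h(i) * tmp_vectors[i] to p, block by block.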
+ template <class VectorType,
+ std::enable_if_t<
+ is_dealii_compatible_distributed_vector<VectorType>::value,
+ VectorType> * = nullptr>
void
- add(LinearAlgebra::distributed::Vector<Number, MemorySpace::Host> &p,
- const unsigned int dim,
- const Vector<double> & h,
- const internal::SolverGMRESImplementation::TmpVectors<
- LinearAlgebra::distributed::Vector<Number, MemorySpace::Host>>
+ add(VectorType & p,
+ const unsigned int dim,
+ const Vector<double> &h,
+ const internal::SolverGMRESImplementation::TmpVectors<VectorType>
& tmp_vectors,
const bool zero_out)
{
- for (unsigned int j = 0; j < p.locally_owned_size(); ++j)
- {
- double temp = zero_out ? 0 : p.local_element(j);
- for (unsigned int i = 0; i < dim; ++i)
- temp += tmp_vectors[i].local_element(j) * h(i);
- p.local_element(j) = temp;
- }
+ for (unsigned int b = 0; b < n_blocks(p); ++b)
+ for (unsigned int j = 0; j < block(p, b).locally_owned_size(); ++j)
+ {
+ double temp = zero_out ? 0 : block(p, b).local_element(j);
+ for (unsigned int i = 0; i < dim; ++i)
+ temp += block(tmp_vectors[i], b).local_element(j) * h(i);
+ block(p, b).local_element(j) = temp;
+ }
}
#include <deal.II/lac/diagonal_matrix.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_gmres.h>
dst.scale(diagonal);
}
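+ // Block-vector variant: apply the same diagonal to every block.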
+ void
+ vmult(LinearAlgebra::distributed::BlockVector<double> & dst,
+ const LinearAlgebra::distributed::BlockVector<double> &src) const
+ {
+ dst = src;
+ for (unsigned int b = 0; b < src.n_blocks(); ++b)
+ dst.block(b).scale(diagonal);
+ }
+
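+ // Size of the (square) matrix, used to size the vectors in main().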
+ unsigned int
+ m() const
+ {
+ return diagonal.size();
+ }
+
const LinearAlgebra::distributed::Vector<double> &diagonal;
};
}
+
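+// Print the estimated residual at each iteration of the block-vector
+// solves, mirroring the existing monitor_norm() for plain vectors.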
+SolverControl::State
+monitor_norm_block(const unsigned int iteration,
+ const double check_value,
+ const LinearAlgebra::distributed::BlockVector<double> &)
+{
+ deallog << " estimated residual at iteration " << iteration << ": "
+ << check_value << std::endl;
+ return SolverControl::success;
+}
+
+
int
main()
{
initlog();
// Create diagonal matrix with entries between 1 and 30
- DiagonalMatrix<LinearAlgebra::distributed::Vector<double>> unit_matrix;
- unit_matrix.get_vector().reinit(30);
- unit_matrix.get_vector() = 1.0;
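+ // Diagonal matrix with unit entries, used below as a trivial
+ // preconditioner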
+ LinearAlgebra::distributed::Vector<double> unit_matrix_entries(30);
+ unit_matrix_entries = 1.0;
+ MyDiagonalMatrix unit_matrix(unit_matrix_entries);
LinearAlgebra::distributed::Vector<double> matrix_entries(unit_matrix.m());
for (unsigned int i = 0; i < unit_matrix.m(); ++i)
matrix_entries(i) = i + 1;
MyDiagonalMatrix matrix(matrix_entries);
- LinearAlgebra::distributed::Vector<double> rhs(unit_matrix.m()),
- sol(unit_matrix.m());
+ LinearAlgebra::distributed::Vector<double> rhs(unit_matrix.m());
+ LinearAlgebra::distributed::Vector<double> sol(unit_matrix.m());
rhs = 1.;
- deallog << "Solve with PreconditionIdentity: " << std::endl;
- SolverControl control(40, 1e-4);
- SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData data3(
- 8);
- data3.orthogonalization_strategy =
- SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData::
- OrthogonalizationStrategy::classical_gram_schmidt;
- SolverGMRES<LinearAlgebra::distributed::Vector<double>> solver(control,
- data3);
- solver.connect(&monitor_norm);
- solver.solve(matrix, sol, rhs, PreconditionIdentity());
-
- deallog << "Solve with diagonal preconditioner: " << std::endl;
- sol = 0;
- solver.solve(matrix, sol, rhs, unit_matrix);
+ {
+ deallog << "Solve with PreconditionIdentity: " << std::endl;
+ SolverControl control(40, 1e-4);
+ SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData
+ data3(8);
+ data3.orthogonalization_strategy =
+ SolverGMRES<LinearAlgebra::distributed::Vector<double>>::AdditionalData::
+ OrthogonalizationStrategy::classical_gram_schmidt;
+ SolverGMRES<LinearAlgebra::distributed::Vector<double>> solver(control,
+ data3);
+ solver.connect(&monitor_norm);
+ solver.solve(matrix, sol, rhs, PreconditionIdentity());
+
+ deallog << "Solve with diagonal preconditioner: " << std::endl;
+ sol = 0;
+ solver.solve(matrix, sol, rhs, unit_matrix);
+ }
+
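+ // Repeat both solves with block vectors containing a single block, to
+ // exercise the block-vector code paths added above.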
+ {
+ LinearAlgebra::distributed::BlockVector<double> rhs_(1);
+ LinearAlgebra::distributed::BlockVector<double> sol_(1);
+ rhs_.block(0) = rhs;
+ sol_.block(0) = sol;
+
+ deallog << "Solve with PreconditionIdentity: " << std::endl;
+ SolverControl control(40, 1e-4);
+ SolverGMRES<LinearAlgebra::distributed::BlockVector<double>>::AdditionalData
+ data3(8);
+ data3.orthogonalization_strategy =
+ SolverGMRES<LinearAlgebra::distributed::BlockVector<double>>::
+ AdditionalData::OrthogonalizationStrategy::classical_gram_schmidt;
+ SolverGMRES<LinearAlgebra::distributed::BlockVector<double>> solver(control,
+ data3);
+ solver.connect(&monitor_norm_block);
+ solver.solve(matrix, sol_, rhs_, PreconditionIdentity());
+
+ deallog << "Solve with diagonal preconditioner: " << std::endl;
+ sol_ = 0;
+ solver.solve(matrix, sol_, rhs_, unit_matrix);
+ }
}