From: Denis Davydov
Date: Sat, 10 Feb 2018 06:50:53 +0000 (+0100)
Subject: add BlockVector::multivector_inner_product_with_metric()
X-Git-Tag: v9.0.0-rc1~445^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F5884%2Fhead;p=dealii.git

add BlockVector::multivector_inner_product_with_metric()

and BlockVector::mmult(const BlockVector &, const FullMatrixType &)
---

diff --git a/doc/news/changes/minor/20180210DenisDavydov b/doc/news/changes/minor/20180210DenisDavydov
new file mode 100644
index 0000000000..ddda9265bb
--- /dev/null
+++ b/doc/news/changes/minor/20180210DenisDavydov
@@ -0,0 +1,5 @@
+New: add LinearAlgebra::distributed::BlockVector::mmult(const BlockVector &, const FullMatrixType &)
+and LinearAlgebra::distributed::BlockVector::multivector_inner_product_with_metric(const FullMatrixType &, const BlockVector &V, const bool) const
+to operate on multivectors with a metric tensor.
+
+(Denis Davydov, 2018/02/10)
diff --git a/include/deal.II/lac/la_parallel_block_vector.h b/include/deal.II/lac/la_parallel_block_vector.h
index 81545c8bea..31d644831a 100644
--- a/include/deal.II/lac/la_parallel_block_vector.h
+++ b/include/deal.II/lac/la_parallel_block_vector.h
@@ -469,6 +469,39 @@ namespace LinearAlgebra
                                            const BlockVector &V,
                                            const bool symmetric = false) const;
 
+      /**
+       * Calculate the scalar product between each block of this vector and @p V
+       * using a metric tensor @p matrix. This function
+       * computes the result of $ \sum_{ij} A^{ij} U_i \cdot V_j$ where $U_i$
+       * and $V_j$ indicate the $i$th block (not element) of $U$ and the
+       * $j$th block of $V$, respectively. If @p symmetric is
+       * <code>true</code>, it is assumed that the metric $A^{ij}$ and the
+       * matrix of inner products $U_i \cdot V_j$ are symmetric, in which case
+       * almost half of the scalar products can be avoided.
+       *
+       * Obviously, this function can only be used if all blocks of both vectors
+       * are of the same size.
+       *
+       * @note Internally, a single global reduction will be called to
+       * accumulate the scalar products between locally owned degrees of
+       * freedom.
+       */
+      template <typename FullMatrixType>
+      Number multivector_inner_product_with_metric(const FullMatrixType &matrix,
+                                                   const BlockVector &V,
+                                                   const bool symmetric = false) const;
+
+      /**
+       * Set each block of this vector as follows:
+       * $U^i = \sum_{j} V_j A^{ji}$ where $U^i$
+       * and $V_j$ indicate the $i$th block (not element) of $U$ and the
+       * $j$th block of $V$, respectively.
+       *
+       * Obviously, this function can only be used if all blocks of both vectors
+       * are of the same size.
+       */
+      template <typename FullMatrixType>
+      void mmult(const BlockVector &V,
+                 const FullMatrixType &matrix);
+
       /**
        * Add @p a to all components. Note that @p a is a scalar not a vector.
        */
diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h
index ed35e14ce9..0090e56c5c 100644
--- a/include/deal.II/lac/la_parallel_block_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_block_vector.templates.h
@@ -866,6 +866,85 @@ namespace LinearAlgebra
       Utilities::MPI::sum(matrix, this->block(0).get_mpi_communicator(), matrix);
     }
 
+
+
+    template <typename Number>
+    template <typename FullMatrixType>
+    Number
+    BlockVector<Number>::multivector_inner_product_with_metric(const FullMatrixType &matrix,
+                                                               const BlockVector<Number> &V,
+                                                               const bool symmetric) const
+    {
+      Number res = Number(0.);
+
+      const unsigned int m = this->n_blocks();
+      const unsigned int n = V.n_blocks();
+
+      // in case one vector is empty and the second one is not, the
+      // FullMatrix resized to (m,n) will have 0 both in m() and n()
+      // which is how TableBase::reinit() works.
+      // Since in this case there is nothing to do anyway -- return immediately.
+      if (n==0 || m==0)
+        return res;
+
+      Assert (matrix.m() == m,
+              dealii::ExcDimensionMismatch(matrix.m(),m));
+      Assert (matrix.n() == n,
+              dealii::ExcDimensionMismatch(matrix.n(),n));
+
+      if (symmetric)
+        {
+          Assert (m == n,
+                  dealii::ExcDimensionMismatch(m,n));
+
+          for (unsigned int i = 0; i < m; i++)
+            {
+              res += matrix(i,i) * this->block(i).inner_product_local(V.block(i));
+              for (unsigned int j = i+1; j < n; j++)
+                res += 2. * matrix(i,j) * this->block(i).inner_product_local(V.block(j));
+            }
+        }
+      else
+        {
+          for (unsigned int i = 0; i < m; i++)
+            for (unsigned int j = 0; j < n; j++)
+              res += matrix(i,j) * this->block(i).inner_product_local(V.block(j));
+        }
+
+      return Utilities::MPI::sum(res, this->block(0).get_mpi_communicator());
+    }
+
+
+
+    template <typename Number>
+    template <typename FullMatrixType>
+    void
+    BlockVector<Number>::mmult(const BlockVector<Number> &V,
+                               const FullMatrixType &matrix)
+    {
+      const unsigned int n = this->n_blocks();
+      const unsigned int m = V.n_blocks();
+
+      // in case one vector is empty and the second one is not, the
+      // FullMatrix resized to (m,n) will have 0 both in m() and n()
+      // which is how TableBase::reinit() works.
+      // Since in this case there is nothing to do anyway -- return immediately.
+      if (n==0 || m==0)
+        return;
+
+      Assert (matrix.m() == m,
+              dealii::ExcDimensionMismatch(matrix.m(),m));
+      Assert (matrix.n() == n,
+              dealii::ExcDimensionMismatch(matrix.n(),n));
+
+      (*this) = Number();
+      for (unsigned int i = 0; i < n; i++)
+        for (unsigned int j = 0; j < m; j++)
+          this->block(i).add (matrix(j,i), V.block(j));
+    }
+
+
+
   } // end of namespace distributed
 } // end of namespace LinearAlgebra
diff --git a/source/lac/la_parallel_block_vector.inst.in b/source/lac/la_parallel_block_vector.inst.in
index 92ce202797..657e150417 100644
--- a/source/lac/la_parallel_block_vector.inst.in
+++ b/source/lac/la_parallel_block_vector.inst.in
@@ -28,6 +28,18 @@ for (SCALAR : REAL_AND_COMPLEX_SCALARS)
     template void BlockVector<SCALAR>::multivector_inner_product(LAPACKFullMatrix<SCALAR> &, const BlockVector<SCALAR> &V, const bool) const;
+    template
+    SCALAR
+    BlockVector<SCALAR>::multivector_inner_product_with_metric(const FullMatrix<SCALAR> &, const BlockVector<SCALAR> &V, const bool) const;
+    template
+    SCALAR
+    BlockVector<SCALAR>::multivector_inner_product_with_metric(const LAPACKFullMatrix<SCALAR> &, const BlockVector<SCALAR> &V, const bool) const;
+    template
+    void
+    BlockVector<SCALAR>::mmult(const BlockVector<SCALAR> &V, const FullMatrix<SCALAR> &);
+    template
+    void
+    BlockVector<SCALAR>::mmult(const BlockVector<SCALAR> &V, const LAPACKFullMatrix<SCALAR> &);
     \}
     \}
 }
diff --git a/tests/mpi/parallel_block_vector_07.cc b/tests/mpi/parallel_block_vector_07.cc
new file mode 100644
index 0000000000..7e7d67f563
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_07.cc
@@ -0,0 +1,160 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test BlockVector::multivector_inner_product_with_metric().
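+//
+// The check below evaluates $\sum_{ij} A^{ij} u_i \cdot v_j$ once through the
+// new function and once through an explicit double loop over blocks, and then
+// compares the two results.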
+// Triangulation and Mass operator are the same as in matrix_free/mass_operator_01.cc
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <iostream>
+
+
+
+
+template <int dim, int fe_degree>
+void test (const unsigned int n_blocks = 5)
+{
+  typedef double number;
+
+  parallel::distributed::Triangulation<dim> tria (MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator
+  cell = tria.begin_active (),
+  endc = tria.end();
+  cell = tria.begin_active ();
+  for (; cell!=endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm()<0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels()-1)->is_locally_owned())
+    tria.begin(tria.n_levels()-1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active ();
+  for (unsigned int i=0; i<10-3*dim; ++i)
+    {
+      cell = tria.begin_active ();
+      unsigned int counter = 0;
+      for (; cell!=endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7-i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim> fe (fe_degree);
+  DoFHandler<dim> dof (tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dof, relevant_set);
+
+  ConstraintMatrix constraints (relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values (dof, 0, Functions::ZeroFunction<dim>(),
+                                            constraints);
+  constraints.close();
+
+  std::shared_ptr<MatrixFree<dim,number> > mf_data(new MatrixFree<dim,number> ());
+  {
+    const QGauss<1> quad (fe_degree+2);
+    typename MatrixFree<dim,number>::AdditionalData data;
+    data.tasks_parallel_scheme =
+      MatrixFree<dim,number>::AdditionalData::none;
+    data.tasks_block_size = 7;
+    mf_data->reinit (dof, constraints, quad, data);
+  }
+
+  MatrixFreeOperators::MassOperator<dim, fe_degree, fe_degree+2, 1,
+                                    LinearAlgebra::distributed::Vector<number> > mf;
+  mf.initialize(mf_data);
+  mf.compute_diagonal();
+
+  LinearAlgebra::distributed::BlockVector<number> left(n_blocks), right(n_blocks);
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      mf_data->initialize_dof_vector (left.block(b));
+      mf_data->initialize_dof_vector (right.block(b));
+      left.block(b) = 0.;
+      right.block(b) = 0.;
+      for (unsigned int i=0; i<right.block(b).local_size(); ++i)
+        {
+          right.block(b).local_element(i) = random_value<number>();
+          left.block(b).local_element(i) = random_value<number>();
+        }
+    }
+
+  FullMatrix<number> metric(n_blocks,n_blocks);
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = 0; j < n_blocks; ++j)
+      metric(i,j) = 1. + (i*3 + j*7);
+
+  const double res = left.multivector_inner_product_with_metric(metric, right);
+
+  double res2 = 0.;
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = 0; j < n_blocks; ++j)
+      res2 += metric(i,j) * (left.block(i) * right.block(j));
+
+  const double diff_norm = std::abs(res-res2);
+  deallog << "Norm of difference: " << diff_norm << std::endl;
+}
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      test<2,1>();
+    }
+  else
+    {
+      test<2,1>();
+    }
+}
diff --git a/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=1.output b/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=1.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=1.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=4.output b/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_07.with_p4est=true.mpirun=4.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_08.cc b/tests/mpi/parallel_block_vector_08.cc
new file mode 100644
index 0000000000..bccb635727
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_08.cc
@@ -0,0 +1,163 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test BlockVector::mmult(const BlockVector &V, const FullMatrixType &matrix).
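+//
+// The check below forms $u^i = \sum_j v_j A^{ji}$ once through the new mmult()
+// and once through explicit block-wise add() calls, and then compares the two
+// results.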
+// Triangulation and Mass operator are the same as in matrix_free/mass_operator_01.cc
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <iostream>
+
+
+
+
+template <int dim, int fe_degree>
+void test (const unsigned int n_blocks = 5)
+{
+  typedef double number;
+
+  parallel::distributed::Triangulation<dim> tria (MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator
+  cell = tria.begin_active (),
+  endc = tria.end();
+  cell = tria.begin_active ();
+  for (; cell!=endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm()<0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels()-1)->is_locally_owned())
+    tria.begin(tria.n_levels()-1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active ();
+  for (unsigned int i=0; i<10-3*dim; ++i)
+    {
+      cell = tria.begin_active ();
+      unsigned int counter = 0;
+      for (; cell!=endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7-i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim> fe (fe_degree);
+  DoFHandler<dim> dof (tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dof, relevant_set);
+
+  ConstraintMatrix constraints (relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values (dof, 0, Functions::ZeroFunction<dim>(),
+                                            constraints);
+  constraints.close();
+
+  std::shared_ptr<MatrixFree<dim,number> > mf_data(new MatrixFree<dim,number> ());
+  {
+    const QGauss<1> quad (fe_degree+2);
+    typename MatrixFree<dim,number>::AdditionalData data;
+    data.tasks_parallel_scheme =
+      MatrixFree<dim,number>::AdditionalData::none;
+    data.tasks_block_size = 7;
+    mf_data->reinit (dof, constraints, quad, data);
+  }
+
+  MatrixFreeOperators::MassOperator<dim, fe_degree, fe_degree+2, 1,
+                                    LinearAlgebra::distributed::Vector<number> > mf;
+  mf.initialize(mf_data);
+  mf.compute_diagonal();
+
+  LinearAlgebra::distributed::BlockVector<number> left(n_blocks), right(n_blocks), left2(n_blocks);
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      mf_data->initialize_dof_vector (left.block(b));
+      mf_data->initialize_dof_vector (left2.block(b));
+      mf_data->initialize_dof_vector (right.block(b));
+      left.block(b) = 0.;
+      left2.block(b) = 0.;
+      right.block(b) = 0.;
+      for (unsigned int i=0; i<right.block(b).local_size(); ++i)
+        {
+          right.block(b).local_element(i) = random_value<number>();
+          left.block(b).local_element(i) = random_value<number>();
+        }
+    }
+
+  FullMatrix<number> metric(n_blocks,n_blocks);
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = 0; j < n_blocks; ++j)
+      metric(i,j) = 0.3 + (i*3 + j*7);
+
+  left.mmult(right,metric);
+
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = 0; j < n_blocks; ++j)
+      left2.block(i).add(metric(j,i), right.block(j));
+
+  left2.add(-1., left);
+
+  const double diff_norm = left2.linfty_norm();
+  deallog << "Norm of difference: " << diff_norm << std::endl;
+}
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      test<2,1>();
+    }
+  else
+    {
+      test<2,1>();
+    }
+}
diff --git a/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=1.output b/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=1.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=1.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=4.output b/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_08.with_p4est=true.mpirun=4.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_09.cc b/tests/mpi/parallel_block_vector_09.cc
new file mode 100644
index 0000000000..a66bc3780e
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_09.cc
@@ -0,0 +1,168 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// use BlockVector::mmult(const BlockVector &V, const FullMatrixType &matrix)
+// and multivector_inner_product_with_metric() to test that
+// [ S^{ij} u_i ] * u_j == S^{ij} [ u_i * u_j ].
+// In order to avoid taking S^T, just use a symmetric matrix as the metric,
+// which is actually the way it should be anyway.
+// Triangulation and Mass operator are the same as in matrix_free/mass_operator_01.cc
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <iostream>
+
+
+
+
+template <int dim, int fe_degree>
+void test (const unsigned int n_blocks = 5)
+{
+  typedef double number;
+
+  parallel::distributed::Triangulation<dim> tria (MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator
+  cell = tria.begin_active (),
+  endc = tria.end();
+  cell = tria.begin_active ();
+  for (; cell!=endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm()<0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels()-1)->is_locally_owned())
+    tria.begin(tria.n_levels()-1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active ();
+  for (unsigned int i=0; i<10-3*dim; ++i)
+    {
+      cell = tria.begin_active ();
+      unsigned int counter = 0;
+      for (; cell!=endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7-i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim> fe (fe_degree);
+  DoFHandler<dim> dof (tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dof, relevant_set);
+
+  ConstraintMatrix constraints (relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values (dof, 0, Functions::ZeroFunction<dim>(),
+                                            constraints);
+  constraints.close();
+
+  std::shared_ptr<MatrixFree<dim,number> > mf_data(new MatrixFree<dim,number> ());
+  {
+    const QGauss<1> quad (fe_degree+2);
+    typename MatrixFree<dim,number>::AdditionalData data;
+    data.tasks_parallel_scheme =
+      MatrixFree<dim,number>::AdditionalData::none;
+    data.tasks_block_size = 7;
+    mf_data->reinit (dof, constraints, quad, data);
+  }
+
+  MatrixFreeOperators::MassOperator<dim, fe_degree, fe_degree+2, 1,
+                                    LinearAlgebra::distributed::Vector<number> > mf;
+  mf.initialize(mf_data);
+  mf.compute_diagonal();
+
+  LinearAlgebra::distributed::BlockVector<number> left(n_blocks), right(n_blocks), left2(n_blocks);
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      mf_data->initialize_dof_vector (left.block(b));
+      mf_data->initialize_dof_vector (left2.block(b));
+      mf_data->initialize_dof_vector (right.block(b));
+      left.block(b) = 0.;
+      left2.block(b) = 0.;
+      right.block(b) = 0.;
+      for (unsigned int i=0; i<right.block(b).local_size(); ++i)
+        {
+          right.block(b).local_element(i) = random_value<number>();
+          left.block(b).local_element(i) = random_value<number>();
+        }
+    }
+
+  FullMatrix<number> metric(n_blocks,n_blocks);
+  metric = 0.;
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = i; j < n_blocks; ++j)
+      {
+        const double val = 0.3 + (3.3*i + 7.7*j);
+        metric(i,j) = val;
+        metric(j,i) = val;
+      }
+
+  const double res = left.multivector_inner_product_with_metric(metric, right);
+  left2.mmult(left, metric);
+  const double res2 = left2 * right;
+
+  const double diff_norm = std::abs(res - res2);
+  deallog << "Norm of difference: " << diff_norm << std::endl;
+}
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      test<2,1>();
+    }
+  else
+    {
+      test<2,1>();
+    }
+}
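The identity exercised by this test can also be reproduced outside the test
harness. The following is a minimal standalone sketch, not part of the patch:
the serial block-sizes constructor, the block sizes, and the fill values are
illustrative assumptions.

    // Minimal sketch (assumed setup): with a symmetric metric A, contracting
    // both multivectors in one call agrees with first forming
    // w_i = sum_j v_j A^{ji} via mmult() and then taking the plain dot product.
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/la_parallel_block_vector.h>

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main (int argc, char **argv)
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi (argc, argv, 1);

      const unsigned int n_blocks = 3, block_size = 4; // assumed sizes
      const std::vector<dealii::types::global_dof_index> sizes (n_blocks, block_size);
      dealii::LinearAlgebra::distributed::BlockVector<double> u(sizes), v(sizes), w(sizes);

      // arbitrary deterministic fill
      for (unsigned int b = 0; b < n_blocks; ++b)
        for (unsigned int i = 0; i < block_size; ++i)
          {
            u.block(b).local_element(i) = 1. + b + 0.1 * i;
            v.block(b).local_element(i) = 2. - b + 0.3 * i;
          }

      // symmetric metric, as recommended by the comment in this test
      dealii::FullMatrix<double> metric (n_blocks, n_blocks);
      for (unsigned int i = 0; i < n_blocks; ++i)
        for (unsigned int j = i; j < n_blocks; ++j)
          metric(i,j) = metric(j,i) = 0.5 + i + 2. * j;

      // sum_{ij} A^{ij} u_i . v_j in one call ...
      const double res1 = u.multivector_inner_product_with_metric(metric, v);

      // ... versus w_i = sum_j v_j A^{ji}, then u . w
      w.mmult(v, metric);
      const double res2 = u * w;

      std::cout << "difference: " << std::abs(res1 - res2) << std::endl; // ~0
    }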
diff --git a/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=1.output b/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=1.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=1.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=5.output b/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=5.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_09.with_p4est=true.mpirun=5.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
diff --git a/tests/mpi/parallel_block_vector_10.cc b/tests/mpi/parallel_block_vector_10.cc
new file mode 100644
index 0000000000..6a6e2bd8a5
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_10.cc
@@ -0,0 +1,161 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// same as _07 but uses the identity matrix as the metric, so the result should
+// be the same as operator*
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <iostream>
+
+
+
+
+template <int dim, int fe_degree>
+void test (const unsigned int n_blocks = 5)
+{
+  typedef double number;
+
+  parallel::distributed::Triangulation<dim> tria (MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator
+  cell = tria.begin_active (),
+  endc = tria.end();
+  cell = tria.begin_active ();
+  for (; cell!=endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm()<0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels()-1)->is_locally_owned())
+    tria.begin(tria.n_levels()-1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active ();
+  for (unsigned int i=0; i<10-3*dim; ++i)
+    {
+      cell = tria.begin_active ();
+      unsigned int counter = 0;
+      for (; cell!=endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7-i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim> fe (fe_degree);
+  DoFHandler<dim> dof (tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dof, relevant_set);
+
+  ConstraintMatrix constraints (relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values (dof, 0, Functions::ZeroFunction<dim>(),
+                                            constraints);
+  constraints.close();
+
+  std::shared_ptr<MatrixFree<dim,number> > mf_data(new MatrixFree<dim,number> ());
+  {
+    const QGauss<1> quad (fe_degree+2);
+    typename MatrixFree<dim,number>::AdditionalData data;
+    data.tasks_parallel_scheme =
+      MatrixFree<dim,number>::AdditionalData::none;
+    data.tasks_block_size = 7;
+    mf_data->reinit (dof, constraints, quad, data);
+  }
+
+  MatrixFreeOperators::MassOperator<dim, fe_degree, fe_degree+2, 1,
+                                    LinearAlgebra::distributed::Vector<number> > mf;
+  mf.initialize(mf_data);
+  mf.compute_diagonal();
+
+  LinearAlgebra::distributed::BlockVector<number> left(n_blocks), right(n_blocks);
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      mf_data->initialize_dof_vector (left.block(b));
+      mf_data->initialize_dof_vector (right.block(b));
+      left.block(b) = 0.;
+      right.block(b) = 0.;
+      for (unsigned int i=0; i<right.block(b).local_size(); ++i)
+        {
+          right.block(b).local_element(i) = random_value<number>();
+          left.block(b).local_element(i) = random_value<number>();
+        }
+    }
+
+  FullMatrix<number> metric(n_blocks,n_blocks);
+  metric = 0.;
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    metric(i,i) = 1.;
+
+  const double res = left.multivector_inner_product_with_metric(metric, right);
+  const double res2 = left * right;
+
+  double res3 = 0.;
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    res3 += left.block(i) * right.block(i);
+
+  const double diff_norm = std::abs(res-res2);
+  const double diff_norm2 = std::abs(res-res3);
+  deallog << "Norm of difference: " << diff_norm << " " << diff_norm2 << std::endl;
+}
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      test<2,1>();
+    }
+  else
+    {
+      test<2,1>();
+    }
+}
diff --git a/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=1.output b/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=1.output
new file mode 100644
index 0000000000..9b6ca0bae6
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=1.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000 0.000
diff --git a/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=4.output b/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..9b6ca0bae6
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_10.with_p4est=true.mpirun=4.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000 0.000
diff --git a/tests/mpi/parallel_block_vector_11.cc b/tests/mpi/parallel_block_vector_11.cc
new file mode 100644
index 0000000000..3771204634
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_11.cc
@@ -0,0 +1,165 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+// test BlockVector::multivector_inner_product_with_metric() with symmetric=true.
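+//
+// Here left = M*right for a symmetric mass matrix M, so the matrix of inner
+// products left_i . right_j is itself symmetric and the symmetric=true
+// shortcut is valid.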
+// Triangulation and Mass operator are the same as in matrix_free/mass_operator_01.cc
+
+#include "../tests.h"
+
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/function.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/matrix_free/operators.h>
+#include <deal.II/lac/la_parallel_block_vector.h>
+#include <deal.II/lac/full_matrix.h>
+
+#include <iostream>
+
+
+
+
+template <int dim, int fe_degree>
+void test (const unsigned int n_blocks = 5)
+{
+  typedef double number;
+
+  parallel::distributed::Triangulation<dim> tria (MPI_COMM_WORLD);
+  GridGenerator::hyper_cube (tria);
+  tria.refine_global(1);
+  typename Triangulation<dim>::active_cell_iterator
+  cell = tria.begin_active (),
+  endc = tria.end();
+  cell = tria.begin_active ();
+  for (; cell!=endc; ++cell)
+    if (cell->is_locally_owned())
+      if (cell->center().norm()<0.2)
+        cell->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  if (dim < 3 && fe_degree < 2)
+    tria.refine_global(2);
+  else
+    tria.refine_global(1);
+  if (tria.begin(tria.n_levels()-1)->is_locally_owned())
+    tria.begin(tria.n_levels()-1)->set_refine_flag();
+  if (tria.last()->is_locally_owned())
+    tria.last()->set_refine_flag();
+  tria.execute_coarsening_and_refinement();
+  cell = tria.begin_active ();
+  for (unsigned int i=0; i<10-3*dim; ++i)
+    {
+      cell = tria.begin_active ();
+      unsigned int counter = 0;
+      for (; cell!=endc; ++cell, ++counter)
+        if (cell->is_locally_owned())
+          if (counter % (7-i) == 0)
+            cell->set_refine_flag();
+      tria.execute_coarsening_and_refinement();
+    }
+
+  FE_Q<dim> fe (fe_degree);
+  DoFHandler<dim> dof (tria);
+  dof.distribute_dofs(fe);
+
+  IndexSet owned_set = dof.locally_owned_dofs();
+  IndexSet relevant_set;
+  DoFTools::extract_locally_relevant_dofs (dof, relevant_set);
+
+  ConstraintMatrix constraints (relevant_set);
+  DoFTools::make_hanging_node_constraints(dof, constraints);
+  VectorTools::interpolate_boundary_values (dof, 0, Functions::ZeroFunction<dim>(),
+                                            constraints);
+  constraints.close();
+
+  std::shared_ptr<MatrixFree<dim,number> > mf_data(new MatrixFree<dim,number> ());
+  {
+    const QGauss<1> quad (fe_degree+2);
+    typename MatrixFree<dim,number>::AdditionalData data;
+    data.tasks_parallel_scheme =
+      MatrixFree<dim,number>::AdditionalData::none;
+    data.tasks_block_size = 7;
+    mf_data->reinit (dof, constraints, quad, data);
+  }
+
+  MatrixFreeOperators::MassOperator<dim, fe_degree, fe_degree+2, 1,
+                                    LinearAlgebra::distributed::Vector<number> > mf;
+  mf.initialize(mf_data);
+  mf.compute_diagonal();
+
+  LinearAlgebra::distributed::BlockVector<number> left(n_blocks), right(n_blocks);
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      mf_data->initialize_dof_vector (left.block(b));
+      mf_data->initialize_dof_vector (right.block(b));
+      left.block(b) = 0.;
+      right.block(b) = 0.;
+      for (unsigned int i=0; i<right.block(b).local_size(); ++i)
+        {
+          right.block(b).local_element(i) = random_value<number>();
+        }
+      right.block(b).update_ghost_values();
+      mf.vmult (left.block(b), right.block(b));
+    }
+
+  FullMatrix<number> metric(n_blocks,n_blocks);
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = i; j < n_blocks; ++j)
+      {
+        const double val = 1. + (i*3 + j*7);
+        metric(i,j) = val;
+        metric(j,i) = val;
+      }
+
+  const double res = left.multivector_inner_product_with_metric(metric, right, true);
+
+  double res2 = 0.;
+  for (unsigned int i = 0; i < n_blocks; ++i)
+    for (unsigned int j = 0; j < n_blocks; ++j)
+      res2 += metric(i,j) * (left.block(i) * right.block(j));
+
+  const double diff_norm = std::abs(res-res2);
+  deallog << "Norm of difference: " << diff_norm << std::endl;
+}
+
+
+int main (int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, testing_max_num_threads());
+
+  unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+  deallog.push(Utilities::int_to_string(myid));
+
+  if (myid == 0)
+    {
+      initlog();
+      deallog << std::setprecision(4);
+
+      test<2,1>();
+    }
+  else
+    {
+      test<2,1>();
+    }
+}
diff --git a/tests/mpi/parallel_block_vector_11.with_p4est=true.mpirun=4.output b/tests/mpi/parallel_block_vector_11.with_p4est=true.mpirun=4.output
new file mode 100644
index 0000000000..b3f4ed9bf1
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_11.with_p4est=true.mpirun=4.output
@@ -0,0 +1,2 @@
+
+DEAL:0::Norm of difference: 0.000
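Finally, the symmetric fast path itself admits a compact standalone check. The
sketch below is again illustrative and not part of the patch (block sizes,
fill values, and the serial block-sizes constructor are assumptions): taking
V = U makes the matrix of inner products U_i . U_j symmetric, so symmetric=true
must reproduce the full double loop.

    // Minimal sketch (assumed setup): symmetric=true visits only the upper
    // triangle and doubles the off-diagonal terms; with V = U and a symmetric
    // metric it must agree with the full symmetric=false loop.
    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/full_matrix.h>
    #include <deal.II/lac/la_parallel_block_vector.h>

    #include <cmath>
    #include <iostream>
    #include <vector>

    int main (int argc, char **argv)
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi (argc, argv, 1);

      const unsigned int n_blocks = 4, block_size = 5; // assumed sizes
      const std::vector<dealii::types::global_dof_index> sizes (n_blocks, block_size);
      dealii::LinearAlgebra::distributed::BlockVector<double> u(sizes);

      // arbitrary deterministic fill
      for (unsigned int b = 0; b < n_blocks; ++b)
        for (unsigned int i = 0; i < block_size; ++i)
          u.block(b).local_element(i) = std::sin (1. + b + 2. * i);

      // symmetric metric
      dealii::FullMatrix<double> metric (n_blocks, n_blocks);
      for (unsigned int i = 0; i < n_blocks; ++i)
        for (unsigned int j = i; j < n_blocks; ++j)
          metric(i,j) = metric(j,i) = 1. + 3. * i + 7. * j;

      const double fast = u.multivector_inner_product_with_metric(metric, u, true);
      const double full = u.multivector_inner_product_with_metric(metric, u, false);

      std::cout << "difference: " << std::abs(fast - full) << std::endl; // ~0
    }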