From c172ca9d9d2daf25cbf3c91ae2be4e7798ad8bdd Mon Sep 17 00:00:00 2001
From: Uwe Köcher
Date: Thu, 14 Jun 2018 13:40:29 +0200
Subject: [PATCH] add tests for TrilinosWrappers::SparseMatrix::add and
 ::copy_from

new file: tests/trilinos/sparse_matrix_add_03.cc
new file: tests/trilinos/sparse_matrix_add_03.with_trilinos=true.with_mpi=true.mpirun=3.output
new file: tests/trilinos/sparse_matrix_copy_from_02.cc
new file: tests/trilinos/sparse_matrix_copy_from_02.with_trilinos=true.with_mpi=true.mpirun=3.output
---
 tests/trilinos/sparse_matrix_add_03.cc        | 114 ++++++++++++++++++
 ...rilinos=true.with_mpi=true.mpirun=3.output |   5 +
 tests/trilinos/sparse_matrix_copy_from_02.cc  | 114 ++++++++++++++++++
 ...rilinos=true.with_mpi=true.mpirun=3.output |   5 +
 4 files changed, 238 insertions(+)
 create mode 100644 tests/trilinos/sparse_matrix_add_03.cc
 create mode 100644 tests/trilinos/sparse_matrix_add_03.with_trilinos=true.with_mpi=true.mpirun=3.output
 create mode 100644 tests/trilinos/sparse_matrix_copy_from_02.cc
 create mode 100644 tests/trilinos/sparse_matrix_copy_from_02.with_trilinos=true.with_mpi=true.mpirun=3.output

diff --git a/tests/trilinos/sparse_matrix_add_03.cc b/tests/trilinos/sparse_matrix_add_03.cc
new file mode 100644
index 0000000000..e46b6ec315
--- /dev/null
+++ b/tests/trilinos/sparse_matrix_add_03.cc
@@ -0,0 +1,114 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test TrilinosWrappers::SparseMatrix::add(factor, other_matrix)
+// for the case of a non-contiguous set of rows.
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  const auto MyPID{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)};
+  const auto NumProc{Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)};
+
+  if (!MyPID)
+    deallog << "NumProc=" << NumProc << std::endl;
+
+  // create non-contiguous index set for NumProc > 1
+  dealii::IndexSet parallel_partitioning(NumProc * 2);
+
+  // non-contiguous
+  parallel_partitioning.add_index(MyPID);
+  parallel_partitioning.add_index(NumProc + MyPID);
+
+  // create sparsity pattern from parallel_partitioning
+
+  // The sparsity pattern corresponds to a [FE_DGQ<1>(p=0)]^2 FESystem
+  // on a triangulation in which each MPI process owns 2 cells, with the
+  // dofs renumbered by component, so that the locally owned rows of the
+  // final matrix are not a contiguous set.
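+  //
+  // Concretely, rank p owns the two rows {p, NumProc + p}; on a
+  // three-process run, for example, rank 0 owns rows {0, 3}, rank 1
+  // owns {1, 4}, and rank 2 owns {2, 5}.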
+
+  dealii::TrilinosWrappers::SparsityPattern sp_M(parallel_partitioning,
+                                                 MPI_COMM_WORLD,
+                                                 2);
+
+  sp_M.add(MyPID, MyPID);
+  sp_M.add(MyPID, NumProc + MyPID);
+  sp_M.add(NumProc + MyPID, MyPID);
+  sp_M.add(NumProc + MyPID, NumProc + MyPID);
+
+  sp_M.compress();
+
+  // create matrix with dummy entries on the diagonal
+  dealii::TrilinosWrappers::SparseMatrix M0;
+  M0.reinit(sp_M);
+  M0 = 0;
+
+  for (const auto &i : parallel_partitioning)
+    M0.set(i, i, dealii::numbers::PI);
+
+  M0.compress(dealii::VectorOperation::insert);
+
+  ////////////////////////////////////////////////////////////////////////
+  // test ::add(TrilinosScalar, SparseMatrix)
+  //
+
+  dealii::TrilinosWrappers::SparseMatrix M1;
+  M1.reinit(sp_M);
+  M1 = 0;
+  M1.add(1.0, M0);
+
+  // check
+  for (const auto &i : parallel_partitioning)
+    {
+      const auto &el = M1.el(i, i);
+
+      if (!MyPID)
+        deallog << "i = " << i << " , j = " << i << " , el = " << el
+                << std::endl;
+
+      AssertThrow(el == dealii::numbers::PI, dealii::ExcInternalError());
+    }
+
+  if (!MyPID)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  initlog();
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  test();
+}
diff --git a/tests/trilinos/sparse_matrix_add_03.with_trilinos=true.with_mpi=true.mpirun=3.output b/tests/trilinos/sparse_matrix_add_03.with_trilinos=true.with_mpi=true.mpirun=3.output
new file mode 100644
index 0000000000..dea2eaf38a
--- /dev/null
+++ b/tests/trilinos/sparse_matrix_add_03.with_trilinos=true.with_mpi=true.mpirun=3.output
@@ -0,0 +1,5 @@
+
+DEAL::NumProc=3
+DEAL::i = 0 , j = 0 , el = 3.14159
+DEAL::i = 3 , j = 3 , el = 3.14159
+DEAL::OK
diff --git a/tests/trilinos/sparse_matrix_copy_from_02.cc b/tests/trilinos/sparse_matrix_copy_from_02.cc
new file mode 100644
index 0000000000..3dbbb5214a
--- /dev/null
+++ b/tests/trilinos/sparse_matrix_copy_from_02.cc
@@ -0,0 +1,114 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2004 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+// Test TrilinosWrappers::SparseMatrix::copy_from(other_matrix)
+// for the case of a non-contiguous set of rows.
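+//
+// The set-up is identical to sparse_matrix_add_03.cc: each MPI rank owns
+// the two rows {MyPID, NumProc + MyPID} of a matrix with NumProc * 2 rows,
+// which are not adjacent when more than one process is used.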
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/lac/trilinos_sparsity_pattern.h>
+
+#include <iostream>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  const auto MyPID{Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)};
+  const auto NumProc{Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)};
+
+  if (!MyPID)
+    deallog << "NumProc=" << NumProc << std::endl;
+
+  // create non-contiguous index set for NumProc > 1
+  dealii::IndexSet parallel_partitioning(NumProc * 2);
+
+  // non-contiguous
+  parallel_partitioning.add_index(MyPID);
+  parallel_partitioning.add_index(NumProc + MyPID);
+
+  // create sparsity pattern from parallel_partitioning
+
+  // The sparsity pattern corresponds to a [FE_DGQ<1>(p=0)]^2 FESystem
+  // on a triangulation in which each MPI process owns 2 cells, with the
+  // dofs renumbered by component, so that the locally owned rows of the
+  // final matrix are not a contiguous set.
+
+  dealii::TrilinosWrappers::SparsityPattern sp_M(parallel_partitioning,
+                                                 MPI_COMM_WORLD,
+                                                 2);
+
+  sp_M.add(MyPID, MyPID);
+  sp_M.add(MyPID, NumProc + MyPID);
+  sp_M.add(NumProc + MyPID, MyPID);
+  sp_M.add(NumProc + MyPID, NumProc + MyPID);
+
+  sp_M.compress();
+
+  // create matrix with dummy entries on the diagonal
+  dealii::TrilinosWrappers::SparseMatrix M0;
+  M0.reinit(sp_M);
+  M0 = 0;
+
+  for (const auto &i : parallel_partitioning)
+    M0.set(i, i, dealii::numbers::PI);
+
+  M0.compress(dealii::VectorOperation::insert);
+
+  ////////////////////////////////////////////////////////////////////////
+  // test ::copy_from(SparseMatrix)
+  //
+
+  dealii::TrilinosWrappers::SparseMatrix M1;
+  M1.reinit(sp_M); // same sparsity pattern, so copy_from() avoids a deep copy
+  M1 = 0;
+  M1.copy_from(M0);
+
+  // check
+  for (const auto &i : parallel_partitioning)
+    {
+      const auto &el = M1.el(i, i);
+
+      if (!MyPID)
+        deallog << "i = " << i << " , j = " << i << " , el = " << el
+                << std::endl;
+
+      AssertThrow(el == dealii::numbers::PI, dealii::ExcInternalError());
+    }
+
+  if (!MyPID)
+    deallog << "OK" << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  initlog();
+
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(
+    argc, argv, testing_max_num_threads());
+
+  test();
+}
diff --git a/tests/trilinos/sparse_matrix_copy_from_02.with_trilinos=true.with_mpi=true.mpirun=3.output b/tests/trilinos/sparse_matrix_copy_from_02.with_trilinos=true.with_mpi=true.mpirun=3.output
new file mode 100644
index 0000000000..dea2eaf38a
--- /dev/null
+++ b/tests/trilinos/sparse_matrix_copy_from_02.with_trilinos=true.with_mpi=true.mpirun=3.output
@@ -0,0 +1,5 @@
+
+DEAL::NumProc=3
+DEAL::i = 0 , j = 0 , el = 3.14159
+DEAL::i = 3 , j = 3 , el = 3.14159
+DEAL::OK
--
2.39.5