From 6d118c3fa76e93f6ed6e21e880d5909a77421dce Mon Sep 17 00:00:00 2001 From: Peter Munch Date: Mon, 4 Jul 2022 22:01:14 +0200 Subject: [PATCH] Add functions to restrict a matrix --- include/deal.II/lac/sparse_matrix_tools.h | 620 ++++++++++++++++++ tests/lac/sparse_matrix_tools_01.cc | 189 ++++++ ...linos=true.with_p4est=true.mpirun=1.output | 280 ++++++++ ...linos=true.with_p4est=true.mpirun=2.output | 209 ++++++ 4 files changed, 1298 insertions(+) create mode 100644 include/deal.II/lac/sparse_matrix_tools.h create mode 100644 tests/lac/sparse_matrix_tools_01.cc create mode 100644 tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=1.output create mode 100644 tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=2.output diff --git a/include/deal.II/lac/sparse_matrix_tools.h b/include/deal.II/lac/sparse_matrix_tools.h new file mode 100644 index 0000000000..be5a7d7372 --- /dev/null +++ b/include/deal.II/lac/sparse_matrix_tools.h @@ -0,0 +1,620 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2022 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + +#ifndef dealii_sparse_matrix_tools_h +#define dealii_sparse_matrix_tools_h + +#include + +#include + +#include + +#include + +DEAL_II_NAMESPACE_OPEN + +/** + * A namespace to process sparse matrices. + */ +namespace SparseMatrixTools +{ + /** + * Given a sparse matrix (@p system_matrix, @p sparsity_pattern), + * construct a new sparse matrix (@p system_matrix_out, @p sparsity_pattern_out) + * by restriction + * @f[ + * A_i = R_i A R_i^T, + * @f] + * where the Boolean matrix $R_i$ is defined by the entries of @p requested_is. + * + * The function can be called by multiple processes with different sets + * of indices, allowing to assign each process a different $A_i$. + * + * Such a function is useful to implement Schwarz methods, where + * operations of type + * @f[ + * u^{n} = u^{n-1} + \sum_{i} R_i^T A_i^{-1} R_i (f - A u^{n-1}) + * @f] + * are performed to iteratively solve a system of type $Au=f$. + * + * @warning This is a collective call that needs to be executed by all + * processes in the communicator of @p sparse_matrix_in. + */ + template + void + restrict_to_serial_sparse_matrix(const SparseMatrixType & sparse_matrix_in, + const SparsityPatternType &sparsity_pattern, + const IndexSet & requested_is, + SparseMatrixType2 & system_matrix_out, + SparsityPatternType2 &sparsity_pattern_out); + + /** + * Similar to the above function, but taking two index sets + * (@p index_set_0, @p index_set_1), allowing to block the matrix. This + * is particularly useful, when dealing with vectors of type + * parallel::distributed::Vector, where the vector is blocked according + * to locally owned and ghost indices. As a consequence, the most + * typical usecase will be to pass in the set of locally owned DoFs and set + * of active or locally relevant DoFs. 
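+   *
+   * A minimal usage sketch of this blocked variant, assuming a parallel
+   * DoFHandler and an already assembled distributed matrix; the names
+   * @c distributed_matrix and @c distributed_sparsity_pattern are
+   * illustrative and not part of the interface:
+   * @code
+   * const IndexSet locally_owned_dofs = dof_handler.locally_owned_dofs();
+   * const IndexSet locally_active_dofs =
+   *   DoFTools::extract_locally_active_dofs(dof_handler);
+   *
+   * SparsityPattern      serial_sparsity_pattern;
+   * SparseMatrix<double> serial_matrix;
+   *
+   * SparseMatrixTools::restrict_to_serial_sparse_matrix(
+   *   distributed_matrix,
+   *   distributed_sparsity_pattern,
+   *   locally_owned_dofs,
+   *   locally_active_dofs,
+   *   serial_matrix,
+   *   serial_sparsity_pattern);
+   * @endcode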
+ * + * @warning This is a collective call that needs to be executed by all + * processes in the communicator of @p sparse_matrix_in. + */ + template + void + restrict_to_serial_sparse_matrix(const SparseMatrixType & sparse_matrix_in, + const SparsityPatternType &sparsity_pattern, + const IndexSet & index_set_0, + const IndexSet & index_set_1, + SparseMatrixType2 & system_matrix_out, + SparsityPatternType2 &sparsity_pattern_out); + + /** + * A restriction operation similar to the above one. However, the operation + * is performed for each locally owned active cell individually and index sets + * are given by their DoFs. The correct entries in the resulting vector can + * accessed by CellAccessor::active_cell_index(). + * + * @note In a certain sense, this is the reversion of the cell loop during + * matrix assembly. However, doing this on a distributed matrix is not + * trivial, since 1) rows might be owned by different processes and 2) degrees + * of freedom might be constrained, resulting in "missing" entries in the + * matrix. + * + * @warning This is a collective call that needs to be executed by all + * processes in the communicator of @p sparse_matrix_in. + */ + template + void + restrict_to_cells(const SparseMatrixType & system_matrix, + const SparsityPatternType & sparsity_pattern, + const DoFHandler &dof_handler, + std::vector> &blocks); + + /** + * A restriction operation similar to the above one. However, the indices + * of the blocks can be chosen arbitrarily. If the indices of cells are + * given, the ouput is the same as of the above function. However, one + * can also provide, e.g., indices that are also part of a halo around + * a cell to implement element-block based overlapping Schwarz methods. + * + * If no indices are provided for a block, the resulting matrix of this + * block is empty. + * + * @warning This is a collective call that needs to be executed by all + * processes in the communicator of @p sparse_matrix_in. 
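+   *
+   * A sketch of how the @p indices argument could be assembled, here with
+   * one block per locally owned cell so that the result coincides with the
+   * output of restrict_to_cells(); all variable names are illustrative:
+   * @code
+   * std::vector<std::vector<types::global_dof_index>> indices(
+   *   dof_handler.get_triangulation().n_active_cells());
+   *
+   * for (const auto &cell : dof_handler.active_cell_iterators())
+   *   if (cell->is_locally_owned())
+   *     {
+   *       auto &local_dof_indices = indices[cell->active_cell_index()];
+   *       local_dof_indices.resize(cell->get_fe().n_dofs_per_cell());
+   *       cell->get_dof_indices(local_dof_indices);
+   *     }
+   *
+   * std::vector<FullMatrix<double>> blocks;
+   * SparseMatrixTools::restrict_to_full_matrices(system_matrix,
+   *                                              sparsity_pattern,
+   *                                              indices,
+   *                                              blocks);
+   * @endcode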
+ */ + template + void + restrict_to_full_matrices( + const SparseMatrixType & sparse_matrix_in, + const SparsityPatternType & sparsity_pattern, + const std::vector> &indices, + std::vector> & blocks); + + +#ifndef DOXYGEN + /*---------------------- Inline functions ---------------------------------*/ + + namespace internal + { + template + std::tuple + compute_prefix_sum(const T &value, const MPI_Comm &comm) + { +# ifndef DEAL_II_WITH_MPI + (void)comm; + return {0, value}; +# else + T prefix = {}; + + int ierr = + MPI_Exscan(&value, + &prefix, + 1, + Utilities::MPI::mpi_type_id_for_type, + MPI_SUM, + comm); + AssertThrowMPI(ierr); + + T sum = Utilities::MPI::sum(value, comm); + + return {prefix, sum}; +# endif + } + + template + using get_mpi_communicator_t = + decltype(std::declval().get_mpi_communicator()); + + template + constexpr bool has_get_mpi_communicator = + dealii::internal::is_supported_operation; + + template + using local_size_t = decltype(std::declval().local_size()); + + template + constexpr bool has_local_size = + dealii::internal::is_supported_operation; + + template , + SparseMatrixType> * = nullptr> + MPI_Comm + get_mpi_communicator(const SparseMatrixType &sparse_matrix) + { + return sparse_matrix.get_mpi_communicator(); + } + + template , + SparseMatrixType> * = nullptr> + MPI_Comm + get_mpi_communicator(const SparseMatrixType &sparse_matrix) + { + return MPI_COMM_SELF; + } + + template , + SparseMatrixType> * = nullptr> + unsigned int + get_local_size(const SparseMatrixType &sparse_matrix) + { + return sparse_matrix.local_size(); + } + + template , + SparseMatrixType> * = nullptr> + unsigned int + get_local_size(const SparseMatrixType &sparse_matrix) + { + AssertDimension(sparse_matrix.m(), sparse_matrix.n()); + + return sparse_matrix.m(); + } + + // Helper function to extract for a distributed sparse matrix rows + // potentially not owned by the current process. 
+ template + std::vector>> + extract_remote_rows(const SparseMatrixType & system_matrix, + const SparsityPatternType &sparsity_pattern, + const IndexSet & locally_active_dofs, + const MPI_Comm & comm) + { + std::vector dummy(locally_active_dofs.n_elements()); + + const auto local_size = get_local_size(system_matrix); + const auto prefix_sum = compute_prefix_sum(local_size, comm); + IndexSet locally_owned_dofs(std::get<1>(prefix_sum)); + locally_owned_dofs.add_range(std::get<0>(prefix_sum), + std::get<0>(prefix_sum) + local_size); + + Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload + process(locally_owned_dofs, locally_active_dofs, comm, dummy, true); + + Utilities::MPI::ConsensusAlgorithms::Selector< + std::vector< + std::pair>, + std::vector> + consensus_algorithm; + consensus_algorithm.run(process, comm); + + using T1 = std::vector< + std::pair>>>; + + auto requesters = process.get_requesters(); + + std::vector>> + locally_relevant_matrix_entries(locally_active_dofs.n_elements()); + + + std::vector ranks; + ranks.reserve(requesters.size()); + + for (const auto &i : requesters) + ranks.push_back(i.first); + + std::vector> row_to_procs( + locally_owned_dofs.n_elements()); + + for (const auto &requester : requesters) + for (const auto &index : requester.second) + row_to_procs[locally_owned_dofs.index_within_set(index)].push_back( + requester.first); + + std::map data; + + for (unsigned int i = 0; i < row_to_procs.size(); ++i) + { + if (row_to_procs[i].size() == 0) + continue; + + const auto row = locally_owned_dofs.nth_index_in_set(i); + auto entry = system_matrix.begin(row); + + const unsigned int row_length = sparsity_pattern.row_length(row); + + + std::pair>> + buffer; + buffer.first = row; + + for (unsigned int i = 0; i < row_length; ++i) + { + buffer.second.emplace_back(entry->column(), entry->value()); + + if (i + 1 != row_length) + ++entry; + } + + for (const auto &proc : + row_to_procs[locally_owned_dofs.index_within_set(buffer.first)]) + data[proc].emplace_back(buffer); + } + + dealii::Utilities::MPI::ConsensusAlgorithms::selector( + ranks, + [&](const unsigned int other_rank) { return data[other_rank]; }, + [&](const unsigned int &, const T1 &buffer_recv) { + for (const auto &i : buffer_recv) + { + auto &dst = + locally_relevant_matrix_entries[locally_active_dofs + .index_within_set(i.first)]; + dst = i.second; + std::sort(dst.begin(), + dst.end(), + [](const auto &a, const auto &b) { + return a.first < b.first; + }); + } + }, + comm); + + return locally_relevant_matrix_entries; + } + } // namespace internal + + + + template + void + restrict_to_serial_sparse_matrix(const SparseMatrixType & system_matrix, + const SparsityPatternType &sparsity_pattern, + const IndexSet & index_set_0, + const IndexSet & index_set_1, + SparseMatrixType2 & system_matrix_out, + SparsityPatternType2 &sparsity_pattern_out) + { + Assert(index_set_1.size() == 0 || index_set_0.size() == index_set_1.size(), + ExcInternalError()); + + auto index_set_1_cleared = index_set_1; + if (index_set_1.size() != 0) + index_set_1_cleared.subtract_set(index_set_0); + + const auto index_within_set = [&index_set_0, + &index_set_1_cleared](const auto n) { + if (index_set_0.is_element(n)) + return index_set_0.index_within_set(n); + else + return index_set_0.n_elements() + + index_set_1_cleared.index_within_set(n); + }; + + // 1) collect needed rows + auto index_set_union = index_set_0; + + if (index_set_1.size() != 0) + index_set_union.add_indices(index_set_1_cleared); + + // TODO: actually only 
communicate remote rows as in the case of + // SparseMatrixTools::restrict_to_cells() + const auto locally_relevant_matrix_entries = + internal::extract_remote_rows( + system_matrix, + sparsity_pattern, + index_set_union, + internal::get_mpi_communicator(system_matrix)); + + + // 2) create sparsity pattern + const unsigned int n_rows = index_set_union.n_elements(); + const unsigned int n_cols = index_set_union.n_elements(); + const unsigned int entries_per_row = + locally_relevant_matrix_entries.size() == 0 ? + 0 : + std::max_element(locally_relevant_matrix_entries.begin(), + locally_relevant_matrix_entries.end(), + [](const auto &a, const auto &b) { + return a.size() < b.size(); + }) + ->size(); + + sparsity_pattern_out.reinit(n_rows, n_cols, entries_per_row); + + std::vector temp_indices; + std::vector temp_values; + + for (unsigned int row = 0; row < index_set_union.n_elements(); ++row) + { + const auto &global_row_entries = locally_relevant_matrix_entries[row]; + + temp_indices.clear(); + temp_indices.reserve(global_row_entries.size()); + + for (const auto &global_row_entry : global_row_entries) + { + const auto global_index = std::get<0>(global_row_entry); + + if (index_set_union.is_element(global_index)) + temp_indices.push_back(index_within_set(global_index)); + } + + sparsity_pattern_out.add_entries( + index_within_set(index_set_union.nth_index_in_set(row)), + temp_indices.begin(), + temp_indices.end()); + } + + sparsity_pattern_out.compress(); + + // 3) setup matrix + system_matrix_out.reinit(sparsity_pattern_out); + + // 4) fill entries + for (unsigned int row = 0; row < index_set_union.n_elements(); ++row) + { + const auto &global_row_entries = locally_relevant_matrix_entries[row]; + + temp_indices.clear(); + temp_values.clear(); + + temp_indices.reserve(global_row_entries.size()); + temp_values.reserve(global_row_entries.size()); + + for (const auto &global_row_entry : global_row_entries) + { + const auto global_index = std::get<0>(global_row_entry); + + if (index_set_union.is_element(global_index)) + { + temp_indices.push_back(index_within_set(global_index)); + temp_values.push_back(std::get<1>(global_row_entry)); + } + } + + system_matrix_out.add(index_within_set( + index_set_union.nth_index_in_set(row)), + temp_indices, + temp_values); + } + + system_matrix_out.compress(VectorOperation::add); + } + + + + template + void + restrict_to_serial_sparse_matrix(const SparseMatrixType & system_matrix, + const SparsityPatternType &sparsity_pattern, + const IndexSet & requested_is, + SparseMatrixType2 & system_matrix_out, + SparsityPatternType2 &sparsity_pattern_out) + { + restrict_to_serial_sparse_matrix(system_matrix, + sparsity_pattern, + requested_is, + IndexSet(), // simply pass empty index set + system_matrix_out, + sparsity_pattern_out); + } + + + + template + void + restrict_to_full_matrices( + const SparseMatrixType & system_matrix, + const SparsityPatternType & sparsity_pattern, + const std::vector> &indices, + std::vector> & blocks) + { + // 0) determine which rows are locally owned and which ones are remote + const auto local_size = internal::get_local_size(system_matrix); + const auto prefix_sum = internal::compute_prefix_sum( + local_size, internal::get_mpi_communicator(system_matrix)); + IndexSet locally_owned_dofs(std::get<1>(prefix_sum)); + locally_owned_dofs.add_range(std::get<0>(prefix_sum), + std::get<0>(prefix_sum) + local_size); + + std::vector ghost_indices_vector; + + for (const auto &i : indices) + ghost_indices_vector.insert(ghost_indices_vector.end(), + 
i.begin(), + i.end()); + + std::sort(ghost_indices_vector.begin(), ghost_indices_vector.end()); + ghost_indices_vector.erase(std::unique(ghost_indices_vector.begin(), + ghost_indices_vector.end()), + ghost_indices_vector.end()); + + + IndexSet locally_active_dofs(std::get<1>(prefix_sum)); + locally_active_dofs.add_indices(ghost_indices_vector.begin(), + ghost_indices_vector.end()); + + locally_active_dofs.subtract_set(locally_owned_dofs); + + // 1) collect remote rows of sparse matrix + const auto locally_relevant_matrix_entries = + internal::extract_remote_rows(system_matrix, + sparsity_pattern, + locally_active_dofs, + internal::get_mpi_communicator( + system_matrix)); + + + // 2) loop over all cells and "revert" assembly + blocks.clear(); + blocks.resize(indices.size()); + + for (unsigned int c = 0; c < indices.size(); ++c) + { + if (indices[c].size() == 0) + continue; + + const auto &local_dof_indices = indices[c]; + auto & cell_matrix = blocks[c]; + + // allocate memory + const unsigned int dofs_per_cell = indices[c].size(); + + cell_matrix = FullMatrix(dofs_per_cell, dofs_per_cell); + + // loop over all entries of the restricted element matrix and + // do different things if rows are locally owned or not and + // if column entries of that row exist or not + for (unsigned int i = 0; i < dofs_per_cell; ++i) + for (unsigned int j = 0; j < dofs_per_cell; ++j) + { + if (locally_owned_dofs.is_element( + local_dof_indices[i])) // row is local + { + cell_matrix(i, j) = + sparsity_pattern.exists(local_dof_indices[i], + local_dof_indices[j]) ? + system_matrix(local_dof_indices[i], + local_dof_indices[j]) : + 0; + } + else // row is ghost + { + Assert(locally_active_dofs.is_element(local_dof_indices[i]), + ExcInternalError()); + + const auto &row_entries = + locally_relevant_matrix_entries[locally_active_dofs + .index_within_set( + local_dof_indices[i])]; + + const auto ptr = + std::lower_bound(row_entries.begin(), + row_entries.end(), + std::pair{ + local_dof_indices[j], /*dummy*/ 0.0}, + [](const auto a, const auto b) { + return a.first < b.first; + }); + + if (ptr != row_entries.end() && + local_dof_indices[j] == ptr->first) + cell_matrix(i, j) = ptr->second; + else + cell_matrix(i, j) = 0.0; + } + } + } + } + + + + template + void + restrict_to_cells(const SparseMatrixType & system_matrix, + const SparsityPatternType & sparsity_pattern, + const DoFHandler &dof_handler, + std::vector> &blocks) + { + std::vector> all_dof_indices; + all_dof_indices.resize(dof_handler.get_triangulation().n_active_cells()); + + for (const auto &cell : dof_handler.active_cell_iterators()) + { + if (cell->is_locally_owned() == false) + continue; + + auto &local_dof_indices = all_dof_indices[cell->active_cell_index()]; + local_dof_indices.resize(cell->get_fe().n_dofs_per_cell()); + cell->get_dof_indices(local_dof_indices); + } + + restrict_to_full_matrices(system_matrix, + sparsity_pattern, + all_dof_indices, + blocks); + } +#endif + +} // namespace SparseMatrixTools + +DEAL_II_NAMESPACE_CLOSE + +#endif diff --git a/tests/lac/sparse_matrix_tools_01.cc b/tests/lac/sparse_matrix_tools_01.cc new file mode 100644 index 0000000000..0644d35395 --- /dev/null +++ b/tests/lac/sparse_matrix_tools_01.cc @@ -0,0 +1,189 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2022 by the deal.II authors +// +// This file is part of the deal.II library. 
+// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + +// check different functions from the SparseMatrixTools namespace + +#include + +#include + +#include +#include + +#include + +#include + +#include +#include +#include +#include + +#include + +#include "../tests.h" + +template +void +reinit_sparsity_pattern(const DoFHandler &dof_handler, + SparsityPattern & sparsity_pattern) +{ + std::vector counter(dof_handler.n_dofs(), 0); + + std::vector local_dof_indices; + + for (const auto &cell : dof_handler.active_cell_iterators()) + { + local_dof_indices.resize(cell->get_fe().n_dofs_per_cell()); + + for (const auto i : local_dof_indices) + counter[i]++; + } + + sparsity_pattern.reinit(dof_handler.n_dofs(), + dof_handler.n_dofs(), + *std::max_element(counter.begin(), counter.end())); +} + +template +void +reinit_sparsity_pattern(const DoFHandler & dof_handler, + TrilinosWrappers::SparsityPattern &sparsity_pattern) +{ + sparsity_pattern.reinit(dof_handler.locally_owned_dofs(), + dof_handler.get_communicator()); +} + +template +void +test() +{ + const unsigned int fe_degree = 1; + + // create mesh, ... + parallel::distributed::Triangulation tria(MPI_COMM_WORLD); + GridGenerator::subdivided_hyper_cube(tria, 3); + + DoFHandler dof_handler(tria); + dof_handler.distribute_dofs(FE_Q(fe_degree)); + + QGauss quadrature(fe_degree + 1); + + AffineConstraints constraints; + DoFTools::make_zero_boundary_constraints(dof_handler, constraints); + constraints.close(); + + // create system matrix + SparsityPatternType sparsity_pattern; + reinit_sparsity_pattern(dof_handler, sparsity_pattern); + DoFTools::make_sparsity_pattern(dof_handler, + sparsity_pattern, + constraints, + false); + sparsity_pattern.compress(); + + SpareMatrixType laplace_matrix; + laplace_matrix.reinit(sparsity_pattern); + + MatrixCreator::create_laplace_matrix( + dof_handler, quadrature, laplace_matrix, nullptr, constraints); + + // extract blocks + std::vector> blocks; + SparseMatrixTools::restrict_to_cells(laplace_matrix, + sparsity_pattern, + dof_handler, + blocks); + + for (const auto &block : blocks) + { + if (block.m() == 0 && block.m() == 0) + continue; + + block.print_formatted(deallog.get_file_stream(), 2, false, 8); + deallog << std::endl; + } + + const auto test_restrict = [&](const IndexSet &is_0, const IndexSet &is_1) { + (void)is_1; + SparsityPatternType2 serial_sparsity_pattern; + SparseMatrixType2 serial_sparse_matrix; + + if (is_1.size() == 0) + SparseMatrixTools::restrict_to_serial_sparse_matrix( + laplace_matrix, + sparsity_pattern, + is_0, + serial_sparse_matrix, + serial_sparsity_pattern); + else + SparseMatrixTools::restrict_to_serial_sparse_matrix( + laplace_matrix, + sparsity_pattern, + is_0, + is_1, + serial_sparse_matrix, + serial_sparsity_pattern); + + FullMatrix serial_sparse_matrix_full; + serial_sparse_matrix_full.copy_from(serial_sparse_matrix); + serial_sparse_matrix_full.print_formatted(deallog.get_file_stream(), + 2, + false, + 8); + }; + + test_restrict(dof_handler.locally_owned_dofs(), {}); + test_restrict(DoFTools::extract_locally_active_dofs(dof_handler), {}); + 
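+  // finally, test the blocked variant: the locally owned DoFs form the
+  // first index set and the locally active DoFs the second one, mimicking
+  // the layout of a ghosted parallel vector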
test_restrict(dof_handler.locally_owned_dofs(), + DoFTools::extract_locally_active_dofs(dof_handler)); +} + +#include "../tests.h" + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + + MPILogInitAll all; + + // SparseMatrix -> SparseMatrix + if (Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD) == 1) + test<2, + SparseMatrix, + SparsityPattern, + SparseMatrix, + SparsityPattern>(); + + // TrilinosWrappers::SparseMatrix -> SparseMatrix + test<2, + TrilinosWrappers::SparseMatrix, + TrilinosWrappers::SparsityPattern, + SparseMatrix, + SparsityPattern>(); + + // TrilinosWrappers::SparseMatrix -> TrilinosWrappers::SparseMatrix + test<2, + TrilinosWrappers::SparseMatrix, + TrilinosWrappers::SparsityPattern, + TrilinosWrappers::SparseMatrix, + TrilinosWrappers::SparsityPattern>(); +} diff --git a/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=1.output b/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=1.output new file mode 100644 index 0000000000..335f0e3081 --- /dev/null +++ b/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=1.output @@ -0,0 +1,280 @@ + +0.67 + 1.33 + 1.33 + 2.67 +DEAL:0:: +1.33 + 1.33 + 2.67 -0.33 + -0.33 2.67 +DEAL:0:: +1.33 + 0.67 + 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 -0.33 + 1.33 + -0.33 2.67 +DEAL:0:: +2.67 -0.33 -0.33 -0.33 +-0.33 2.67 -0.33 -0.33 +-0.33 -0.33 2.67 -0.33 +-0.33 -0.33 -0.33 2.67 +DEAL:0:: +2.67 -0.33 + 1.33 +-0.33 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 + 0.67 + 1.33 +DEAL:0:: +2.67 -0.33 +-0.33 2.67 + 1.33 + 1.33 +DEAL:0:: +2.67 + 1.33 + 1.33 + 0.67 +DEAL:0:: +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 +DEAL:0:: +1.33 + 1.33 + 2.67 -0.33 + -0.33 2.67 +DEAL:0:: +1.33 + 0.67 + 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 -0.33 + 1.33 + -0.33 2.67 +DEAL:0:: +2.67 -0.33 -0.33 -0.33 +-0.33 2.67 -0.33 -0.33 +-0.33 -0.33 2.67 -0.33 +-0.33 -0.33 -0.33 2.67 +DEAL:0:: +2.67 -0.33 + 1.33 +-0.33 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 + 0.67 + 1.33 +DEAL:0:: +2.67 -0.33 +-0.33 2.67 + 1.33 + 1.33 +DEAL:0:: +2.67 + 1.33 + 1.33 + 0.67 +DEAL:0:: +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 +DEAL:0:: +1.33 + 1.33 + 2.67 -0.33 + -0.33 2.67 +DEAL:0:: +1.33 + 0.67 + 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 -0.33 + 1.33 + -0.33 2.67 +DEAL:0:: +2.67 -0.33 -0.33 -0.33 +-0.33 2.67 -0.33 -0.33 +-0.33 -0.33 2.67 -0.33 +-0.33 -0.33 -0.33 2.67 +DEAL:0:: +2.67 -0.33 + 1.33 +-0.33 2.67 + 1.33 +DEAL:0:: +1.33 + 2.67 + 0.67 + 1.33 
+DEAL:0:: +2.67 -0.33 +-0.33 2.67 + 1.33 + 1.33 +DEAL:0:: +2.67 + 1.33 + 1.33 + 0.67 +DEAL:0:: +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 0.67 + 1.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 diff --git a/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=2.output b/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=2.output new file mode 100644 index 0000000000..36d4a3fd77 --- /dev/null +++ b/tests/lac/sparse_matrix_tools_01.with_trilinos=true.with_p4est=true.mpirun=2.output @@ -0,0 +1,209 @@ + +0.67 + 1.33 + 1.33 + 2.67 +DEAL:0:: +1.33 + 1.33 + 2.67 -0.33 + -0.33 2.67 +DEAL:0:: +1.33 + 2.67 -0.33 + 1.33 + -0.33 2.67 +DEAL:0:: +2.67 -0.33 -0.33 -0.33 +-0.33 2.67 -0.33 -0.33 +-0.33 -0.33 2.67 -0.33 +-0.33 -0.33 -0.33 2.67 +DEAL:0:: +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 +0.67 + 1.33 + 1.33 + 2.67 +DEAL:0:: +1.33 + 1.33 + 2.67 -0.33 + -0.33 2.67 +DEAL:0:: +1.33 + 2.67 -0.33 + 1.33 + -0.33 2.67 +DEAL:0:: +2.67 -0.33 -0.33 -0.33 +-0.33 2.67 -0.33 -0.33 +-0.33 -0.33 2.67 -0.33 +-0.33 -0.33 -0.33 2.67 +DEAL:0:: +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 +0.67 + 1.33 + 1.33 + 2.67 -0.33 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 -0.33 + 1.33 + -0.33 -0.33 2.67 -0.33 + -0.33 -0.33 -0.33 2.67 + +1.33 + 0.67 + 2.67 + 1.33 +DEAL:1:: +2.67 -0.33 + 1.33 +-0.33 2.67 + 1.33 +DEAL:1:: +1.33 + 2.67 + 0.67 + 1.33 +DEAL:1:: +2.67 -0.33 +-0.33 2.67 + 1.33 + 1.33 +DEAL:1:: +2.67 + 1.33 + 1.33 + 0.67 +DEAL:1:: +0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +1.33 + 2.67 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 + -0.33 -0.33 2.67 + 0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 2.67 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 + -0.33 -0.33 2.67 +1.33 + 0.67 + 2.67 + 1.33 +DEAL:1:: +2.67 -0.33 + 1.33 +-0.33 2.67 + 1.33 +DEAL:1:: +1.33 + 2.67 + 0.67 + 1.33 +DEAL:1:: +2.67 -0.33 +-0.33 2.67 + 1.33 + 1.33 +DEAL:1:: +2.67 + 1.33 + 1.33 + 0.67 +DEAL:1:: +0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +1.33 + 2.67 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 + -0.33 -0.33 2.67 + 0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 +0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 1.33 + 0.67 + 1.33 + 2.67 -0.33 -0.33 + 1.33 + -0.33 2.67 -0.33 + -0.33 -0.33 2.67 + -- 2.39.5