From 24d347623f5d71f1eea0618aba848ac26e17009b Mon Sep 17 00:00:00 2001 From: Stefano Zampini Date: Tue, 24 Jan 2023 11:46:38 +0300 Subject: [PATCH] Add support for CommunicationPattern with PETSc SF --- doc/doxygen/references.bib | 16 + .../deal.II/lac/petsc_communication_pattern.h | 375 +++++++++++++ source/lac/CMakeLists.txt | 2 + source/lac/petsc_communication_pattern.cc | 492 ++++++++++++++++++ .../lac/petsc_communication_pattern.inst.in | 85 +++ .../petsc_noncontiguous_partitioner_01.cc | 78 +++ ...ncontiguous_partitioner_01.mpirun=2.output | 7 + .../petsc_noncontiguous_partitioner_02.cc | 163 ++++++ ...itioner_02.with_p4est=true.mpirun=1.output | 13 + ...itioner_02.with_p4est=true.mpirun=2.output | 27 + ...itioner_02.with_p4est=true.mpirun=4.output | 55 ++ ...itioner_02.with_p4est=true.mpirun=5.output | 69 +++ .../petsc_noncontiguous_partitioner_03.cc | 92 ++++ ...ncontiguous_partitioner_03.mpirun=2.output | 15 + tests/petsc/petsc_partitioner_06.cc | 168 ++++++ .../petsc_partitioner_06.mpirun=4.output | 27 + tests/petsc/petsc_partitioner_07.cc | 181 +++++++ .../petsc_partitioner_07.mpirun=4.output | 23 + 18 files changed, 1888 insertions(+) create mode 100644 include/deal.II/lac/petsc_communication_pattern.h create mode 100644 source/lac/petsc_communication_pattern.cc create mode 100644 source/lac/petsc_communication_pattern.inst.in create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_01.cc create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_01.mpirun=2.output create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_02.cc create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=1.output create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=2.output create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=4.output create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=5.output create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_03.cc create mode 100644 tests/petsc/petsc_noncontiguous_partitioner_03.mpirun=2.output create mode 100644 tests/petsc/petsc_partitioner_06.cc create mode 100644 tests/petsc/petsc_partitioner_06.mpirun=4.output create mode 100644 tests/petsc/petsc_partitioner_07.cc create mode 100644 tests/petsc/petsc_partitioner_07.mpirun=4.output diff --git a/doc/doxygen/references.bib b/doc/doxygen/references.bib index 3e749b8109..ef87c6a1f2 100644 --- a/doc/doxygen/references.bib +++ b/doc/doxygen/references.bib @@ -2006,3 +2006,19 @@ url = {https://doi.org/10.1016/0045-7930(73)90027-3} journal={arXiv preprint arXiv:2210.03179}, year={2022} } + +@article{zhang2022petscsf, +author = {J. Zhang and J. Brown and S. Balay and J. Faibussowitsch and M. Knepley and O. Marin and R. Mills and T. Munson and B. F. Smith and S. 
Zampini},
journal = {IEEE Transactions on Parallel and Distributed Systems},
title = {The {PetscSF} Scalable Communication Layer},
year = {2022},
volume = {33},
number = {04},
issn = {1558-2183},
pages = {842-853},
keywords = {libraries;programming;graphics processing units;forestry;electronics packaging;arrays;scalability},
doi = {10.1109/TPDS.2021.3084070},
publisher = {IEEE Computer Society},
address = {Los Alamitos, CA, USA},
month = {apr}
}
diff --git a/include/deal.II/lac/petsc_communication_pattern.h b/include/deal.II/lac/petsc_communication_pattern.h
new file mode 100644
index 0000000000..eabadfb448
--- /dev/null
+++ b/include/deal.II/lac/petsc_communication_pattern.h
@@ -0,0 +1,375 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#ifndef dealii_petsc_communication_pattern_h
+#define dealii_petsc_communication_pattern_h
+
+#include <deal.II/base/config.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+#  include <deal.II/base/array_view.h>
+#  include <deal.II/base/communication_pattern_base.h>
+#  include <deal.II/base/index_set.h>
+
+#  include <deal.II/lac/vector_operation.h>
+
+#  include <petscsf.h>
+
+#  include <vector>
+
+DEAL_II_NAMESPACE_OPEN
+
+/**
+ * @addtogroup PETScWrappers
+ * @{
+ */
+namespace PETScWrappers
+{
+  /**
+   * CommunicationPattern implementation based on the PetscSF object.
+   * This class implements the same communication patterns as
+   * Utilities::MPI::NoncontiguousPartitioner, internally using PetscSF
+   * API calls.
+   *
+   * For additional information, see the paper @cite zhang2022petscsf.
+   */
+  class CommunicationPattern : public Utilities::MPI::CommunicationPatternBase
+  {
+  public:
+    /**
+     * Default constructor.
+     */
+    CommunicationPattern();
+
+    /**
+     * Destructor.
+     */
+    virtual ~CommunicationPattern() override;
+
+    virtual void
+    reinit(const IndexSet &locally_owned_indices,
+           const IndexSet &ghost_indices,
+           const MPI_Comm &communicator) override;
+
+    /**
+     * Reinitialize the communication pattern. The arguments
+     * @p indices_locally_owned and @p indices_want indicate the owned and
+     * required dofs, respectively. The indices need not be sorted and may
+     * include entries with the value numbers::invalid_dof_index, which do
+     * not take part in the index exchange but are present in the data
+     * vectors as padding.
+     *
+     * export_to_ghosted_array() will populate an array with the values
+     * associated with @p indices_want only.
+     *
+     * This emulates the corresponding constructor in
+     * Utilities::MPI::NoncontiguousPartitioner.
+     */
+    void
+    reinit(const std::vector<types::global_dof_index> &indices_locally_owned,
+           const std::vector<types::global_dof_index> &indices_want,
+           const MPI_Comm &                             communicator);
+
+    /**
+     * Reinitialization that takes the number of locally-owned degrees of
+     * freedom @p local_size and an index set for the required ghost indices
+     * @p ghost_indices.
+     *
+     * The local index range is translated to global indices in an ascending
+     * and one-to-one fashion, i.e., the indices of process $p$ sit exactly
+     * between the indices of the processes $p-1$ and $p+1$, respectively.
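+     *
+     * A rough usage sketch (the process count, sizes, and index values here
+     * are illustrative assumptions, not requirements of the interface):
+     * @code
+     * // Two processes, each owning four entries; process 0 additionally
+     * // ghosts entry 5, which is owned by process 1.
+     * const types::global_dof_index local_size = 4;
+     * IndexSet                      ghost_indices(8);
+     * if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
+     *   ghost_indices.add_index(5);
+     *
+     * PETScWrappers::CommunicationPattern pattern;
+     * pattern.reinit(local_size, ghost_indices, MPI_COMM_WORLD);
+     * @endcode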
+     *
+     * export_to_ghosted_array() will populate an array containing
+     * values from locally-owned AND ghost indices, as for the relevant
+     * set of dofs of a usual FEM simulation.
+     */
+    void
+    reinit(const types::global_dof_index local_size,
+           const IndexSet &              ghost_indices,
+           const MPI_Comm &              communicator);
+
+    /**
+     * Fill the vector @p ghost_array according to the precomputed
+     * communication pattern with values from @p locally_owned_array.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array(const ArrayView<const Number> &locally_owned_array,
+                            const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Start the communication round to fill the vector @p ghost_array
+     * according to the precomputed communication pattern with values from
+     * @p locally_owned_array. It can be overlapped with other communications.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_start(
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Finish the communication round to fill the vector @p ghost_array
+     * according to the precomputed communication pattern with values from
+     * @p locally_owned_array. It can be overlapped with other communications.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_finish(
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Modify the vector @p locally_owned_array according to the precomputed
+     * communication pattern and the operation @p op with values from
+     * @p ghost_array.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Start the communication round to modify the vector
+     * @p locally_owned_array according to the precomputed communication
+     * pattern and the operation @p op with values from @p ghost_array.
+     * It can be overlapped with other communications.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_start(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Finish the communication round to modify the vector
+     * @p locally_owned_array according to the precomputed communication
+     * pattern and the operation @p op with values from @p ghost_array.
+     * It can be overlapped with other communications.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_finish(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Return the underlying MPI communicator.
+     */
+    const MPI_Comm &
+    get_mpi_communicator() const override;
+
+    /**
+     * Conversion operator to gain access to the underlying PETSc object.
+     */
+    operator const PetscSF &() const
+    {
+      return sf;
+    }
+
+    /**
+     * Reset the object.
+     */
+    void
+    clear();
+
+  protected:
+    /**
+     * A generic PetscSF object that will perform the communication.
+     */
+    PetscSF sf;
+
+    /**
+     * General setup.
+     */
+    void
+    do_reinit(const std::vector<PetscInt> &inidx,
+              const std::vector<PetscInt> &inloc,
+              const std::vector<PetscInt> &outidx,
+              const std::vector<PetscInt> &outloc,
+              const MPI_Comm &             communicator);
+  };
+
+  /**
+   * Partitioner implementation based on the PetscSF object.
+   * This class implements the same communication patterns as
+   * Utilities::MPI::Partitioner, internally using PetscSF
+   * API calls.
+   * Unlike the Utilities::MPI::Partitioner implementation, here we
+   * don't need to specify the communication channel, the temporary storage,
+   * and the MPI requests within export and import functions. Moreover,
+   * the import API does not zero the input ghost array.
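+   *
+   * A rough usage sketch (the index sets, the array element type, and the
+   * do_something_else() placeholder are illustrative assumptions, not part
+   * of the interface); the start/finish split allows overlapping the
+   * exchange with independent local work:
+   * @code
+   * PETScWrappers::Partitioner partitioner;
+   * partitioner.reinit(locally_owned_indices, ghost_indices, communicator);
+   *
+   * std::vector<double>        owned_values(locally_owned_indices.n_elements());
+   * const std::vector<double> &owned_data = owned_values;
+   * std::vector<double>        ghost_values(partitioner.n_ghost_indices());
+   *
+   * // Start the export, do unrelated work, then finish the export.
+   * partitioner.export_to_ghosted_array_start(make_array_view(owned_data),
+   *                                           make_array_view(ghost_values));
+   * do_something_else();
+   * partitioner.export_to_ghosted_array_finish(make_array_view(owned_data),
+   *                                            make_array_view(ghost_values));
+   * @endcode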
+   *
+   * For additional information, see the paper @cite zhang2022petscsf.
+   */
+  class Partitioner : public Utilities::MPI::CommunicationPatternBase
+  {
+  public:
+    /**
+     * Default constructor.
+     */
+    Partitioner();
+
+    /**
+     * Destructor.
+     */
+    virtual ~Partitioner() override = default;
+
+    /**
+     * Reinitialize the partitioner. As for the Utilities::MPI::Partitioner,
+     * any entry of @p ghost_indices that is also present in
+     * @p locally_owned_indices is discarded.
+     */
+    virtual void
+    reinit(const IndexSet &locally_owned_indices,
+           const IndexSet &ghost_indices,
+           const MPI_Comm &communicator) override;
+
+    /**
+     * Reinitialize the partitioner. As for the Utilities::MPI::Partitioner,
+     * any entry of @p ghost_indices that is also present in
+     * @p locally_owned_indices is discarded. This reinitialization allows
+     * performing communications either using a ghost data array of the size
+     * of @p ghost_indices or of @p larger_ghost_indices.
+     */
+    void
+    reinit(const IndexSet &locally_owned_indices,
+           const IndexSet &ghost_indices,
+           const IndexSet &larger_ghost_indices,
+           const MPI_Comm &communicator);
+
+    /**
+     * Return the actual number of ghost indices.
+     */
+    unsigned int
+    n_ghost_indices() const
+    {
+      return n_ghost_indices_data;
+    }
+
+    /**
+     * Return an IndexSet representation of the actual ghost indices.
+     */
+    const IndexSet &
+    ghost_indices() const
+    {
+      return ghost_indices_data;
+    }
+
+    /**
+     * Fill the vector @p ghost_array according to the precomputed
+     * communication pattern with values from @p locally_owned_array.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array(const ArrayView<const Number> &locally_owned_array,
+                            const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Start the communication round to fill the vector @p ghost_array
+     * according to the precomputed communication pattern with values from
+     * @p locally_owned_array. It can be overlapped with other communications.
+     * Unlike the Utilities::MPI::Partitioner implementation, here we
+     * don't need to specify the communication channel, the temporary storage,
+     * and the MPI requests.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_start(
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Finish the communication round to fill the vector @p ghost_array
+     * according to the precomputed communication pattern with values from
+     * @p locally_owned_array. It can be overlapped with other communications.
+     * Unlike the Utilities::MPI::Partitioner implementation, here we
+     * don't need to specify the communication channel, the temporary storage,
+     * and the MPI requests.
+     */
+    template <typename Number>
+    void
+    export_to_ghosted_array_finish(
+      const ArrayView<const Number> &locally_owned_array,
+      const ArrayView<Number> &      ghost_array) const;
+
+    /**
+     * Modify the vector @p locally_owned_array according to the precomputed
+     * communication pattern and the operation @p op with values from
+     * @p ghost_array.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Start the communication round to modify the vector
+     * @p locally_owned_array according to the precomputed communication
+     * pattern and the operation @p op with values from @p ghost_array.
+     * It can be overlapped with other communications.
+     * Unlike the Utilities::MPI::Partitioner implementation, here we
+     * don't need to specify the communication channel, the temporary storage,
+     * and the MPI requests.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_start(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Finish the communication round to modify the vector
+     * @p locally_owned_array according to the precomputed communication
+     * pattern and the operation @p op with values from @p ghost_array.
+     * It can be overlapped with other communications.
+     * Unlike the Utilities::MPI::Partitioner implementation, here we
+     * don't need to specify the communication channel, the temporary storage,
+     * and the MPI requests.
+     */
+    template <typename Number>
+    void
+    import_from_ghosted_array_finish(
+      const VectorOperation::values  op,
+      const ArrayView<const Number> &ghost_array,
+      const ArrayView<Number> &      locally_owned_array) const;
+
+    /**
+     * Return the underlying MPI communicator.
+     */
+    const MPI_Comm &
+    get_mpi_communicator() const override;
+
+  protected:
+    CommunicationPattern    ghost, larger_ghost;
+    IndexSet                ghost_indices_data;
+    types::global_dof_index n_ghost_indices_data;
+    types::global_dof_index n_ghost_indices_larger;
+  };
+
+} // namespace PETScWrappers
+
+/** @} */
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif
+
+#endif
diff --git a/source/lac/CMakeLists.txt b/source/lac/CMakeLists.txt
index ecf52e97c6..da74b1d615 100644
--- a/source/lac/CMakeLists.txt
+++ b/source/lac/CMakeLists.txt
@@ -80,6 +80,7 @@ set(_inst
   la_parallel_vector.inst.in
   la_parallel_block_vector.inst.in
   precondition_block.inst.in
+  petsc_communication_pattern.inst.in
   relaxation_block.inst.in
   read_write_vector.inst.in
   scalapack.inst.in
@@ -108,6 +109,7 @@ if(DEAL_II_WITH_PETSC)
     petsc_parallel_vector.cc
     petsc_precondition.cc
     petsc_solver.cc
+    petsc_communication_pattern.cc
     petsc_sparse_matrix.cc
     petsc_vector_base.cc
   )
diff --git a/source/lac/petsc_communication_pattern.cc b/source/lac/petsc_communication_pattern.cc
new file mode 100644
index 0000000000..b2830a4f97
--- /dev/null
+++ b/source/lac/petsc_communication_pattern.cc
@@ -0,0 +1,492 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#ifdef DEAL_II_WITH_PETSC
+
+#  include <deal.II/base/mpi.templates.h>
+
+#  include <deal.II/lac/exceptions.h>
+
+DEAL_II_NAMESPACE_OPEN
+
+// Shorthand notation for PETSc error codes.
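+// Each wrapped call stores the returned PetscErrorCode, and any nonzero
+// value is converted into a thrown ExcPETScError exception.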
+#  define AssertPETSc(code)                             \
+    do                                                  \
+      {                                                 \
+        PetscErrorCode __ierr = (code);                 \
+        AssertThrow(__ierr == 0, ExcPETScError(__ierr)); \
+      }                                                 \
+    while (0)
+
+namespace PETScWrappers
+{
+  CommunicationPattern::CommunicationPattern()
+    : sf(nullptr)
+  {}
+
+  CommunicationPattern::~CommunicationPattern()
+  {
+    clear();
+  }
+
+  void
+  CommunicationPattern::reinit(const types::global_dof_index local_size,
+                               const IndexSet &              ghost_indices,
+                               const MPI_Comm &              communicator)
+  {
+    clear();
+
+    PetscLayout layout;
+    AssertPETSc(PetscLayoutCreate(communicator, &layout));
+    AssertPETSc(PetscLayoutSetLocalSize(layout, local_size));
+    AssertPETSc(PetscLayoutSetUp(layout));
+
+    PetscInt start, end;
+    AssertPETSc(PetscLayoutGetRange(layout, &start, &end));
+
+    IndexSet want;
+    want.add_range(start, end);
+    want.add_indices(ghost_indices);
+    want.compress();
+
+    const PetscInt *idxs;
+    PetscInt        n;
+    IS              is = want.make_petsc_is(communicator);
+    AssertPETSc(ISGetLocalSize(is, &n));
+    AssertPETSc(ISGetIndices(is, &idxs));
+
+    AssertPETSc(PetscSFCreate(communicator, &sf));
+    AssertPETSc(
+      PetscSFSetGraphLayout(sf, layout, n, nullptr, PETSC_OWN_POINTER, idxs));
+    AssertPETSc(PetscSFSetUp(sf));
+
+    AssertPETSc(ISRestoreIndices(is, &idxs));
+    AssertPETSc(ISDestroy(&is));
+    AssertPETSc(PetscLayoutDestroy(&layout));
+  }
+
+  void
+  CommunicationPattern::reinit(const IndexSet &locally_owned_indices,
+                               const IndexSet &ghost_indices,
+                               const MPI_Comm &communicator)
+  {
+    std::vector<types::global_dof_index> in_deal;
+    locally_owned_indices.fill_index_vector(in_deal);
+    std::vector<PetscInt> in_petsc(in_deal.begin(), in_deal.end());
+
+    std::vector<types::global_dof_index> out_deal;
+    ghost_indices.fill_index_vector(out_deal);
+    std::vector<PetscInt> out_petsc(out_deal.begin(), out_deal.end());
+
+    std::vector<PetscInt> dummy;
+
+    this->do_reinit(in_petsc, dummy, out_petsc, dummy, communicator);
+  }
+
+  void
+  CommunicationPattern::reinit(
+    const std::vector<types::global_dof_index> &indices_has,
+    const std::vector<types::global_dof_index> &indices_want,
+    const MPI_Comm &                            communicator)
+  {
+    // Clean vectors from numbers::invalid_dof_index (indicating padding)
+    std::vector<PetscInt> indices_has_clean, indices_has_loc;
+    std::vector<PetscInt> indices_want_clean, indices_want_loc;
+    indices_want_clean.reserve(indices_want.size());
+    indices_want_loc.reserve(indices_want.size());
+    indices_has_clean.reserve(indices_has.size());
+    indices_has_loc.reserve(indices_has.size());
+
+    PetscInt loc         = 0;
+    bool     has_invalid = false;
+    for (const auto i : indices_has)
+      {
+        if (i != numbers::invalid_dof_index)
+          {
+            indices_has_clean.push_back(static_cast<PetscInt>(i));
+            indices_has_loc.push_back(loc);
+          }
+        else
+          has_invalid = true;
+        loc++;
+      }
+    if (!has_invalid)
+      indices_has_loc.clear();
+
+    loc         = 0;
+    has_invalid = false;
+    for (const auto i : indices_want)
+      {
+        if (i != numbers::invalid_dof_index)
+          {
+            indices_want_clean.push_back(static_cast<PetscInt>(i));
+            indices_want_loc.push_back(loc);
+          }
+        else
+          has_invalid = true;
+        loc++;
+      }
+    if (!has_invalid)
+      indices_want_loc.clear();
+
+    this->do_reinit(indices_has_clean,
+                    indices_has_loc,
+                    indices_want_clean,
+                    indices_want_loc,
+                    communicator);
+  }
+
+  void
+  CommunicationPattern::do_reinit(const std::vector<PetscInt> &inidx,
+                                  const std::vector<PetscInt> &inloc,
+                                  const std::vector<PetscInt> &outidx,
+                                  const std::vector<PetscInt> &outloc,
+                                  const MPI_Comm &             communicator)
+  {
+    clear();
+
+    // inidx is assumed to be unstructured and non-overlapping.
+    // However, it may have holes in it and not be a full cover.
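+    // outidx lists the indices this process wants to receive; the same
+    // owned index may be requested by several processes.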
+    //
+    // We create two PETSc SFs and compose them to get
+    // the final communication pattern
+    //
+    //   sf1 : local distributed to tmp
+    //   sf2 : tmp to local with ghosts
+    //   sf(x) = sf2(sf1(x))
+    PetscSF sf1, sf2;
+
+    // First create an SF where leaves are inidx (at location inloc)
+    // and roots are unique indices in contiguous way
+    // Code adapted from MatZeroRowsMapLocal_Private in PETSc
+    PetscInt n  = static_cast<PetscInt>(inidx.size());
+    PetscInt lN = n > 0 ? *std::max_element(inidx.begin(), inidx.end()) : -1;
+    PetscInt N, nl;
+
+    Utilities::MPI::internal::all_reduce(
+      MPI_MAX,
+      ArrayView<const PetscInt>(&lN, 1),
+      communicator,
+      ArrayView<PetscInt>(&N, 1));
+
+    PetscSFNode *remotes;
+    AssertPETSc(PetscMalloc1(n, &remotes));
+
+    PetscLayout layout;
+    AssertPETSc(PetscLayoutCreate(communicator, &layout));
+    AssertPETSc(PetscLayoutSetSize(layout, N + 1));
+    AssertPETSc(PetscLayoutSetUp(layout));
+    AssertPETSc(PetscLayoutGetLocalSize(layout, &nl));
+
+    const PetscInt *ranges;
+    AssertPETSc(PetscLayoutGetRanges(layout, &ranges));
+
+    PetscInt    cnt   = 0;
+    PetscMPIInt owner = 0;
+    for (const auto idx : inidx)
+      {
+        // short-circuit the search if the last owner owns this index too
+        if (idx < ranges[owner] || ranges[owner + 1] <= idx)
+          {
+            AssertPETSc(PetscLayoutFindOwner(layout, idx, &owner));
+          }
+        remotes[cnt].rank  = owner;
+        remotes[cnt].index = idx - ranges[owner];
+        cnt++;
+      }
+
+    AssertPETSc(PetscSFCreate(communicator, &sf2));
+    AssertPETSc(PetscSFSetGraph(sf2,
+                                nl,
+                                n,
+                                const_cast<PetscInt *>(
+                                  inloc.size() > 0 ? inloc.data() : nullptr),
+                                PETSC_COPY_VALUES,
+                                remotes,
+                                PETSC_OWN_POINTER));
+    AssertPETSc(PetscSFSetUp(sf2));
+    // We need to invert root and leaf space to create the first SF
+    AssertPETSc(PetscSFCreateInverseSF(sf2, &sf1));
+    AssertPETSc(PetscSFDestroy(&sf2));
+
+    // Now create the SF from the contiguous space to the local output space
+    n = static_cast<PetscInt>(outidx.size());
+    AssertPETSc(PetscSFCreate(communicator, &sf2));
+    AssertPETSc(PetscSFSetGraphLayout(
+      sf2,
+      layout,
+      n,
+      const_cast<PetscInt *>(outloc.size() > 0 ? outloc.data() : nullptr),
+      PETSC_COPY_VALUES,
+      const_cast<PetscInt *>(n > 0 ? outidx.data() : nullptr)));
+    AssertPETSc(PetscSFSetUp(sf2));
+
+    // The final SF is the composition of the two
+    AssertPETSc(PetscSFCompose(sf1, sf2, &sf));
+
+    // Cleanup
+    AssertPETSc(PetscLayoutDestroy(&layout));
+    AssertPETSc(PetscSFDestroy(&sf1));
+    AssertPETSc(PetscSFDestroy(&sf2));
+  }
+
+  void
+  CommunicationPattern::clear()
+  {
+    AssertPETSc(PetscSFDestroy(&sf));
+  }
+
+  const MPI_Comm &
+  CommunicationPattern::get_mpi_communicator() const
+  {
+    static MPI_Comm comm = PetscObjectComm(reinterpret_cast<PetscObject>(sf));
+    return comm;
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::export_to_ghosted_array_start(
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    auto datatype = Utilities::MPI::mpi_type_id_for_type<Number>;
+
+#  if DEAL_II_PETSC_VERSION_LT(3, 15, 0)
+    AssertPETSc(PetscSFBcastBegin(sf, datatype, src.data(), dst.data()));
+#  else
+    AssertPETSc(
+      PetscSFBcastBegin(sf, datatype, src.data(), dst.data(), MPI_REPLACE));
+#  endif
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::export_to_ghosted_array_finish(
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    auto datatype = Utilities::MPI::mpi_type_id_for_type<Number>;
+
+#  if DEAL_II_PETSC_VERSION_LT(3, 15, 0)
+    AssertPETSc(PetscSFBcastEnd(sf, datatype, src.data(), dst.data()));
+#  else
+    AssertPETSc(
+      PetscSFBcastEnd(sf, datatype, src.data(), dst.data(), MPI_REPLACE));
+#  endif
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::export_to_ghosted_array(
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    export_to_ghosted_array_start(src, dst);
+    export_to_ghosted_array_finish(src, dst);
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::import_from_ghosted_array_start(
+    const VectorOperation::values  op,
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    MPI_Op mpiop    = (op == VectorOperation::insert) ? MPI_REPLACE : MPI_SUM;
+    auto   datatype = Utilities::MPI::mpi_type_id_for_type<Number>;
+
+    AssertPETSc(
+      PetscSFReduceBegin(sf, datatype, src.data(), dst.data(), mpiop));
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::import_from_ghosted_array_finish(
+    const VectorOperation::values  op,
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    MPI_Op mpiop    = (op == VectorOperation::insert) ? MPI_REPLACE : MPI_SUM;
+    auto   datatype = Utilities::MPI::mpi_type_id_for_type<Number>;
+
+    AssertPETSc(PetscSFReduceEnd(sf, datatype, src.data(), dst.data(), mpiop));
+  }
+
+  template <typename Number>
+  void
+  CommunicationPattern::import_from_ghosted_array(
+    const VectorOperation::values  op,
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    import_from_ghosted_array_start(op, src, dst);
+    import_from_ghosted_array_finish(op, src, dst);
+  }
+
+  // Partitioner
+
+  Partitioner::Partitioner()
+    : ghost()
+    , larger_ghost()
+    , ghost_indices_data()
+    , n_ghost_indices_data(numbers::invalid_dof_index)
+    , n_ghost_indices_larger(numbers::invalid_dof_index)
+  {}
+
+  void
+  Partitioner::reinit(const IndexSet &locally_owned_indices,
+                      const IndexSet &ghost_indices,
+                      const MPI_Comm &communicator)
+  {
+    ghost_indices_data = ghost_indices;
+    ghost_indices_data.subtract_set(locally_owned_indices);
+    ghost_indices_data.compress();
+
+    ghost.reinit(locally_owned_indices, ghost_indices_data, communicator);
+    larger_ghost.clear();
+
+    n_ghost_indices_data   = ghost_indices_data.n_elements();
+    n_ghost_indices_larger = numbers::invalid_dof_index;
+  }
+
+  void
+  Partitioner::reinit(const IndexSet &locally_owned_indices,
+                      const IndexSet &ghost_indices,
+                      const IndexSet &larger_ghost_indices,
+                      const MPI_Comm &communicator)
+  {
+    std::vector<types::global_dof_index> local_indices;
+    locally_owned_indices.fill_index_vector(local_indices);
+
+    ghost_indices_data = ghost_indices;
+    ghost_indices_data.subtract_set(locally_owned_indices);
+    ghost_indices_data.compress();
+
+    std::vector<types::global_dof_index> expanded_ghost_indices(
+      larger_ghost_indices.n_elements(), numbers::invalid_dof_index);
+    for (auto index : ghost_indices_data)
+      {
+        Assert(larger_ghost_indices.is_element(index),
+               ExcMessage("The given larger ghost index set must contain "
+                          "all indices in the actual index set."));
+        auto tmp_index = larger_ghost_indices.index_within_set(index);
+        expanded_ghost_indices[tmp_index] = index;
+      }
+
+    ghost.reinit(locally_owned_indices, ghost_indices_data, communicator);
+    larger_ghost.reinit(local_indices, expanded_ghost_indices, communicator);
+    n_ghost_indices_data   = ghost_indices_data.n_elements();
+    n_ghost_indices_larger = larger_ghost_indices.n_elements();
+  }
+
+  const MPI_Comm &
+  Partitioner::get_mpi_communicator() const
+  {
+    return ghost.get_mpi_communicator();
+  }
+
+  template <typename Number>
+  void
+  Partitioner::export_to_ghosted_array_start(
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    if (dst.size() == n_ghost_indices_larger)
+      {
+        larger_ghost.export_to_ghosted_array_start(src, dst);
+      }
+    else
+      {
+        ghost.export_to_ghosted_array_start(src, dst);
+      }
+  }
+
+  template <typename Number>
+  void
+  Partitioner::export_to_ghosted_array_finish(
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    if (dst.size() == n_ghost_indices_larger)
+      {
+        larger_ghost.export_to_ghosted_array_finish(src, dst);
+      }
+    else
+      {
+        ghost.export_to_ghosted_array_finish(src, dst);
+      }
+  }
+
+  template <typename Number>
+  void
+  Partitioner::export_to_ghosted_array(const ArrayView<const Number> &src,
+                                       const ArrayView<Number> &      dst) const
+  {
+    export_to_ghosted_array_start(src, dst);
+    export_to_ghosted_array_finish(src, dst);
+  }
+
+  template <typename Number>
+  void
+  Partitioner::import_from_ghosted_array_start(
+    const VectorOperation::values  op,
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    if (src.size() == n_ghost_indices_larger)
+      {
+        larger_ghost.import_from_ghosted_array_start(op, src, dst);
+      }
+    else
+      {
+        ghost.import_from_ghosted_array_start(op, src, dst);
+      }
+  }
+
+  template <typename Number>
+  void
+  Partitioner::import_from_ghosted_array_finish(
+    const VectorOperation::values  op,
+    const ArrayView<const Number> &src,
+    const ArrayView<Number> &      dst) const
+  {
+    if (src.size() == n_ghost_indices_larger)
+      {
+        larger_ghost.import_from_ghosted_array_finish(op, src, dst);
+      }
+    else
+      {
+        ghost.import_from_ghosted_array_finish(op, src, dst);
+      }
+  }
+
+  template <typename Number>
+  void
+  Partitioner::import_from_ghosted_array(const VectorOperation::values  op,
+                                         const ArrayView<const Number> &src,
+                                         const ArrayView<Number> &dst) const
+  {
+    import_from_ghosted_array_start(op, src, dst);
+    import_from_ghosted_array_finish(op, src, dst);
+  }
+
+} // namespace PETScWrappers
+
+// Explicit instantiations
+#  include "petsc_communication_pattern.inst"
+
+DEAL_II_NAMESPACE_CLOSE
+
+#endif // DEAL_II_WITH_PETSC
diff --git a/source/lac/petsc_communication_pattern.inst.in b/source/lac/petsc_communication_pattern.inst.in
new file mode 100644
index 0000000000..932a5967d2
--- /dev/null
+++ b/source/lac/petsc_communication_pattern.inst.in
@@ -0,0 +1,85 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+
+for (S : MPI_SCALARS)
+  {
+    namespace PETScWrappers
+    \{
+      template void
+      CommunicationPattern::export_to_ghosted_array(
+        const ArrayView<const S> &src,
+        const ArrayView<S> &      dst) const;
+
+      template void
+      CommunicationPattern::export_to_ghosted_array_start(
+        const ArrayView<const S> &src,
+        const ArrayView<S> &      dst) const;
+
+      template void
+      CommunicationPattern::export_to_ghosted_array_finish(
+        const ArrayView<const S> &src,
+        const ArrayView<S> &      dst) const;
+
+      template void
+      CommunicationPattern::import_from_ghosted_array(
+        const VectorOperation::values op,
+        const ArrayView<const S> &    src,
+        const ArrayView<S> &          dst) const;
+
+      template void
+      CommunicationPattern::import_from_ghosted_array_start(
+        const VectorOperation::values op,
+        const ArrayView<const S> &    src,
+        const ArrayView<S> &          dst) const;
+
+      template void
+      CommunicationPattern::import_from_ghosted_array_finish(
+        const VectorOperation::values op,
+        const ArrayView<const S> &    src,
+        const ArrayView<S> &          dst) const;
+
+      template void
+      Partitioner::export_to_ghosted_array(const ArrayView<const S> &src,
+                                           const ArrayView<S> &      dst) const;
+
+      template void
+      Partitioner::export_to_ghosted_array_start(const ArrayView<const S> &src,
+                                                 const ArrayView<S> &dst) const;
+
+      template void
+      Partitioner::export_to_ghosted_array_finish(
+        const ArrayView<const S> &src,
+        const ArrayView<S> &      dst) const;
+
+      template void
+      Partitioner::import_from_ghosted_array(const VectorOperation::values op,
+                                             const ArrayView<const S> &    src,
+                                             const ArrayView<S> &dst) const;
+
+      template void
+      Partitioner::import_from_ghosted_array_start(
+        const VectorOperation::values op,
+        const ArrayView<const S> &    src,
+        const ArrayView<S> &          dst) const;
+
+      template void
+      Partitioner::import_from_ghosted_array_finish(
+        const VectorOperation::values op,
+        const ArrayView<const S> &    src,
+        const ArrayView<S> &          dst) const;
+    \}
+  }
diff --git a/tests/petsc/petsc_noncontiguous_partitioner_01.cc b/tests/petsc/petsc_noncontiguous_partitioner_01.cc
new file mode 100644
index 0000000000..31278a7627
--- /dev/null
+++ b/tests/petsc/petsc_noncontiguous_partitioner_01.cc
@@ -0,0 +1,78 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2023 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test PETScWrappers::CommunicationPattern for non-contiguous index space.
+// Copy-pasted from tests/base/mpi_noncontiguous_partitioner_01.cc
+
+#include <deal.II/base/aligned_vector.h>
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#include "../tests.h"
+
+
+void
+test(const MPI_Comm comm)
+{
+  IndexSet index_set_has(4);
+  IndexSet index_set_want(4);
+
+  if (Utilities::MPI::this_mpi_process(comm) == 0)
+    {
+      index_set_has.add_index(1);
+      index_set_want.add_index(2);
+    }
+  else
+    {
+      index_set_has.add_index(2);
+      index_set_want.add_index(1);
+      index_set_want.add_index(2);
+    }
+
+  PETScWrappers::CommunicationPattern petscsf;
+  petscsf.reinit(index_set_has, index_set_want, comm);
+
+  AlignedVector<double> src(index_set_has.n_elements());
+  AlignedVector<double> dst(index_set_want.n_elements());
+
+  src[0] = Utilities::MPI::this_mpi_process(comm) * 100 + 1;
+
+  petscsf.export_to_ghosted_array(
+    ArrayView<const double>(src.data(), src.size()),
+    ArrayView<double>(dst.data(), dst.size()));
+
+  for (size_t i = 0; i < src.size(); ++i)
+    deallog << static_cast<int>(src[i]) << ' ';
+  deallog << std::endl;
+  for (size_t i = 0; i < dst.size(); ++i)
+    deallog << static_cast<int>(dst[i]) << ' ';
+  deallog << std::endl;
+}
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll all;
+
+  const MPI_Comm comm = MPI_COMM_WORLD;
+
+  {
+    deallog.push("all");
+    test(comm);
+    deallog.pop();
+  }
+}
diff --git a/tests/petsc/petsc_noncontiguous_partitioner_01.mpirun=2.output b/tests/petsc/petsc_noncontiguous_partitioner_01.mpirun=2.output
new file mode 100644
index 0000000000..1ceffc9f3f
--- /dev/null
+++ b/tests/petsc/petsc_noncontiguous_partitioner_01.mpirun=2.output
@@ -0,0 +1,7 @@
+
+DEAL:0:all::1
+DEAL:0:all::101
+
+DEAL:1:all::101
+DEAL:1:all::1 101
+
diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.cc b/tests/petsc/petsc_noncontiguous_partitioner_02.cc
new file mode 100644
index 0000000000..ff54429753
--- /dev/null
+++ b/tests/petsc/petsc_noncontiguous_partitioner_02.cc
@@ -0,0 +1,163 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2019 - 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test PETScWrappers::CommunicationPattern for the repartitioning of
+// degrees of freedom (Morton order -> layer partitioning)
+// Copy-pasted from tests/base/mpi_noncontiguous_partitioner_02.cc
+
+#include <deal.II/base/aligned_vector.h>
+
+#include <deal.II/distributed/tria.h>
+
+#include <deal.II/dofs/dof_handler.h>
+
+#include <deal.II/fe/fe_dgq.h>
+
+#include <deal.II/grid/grid_generator.h>
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#include "../tests.h"
+
+
+template <int dim>
+void
+test(const MPI_Comm &comm, const bool do_revert, const unsigned int dir)
+{
+  const unsigned int degree           = 2;
+  const unsigned int n_refinements    = 2;
+  const unsigned int n_cells_1D       = Utilities::pow(2, n_refinements);
+  const unsigned int n_points_1D      = (degree + 1) * n_cells_1D;
+  const double       delta            = 1.0 / n_cells_1D;
+  const unsigned int n_points_cell_1D = degree + 1;
+  const unsigned int n_points_cell    = Utilities::pow(degree + 1, dim);
+  const unsigned int n_points_face    = Utilities::pow(n_points_1D, dim - 1);
+
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+  const unsigned int my_rank =
+    do_revert ? (n_procs - 1 - Utilities::MPI::this_mpi_process(comm)) :
+                Utilities::MPI::this_mpi_process(comm);
+
+  parallel::distributed::Triangulation<dim> tria(comm);
+
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(n_refinements);
+
+  FE_DGQ<dim> fe(degree);
+
+  DoFHandler<dim> dof_handler(tria);
+  dof_handler.distribute_dofs(fe);
+
+  const unsigned int n_dofs = dof_handler.n_dofs();
+
+
+  std::vector<types::global_dof_index> indices_has, indices_want;
+
+  auto norm_point_to_lex = [&](const Point<dim> &c) {
+    // convert normalized point [0, 1] to lex
+    if (dim == 2)
+      return std::floor(c[0]) + n_cells_1D * std::floor(c[1]);
+    else
+      return std::floor(c[0]) + n_cells_1D * std::floor(c[1]) +
+             n_cells_1D * n_cells_1D * std::floor(c[2]);
+  };
+
+  // ... has (symm)
+  for (const auto &cell : dof_handler.active_cell_iterators())
+    if (cell->is_active() && cell->is_locally_owned())
+      {
+        auto c = cell->center();
+        for (unsigned int i = 0; i < dim; ++i)
+          c[i] = c[i] / delta;
+
+        const auto lid = static_cast<unsigned int>(norm_point_to_lex(c));
+
+        for (unsigned int i = lid * n_points_cell;
+             i < (lid + 1) * n_points_cell;
+             i++)
+          indices_has.push_back(i);
+      }
+
+
+  const unsigned int div = n_points_1D / n_procs;
+  const unsigned int rem = n_points_1D % n_procs;
+
+  const unsigned int start = div * (my_rank + 0) + std::min(my_rank + 0, rem);
+  const unsigned int end   = div * (my_rank + 1) + std::min(my_rank + 1, rem);
+
+  if (dim == 2 && dir == 0)
+    {
+      for (unsigned int j = 0, c = start * n_points_face; j < end; ++j)
+        for (unsigned int i = start; i < n_points_1D; ++i)
+          indices_want.push_back(c++);
+    }
+  else if (dim == 2 && dir == 1)
+    {
+      for (unsigned int j = 0; j < n_points_1D; ++j)
+        for (unsigned int i = start; i < end; ++i)
+          indices_want.push_back(j * n_points_face + i);
+    }
+  else
+    Assert(false, StandardExceptions::ExcNotImplemented());
+
+  if (do_revert)
+    std::reverse(indices_want.begin(), indices_want.end());
+
+  PETScWrappers::CommunicationPattern vector;
+  vector.reinit(indices_has, indices_want, comm);
+
+  AlignedVector<double> src(indices_has.size());
+  for (unsigned int i = 0; i < indices_has.size(); ++i)
+    src[i] = indices_has[i];
+
+
+  AlignedVector<double> dst(indices_want.size());
+
+  vector.export_to_ghosted_array(
+    ArrayView<const double>(src.data(), src.size()),
+    ArrayView<double>(dst.data(), dst.size()));
+
+  for (size_t i = 0; i < src.size(); ++i)
+    deallog << static_cast<int>(src[i]) << ' ';
+  deallog << std::endl;
+  for (size_t i = 0; i < dst.size(); ++i)
+    deallog << static_cast<int>(dst[i]) << ' ';
+  deallog << std::endl << std::endl;
+
+
+  for (size_t i = 0; i
< dst.size(); ++i) + AssertDimension(dst[i], indices_want[i]); +} + +template +void +test_dim(const MPI_Comm &comm, const bool do_revert) +{ + for (int dir = 0; dir < dim; ++dir) + test(comm, do_revert, dir); +} + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll all; + + const MPI_Comm comm = MPI_COMM_WORLD; + + test_dim<2>(comm, /*do_revert=*/false); + test_dim<2>(comm, /*do_revert=*/true); +} diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=1.output b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=1.output new file mode 100644 index 0000000000..20818dd82b --- /dev/null +++ b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=1.output @@ -0,0 +1,13 @@ + +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0::143 142 141 140 139 138 137 136 135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 71 70 69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 
46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:0::143 142 141 140 139 138 137 136 135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 71 70 69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 +DEAL:0:: diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=2.output b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=2.output new file mode 100644 index 0000000000..adc0250aad --- /dev/null +++ b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=2.output @@ -0,0 +1,27 @@ + +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:0::0 1 2 3 4 5 12 13 14 15 16 17 24 25 26 27 28 29 36 37 38 39 40 41 48 49 50 51 52 53 60 61 62 63 64 65 72 73 74 75 76 77 84 85 86 87 88 89 96 97 98 99 100 101 108 109 110 111 112 113 120 121 122 123 124 125 132 133 134 135 136 137 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:0::143 142 141 140 139 138 137 136 135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:0::143 142 141 140 139 138 131 130 129 128 127 126 119 118 117 116 115 114 107 106 105 104 103 102 95 94 93 92 91 90 83 82 81 80 79 78 71 70 69 68 67 66 59 58 57 56 55 54 47 46 45 44 43 42 35 34 33 32 31 30 23 22 21 20 19 18 11 10 9 8 7 6 +DEAL:0:: + +DEAL:1::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 
137 138 139 140 141 142 143 +DEAL:1::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:1:: +DEAL:1::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:1::6 7 8 9 10 11 18 19 20 21 22 23 30 31 32 33 34 35 42 43 44 45 46 47 54 55 56 57 58 59 66 67 68 69 70 71 78 79 80 81 82 83 90 91 92 93 94 95 102 103 104 105 106 107 114 115 116 117 118 119 126 127 128 129 130 131 138 139 140 141 142 143 +DEAL:1:: +DEAL:1::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:1::71 70 69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 +DEAL:1:: +DEAL:1::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:1::137 136 135 134 133 132 125 124 123 122 121 120 113 112 111 110 109 108 101 100 99 98 97 96 89 88 87 86 85 84 77 76 75 74 73 72 65 64 63 62 61 60 53 52 51 50 49 48 41 40 39 38 37 36 29 28 27 26 25 24 17 16 15 14 13 12 5 4 3 2 1 0 +DEAL:1:: + diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=4.output b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=4.output new file mode 100644 index 0000000000..4860113984 --- /dev/null +++ b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=4.output @@ -0,0 +1,55 @@ + +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::0 1 2 12 13 14 24 25 26 36 37 38 48 49 50 60 61 62 72 73 74 84 85 86 96 97 98 108 109 110 120 121 122 132 133 134 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::143 142 141 140 139 138 137 136 135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::143 142 141 131 130 129 119 118 117 107 106 105 95 94 93 83 82 81 71 70 69 59 58 57 47 46 45 35 34 33 23 22 21 11 10 9 +DEAL:0:: + +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 
63 64 65 66 67 68 69 70 71 +DEAL:1::3 4 5 15 16 17 27 28 29 39 40 41 51 52 53 63 64 65 75 76 77 87 88 89 99 100 101 111 112 113 123 124 125 135 136 137 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::140 139 138 128 127 126 116 115 114 104 103 102 92 91 90 80 79 78 68 67 66 56 55 54 44 43 42 32 31 30 20 19 18 8 7 6 +DEAL:1:: + + +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:2:: +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:2::6 7 8 18 19 20 30 31 32 42 43 44 54 55 56 66 67 68 78 79 80 90 91 92 102 103 104 114 115 116 126 127 128 138 139 140 +DEAL:2:: +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:2::89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 71 70 69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 +DEAL:2:: +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:2::137 136 135 125 124 123 113 112 111 101 100 99 89 88 87 77 76 75 65 64 63 53 52 51 41 40 39 29 28 27 17 16 15 5 4 3 +DEAL:2:: + + +DEAL:3::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:3::108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:3:: +DEAL:3::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:3::9 10 11 21 22 23 33 34 35 45 46 47 57 58 59 69 70 71 81 82 83 93 94 95 105 106 107 117 118 119 129 130 131 141 142 143 +DEAL:3:: +DEAL:3::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:3::35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 +DEAL:3:: +DEAL:3::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:3::134 133 132 122 121 120 110 109 108 98 97 96 86 85 84 74 73 72 62 61 60 50 49 48 38 37 36 26 25 24 14 13 12 2 1 0 +DEAL:3:: + diff --git a/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=5.output b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=5.output new file mode 100644 index 0000000000..0cdd4ea23c --- /dev/null +++ b/tests/petsc/petsc_noncontiguous_partitioner_02.with_p4est=true.mpirun=5.output @@ -0,0 +1,69 @@ + +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 
12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::0 1 2 12 13 14 24 25 26 36 37 38 48 49 50 60 61 62 72 73 74 84 85 86 96 97 98 108 109 110 120 121 122 132 133 134 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::143 142 141 140 139 138 137 136 135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 +DEAL:0:: +DEAL:0::0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 +DEAL:0::143 142 131 130 119 118 107 106 95 94 83 82 71 70 59 58 47 46 35 34 23 22 11 10 +DEAL:0:: + +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::3 4 5 15 16 17 27 28 29 39 40 41 51 52 53 63 64 65 75 76 77 87 88 89 99 100 101 111 112 113 123 124 125 135 136 137 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::135 134 133 132 131 130 129 128 127 126 125 124 123 122 121 120 119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 +DEAL:1:: +DEAL:1::18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 +DEAL:1::141 140 129 128 117 116 105 104 93 92 81 80 69 68 57 56 45 44 33 32 21 20 9 8 +DEAL:1:: + + +DEAL:2:: +DEAL:2::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 +DEAL:2:: +DEAL:2:: +DEAL:2::6 7 18 19 30 31 42 43 54 55 66 67 78 79 90 91 102 103 114 115 126 127 138 139 +DEAL:2:: +DEAL:2:: +DEAL:2::119 118 117 116 115 114 113 112 111 110 109 108 107 106 105 104 103 102 101 100 99 98 97 96 95 94 93 92 91 90 89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 +DEAL:2:: +DEAL:2:: +DEAL:2::139 138 127 126 115 114 103 102 91 90 79 78 67 66 55 54 43 42 31 30 19 18 7 6 +DEAL:2:: + + +DEAL:3::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:3::96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 +DEAL:3:: +DEAL:3::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:3::8 9 20 21 32 33 44 45 56 57 68 69 80 81 92 93 104 105 116 117 128 129 140 141 +DEAL:3:: +DEAL:3::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:3::89 88 87 86 85 84 83 82 81 80 79 78 77 76 75 74 73 72 71 70 69 68 67 66 65 64 63 62 61 60 59 58 57 56 55 54 53 52 51 50 49 48 47 46 45 44 43 42 41 40 39 38 37 36 +DEAL:3:: +DEAL:3::72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 +DEAL:3::137 136 135 125 124 123 113 112 111 101 100 99 89 88 87 77 76 75 65 64 63 53 52 51 41 40 39 29 28 27 17 16 15 5 4 3 +DEAL:3:: + + 
+DEAL:4::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:4::120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:4:: +DEAL:4::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:4::10 11 22 23 34 35 46 47 58 59 70 71 82 83 94 95 106 107 118 119 130 131 142 143 +DEAL:4:: +DEAL:4::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:4::35 34 33 32 31 30 29 28 27 26 25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0 +DEAL:4:: +DEAL:4::90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 +DEAL:4::134 133 132 122 121 120 110 109 108 98 97 96 86 85 84 74 73 72 62 61 60 50 49 48 38 37 36 26 25 24 14 13 12 2 1 0 +DEAL:4:: + diff --git a/tests/petsc/petsc_noncontiguous_partitioner_03.cc b/tests/petsc/petsc_noncontiguous_partitioner_03.cc new file mode 100644 index 0000000000..03ec0809c8 --- /dev/null +++ b/tests/petsc/petsc_noncontiguous_partitioner_03.cc @@ -0,0 +1,92 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2019 - 2022 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + +// Test PETScWrappers::CommunicationPattern for padding. 
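+// (Entries equal to numbers::invalid_dof_index act as padding: they take no
+// part in the index exchange but keep their slot in the data arrays.)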
+// Copy-pasted from tests/base/mpi_noncontiguous_partitioner_03.cc
+
+#include <deal.II/base/aligned_vector.h>
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#include "../tests.h"
+
+
+void
+test(const MPI_Comm                       comm,
+     std::vector<types::global_dof_index> index_set_has,
+     std::vector<types::global_dof_index> index_set_want)
+{
+  PETScWrappers::CommunicationPattern vector;
+  vector.reinit(index_set_has, index_set_want, comm);
+
+  AlignedVector<double> src(index_set_has.size(), 0);
+  AlignedVector<double> dst(index_set_want.size(), 0);
+
+  for (unsigned int i = 0; i < index_set_has.size(); ++i)
+    src[i] = Utilities::MPI::this_mpi_process(comm) * 100 + i;
+
+  vector.export_to_ghosted_array(ArrayView<const double>(src.data(),
+                                                         src.size()),
+                                 ArrayView<double>(dst.data(), dst.size()));
+
+  for (size_t i = 0; i < src.size(); ++i)
+    deallog << static_cast<int>(src[i]) << ' ';
+  deallog << std::endl;
+  for (size_t i = 0; i < dst.size(); ++i)
+    deallog << static_cast<int>(dst[i]) << ' ';
+  deallog << std::endl;
+}
+
+int
+main(int argc, char *argv[])
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+  MPILogInitAll                    all;
+
+  const MPI_Comm comm = MPI_COMM_WORLD;
+
+  const unsigned int rank = Utilities::MPI::this_mpi_process(comm);
+
+  {
+    deallog.push("padding-non");
+
+    if (rank == 0)
+      test(comm, {0, 1, 2, 3}, {4, 5, 6, 7});
+    else
+      test(comm, {4, 5, 6, 7}, {0, 1, 2, 3});
+    deallog.pop();
+  }
+
+  {
+    deallog.push("padding-src");
+
+    if (rank == 0)
+      test(comm, {0, 1, numbers::invalid_dof_index, 2, 3}, {4, 5, 6, 7});
+    else
+      test(comm, {4, 5, 6, 7}, {0, 1, 2, 3});
+    deallog.pop();
+  }
+
+  {
+    deallog.push("padding-dst");
+
+    if (rank == 0)
+      test(comm, {0, 1, 2, 3}, {4, 5, numbers::invalid_dof_index, 6, 7});
+    else
+      test(comm, {4, 5, 6, 7}, {0, 1, 2, 3});
+    deallog.pop();
+  }
+}
diff --git a/tests/petsc/petsc_noncontiguous_partitioner_03.mpirun=2.output b/tests/petsc/petsc_noncontiguous_partitioner_03.mpirun=2.output
new file mode 100644
index 0000000000..bfef1b6851
--- /dev/null
+++ b/tests/petsc/petsc_noncontiguous_partitioner_03.mpirun=2.output
@@ -0,0 +1,15 @@
+
+DEAL:0:padding-non::0 1 2 3
+DEAL:0:padding-non::100 101 102 103
+DEAL:0:padding-src::0 1 2 3 4
+DEAL:0:padding-src::100 101 102 103
+DEAL:0:padding-dst::0 1 2 3
+DEAL:0:padding-dst::100 101 0 102 103
+
+DEAL:1:padding-non::100 101 102 103
+DEAL:1:padding-non::0 1 2 3
+DEAL:1:padding-src::100 101 102 103
+DEAL:1:padding-src::0 1 3 4
+DEAL:1:padding-dst::100 101 102 103
+DEAL:1:padding-dst::0 1 2 3
+
diff --git a/tests/petsc/petsc_partitioner_06.cc b/tests/petsc/petsc_partitioner_06.cc
new file mode 100644
index 0000000000..e5041918d0
--- /dev/null
+++ b/tests/petsc/petsc_partitioner_06.cc
@@ -0,0 +1,168 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test the Partitioner with a smaller ghost index set contained within a
+// larger one, for the export_to_ghosted_array() calls.
+// Copy-pasted from tests/mpi/parallel_partitioner_06.cc
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Assert(numproc > 2, ExcNotImplemented());
+
+  const unsigned int set = 200;
+  AssertIndexRange(numproc, set - 2);
+  const unsigned int      local_size  = set - myid;
+  types::global_dof_index global_size = 0;
+  types::global_dof_index my_start    = 0;
+  for (unsigned int i = 0; i < numproc; ++i)
+    {
+      global_size += set - i;
+      if (i < myid)
+        my_start += set - i;
+    }
+
+  // each processor owns some indices, and every processor ghosts a few
+  // entries owned by the first three processors; some of these entries sit
+  // right at the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant_1(global_size), local_relevant_2(global_size);
+  local_relevant_1 = local_owned;
+  types::global_dof_index ghost_indices[10] = {1,
+                                               2,
+                                               13,
+                                               set - 2,
+                                               set - 1,
+                                               set,
+                                               set + 1,
+                                               2 * set,
+                                               2 * set + 1,
+                                               2 * set + 3};
+  local_relevant_1.add_indices(&ghost_indices[0], ghost_indices + 10);
+  if (myid > 0)
+    local_relevant_1.add_range(my_start - 10, my_start);
+  if (myid < numproc - 1)
+    local_relevant_1.add_range(my_start + local_size,
+                               my_start + local_size + 10);
+
+  local_relevant_2 = local_owned;
+  local_relevant_2.add_indices(&ghost_indices[0], ghost_indices + 10);
+  if (myid > 0)
+    local_relevant_2.add_index(my_start - 10);
+  if (myid < numproc - 1)
+    local_relevant_2.add_index(my_start + local_size + 9);
+
+  IndexSet local_relevant_3(global_size);
+  local_relevant_3.add_index(2);
+  if (myid > 0 && my_start > 0)
+    local_relevant_3.add_range(my_start - 10, my_start);
+
+  PETScWrappers::Partitioner v, w, x;
+  v.reinit(local_owned, local_relevant_1, MPI_COMM_WORLD);
+  w.reinit(local_owned, local_relevant_2, v.ghost_indices(), MPI_COMM_WORLD);
+  x.reinit(local_owned, local_relevant_3, v.ghost_indices(), MPI_COMM_WORLD);
+
+  // set up a locally owned array with some entries
+  std::vector<unsigned int> locally_owned_data_field(local_size);
+  for (unsigned int i = 0; i < local_size; ++i)
+    locally_owned_data_field[i] = my_start + i;
+  const std::vector<unsigned int> &locally_owned_data(
+    locally_owned_data_field);
+
+  // set up a ghost array
+  std::vector<unsigned int> ghosts(v.n_ghost_indices());
+
+  // send the full array
+  v.export_to_ghosted_array_start(make_array_view(locally_owned_data),
+                                  make_array_view(ghosts));
+  v.export_to_ghosted_array_finish(make_array_view(locally_owned_data),
+                                   make_array_view(ghosts));
+  deallog << "All ghosts: ";
+  for (unsigned int i = 0; i < ghosts.size(); ++i)
+    deallog << ghosts[i] << ' ';
+  deallog << std::endl;
+
+  // send only the array in w
+  std::fill(ghosts.begin(), ghosts.end(), 0);
+
+  w.export_to_ghosted_array_start(make_array_view(locally_owned_data),
+                                  make_array_view(ghosts));
+
+
+  // start a second send operation for the x partitioner in parallel to make
+  // sure communication does not get messed up
+  std::vector<unsigned int> ghosts2(x.n_ghost_indices());
+
+  x.export_to_ghosted_array_start(make_array_view(locally_owned_data),
+                                  make_array_view(ghosts2));
+
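+  // (Comment added for exposition.) Two exchanges are now in flight at the
+  // same time: the one started on w above and the one just started on x.
+  // Each partitioner is expected to carry its own communication object, so
+  // the matching finish calls below can be issued much later without the
+  // two message streams getting mixed up.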
+  w.export_to_ghosted_array_finish(make_array_view(locally_owned_data),
+                                   make_array_view(ghosts));
+  deallog << "Ghosts on reduced 1: ";
+  for (unsigned int i = 0; i < ghosts.size(); ++i)
+    deallog << ghosts[i] << ' ';
+  deallog << std::endl;
+
+  std::fill(ghosts.begin(), ghosts.end(), 0);
+
+  x.export_to_ghosted_array_start(make_array_view(locally_owned_data),
+                                  make_array_view(ghosts));
+  x.export_to_ghosted_array_finish(make_array_view(locally_owned_data),
+                                   make_array_view(ghosts));
+  deallog << "Ghosts on reduced 2: ";
+  for (unsigned int i = 0; i < ghosts.size(); ++i)
+    deallog << ghosts[i] << ' ';
+  deallog << std::endl;
+
+  x.export_to_ghosted_array_finish(make_array_view(locally_owned_data),
+                                   make_array_view(ghosts2));
+  deallog << "Ghosts on reduced 2 without excess entries: ";
+  for (unsigned int i = 0; i < ghosts2.size(); ++i)
+    deallog << ghosts2[i] << ' ';
+  deallog << std::endl;
+
+  x.export_to_ghosted_array_start(make_array_view(locally_owned_data),
+                                  make_array_view(ghosts));
+  x.export_to_ghosted_array_finish(make_array_view(locally_owned_data),
+                                   make_array_view(ghosts));
+  deallog << "Ghosts on reduced 2: ";
+  for (unsigned int i = 0; i < ghosts.size(); ++i)
+    deallog << ghosts[i] << ' ';
+  deallog << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
+  MPILogInitAll                    log;
+  test();
+}
diff --git a/tests/petsc/petsc_partitioner_06.mpirun=4.output b/tests/petsc/petsc_partitioner_06.mpirun=4.output
new file mode 100644
index 0000000000..1564165b12
--- /dev/null
+++ b/tests/petsc/petsc_partitioner_06.mpirun=4.output
@@ -0,0 +1,27 @@
+
+DEAL:0::All ghosts: 200 201 202 203 204 205 206 207 208 209 400 401 403
+DEAL:0::Ghosts on reduced 1: 200 201 0 0 0 0 0 0 0 209 400 401 403
+DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:0::Ghosts on reduced 2 without excess entries: 
+DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0
+
+DEAL:1::All ghosts: 1 2 13 190 191 192 193 194 195 196 197 198 199 399 400 401 402 403 404 405 406 407 408
+DEAL:1::Ghosts on reduced 1: 1 2 13 190 0 0 0 0 0 0 0 198 199 0 400 401 0 403 0 0 0 0 408
+DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0
+DEAL:1::Ghosts on reduced 2 without excess entries: 2 190 191 192 193 194 195 196 197 198 199
+DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0
+
+
+DEAL:2::All ghosts: 1 2 13 198 199 200 201 389 390 391 392 393 394 395 396 397 398 597 598 599 600 601 602 603 604 605 606
+DEAL:2::Ghosts on reduced 1: 1 2 13 198 199 200 201 389 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 606
+DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0
+DEAL:2::Ghosts on reduced 2 without excess entries: 2 389 390 391 392 393 394 395 396 397 398
+DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0
+
+
+DEAL:3::All ghosts: 1 2 13 198 199 200 201 400 401 403 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 1: 1 2 13 198 199 200 201 400 401 403 587 0 0 0 0 0 0 0 0 0
+DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 2 without excess entries: 2 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596
+
diff --git a/tests/petsc/petsc_partitioner_07.cc b/tests/petsc/petsc_partitioner_07.cc
new file mode 100644
index 0000000000..3aca0033a5
--- /dev/null
+++ b/tests/petsc/petsc_partitioner_07.cc
@@ -0,0 +1,181 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// Test the Partitioner with a smaller ghost index set contained within a
+// larger one, for the import_from_ghosted_array() calls.
+// Copy-pasted from tests/mpi/parallel_partitioner_07.cc
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/utilities.h>
+
+#include <deal.II/lac/petsc_communication_pattern.h>
+
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  unsigned int myid    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+  unsigned int numproc = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
+  Assert(numproc > 2, ExcNotImplemented());
+
+  const unsigned int set = 50;
+  AssertIndexRange(numproc, set - 2);
+  const unsigned int      local_size  = set - myid;
+  types::global_dof_index global_size = 0;
+  types::global_dof_index my_start    = 0;
+  for (unsigned int i = 0; i < numproc; ++i)
+    {
+      global_size += set - i;
+      if (i < myid)
+        my_start += set - i;
+    }
+
+  // each processor owns some indices, and every processor ghosts a few
+  // entries owned by the first three processors; some of these entries sit
+  // right at the border between two processors
+  IndexSet local_owned(global_size);
+  local_owned.add_range(my_start, my_start + local_size);
+  IndexSet local_relevant_1(global_size), local_relevant_2(global_size);
+  local_relevant_1 = local_owned;
+  types::global_dof_index ghost_indices[10] = {1,
+                                               2,
+                                               13,
+                                               set - 2,
+                                               set - 1,
+                                               set,
+                                               set + 1,
+                                               2 * set,
+                                               2 * set + 1,
+                                               2 * set + 3};
+  local_relevant_1.add_indices(&ghost_indices[0], ghost_indices + 10);
+  if (myid > 0)
+    local_relevant_1.add_range(my_start - 10, my_start);
+  if (myid < numproc - 1)
+    local_relevant_1.add_range(my_start + local_size,
+                               my_start + local_size + 10);
+
+  local_relevant_2 = local_owned;
+  local_relevant_2.add_indices(&ghost_indices[0], ghost_indices + 10);
+  if (myid > 0)
+    local_relevant_2.add_index(my_start - 10);
+  if (myid < numproc - 1)
+    local_relevant_2.add_index(my_start + local_size + 9);
+
+  IndexSet local_relevant_3(global_size);
+  local_relevant_3.add_index(2);
+  if (myid > 0 && my_start > 0)
+    local_relevant_3.add_range(my_start - 10, my_start);
+
+  PETScWrappers::Partitioner v, w, x;
+  v.reinit(local_owned, local_relevant_1, MPI_COMM_WORLD);
+  w.reinit(local_owned, local_relevant_2, v.ghost_indices(), MPI_COMM_WORLD);
+  x.reinit(local_owned, local_relevant_3, v.ghost_indices(), MPI_COMM_WORLD);
+
+  // set up a ghost array with some entries
+  std::vector<unsigned int> ghost_array(v.n_ghost_indices(), 1);
+
+  // set up other arrays
+  std::vector<unsigned int> locally_owned_array(local_size);
+
+  // send the full array
+  {
+    std::vector<unsigned int> ghosts(ghost_array);
+    v.import_from_ghosted_array_start(VectorOperation::add,
+                                      make_const_array_view(ghosts),
+                                      make_array_view(locally_owned_array));
+    v.import_from_ghosted_array_finish(VectorOperation::add,
+                                       make_const_array_view(ghosts),
+                                       make_array_view(locally_owned_array));
+    // check that the ghost entries are zeroed out in these calls
+    // NOT IMPLEMENTED
+    // for (unsigned int i = 0; i < v.n_ghost_indices(); ++i)
+    //   AssertDimension(ghosts[i], 0);
+  }
+  deallog << "From all ghosts: ";
+  for (unsigned int i = 0; i < locally_owned_array.size(); ++i)
+    deallog << locally_owned_array[i] << ' ';
+  deallog << std::endl;
+
+  // send only the array in w
+  std::fill(locally_owned_array.begin(), locally_owned_array.end(), 0);
+  {
+    std::vector<unsigned int> ghosts(ghost_array);
+    w.import_from_ghosted_array_start(VectorOperation::add,
+                                      make_const_array_view(ghosts),
+                                      make_array_view(locally_owned_array));
+    w.import_from_ghosted_array_finish(VectorOperation::add,
+                                       make_const_array_view(ghosts),
+                                       make_array_view(locally_owned_array));
+
+    // check that the ghost entries are zeroed out in these calls
+    // NOT IMPLEMENTED
+    // for (unsigned int i = 0; i < w.n_ghost_indices(); ++i)
+    //   AssertDimension(ghosts[i], 0);
+  }
+  deallog << "From reduced ghosts 1: ";
+  for (unsigned int i = 0; i < locally_owned_array.size(); ++i)
+    deallog << locally_owned_array[i] << ' ';
+  deallog << std::endl;
+
+  // send only the array in x
+  std::fill(locally_owned_array.begin(), locally_owned_array.end(), 0);
+  {
+    std::vector<unsigned int> ghosts(ghost_array);
+    x.import_from_ghosted_array_start(VectorOperation::add,
+                                      make_const_array_view(ghosts),
+                                      make_array_view(locally_owned_array));
+    x.import_from_ghosted_array_finish(VectorOperation::add,
+                                       make_const_array_view(ghosts),
+                                       make_array_view(locally_owned_array));
+
+    // check that the ghost entries are zeroed out in these calls
+    // NOT IMPLEMENTED
+    // for (unsigned int i = 0; i < x.n_ghost_indices(); ++i)
+    //   AssertDimension(ghosts[i], 0);
+  }
+  deallog << "From reduced ghosts 2: ";
+  for (unsigned int i = 0; i < locally_owned_array.size(); ++i)
+    deallog << locally_owned_array[i] << ' ';
+  deallog << std::endl;
+
+  // now send a tight array from x and add into the existing entries
+  std::vector<unsigned int> ghosts(x.n_ghost_indices(), 1);
+  x.import_from_ghosted_array_start(VectorOperation::add,
+                                    make_const_array_view(ghosts),
+                                    make_array_view(locally_owned_array));
+  x.import_from_ghosted_array_finish(VectorOperation::add,
+                                     make_const_array_view(ghosts),
+                                     make_array_view(locally_owned_array));
+  deallog << "From tight reduced ghosts 2: ";
+  for (unsigned int i = 0; i < locally_owned_array.size(); ++i)
+    deallog << locally_owned_array[i] << ' ';
+  deallog << std::endl;
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
+  MPILogInitAll                    log;
+  test();
+}
diff --git a/tests/petsc/petsc_partitioner_07.mpirun=4.output b/tests/petsc/petsc_partitioner_07.mpirun=4.output
new file mode 100644
index 0000000000..37a384eeec
--- /dev/null
+++ b/tests/petsc/petsc_partitioner_07.mpirun=4.output
@@ -0,0 +1,23 @@
+
+DEAL:0::From all ghosts: 0 3 3 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 3 3
+DEAL:0::From reduced ghosts 1: 0 3 3 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 3 3
+DEAL:0::From reduced ghosts 2: 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:0::From tight reduced ghosts 2: 0 0 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+DEAL:1::From all ghosts: 3 3 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:1::From reduced ghosts 1: 3 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+DEAL:1::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:1::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+
+DEAL:2::From all ghosts: 1 3 3 1 3 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:2::From reduced ghosts 1: 0 3 3 0 3 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+DEAL:2::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:2::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+
+DEAL:3::From all ghosts: 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From reduced ghosts 1: 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+
-- 
2.39.5
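[Note appended after the patch signature, for exposition only: a minimal,
self-contained sketch of the intended use of the new
PETScWrappers::CommunicationPattern class, mirroring the
petsc_noncontiguous_partitioner tests above. The index layout is
hypothetical, the sketch assumes a run on exactly two MPI ranks, and only
the reinit() and export_to_ghosted_array() calls introduced by this patch
are exercised.]

#include <deal.II/base/aligned_vector.h>
#include <deal.II/base/mpi.h>

#include <deal.II/lac/petsc_communication_pattern.h>

int main(int argc, char *argv[])
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi(argc, argv, 1);
  const MPI_Comm     comm = MPI_COMM_WORLD;
  const unsigned int rank = Utilities::MPI::this_mpi_process(comm);

  // Each of the two ranks stores four entries and wants the four entries
  // owned by the other rank (hypothetical layout for illustration).
  const std::vector<types::global_dof_index> has =
    rank == 0 ? std::vector<types::global_dof_index>{0, 1, 2, 3} :
                std::vector<types::global_dof_index>{4, 5, 6, 7};
  const std::vector<types::global_dof_index> want =
    rank == 0 ? std::vector<types::global_dof_index>{4, 5, 6, 7} :
                std::vector<types::global_dof_index>{0, 1, 2, 3};

  PETScWrappers::CommunicationPattern pattern;
  pattern.reinit(has, want, comm);

  AlignedVector<double> src(has.size());
  AlignedVector<double> dst(want.size());
  for (unsigned int i = 0; i < src.size(); ++i)
    src[i] = 100.0 * rank + i;

  // Fill dst with the values stored at the global indices listed in
  // 'want'; internally the exchange maps onto PetscSF communication.
  pattern.export_to_ghosted_array(
    ArrayView<const double>(src.data(), src.size()),
    ArrayView<double>(dst.data(), dst.size()));
}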