From: Marc Fehling
Date: Tue, 20 Dec 2022 21:03:30 +0000 (-0700)
Subject: Added partitioner reinit to LA::distributed::BlockVector().
X-Git-Tag: v9.5.0-rc1~163^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1cfb59464efc7d8ee295feea40e085aea9c0eefc;p=dealii.git

Added partitioner reinit to LA::distributed::BlockVector().
---
diff --git a/doc/news/changes/minor/20221220Fehling b/doc/news/changes/minor/20221220Fehling
new file mode 100644
index 0000000000..b38b5b79aa
--- /dev/null
+++ b/doc/news/changes/minor/20221220Fehling
@@ -0,0 +1,4 @@
+New: LinearAlgebra::distributed::BlockVector::reinit now accepts
+a vector of Utilities::MPI::Partitioner objects.
+
+(Marc Fehling, 2022/12/20)
diff --git a/include/deal.II/lac/la_parallel_block_vector.h b/include/deal.II/lac/la_parallel_block_vector.h
index c250cf4004..14bb9fc0c8 100644
--- a/include/deal.II/lac/la_parallel_block_vector.h
+++ b/include/deal.II/lac/la_parallel_block_vector.h
@@ -176,6 +176,22 @@ namespace LinearAlgebra
       BlockVector(const std::vector<IndexSet> &local_ranges,
                   const MPI_Comm &             communicator);

+      /**
+       * Construct a block vector with a Utilities::MPI::Partitioner for each
+       * block.
+       *
+       * The optional argument @p comm_sm, which consists of processes on
+       * the same shared-memory domain, allows users to have read-only access
+       * to both locally-owned and ghost values of processes combined in the
+       * shared-memory communicator. See the general documentation of the
+       * LinearAlgebra::distributed::Vector class for more information about
+       * this argument.
+       */
+      BlockVector(
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &            partitioners,
+        const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
       /**
        * Destructor.
        *
@@ -328,6 +344,25 @@
       reinit(const std::vector<IndexSet> &local_ranges,
              const MPI_Comm &             communicator);

+      /**
+       * Initialize each block with the corresponding parallel partitioning
+       * in @p partitioners. The input arguments are shared pointers, which
+       * store the partitioner data only once and can be shared between
+       * several vectors with the same layout.
+       *
+       * The optional argument @p comm_sm, which consists of processes on
+       * the same shared-memory domain, allows users to have read-only access
+       * to both locally-owned and ghost values of processes combined in the
+       * shared-memory communicator. See the general documentation of the
+       * LinearAlgebra::distributed::Vector class for more information about
+       * this argument.
+       */
+      void
+      reinit(
+        const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+          &            partitioners,
+        const MPI_Comm &comm_sm = MPI_COMM_SELF);
+
       /**
        * This function copies the data that has accumulated in the data buffer
        * for ghost indices to the owning processor. For the meaning of the
diff --git a/include/deal.II/lac/la_parallel_block_vector.templates.h b/include/deal.II/lac/la_parallel_block_vector.templates.h
index d530d1819b..95392eaedc 100644
--- a/include/deal.II/lac/la_parallel_block_vector.templates.h
+++ b/include/deal.II/lac/la_parallel_block_vector.templates.h
@@ -75,6 +75,17 @@ namespace LinearAlgebra



+    template <typename Number>
+    BlockVector<Number>::BlockVector(
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &            partitioners,
+      const MPI_Comm &comm_sm)
+    {
+      reinit(partitioners, comm_sm);
+    }
+
+
+
     template <typename Number>
     BlockVector<Number>::BlockVector(const BlockVector<Number> &v)
       : BlockVectorBase<Vector<Number>>()
@@ -188,6 +199,27 @@



+    template <typename Number>
+    void
+    BlockVector<Number>::reinit(
+      const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
+        &            partitioners,
+      const MPI_Comm &comm_sm)
+    {
+      // update the number of blocks
+      this->block_indices.reinit(partitioners.size(), 0);
+
+      // initialize each block
+      this->components.resize(this->n_blocks());
+      for (unsigned int i = 0; i < this->n_blocks(); ++i)
+        this->components[i].reinit(partitioners[i], comm_sm);
+
+      // update block_indices content
+      this->collect_sizes();
+    }
+
+
+
     template <typename Number>
     BlockVector<Number> &
     BlockVector<Number>::operator=(const value_type s)
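For illustration, the new interface would be used roughly as in the following sketch. It is not part of the patch: the block count, the index-set sizes, the helper function setup_block_vector, and the value type double are assumptions; the Utilities::MPI::Partitioner constructor from an owned and a ghost IndexSet and the reinit()/constructor overloads are the ones added above.

#include <deal.II/base/index_set.h>
#include <deal.II/base/partitioner.h>

#include <deal.II/lac/la_parallel_block_vector.h>

#include <memory>
#include <vector>

using namespace dealii;

// Build one partitioner per block and hand the whole set to the new
// reinit()/constructor. Sizes and index ranges are placeholders.
void
setup_block_vector(const MPI_Comm comm)
{
  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
  for (unsigned int b = 0; b < 2; ++b)
    {
      IndexSet owned(16);    // locally-owned indices of block b
      IndexSet relevant(16); // ghost indices of block b
      // ... fill 'owned' and 'relevant' according to the parallel layout ...

      partitioners.push_back(
        std::make_shared<Utilities::MPI::Partitioner>(owned, relevant, comm));
    }

  // Either reinit an existing block vector ...
  LinearAlgebra::distributed::BlockVector<double> v;
  v.reinit(partitioners); // one block per partitioner

  // ... or use the new constructor directly.
  LinearAlgebra::distributed::BlockVector<double> w(partitioners);
}

Because the partitioners are shared pointers, v and w above refer to the same partitioner objects instead of storing two copies of the index information, which is exactly the sharing described in the documentation of the new reinit().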
diff --git a/include/deal.II/lac/la_parallel_vector.h b/include/deal.II/lac/la_parallel_vector.h
index cca0fe7736..74df6dac39 100644
--- a/include/deal.II/lac/la_parallel_vector.h
+++ b/include/deal.II/lac/la_parallel_vector.h
@@ -391,8 +391,8 @@ namespace LinearAlgebra
       /**
        * Initialize the vector given to the parallel partitioning described in
        * @p partitioner. The input argument is a shared pointer, which stores
-       * the partitioner data only once and share it between several vectors
-       * with the same layout.
+       * the partitioner data only once and can be shared between several
+       * vectors with the same layout.
        *
        * The optional argument @p comm_sm, which consists of processes on
        * the same shared-memory domain, allows users have read-only access to
diff --git a/tests/mpi/parallel_block_vector_14.cc b/tests/mpi/parallel_block_vector_14.cc
new file mode 100644
index 0000000000..071c0d44d9
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_14.cc
@@ -0,0 +1,99 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2022 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+
+// test various constructors and reinit functions of
+// LinearAlgebra::distributed::BlockVector, in particular the new variant
+// taking a vector of Utilities::MPI::Partitioner objects
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+
+#include <deal.II/lac/la_parallel_block_vector.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+
+void
+test()
+{
+  MPI_Comm comm = MPI_COMM_WORLD;
+
+  const unsigned int myid    = Utilities::MPI::this_mpi_process(comm);
+  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
+
+  constexpr unsigned int n_blocks                     = 3;
+  constexpr unsigned int n_indices_per_proc_and_block = 2;
+
+  const unsigned int n_indices_per_block =
+    n_indices_per_proc_and_block * n_procs;
+  const unsigned int n_indices = n_blocks * n_indices_per_block;
+
+  // set up partitioning
+  std::vector<IndexSet> owned_indexsets;
+  std::vector<IndexSet> relevant_indexsets;
+  std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>> partitioners;
+  for (unsigned int b = 0; b < n_blocks; ++b)
+    {
+      const unsigned int begin = b * n_indices_per_block;
+
+      IndexSet owned(n_indices);
+      owned.add_range(begin + n_indices_per_proc_and_block * myid,
+                      begin + n_indices_per_proc_and_block * (myid + 1));
+
+      IndexSet relevant(n_indices);
+      relevant.add_range(begin + 1, begin + 2);
+
+      partitioners.push_back(
+        std::make_shared<Utilities::MPI::Partitioner>(owned,
+                                                      relevant,
+                                                      comm));
+      owned_indexsets.push_back(std::move(owned));
+      relevant_indexsets.push_back(std::move(relevant));
+    }
+
+  // create block vectors using different constructors
+  {
+    LinearAlgebra::distributed::BlockVector<double> block_vector(
+      owned_indexsets, comm);
+    deallog << "w/o ghost indices: OK" << std::endl;
+  }
+
+  {
+    LinearAlgebra::distributed::BlockVector<double> block_vector(
+      owned_indexsets, relevant_indexsets, comm);
+    deallog << "w/ ghost indices: OK" << std::endl;
+  }
+
+  {
+    LinearAlgebra::distributed::BlockVector<double> block_vector(partitioners);
+    deallog << "partitioner: OK" << std::endl;
+  }
+}
+
+
+
+int
+main(int argc, char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+  const unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
+
+  if (myid == 0)
+    initlog();
+
+  test();
+}
diff --git a/tests/mpi/parallel_block_vector_14.mpirun=10.output b/tests/mpi/parallel_block_vector_14.mpirun=10.output
new file mode 100644
index 0000000000..9d10939766
--- /dev/null
+++ b/tests/mpi/parallel_block_vector_14.mpirun=10.output
@@ -0,0 +1,4 @@
+
+DEAL::w/o ghost indices: OK
+DEAL::w/ ghost indices: OK
+DEAL::partitioner: OK
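A note on the optional comm_sm argument documented above: it is expected to be a communicator that groups the ranks of one shared-memory domain, and a common way to obtain such a communicator is MPI_Comm_split_type with MPI_COMM_TYPE_SHARED. The sketch below is again an illustration and not part of the patch; the function name reinit_with_shared_memory, the value type double, and the point at which the communicator is freed are assumptions.

#include <deal.II/base/mpi.h>
#include <deal.II/base/partitioner.h>

#include <deal.II/lac/la_parallel_block_vector.h>

#include <memory>
#include <vector>

using namespace dealii;

// Group the ranks that share a node into 'comm_sm' and pass it along, so
// that each block can expose read-only access to the locally-owned and
// ghost entries of all ranks in that shared-memory group.
void
reinit_with_shared_memory(
  const std::vector<std::shared_ptr<const Utilities::MPI::Partitioner>>
    &            partitioners,
  const MPI_Comm comm)
{
  MPI_Comm comm_sm;
  MPI_Comm_split_type(comm,
                      MPI_COMM_TYPE_SHARED,
                      Utilities::MPI::this_mpi_process(comm),
                      MPI_INFO_NULL,
                      &comm_sm);

  {
    LinearAlgebra::distributed::BlockVector<double> v;
    v.reinit(partitioners, comm_sm);

    // ... assemble into and read from v ...
  }

  // Free the communicator only after the vector that uses it is gone.
  MPI_Comm_free(&comm_sm);
}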