--- /dev/null
+New: Add Utilities::create_evenly_distributed_partitioning(my_partition_id, n_partitions, total_size)
+to create a one-to-one, evenly distributed, ascending partitioning from a given total size.
+Add Utilities::MPI::create_evenly_distributed_partitioning(comm, total_size), which uses the
+processor ID and the number of MPI processes to determine the partitioning.
+<br>
+(Doug Shi-Dong, 2020/04/29)
namespace Utilities
{
+ /**
+ * Given the total number of elements @p total_size, create an evenly
+   * distributed 1:1 partitioning of the elements across @p n_partitions.
+   * The local sizes will be equal to @p total_size divided by the number
+   * of partitions, with the remainder distributed amongst the first
+   * processes. Each process will store a contiguous subset of indices, and
+   * the index set on process p+1 starts at the index one larger than the
+   * last one stored on process p.
+   * For example, a @p total_size of 11 with 3 processes will result
+   * in the IndexSets { [0,4), [4,8), [8,11) }, and this function will
+   * return the IndexSet of partition @p my_partition_id.
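+   *
+   * A minimal usage sketch for the example above; the ranges in the
+   * comments are half-open:
+   * @code
+   *   // total_size = 11 distributed across n_partitions = 3:
+   *   const IndexSet is0 =
+   *     Utilities::create_evenly_distributed_partitioning(0, 3, 11); // [0,4)
+   *   const IndexSet is2 =
+   *     Utilities::create_evenly_distributed_partitioning(2, 3, 11); // [8,11)
+   * @endcode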
+ */
+ IndexSet
+ create_evenly_distributed_partitioning(const unsigned int my_partition_id,
+ const unsigned int n_partitions,
+ const IndexSet::size_type total_size);
+
/**
* A namespace for utility functions that abstract certain operations using
* the Message Passing Interface (MPI) or provide fallback operations in
create_ascending_partitioning(const MPI_Comm & comm,
const IndexSet::size_type local_size);
+ /**
+ * Given the total number of elements @p total_size, create an evenly
+   * distributed 1:1 partitioning of the elements across the processes of
+   * the MPI communicator @p comm.
+   * This function uses @p comm to determine the number of partitions and
+   * the processor ID, and then calls the
+   * @p create_evenly_distributed_partitioning() function above.
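+   *
+   * A minimal usage sketch (the total size of 11 is an assumed example
+   * value):
+   * @code
+   *   const IndexSet locally_owned =
+   *     Utilities::MPI::create_evenly_distributed_partitioning(MPI_COMM_WORLD, 11);
+   *   // On 3 processes, ranks 0, 1, 2 own [0,4), [4,8), [8,11), respectively.
+   * @endcode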
+ */
+ IndexSet
+ create_evenly_distributed_partitioning(
+ const MPI_Comm & comm,
+ const IndexSet::size_type total_size);
+
#ifdef DEAL_II_WITH_MPI
/**
* Calculate mean and standard deviation across the MPI communicator @p comm
namespace Utilities
{
+ IndexSet
+ create_evenly_distributed_partitioning(const unsigned int my_partition_id,
+ const unsigned int n_partitions,
+ const IndexSet::size_type total_size)
+ {
+    // Each partition receives at least total_size / n_partitions elements;
+    // the first (total_size % n_partitions) partitions receive one extra
+    // element each.
+    const unsigned int remain = total_size % n_partitions;
+
+    const IndexSet::size_type min_size = total_size / n_partitions;
+
+    // All partitions before this one hold min_size elements, plus one extra
+    // element for each of the first std::min(my_partition_id, remain)
+    // partitions. E.g., total_size = 11, n_partitions = 3 gives min_size = 3,
+    // remain = 2, and the ranges [0,4), [4,8), [8,11).
+    const IndexSet::size_type begin =
+      min_size * my_partition_id + std::min(my_partition_id, remain);
+    const IndexSet::size_type end =
+      min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
+ IndexSet result(total_size);
+ result.add_range(begin, end);
+ return result;
+ }
+
namespace MPI
{
MinMaxAvg
return res;
}
+ IndexSet
+ create_evenly_distributed_partitioning(const MPI_Comm & comm,
+ const IndexSet::size_type total_size)
+ {
+ const unsigned int this_proc = this_mpi_process(comm);
+ const unsigned int n_proc = n_mpi_processes(comm);
+
+ return Utilities::create_evenly_distributed_partitioning(this_proc,
+ n_proc,
+ total_size);
+ }
+
/**
return std::vector<IndexSet>(1, complete_index_set(local_size));
}
+ IndexSet
+ create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
+ const IndexSet::size_type total_size)
+ {
+      // Without MPI there is exactly one process, which owns all indices:
+      return complete_index_set(total_size);
+ }
+
MPI_Comm
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2020 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test Utilities::MPI::create_evenly_distributed_partitioning()
+
+#include <deal.II/base/mpi.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+void
+test()
+{
+ MPI_Comm comm(MPI_COMM_WORLD);
+ const auto my_proc = Utilities::MPI::this_mpi_process(comm);
+ const auto n_proc = Utilities::MPI::n_mpi_processes(comm);
+
+  const IndexSet::size_type total_size = 17;
+ const IndexSet index_set =
+ Utilities::MPI::create_evenly_distributed_partitioning(comm, total_size);
+
+ std::vector<IndexSet> vec(n_proc);
+ for (unsigned int i_proc = 0; i_proc < n_proc; ++i_proc)
+ {
+ vec[i_proc] =
+ Utilities::create_evenly_distributed_partitioning(i_proc,
+ n_proc,
+ total_size);
+ }
+  // The MPI variant must return this process's IndexSet:
+ AssertThrow(index_set == vec[my_proc], ExcInternalError());
+
+ // ascending one-to-one
+ AssertThrow(vec[my_proc].is_ascending_and_one_to_one(comm),
+ ExcInternalError());
+
+  // same global size:
+ const auto size = vec[0].size();
+ for (unsigned int i = 1; i < vec.size(); ++i)
+ AssertThrow(size == vec[i].size(), ExcInternalError());
+
+  // Local sizes differ by at most one
+  const auto local_size_1 = vec[0].n_elements();
+  for (unsigned int i = 1; i < vec.size(); ++i)
+    {
+      const auto local_size_2 = vec[i].n_elements();
+      AssertThrow(std::abs(static_cast<int>(local_size_1) -
+                           static_cast<int>(local_size_2)) <= 1,
+                  ExcInternalError());
+    }
+
+  // 1:1: no index is owned by more than one partition
+ for (unsigned int i = 0; i < vec.size(); ++i)
+ for (unsigned int j = i + 1; j < vec.size(); ++j)
+ {
+ const IndexSet intersection = vec[i] & vec[j];
+ AssertThrow(intersection.is_empty(), ExcInternalError());
+ }
+
+ // union of all
+ IndexSet all(size);
+ for (unsigned int i = 0; i < vec.size(); ++i)
+ all.add_indices(vec[i]);
+
+ AssertThrow(all == complete_index_set(size), ExcInternalError());
+
+ // finally print:
+ deallog << "Total size=" << size << std::endl;
+ for (unsigned int p = 0; p < vec.size(); ++p)
+ {
+ deallog << "p=" << p << ":" << std::endl;
+ vec[p].print(deallog.get_file_stream());
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll log;
+
+ test();
+ return 0;
+}
--- /dev/null
+
+DEAL:0::Total size=17
+DEAL:0::p=0:
+{[0,16]}
--- /dev/null
+
+DEAL:0::Total size=17
+DEAL:0::p=0:
+{[0,8]}
+DEAL:0::p=1:
+{[9,16]}
+
+DEAL:1::Total size=17
+DEAL:1::p=0:
+{[0,8]}
+DEAL:1::p=1:
+{[9,16]}
+
--- /dev/null
+
+DEAL:0::Total size=17
+DEAL:0::p=0:
+{[0,4]}
+DEAL:0::p=1:
+{[5,8]}
+DEAL:0::p=2:
+{[9,12]}
+DEAL:0::p=3:
+{[13,16]}
+
+DEAL:1::Total size=17
+DEAL:1::p=0:
+{[0,4]}
+DEAL:1::p=1:
+{[5,8]}
+DEAL:1::p=2:
+{[9,12]}
+DEAL:1::p=3:
+{[13,16]}
+
+DEAL:2::Total size=17
+DEAL:2::p=0:
+{[0,4]}
+DEAL:2::p=1:
+{[5,8]}
+DEAL:2::p=2:
+{[9,12]}
+DEAL:2::p=3:
+{[13,16]}
+
+DEAL:3::Total size=17
+DEAL:3::p=0:
+{[0,4]}
+DEAL:3::p=1:
+{[5,8]}
+DEAL:3::p=2:
+{[9,12]}
+DEAL:3::p=3:
+{[13,16]}
+