class SymmetricTensor;
template <typename Number>
class SparseMatrix;
+class IndexSet;
namespace Utilities
{
MPI_Comm * new_comm);
#endif
+ /**
+ * Given the number of locally owned elements @p local_size, create a 1:1
+ * partitioning of the elements across the MPI communicator @p comm. The
+ * total number of elements is the sum of @p local_size across the MPI
+ * communicator. Each process will store a contiguous subset of indices,
+ * and the index set on process p+1 starts at the index one larger than
+ * the last one stored on process p.
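+ *
+ * A minimal usage sketch (the local size of 10 per process is only an
+ * illustration, not part of the interface):
+ * @code
+ * // With P processes each owning 10 elements, partition[p] holds the
+ * // contiguous range [10*p, 10*(p+1)).
+ * const std::vector<IndexSet> partition =
+ *   Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD, 10);
+ * @endcode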
+ */
+ std::vector<IndexSet>
+ create_ascending_partitioning(const MPI_Comm & comm,
+ const IndexSet::size_type &local_size);
+
/**
* Return the sum over all processors of the value @p t. This function is
* collective over all processors given in the
#include <deal.II/base/exceptions.h>
+#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/multithread_info.h>
+ std::vector<IndexSet>
+ create_ascending_partitioning(const MPI_Comm & comm,
+ const IndexSet::size_type &local_size)
+ {
+ const unsigned int n_proc = n_mpi_processes(comm);
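+ // Gather the local sizes from all processes; the global size is their sum.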
+ const std::vector<IndexSet::size_type> sizes =
+ all_gather(comm, local_size);
+ const auto total_size =
+ std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
+
+ std::vector<IndexSet> res(n_proc, IndexSet(total_size));
+
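+ // Assign each process a contiguous range of indices, in ascending order of rank.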
+ IndexSet::size_type begin = 0;
+ for (unsigned int i = 0; i < n_proc; ++i)
+ {
+ res[i].add_range(begin, begin + sizes[i]);
+ begin = begin + sizes[i];
+ }
+
+ return res;
+ }
+
+
+
std::vector<unsigned int>
compute_point_to_point_communication_pattern(
const MPI_Comm & mpi_comm,
}
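+ // Serial fallback: the single process owns the complete index set.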
+ std::vector<IndexSet>
+ create_ascending_partitioning(const MPI_Comm & /*comm*/,
+ const IndexSet::size_type &local_size)
+ {
+ return std::vector<IndexSet>(1, complete_index_set(local_size));
+ }
+
+
MPI_Comm
duplicate_communicator(const MPI_Comm &mpi_communicator)
{
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE.md at
+// the top level directory of deal.II.
+//
+// ---------------------------------------------------------------------
+
+// Test Utilities::MPI::create_ascending_partitioning()
+
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/point.h>
+
+#include <vector>
+
+#include "../tests.h"
+
+void
+test()
+{
+ MPI_Comm comm(MPI_COMM_WORLD);
+ const auto my_proc = Utilities::MPI::this_mpi_process(comm);
+ const auto n_proc = Utilities::MPI::n_mpi_processes(comm);
+
+ const auto my_size = (my_proc == 2 ? 0 : my_proc * 2 + 5);
+ const auto vec = Utilities::MPI::create_ascending_partitioning(comm, my_size);
+
+ // correct number of index sets (one per process):
+ AssertThrow(vec.size() == n_proc, ExcInternalError());
+
+ // ascending one-to-one
+ AssertThrow(vec[my_proc].is_ascending_and_one_to_one(comm),
+ ExcInternalError());
+
+ // same size:
+ const auto size = vec[0].size();
+ for (unsigned int i = 1; i < vec.size(); ++i)
+ AssertThrow(size == vec[i].size(), ExcInternalError());
+
+ // pairwise disjoint (each index is owned by exactly one process):
+ for (unsigned int i = 0; i < vec.size(); ++i)
+ for (unsigned int j = i + 1; j < vec.size(); ++j)
+ {
+ const IndexSet intersection = vec[i] & vec[j];
+ AssertThrow(intersection.is_empty(), ExcInternalError());
+ }
+
+ // union of all
+ IndexSet all(size);
+ for (unsigned int i = 0; i < vec.size(); ++i)
+ all.add_indices(vec[i]);
+
+ AssertThrow(all == complete_index_set(size), ExcInternalError());
+
+ // finally print:
+ deallog << "Total size=" << size << std::endl;
+ for (unsigned int p = 0; p < vec.size(); ++p)
+ {
+ deallog << "p=" << p << ":" << std::endl;
+ vec[p].print(deallog.get_file_stream());
+ }
+}
+
+int
+main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
+
+ MPILogInitAll log;
+
+ test();
+ return 0;
+}