--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test Utilities::MPI::Partitioner with a ghost index set that is a subset of
+// a larger ghost index set, exercising the export_to_ghosted_array_start()
+// and export_to_ghosted_array_finish() calls
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+ Assert(numproc > 2, ExcNotImplemented());
+
+ const unsigned int set = 200;
+ AssertIndexRange (numproc, set-2);
+ const unsigned int local_size = set - myid;
+ types::global_dof_index global_size = 0;
+ types::global_dof_index my_start = 0;
+ for (unsigned int i=0; i<numproc; ++i)
+ {
+ global_size += set - i;
+ if (i<myid)
+ my_start += set - i;
+ }
+
+ // each processor owns a contiguous range of indices; every processor ghosts
+ // a few entries owned by the first three processors as well as entries of
+ // its direct neighbors, and some ghost entries lie right at the border
+ // between two processors
+ IndexSet local_owned(global_size);
+ local_owned.add_range(my_start, my_start + local_size);
+ IndexSet local_relevant_1(global_size), local_relevant_2(global_size);
+ local_relevant_1 = local_owned;
+ types::global_dof_index ghost_indices [10] = { 1, 2, 13, set-2, set-1, set, set+1, 2*set,
+ 2*set+1, 2*set+3
+ };
+ local_relevant_1.add_indices (&ghost_indices[0], ghost_indices+10);
+ if (myid > 0)
+ local_relevant_1.add_range(my_start-10, my_start);
+ if (myid < numproc - 1)
+ local_relevant_1.add_range(my_start+local_size, my_start+local_size+10);
+
+ local_relevant_2 = local_owned;
+ local_relevant_2.add_indices (&ghost_indices[0], ghost_indices+10);
+ if (myid > 0)
+ local_relevant_2.add_index(my_start-10);
+ if (myid < numproc - 1)
+ local_relevant_2.add_index(my_start+local_size+9);
+
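+ // v carries the full ghost set local_relevant_1; w ghosts only the entries
+ // in local_relevant_2, but registers them within v's larger ghost index set
+ // so that its data exchange can read from and write into a ghost array laid
+ // out according to v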
+ Utilities::MPI::Partitioner v(local_owned, local_relevant_1, MPI_COMM_WORLD);
+ Utilities::MPI::Partitioner w(local_owned, MPI_COMM_WORLD);
+ w.set_ghost_indices(local_relevant_2, v.ghost_indices());
+
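+ // a third, even smaller ghost set: a single global index plus, on all ranks
+ // except the first, the ten entries just below the locally owned range;
+ // again embedded into v's larger ghost index set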
+ IndexSet local_relevant_3(global_size);
+ local_relevant_3.add_index(2);
+ if (myid > 0 && 0 < my_start)
+ local_relevant_3.add_range(my_start-10, my_start);
+ Utilities::MPI::Partitioner x(local_owned, MPI_COMM_WORLD);
+ x.set_ghost_indices(local_relevant_3, v.ghost_indices());
+
+ // set up a locally owned array with some entries
+ std::vector<unsigned int> locally_owned_data_field(local_size);
+ for (unsigned int i=0; i<local_size; ++i)
+ locally_owned_data_field[i] = my_start + i;
+ const std::vector<unsigned int> &locally_owned_data(locally_owned_data_field);
+
+ // set up a ghost array
+ std::vector<unsigned int> ghosts(v.n_ghost_indices());
+ std::vector<unsigned int> temp_array(v.n_import_indices());
+ std::vector<MPI_Request> requests;
+
+ // send the full array
+ v.export_to_ghosted_array_start(3,
+ make_array_view(locally_owned_data),
+ make_array_view(temp_array),
+ make_array_view(ghosts),
+ requests);
+ v.export_to_ghosted_array_finish(make_array_view(ghosts), requests);
+ deallog << "All ghosts: ";
+ for (unsigned int i=0; i<ghosts.size(); ++i)
+ deallog << ghosts[i] << " ";
+ deallog << std::endl;
+
+ // now exchange only the ghost entries selected by w, still writing into the
+ // larger ghost array
+ std::fill(ghosts.begin(), ghosts.end(), 0);
+
+ temp_array.resize(w.n_import_indices());
+ w.export_to_ghosted_array_start(3,
+ make_array_view(locally_owned_data),
+ make_array_view(temp_array),
+ make_array_view(ghosts),
+ requests);
+
+ // start a second send operation for the x partitioner in parallel, on a
+ // different communication channel (4 instead of 3), to make sure the two
+ // pending exchanges do not get mixed up
+ std::vector<unsigned int> temp_array2(x.n_import_indices());
+ std::vector<unsigned int> ghosts2(x.n_ghost_indices());
+
+ std::vector<MPI_Request> requests2;
+ x.export_to_ghosted_array_start(4,
+ make_array_view(locally_owned_data),
+ make_array_view(temp_array2),
+ make_array_view(ghosts2),
+ requests2);
+
+ w.export_to_ghosted_array_finish(make_array_view(ghosts), requests);
+ deallog << "Ghosts on reduced 1: ";
+ for (unsigned int i=0; i<ghosts.size(); ++i)
+ deallog << ghosts[i] << " ";
+ deallog << std::endl;
+
+ std::fill(ghosts.begin(), ghosts.end(), 0);
+
+ temp_array.resize(x.n_import_indices());
+ x.export_to_ghosted_array_start(3,
+ make_array_view(locally_owned_data),
+ make_array_view(temp_array),
+ make_array_view(ghosts),
+ requests);
+ x.export_to_ghosted_array_finish(make_array_view(ghosts), requests);
+ deallog << "Ghosts on reduced 2: ";
+ for (unsigned int i=0; i<ghosts.size(); ++i)
+ deallog << ghosts[i] << " ";
+ deallog << std::endl;
+
+ x.export_to_ghosted_array_finish(make_array_view(ghosts2), requests2);
+ deallog << "Ghosts on reduced 2 without excess entries: ";
+ for (unsigned int i=0; i<ghosts2.size(); ++i)
+ deallog << ghosts2[i] << " ";
+ deallog << std::endl;
+
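+ // repeat the exchange through x into the larger ghost array to make sure
+ // the partitioner can be reused after the tightly sized exchange above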
+ x.export_to_ghosted_array_start(3,
+ make_array_view(locally_owned_data),
+ make_array_view(temp_array),
+ make_array_view(ghosts),
+ requests);
+ x.export_to_ghosted_array_finish(make_array_view(ghosts), requests);
+ deallog << "Ghosts on reduced 2: ";
+ for (unsigned int i=0; i<ghosts.size(); ++i)
+ deallog << ghosts[i] << " ";
+ deallog << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
+ MPILogInitAll log;
+ test();
+}
--- /dev/null
+
+DEAL:0::All ghosts: 200 201 202 203 204 205 206 207 208 209 400 401 403
+DEAL:0::Ghosts on reduced 1: 200 201 0 0 0 0 0 0 0 209 400 401 403
+DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:0::Ghosts on reduced 2 without excess entries:
+DEAL:0::Ghosts on reduced 2: 0 0 0 0 0 0 0 0 0 0 0 0 0
+
+DEAL:1::All ghosts: 1 2 13 190 191 192 193 194 195 196 197 198 199 399 400 401 402 403 404 405 406 407 408
+DEAL:1::Ghosts on reduced 1: 1 2 13 190 0 0 0 0 0 0 0 198 199 0 400 401 0 403 0 0 0 0 408
+DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0
+DEAL:1::Ghosts on reduced 2 without excess entries: 2 190 191 192 193 194 195 196 197 198 199
+DEAL:1::Ghosts on reduced 2: 0 2 0 190 191 192 193 194 195 196 197 198 199 0 0 0 0 0 0 0 0 0 0
+
+
+DEAL:2::All ghosts: 1 2 13 198 199 200 201 389 390 391 392 393 394 395 396 397 398 597 598 599 600 601 602 603 604 605 606
+DEAL:2::Ghosts on reduced 1: 1 2 13 198 199 200 201 389 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 606
+DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0
+DEAL:2::Ghosts on reduced 2 without excess entries: 2 389 390 391 392 393 394 395 396 397 398
+DEAL:2::Ghosts on reduced 2: 0 2 0 0 0 0 0 389 390 391 392 393 394 395 396 397 398 0 0 0 0 0 0 0 0 0 0
+
+
+DEAL:3::All ghosts: 1 2 13 198 199 200 201 400 401 403 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 1: 1 2 13 198 199 200 201 400 401 403 587 0 0 0 0 0 0 0 0 0
+DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 2 without excess entries: 2 587 588 589 590 591 592 593 594 595 596
+DEAL:3::Ghosts on reduced 2: 0 2 0 0 0 0 0 0 0 0 587 588 589 590 591 592 593 594 595 596
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// test Utilities::MPI::Partitioner with a ghost index set that is a subset of
+// a larger ghost index set, exercising the import_from_ghosted_array_start()
+// and import_from_ghosted_array_finish() calls
+#include "../tests.h"
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/index_set.h>
+#include <deal.II/base/partitioner.h>
+#include <fstream>
+#include <iostream>
+#include <vector>
+
+
+void test ()
+{
+ unsigned int myid = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
+ unsigned int numproc = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
+ Assert(numproc > 2, ExcNotImplemented());
+
+ const unsigned int set = 50;
+ AssertIndexRange (numproc, set-2);
+ const unsigned int local_size = set - myid;
+ types::global_dof_index global_size = 0;
+ types::global_dof_index my_start = 0;
+ for (unsigned int i=0; i<numproc; ++i)
+ {
+ global_size += set - i;
+ if (i<myid)
+ my_start += set - i;
+ }
+
+ // each processor owns a contiguous range of indices; every processor ghosts
+ // a few entries owned by the first three processors as well as entries of
+ // its direct neighbors, and some ghost entries lie right at the border
+ // between two processors
+ IndexSet local_owned(global_size);
+ local_owned.add_range(my_start, my_start + local_size);
+ IndexSet local_relevant_1(global_size), local_relevant_2(global_size);
+ local_relevant_1 = local_owned;
+ types::global_dof_index ghost_indices [10] = { 1, 2, 13, set-2, set-1, set, set+1, 2*set,
+ 2*set+1, 2*set+3
+ };
+ local_relevant_1.add_indices (&ghost_indices[0], ghost_indices+10);
+ if (myid > 0)
+ local_relevant_1.add_range(my_start-10, my_start);
+ if (myid < numproc - 1)
+ local_relevant_1.add_range(my_start+local_size, my_start+local_size+10);
+
+ local_relevant_2 = local_owned;
+ local_relevant_2.add_indices (&ghost_indices[0], ghost_indices+10);
+ if (myid > 0)
+ local_relevant_2.add_index(my_start-10);
+ if (myid < numproc - 1)
+ local_relevant_2.add_index(my_start+local_size+9);
+
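+ // v carries the full ghost set local_relevant_1; w ghosts only the entries
+ // in local_relevant_2, but registers them within v's larger ghost index set
+ // so that its data exchange can read from and write into a ghost array laid
+ // out according to v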
+ Utilities::MPI::Partitioner v(local_owned, local_relevant_1, MPI_COMM_WORLD);
+ Utilities::MPI::Partitioner w(local_owned, MPI_COMM_WORLD);
+ w.set_ghost_indices(local_relevant_2, v.ghost_indices());
+
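+ // a third, even smaller ghost set: a single global index plus, on all ranks
+ // except the first, the ten entries just below the locally owned range;
+ // again embedded into v's larger ghost index set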
+ IndexSet local_relevant_3(global_size);
+ local_relevant_3.add_index(2);
+ if (myid > 0 && 0 < my_start)
+ local_relevant_3.add_range(my_start-10, my_start);
+ Utilities::MPI::Partitioner x(local_owned, MPI_COMM_WORLD);
+ x.set_ghost_indices(local_relevant_3, v.ghost_indices());
+
+ // set up a ghost array with some entries
+ std::vector<unsigned int> ghost_array(v.n_ghost_indices(), 1);
+
+ // set up other arrays
+ std::vector<unsigned int> locally_owned_array(local_size);
+ std::vector<unsigned int> temp_array(v.n_import_indices());
+ std::vector<MPI_Request> requests;
+
+ // send the full array
+ {
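+ // work on a copy of the ghost data since the import calls may modify the
+ // ghost array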
+ std::vector<unsigned int> ghosts (ghost_array);
+ v.import_from_ghosted_array_start(VectorOperation::add, 3,
+ make_array_view(ghosts),
+ make_array_view(temp_array),
+ requests);
+ v.import_from_ghosted_array_finish(VectorOperation::add,
+ ArrayView<const unsigned int>(temp_array.data(),
+ temp_array.size()),
+ make_array_view(locally_owned_array),
+ make_array_view(ghosts), requests);
+ }
+ deallog << "From all ghosts: ";
+ for (unsigned int i=0; i<locally_owned_array.size(); ++i)
+ deallog << locally_owned_array[i] << " ";
+ deallog << std::endl;
+
+ // import only the ghost entries selected by w from the larger ghost array
+ std::fill(locally_owned_array.begin(), locally_owned_array.end(), 0);
+ temp_array.resize(w.n_import_indices());
+ {
+ std::vector<unsigned int> ghosts (ghost_array);
+ w.import_from_ghosted_array_start(VectorOperation::add, 3,
+ make_array_view(ghosts),
+ make_array_view(temp_array),
+ requests);
+ w.import_from_ghosted_array_finish(VectorOperation::add,
+ ArrayView<const unsigned int>(temp_array.data(),
+ temp_array.size()),
+ make_array_view(locally_owned_array),
+ make_array_view(ghosts), requests);
+ }
+ deallog << "From reduced ghosts 1: ";
+ for (unsigned int i=0; i<locally_owned_array.size(); ++i)
+ deallog << locally_owned_array[i] << " ";
+ deallog << std::endl;
+
+ // import only the ghost entries selected by x from the larger ghost array
+ std::fill(locally_owned_array.begin(), locally_owned_array.end(), 0);
+ temp_array.resize(x.n_import_indices());
+ {
+ std::vector<unsigned int> ghosts (ghost_array);
+ x.import_from_ghosted_array_start(VectorOperation::add, 3,
+ make_array_view(ghosts),
+ make_array_view(temp_array),
+ requests);
+ x.import_from_ghosted_array_finish(VectorOperation::add,
+ ArrayView<const unsigned int>(temp_array.data(),
+ temp_array.size()),
+ make_array_view(locally_owned_array),
+ make_array_view(ghosts), requests);
+ }
+ deallog << "From reduced ghosts 2: ";
+ for (unsigned int i=0; i<locally_owned_array.size(); ++i)
+ deallog << locally_owned_array[i] << " ";
+ deallog << std::endl;
+
+ // now send a tight array from x and add into the existing entries
+ std::vector<unsigned int> ghosts(x.n_ghost_indices(), 1);
+ x.import_from_ghosted_array_start(VectorOperation::add, 3,
+ make_array_view(ghosts),
+ make_array_view(temp_array),
+ requests);
+ x.import_from_ghosted_array_finish(VectorOperation::add,
+ ArrayView<const unsigned int>(temp_array.data(),
+ temp_array.size()),
+ make_array_view(locally_owned_array),
+ make_array_view(ghosts), requests);
+ deallog << "From tight reduced ghosts 2: ";
+ for (unsigned int i=0; i<locally_owned_array.size(); ++i)
+ deallog << locally_owned_array[i] << " ";
+ deallog << std::endl;
+}
+
+
+
+int main (int argc, char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
+ MPILogInitAll log;
+ test();
+}
--- /dev/null
+
+DEAL:0::From all ghosts: 0 3 3 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 3 3
+DEAL:0::From reduced ghosts 1: 0 3 3 0 0 0 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 3 3
+DEAL:0::From reduced ghosts 2: 0 0 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:0::From tight reduced ghosts 2: 0 0 6 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+DEAL:1::From all ghosts: 3 3 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:1::From reduced ghosts 1: 3 3 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+DEAL:1::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:1::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+
+DEAL:2::From all ghosts: 1 3 3 1 3 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:2::From reduced ghosts 1: 0 3 3 0 3 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+DEAL:2::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1
+DEAL:2::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 2 2 2 2 2 2 2 2 2 2
+
+
+DEAL:3::From all ghosts: 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From reduced ghosts 1: 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+DEAL:3::From tight reduced ghosts 2: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+