}
#endif
- // Check that all the processors have a contiguous range of values. Otherwise,
- // we risk to call different Epetra_Map on different processors and the code
- // hangs.
+ // Find out whether the IndexSet is linear, i.e. whether every process owns a
+ // single contiguous range of indices and these ranges are ordered by process
+ // rank. If there is only one process, the IndexSet is always linear.
+ const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(communicator);
+ bool is_linear = (n_ranks==1);
const bool all_contiguous = (Utilities::MPI::min (is_contiguous() ? 1 : 0, communicator) == 1);
- if ((all_contiguous) && (!overlapping))
+ // Overlapping or non-contiguous IndexSets can never be linear; with a single
+ // process there is nothing left to check.
+ if ((all_contiguous) && (!overlapping) && (!is_linear))
+ {
+ is_linear = true;
+ // every process is contiguous here, so it owns at most one interval
+ types::global_dof_index first_local_dof
+ = (n_elements()>0) ? *(begin_intervals()->begin()) : numbers::invalid_dof_index ;
+ types::global_dof_index last_local_dof
+ = (n_elements()>0) ? begin_intervals()->last() : numbers::invalid_dof_index;
+
+ // send our first/last locally owned index to the neighboring processes and
+ // receive theirs
+ const unsigned int my_rank = Utilities::MPI::this_mpi_process(communicator);
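+ // Tag 0 always carries a first locally owned index, tag 1 a last one. The
+ // ranges line up exactly when each neighbor's first index is one past the
+ // other's last index.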
+ if (my_rank == 0)
+ {
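+ // the first process only exchanges with its right-hand neighbor (rank 1)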
+ types::global_dof_index first_neighbor_dof = last_local_dof;
+ MPI_Status last_send_status, first_recv_status;
+ MPI_Request last_send_request, first_recv_request;
+ MPI_Isend(&last_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank+1,1,communicator,&last_send_request);
+ MPI_Irecv(&first_neighbor_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank+1,0,communicator,&first_recv_request);
+ MPI_Wait(&last_send_request,&last_send_status);
+ MPI_Wait(&first_recv_request,&first_recv_status);
+ if (last_local_dof != numbers::invalid_dof_index &&
+ first_neighbor_dof != numbers::invalid_dof_index &&
+ last_local_dof+1 != first_neighbor_dof)
+ is_linear = false;
+ }
+ else if (my_rank == n_ranks-1)
+ {
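+ // the last process only exchanges with its left-hand neighbor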
+ types::global_dof_index last_neighbor_dof = first_local_dof;
+ MPI_Status first_send_status, last_recv_status;
+ MPI_Request first_send_request, last_recv_request;
+ MPI_Isend(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank-1,0,communicator,&first_send_request);
+ MPI_Irecv(&last_neighbor_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank-1,1,communicator,&last_recv_request);
+ MPI_Wait(&first_send_request,&first_send_status);
+ MPI_Wait(&last_recv_request,&last_recv_status);
+ if (first_local_dof != numbers::invalid_dof_index &&
+ last_neighbor_dof != numbers::invalid_dof_index &&
+ first_local_dof != last_neighbor_dof+1)
+ is_linear = false;
+ }
+ else
+ {
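+ // interior processes exchange boundary indices with both neighbors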
+ types::global_dof_index first_neighbor_dof = last_local_dof;
+ types::global_dof_index last_neighbor_dof = first_local_dof;
+ MPI_Status first_send_status, last_send_status;
+ MPI_Status first_recv_status, last_recv_status;
+ MPI_Request first_send_request, last_send_request;
+ MPI_Request first_recv_request, last_recv_request;
+ MPI_Isend(&first_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank-1,0,communicator,&first_send_request);
+ MPI_Isend(&last_local_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank+1,1,communicator,&last_send_request);
+ MPI_Irecv(&first_neighbor_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank+1,0,communicator,&first_recv_request);
+ MPI_Irecv(&last_neighbor_dof, 1, DEAL_II_DOF_INDEX_MPI_TYPE,
+ my_rank-1,1,communicator,&last_recv_request);
+ MPI_Wait(&first_send_request,&first_send_status);
+ MPI_Wait(&last_send_request,&last_send_status);
+ MPI_Wait(&first_recv_request,&first_recv_status);
+ MPI_Wait(&last_recv_request,&last_recv_status);
+ if (first_local_dof != numbers::invalid_dof_index &&
+ last_neighbor_dof != numbers::invalid_dof_index &&
+ first_local_dof != last_neighbor_dof+1)
+ is_linear = false;
+ if (last_local_dof != numbers::invalid_dof_index &&
+ first_neighbor_dof != numbers::invalid_dof_index &&
+ last_local_dof+1 != first_neighbor_dof)
+ is_linear = false;
+ }
+ }
+ const bool all_linear = (Utilities::MPI::min (is_linear ? 1 : 0, communicator) == 1);
+
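+ // The Epetra_Map constructor used below produces a linear map, i.e. it hands
+ // each process a contiguous, ascending block of rows, so it may only be used
+ // when every process found its range to line up with its neighbors.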
+ if (all_linear)
return Epetra_Map (TrilinosWrappers::types::int_type(size()),
TrilinosWrappers::types::int_type(n_elements()),
0,
{
std::vector<size_type> indices;
fill_index_vector(indices);
-
return Epetra_Map (TrilinosWrappers::types::int_type(-1),
TrilinosWrappers::types::int_type(n_elements()),
(n_elements() > 0
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2009 - 2016 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+// Test that dof renumbering is also reflected in TrilinosWrappers::SparseMatrix
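+// The test distributes dofs on a globally refined hyper_cube, optionally
+// reverses the global dof numbering, builds a TrilinosWrappers::SparseMatrix
+// from the resulting locally owned IndexSet, and prints the index sets
+// together with the local_range() reported by the matrix.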
+
+#include "../tests.h"
+
+#include <deal.II/dofs/dof_handler.h>
+#include <deal.II/dofs/dof_tools.h>
+#include <deal.II/fe/fe.h>
+#include <deal.II/fe/fe_q.h>
+#include <deal.II/grid/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/distributed/shared_tria.h>
+#include <deal.II/lac/constraint_matrix.h>
+#include <deal.II/lac/trilinos_vector.h>
+#include <deal.II/lac/trilinos_sparse_matrix.h>
+#include <deal.II/numerics/vector_tools.h>
+#include <deal.II/numerics/matrix_tools.h>
+#include <deal.II/lac/sparsity_tools.h>
+#include <deal.II/lac/dynamic_sparsity_pattern.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/dofs/dof_renumbering.h>
+
+#include <mpi.h>
+
+#include <vector>
+#include <iostream>
+
+using namespace dealii;
+
+class Test
+{
+private:
+ MPI_Comm mpi_communicator;
+
+ const unsigned int rank;
+ const unsigned int n_ranks;
+
+ parallel::distributed::Triangulation<2> triangulation;
+
+ DoFHandler<2> dof_handler;
+
+ FE_Q<2> fe;
+
+ ConstraintMatrix constraints;
+
+ IndexSet locally_owned_dofs;
+ IndexSet locally_relevant_dofs;
+
+ TrilinosWrappers::SparseMatrix system_matrix;
+
+public:
+ Test(const bool do_renumber) :
+ mpi_communicator(MPI_COMM_WORLD),
+ rank(Utilities::MPI::this_mpi_process(mpi_communicator)),
+ n_ranks(Utilities::MPI::n_mpi_processes(mpi_communicator)),
+ triangulation(mpi_communicator),
+ dof_handler(triangulation),
+ fe(1)
+ {
+ deallog << "Start";
+
+ if (do_renumber)
+ deallog << " with renumbering" << std::endl;
+ else
+ deallog << " without renumbering" << std::endl;
+
+ GridGenerator::hyper_cube(triangulation);
+ triangulation.refine_global(2);
+
+ dof_handler.distribute_dofs(fe);
+
+ constraints.clear();
+ constraints.close();
+
+ if (do_renumber) renumber();
+
+ init_structures();
+
+ deallog << "Finished";
+
+ if (do_renumber)
+ deallog << " with renumbering" << std::endl;
+ else
+ deallog << " without renumbering" << std::endl;
+ }
+
+ ~Test ()
+ {
+ dof_handler.clear();
+ }
+
+private:
+
+ void init_structures()
+ {
+ locally_owned_dofs = dof_handler.locally_owned_dofs();
+ locally_owned_dofs.print(deallog);
+ DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
+ locally_relevant_dofs.print(deallog);
+
+ DynamicSparsityPattern sparsity_pattern (locally_relevant_dofs);
+ DoFTools::make_sparsity_pattern (dof_handler, sparsity_pattern,
+ constraints, /*keep constrained dofs*/ false);
+ SparsityTools::distribute_sparsity_pattern (sparsity_pattern,
+ dof_handler.n_locally_owned_dofs_per_processor(),
+ MPI_COMM_WORLD,
+ locally_relevant_dofs);
+
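+ // reinit with the locally owned rows and columns; local_range() reports the
+ // half-open interval of rows stored on this process, which should coincide
+ // with the (possibly renumbered) locally owned dofs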
+ system_matrix.reinit (locally_owned_dofs,
+ locally_owned_dofs,
+ sparsity_pattern,
+ MPI_COMM_WORLD);
+ deallog << "local_range: " << system_matrix.local_range().first << " - "
+ << system_matrix.local_range().second << std::endl;
+ }
+
+ void renumber()
+ {
+ //DoFRenumbering::Cuthill_McKee(dof_handler);
+
+ locally_owned_dofs = dof_handler.locally_owned_dofs();
+
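+ // reverse the global numbering: dof i becomes dof (n_dofs - 1 - i)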
+ std::vector<types::global_dof_index> new_number(dof_handler.n_dofs());
+ for (types::global_dof_index i = 0; i < dof_handler.n_dofs(); i++)
+ new_number[i] = dof_handler.n_dofs() - i - 1;
+
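+ // renumber_dofs() needs the new global numbers of the locally owned dofs
+ // only, in the order in which they appear in the locally owned IndexSet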
+ std::vector<types::global_dof_index> local_new_number;
+ for (IndexSet::ElementIterator dof = locally_owned_dofs.begin();
+ dof != locally_owned_dofs.end(); ++dof)
+ local_new_number.push_back(new_number[*dof]);
+
+ deallog << "n_dofs = " << dof_handler.n_dofs() << std::endl;
+ deallog << "before renumbering:" << std::endl;
+ locally_owned_dofs.print(dealii::deallog);
+ dof_handler.renumber_dofs(local_new_number);
+
+ deallog << "after renumbering:" << std::endl;
+ locally_owned_dofs = dof_handler.locally_owned_dofs();
+ locally_owned_dofs.print(dealii::deallog);
+ }
+};
+
+int main(int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi(argc, argv);
+ MPILogInitAll log;
+
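+ // run once with the original numbering and once with the reversed numbering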
+ Test test1(false);
+ MPI_Barrier(MPI_COMM_WORLD);
+ Test test2(true);
+
+ return 0;
+}
--- /dev/null
+
+DEAL:0::Start without renumbering
+DEAL:0::{[0,24]}
+DEAL:0::{[0,24]}
+DEAL:0::local_range: 0 - 25
+DEAL:0::Finished without renumbering
+DEAL:0::Start with renumbering
+DEAL:0::n_dofs = 25
+DEAL:0::before renumbering:
+DEAL:0::{[0,24]}
+DEAL:0::after renumbering:
+DEAL:0::{[0,24]}
+DEAL:0::{[0,24]}
+DEAL:0::{[0,24]}
+DEAL:0::local_range: 0 - 25
+DEAL:0::Finished with renumbering
--- /dev/null
+
+DEAL:0::Start without renumbering
+DEAL:0::{[0,14]}
+DEAL:0::{[0,17], [21,22]}
+DEAL:0::local_range: 0 - 15
+DEAL:0::Finished without renumbering
+DEAL:0::Start with renumbering
+DEAL:0::n_dofs = 25
+DEAL:0::before renumbering:
+DEAL:0::{[0,14]}
+DEAL:0::after renumbering:
+DEAL:0::{[10,24]}
+DEAL:0::{[10,24]}
+DEAL:0::{[2,3], [7,24]}
+DEAL:0::local_range: 10 - 25
+DEAL:0::Finished with renumbering
+
+DEAL:1::Start without renumbering
+DEAL:1::{[15,24]}
+DEAL:1::{[2,3], [5,8], 10, [12,24]}
+DEAL:1::local_range: 15 - 25
+DEAL:1::Finished without renumbering
+DEAL:1::Start with renumbering
+DEAL:1::n_dofs = 25
+DEAL:1::before renumbering:
+DEAL:1::{[15,24]}
+DEAL:1::after renumbering:
+DEAL:1::{[0,9]}
+DEAL:1::{[0,9]}
+DEAL:1::{[0,12], 14, [16,19], [21,22]}
+DEAL:1::local_range: 0 - 10
+DEAL:1::Finished with renumbering
+