+ template <int dim, int spacedim>
+ std::map<unsigned int,types::global_vertex_index>
+ compute_local_to_global_vertex_index_map(
+ const parallel::distributed::Triangulation<dim,spacedim> &triangulation)
+ {
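+ // Each process first assigns a contiguous local index to the vertices it
+ // owns (a vertex is owned by the lowest subdomain id among its adjacent
+ // cells), then shifts these indices by the number of vertices owned by
+ // lower-ranked processes, and finally receives the global indices of the
+ // vertices it does not own from their owners.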
+ std::map<unsigned int,types::global_vertex_index> local_to_global_vertex_index;
+
+ typedef typename Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+ std::vector<std::set<active_cell_iterator> > vertex_to_cell = vertex_to_cell_map(triangulation);
+
+ // Create a local index for the locally "owned" vertices
+ types::global_vertex_index next_index = 0;
+ unsigned int max_cellid_size = 0;
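+ // vertices_added stores the (subdomain id, vertex index) pairs that have
+ // already been scheduled for sending; vertices_to_recv lists, for each
+ // owning subdomain, the vertices whose global index has to be received;
+ // vertices_to_send maps each destination subdomain to tuples of (vertex
+ // position within the cell, vertex index, CellID string)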
+ std::set<std::tuple<types::subdomain_id,types::global_vertex_index> > vertices_added;
+ std::map<types::subdomain_id,std::set<unsigned int> > vertices_to_recv;
+ std::map<types::subdomain_id,std::vector<std::tuple<types::global_vertex_index,
+ types::global_vertex_index,std::string> > > vertices_to_send;
+ active_cell_iterator cell = triangulation.begin_active(),
+ endc = triangulation.end();
+ std::set<active_cell_iterator> missing_vert_cells;
+ std::set<unsigned int> used_vertex_index;
+ for (; cell!=endc; ++cell)
+ {
+ if (cell->is_locally_owned())
+ {
+ for (unsigned int i=0; i<GeometryInfo<dim>::vertices_per_cell; ++i)
+ {
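+ // The vertex is owned by the cell with the lowest subdomain id among
+ // all cells adjacent to it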
+ types::subdomain_id lowest_subdomain_id = cell->subdomain_id();
+ typename std::set<active_cell_iterator>::iterator
+ adjacent_cell = vertex_to_cell[cell->vertex_index(i)].begin(),
+ end_adj_cell = vertex_to_cell[cell->vertex_index(i)].end();
+ for (; adjacent_cell!=end_adj_cell; ++adjacent_cell)
+ lowest_subdomain_id = std::min(lowest_subdomain_id,
+ (*adjacent_cell)->subdomain_id());
+
+ // See if I "own" this vertex
+ if (lowest_subdomain_id==cell->subdomain_id())
+ {
+ // Check that the vertex we are working on has not been
+ // dealt with yet
+ if (used_vertex_index.count(cell->vertex_index(i))==0)
+ {
+ // Set the local index
+ local_to_global_vertex_index[cell->vertex_index(i)] = next_index++;
+
+ // Store the information that will be sent to the adjacent cells
+ // on other subdomains
+ adjacent_cell = vertex_to_cell[cell->vertex_index(i)].begin();
+ for (; adjacent_cell!=end_adj_cell; ++adjacent_cell)
+ if ((*adjacent_cell)->subdomain_id()!=cell->subdomain_id())
+ {
+ std::tuple<types::subdomain_id,types::global_vertex_index>
+ tmp((*adjacent_cell)->subdomain_id(), cell->vertex_index(i));
+ if (vertices_added.count(tmp)==0)
+ {
+ vertices_to_send[(*adjacent_cell)->subdomain_id()].push_back(
+ std::tuple<types::global_vertex_index,types::global_vertex_index,
+ std::string> (i,cell->vertex_index(i),
+ cell->id().to_string()));
+ if (cell->id().to_string().size() > max_cellid_size)
+ max_cellid_size = cell->id().to_string().size();
+ vertices_added.insert(tmp);
+ }
+ }
+ used_vertex_index.insert(cell->vertex_index(i));
+ }
+ }
+ else
+ {
+ // We don't own the vertex so we will receive its global index
+ vertices_to_recv[lowest_subdomain_id].insert(cell->vertex_index(i));
+ missing_vert_cells.insert(cell);
+ }
+ }
+ }
+
+ // Some hanging nodes are vertices of ghost cells. They need to be
+ // received.
+ if (cell->is_ghost())
+ {
+ for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
+ {
+ if (cell->at_boundary(i)==false)
+ {
+ if (cell->neighbor(i)->active())
+ {
+ typename Triangulation<dim,spacedim>::active_cell_iterator adjacent_cell =
+ cell->neighbor(i);
+ if ((adjacent_cell->is_locally_owned()))
+ {
+ types::subdomain_id adj_subdomain_id = adjacent_cell->subdomain_id();
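+ // The vertices of this face may be owned by the ghost cell's
+ // subdomain, which has the lower subdomain id, so their global
+ // indices are requested from it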
+ if (cell->subdomain_id()<adj_subdomain_id)
+ for (unsigned int j=0; j<GeometryInfo<dim>::vertices_per_face; ++j)
+ {
+ vertices_to_recv[cell->subdomain_id()].insert(cell->face(i)->vertex_index(j));
+ missing_vert_cells.insert(cell);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ // Get the size of the largest CellID string
+ MPI_Allreduce(MPI_IN_PLACE, &max_cellid_size, 1, MPI_UNSIGNED, MPI_MAX,
+ triangulation.get_communicator());
+
+
+ // Make indices global by getting the number of vertices owned by each
+ // processor and shifting the indices accordingly
+ const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
+ std::vector<types::global_vertex_index> indices(n_cpu);
+ MPI_Allgather(&next_index, 1, DEAL_II_VERTEX_INDEX_MPI_TYPE, &indices[0],
+ 1, DEAL_II_VERTEX_INDEX_MPI_TYPE, triangulation.get_communicator());
+ const types::global_vertex_index shift = std::accumulate(&indices[0],
+ &indices[0]+triangulation.locally_owned_subdomain(),
+ types::global_vertex_index(0));
+
+ std::map<unsigned int,types::global_vertex_index>::iterator
+ global_index_it = local_to_global_vertex_index.begin(),
+ global_index_end = local_to_global_vertex_index.end();
+ for (; global_index_it!=global_index_end; ++global_index_it)
+ global_index_it->second += shift;
+
+ // In a first message, send the global ID of the vertices and their local
+ // positions in the cells. In a second message, send the cell IDs as
+ // fixed-size strings. This is done in two messages so that types are not mixed
+
+ // Send the first message
+ types::global_vertex_index **vertices_send_buffers;
+ vertices_send_buffers = new types::global_vertex_index* [vertices_to_send.size()];
+ MPI_Request *first_requests;
+ first_requests = new MPI_Request[vertices_to_send.size()];
+ typename std::map<types::subdomain_id,
+ std::vector<std::tuple<types::global_vertex_index,
+ types::global_vertex_index,std::string> > >::iterator
+ vert_to_send_it = vertices_to_send.begin(),
+ vert_to_send_end = vertices_to_send.end();
+ for (unsigned int i=0; vert_to_send_it!=vert_to_send_end;
+ ++vert_to_send_it, ++i)
+ {
+ int destination = vert_to_send_it->first;
+ const unsigned int n_vertices = vert_to_send_it->second.size();
+ const int buffer_size = 2*n_vertices;
+ vertices_send_buffers[i] = new types::global_vertex_index [buffer_size];
+
+ // fill the buffer
+ for (unsigned int j=0; j<n_vertices; ++j)
+ {
+ vertices_send_buffers[i][2*j] = std::get<0>(vert_to_send_it->second[j]);
+ vertices_send_buffers[i][2*j+1] =
+ local_to_global_vertex_index[std::get<1>(vert_to_send_it->second[j])];
+ }
+
+ // Send the message
+ MPI_Isend(vertices_send_buffers[i],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+ destination, 0, triangulation.get_communicator(), &first_requests[i]);
+ }
+
+ // Receive the first message
+ types::global_vertex_index **vertices_recv_buffers;
+ vertices_recv_buffers = new types::global_vertex_index* [vertices_to_recv.size()];
+ typename std::map<types::subdomain_id,std::set<unsigned int> >::iterator
+ vert_to_recv_it = vertices_to_recv.begin(),
+ vert_to_recv_end = vertices_to_recv.end();
+ for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++vert_to_recv_it, ++i)
+ {
+ int source = vert_to_recv_it->first;
+ const unsigned int n_vertices = vert_to_recv_it->second.size();
+ const int buffer_size = 2*n_vertices;
+ vertices_recv_buffers[i] = new types::global_vertex_index[buffer_size];
+
+ // Receive the message
+ MPI_Recv(vertices_recv_buffers[i],buffer_size,DEAL_II_VERTEX_INDEX_MPI_TYPE,
+ source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ }
+
+
+ // Send second message
+ char **cellids_send_buffers = new char *[vertices_to_send.size()];
+ MPI_Request *second_requests;
+ second_requests = new MPI_Request[vertices_to_send.size()];
+ vert_to_send_it = vertices_to_send.begin();
+ for (unsigned int i=0; vert_to_send_it!=vert_to_send_end;
+ ++vert_to_send_it, ++i)
+ {
+ int destination = vert_to_send_it->first;
+ const unsigned int n_vertices = vert_to_send_it->second.size();
+ const int buffer_size = max_cellid_size*n_vertices;
+ cellids_send_buffers[i] = new char [buffer_size];
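+ // Each CellID string is written into a fixed-size slot of
+ // max_cellid_size characters so that the receiver can split the buffer
+ // at known offsets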
+
+ // fill the buffer
+ unsigned int pos = 0;
+ for (unsigned int j=0; j<n_vertices; ++j)
+ {
+ std::string cell_id = std::get<2>(vert_to_send_it->second[j]);
+ for (unsigned int k=0; k<max_cellid_size; ++k, ++pos)
+ {
+ if (k<cell_id.size())
+ cellids_send_buffers[i][pos] = cell_id[k];
+ // if necessary fill up the reserved part of the buffer with an
+ // invalid value
+ else
+ cellids_send_buffers[i][pos] = '-';
+ }
+ }
+
+ // Send the message
+ MPI_Isend(cellids_send_buffers[i],buffer_size,MPI_CHAR,
+ destination, 0, triangulation.get_communicator(), &second_requests[i]);
+ }
+
+ // Receive the second message
+ char **cellids_recv_buffers = new char *[vertices_to_recv.size()];
+ vert_to_recv_it = vertices_to_recv.begin();
+ for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++vert_to_recv_it, ++i)
+ {
+ int source = vert_to_recv_it->first;
+ const unsigned int n_vertices = vert_to_recv_it->second.size();
+ const int buffer_size = max_cellid_size*n_vertices;
+ cellids_recv_buffers[i] = new char[buffer_size];
+
+ // Receive the message
+ MPI_Recv(cellids_recv_buffers[i],buffer_size,MPI_CHAR,
+ source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+ }
+
+
+ // Match the data received with the required vertices
+ vert_to_recv_it = vertices_to_recv.begin();
+ for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++i, ++vert_to_recv_it)
+ {
+ for (unsigned int j=0; j<vert_to_recv_it->second.size(); ++j)
+ {
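+ // Each received record consists of the vertex's position within the
+ // sending cell and its global index; the CellID string received in the
+ // second message identifies the cell this position refers to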
+ const unsigned int local_pos_recv = vertices_recv_buffers[i][2*j];
+ const types::global_vertex_index global_id_recv = vertices_recv_buffers[i][2*j+1];
+ const std::string cellid_recv(&cellids_recv_buffers[i][max_cellid_size*j],
+ &cellids_recv_buffers[i][max_cellid_size*(j+1)]);
+ bool found = false;
+ typename std::set<active_cell_iterator>::iterator
+ cell_set_it = missing_vert_cells.begin(),
+ end_cell_set = missing_vert_cells.end();
+ for (; (found==false) && (cell_set_it!=end_cell_set); ++cell_set_it)
+ {
+ typename std::set<active_cell_iterator>::iterator
+ candidate_cell = vertex_to_cell[(*cell_set_it)->vertex_index(0)].begin(),
+ end_cell = vertex_to_cell[(*cell_set_it)->vertex_index(0)].end();
+ for (; candidate_cell!=end_cell; ++candidate_cell)
+ {
+ std::string current_cellid = (*candidate_cell)->id().to_string();
+ current_cellid.resize(max_cellid_size,'-');
+ if (current_cellid.compare(cellid_recv)==0)
+ {
+ local_to_global_vertex_index[(*candidate_cell)->vertex_index(local_pos_recv)] =
+ global_id_recv;
+ found = true;
+
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ // Make sure the non-blocking sends have completed before the buffers
+ // they use are freed
+ MPI_Waitall(vertices_to_send.size(), first_requests, MPI_STATUSES_IGNORE);
+ MPI_Waitall(vertices_to_send.size(), second_requests, MPI_STATUSES_IGNORE);
+
+ // Free the buffers used to send the messages
+ for (unsigned int i=0; i<vertices_to_send.size(); ++i)
+ {
+ delete [] vertices_send_buffers[i];
+ delete [] cellids_send_buffers[i];
+ }
+ delete [] vertices_send_buffers;
+ vertices_send_buffers = NULL;
+ delete [] first_requests;
+ first_requests = NULL;
+ delete [] cellids_send_buffers;
+ cellids_send_buffers = NULL;
+ delete [] second_requests;
+ second_requests = NULL;
+
+
+ // Free the buffers used to receive the messages
+ for (unsigned int i=0; i<vertices_to_recv.size(); ++i)
+ {
+ delete [] vertices_recv_buffers[i];
+ delete [] cellids_recv_buffers[i];
+ }
+ delete [] vertices_recv_buffers;
+ delete [] cellids_recv_buffers;
+ vertices_recv_buffers = NULL;
+ cellids_recv_buffers = NULL;
+
+
+ return local_to_global_vertex_index;
+ }
+
+
+
template <int dim, int spacedim>
void
get_face_connectivity_of_cells (const Triangulation<dim,spacedim> &triangulation,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+#include <deal.II/grid/grid_out.h>
+#include <deal.II/grid/cell_id.h>
+
+#include <algorithm>
+#include <map>
+#include <vector>
+
+
+void test()
+{
+ const MPI_Comm mpi_comm = MPI_COMM_WORLD;
+
+ parallel::distributed::Triangulation<2> tria(mpi_comm);
+ GridGenerator::hyper_cube(tria);
+ tria.refine_global(2);
+ unsigned int rank = Utilities::MPI::this_mpi_process(mpi_comm);
+ if (rank==0)
+ {
+ Triangulation<2>::active_cell_iterator cell = tria.begin_active(),
+ end_cell = tria.end();
+ for (; cell!=end_cell; ++cell)
+ if (cell->is_locally_owned())
+ cell->set_refine_flag();
+ }
+
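+ // Refining only the cells owned by rank 0 creates hanging nodes at the
+ // interface between the two subdomains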
+ tria.execute_coarsening_and_refinement();
+
+
+ std::map<unsigned int, types::global_vertex_index> local_to_global_id =
+ GridTools::compute_local_to_global_vertex_index_map(tria);
+
+ std::vector<types::global_vertex_index> vertex_global_index;
+ Triangulation<2>::active_cell_iterator cell = tria.begin_active(),
+ endc = tria.end();
+ for (; cell!=endc; ++cell)
+ {
+ if (cell->is_locally_owned())
+ {
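+ // Collect the global index of every vertex on the cell's lines,
+ // including the midpoint vertices of refined lines (hanging nodes)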
+ for (unsigned int i=0; i<GeometryInfo<2>::lines_per_cell; ++i)
+ {
+ vertex_global_index.push_back(local_to_global_id[cell->line(i)->vertex_index(0)]);
+ vertex_global_index.push_back(local_to_global_id[cell->line(i)->vertex_index(1)]);
+ if (cell->line(i)->has_children())
+ vertex_global_index.push_back(local_to_global_id[cell->line(i)->child(0)->vertex_index(1)]);
+ }
+ }
+ }
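+ // Sort the indices and strip duplicates so they can be compared against
+ // the expected reference values for each rank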
+ std::sort(vertex_global_index.begin(),vertex_global_index.end());
+ std::vector<types::global_vertex_index>::iterator it;
+ it = std::unique(vertex_global_index.begin(),vertex_global_index.end());
+ vertex_global_index.resize(it-vertex_global_index.begin());
+
+ if (rank==0)
+ {
+ std::vector<types::global_vertex_index> reference;
+ for (unsigned int i=0; i<31; ++i)
+ reference.push_back(i);
+ for (unsigned int i=0; i<vertex_global_index.size(); ++i)
+ AssertThrow(reference[i]==vertex_global_index[i],ExcMessage("Wrong global index"));
+ }
+ if (rank==1)
+ {
+ std::vector<types::global_vertex_index> reference;
+ reference.push_back(14);
+ reference.push_back(18);
+ reference.push_back(19);
+ reference.push_back(20);
+ reference.push_back(22);
+ reference.push_back(23);
+ reference.push_back(24);
+ for (unsigned int i=27; i<55; ++i)
+ reference.push_back(i);
+ for (unsigned int i=0; i<vertex_global_index.size(); ++i)
+ AssertThrow(reference[i]==vertex_global_index[i],ExcMessage("Wrong global index"));
+ }
+}
+
+
+int main (int argc, char *argv[])
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
+ MPILogInitAll log;
+ deallog.depth_console(0);
+
+ test();
+
+ deallog << "OK" << std::endl;
+
+ return 0;
+}