From: Bruno Turcksin
Date: Mon, 5 Oct 2015 17:47:26 +0000 (-0500)
Subject: Add a function in GridTools that computes a global index for the vertices.
X-Git-Tag: v8.4.0-rc2~326^2~1
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=87213177e0f6e34b9a99eaa4768715ccd11e0f3b;p=dealii.git

Add a function in GridTools that computes a global index for the vertices.
---

diff --git a/include/deal.II/base/types.h b/include/deal.II/base/types.h
index a23bbe87ff..410360b1cf 100644
--- a/include/deal.II/base/types.h
+++ b/include/deal.II/base/types.h
@@ -41,6 +41,12 @@ namespace types
    */
   typedef unsigned int subdomain_id;
 
+  /**
+   * The type used for global indices of vertices.
+   */
+  typedef unsigned long long int global_vertex_index;
+#  define DEAL_II_VERTEX_INDEX_MPI_TYPE MPI_UNSIGNED_LONG_LONG
+
 #ifdef DEAL_II_WITH_64BIT_INDICES
   /**
    * The type used for global indices of degrees of freedom. While in
diff --git a/include/deal.II/grid/grid_tools.h b/include/deal.II/grid/grid_tools.h
index 38ec4d33cd..bc675fb322 100644
--- a/include/deal.II/grid/grid_tools.h
+++ b/include/deal.II/grid/grid_tools.h
@@ -643,6 +643,17 @@ namespace GridTools
   std::vector<std::set<typename Triangulation<dim,spacedim>::active_cell_iterator> >
   vertex_to_cell_map(const Triangulation<dim,spacedim> &triangulation);
 
+  /**
+   * Compute a globally unique index for each vertex and hanging node
+   * associated with a locally owned active cell. The key of the map is the
+   * local index of the vertex and the value is the global index. The
+   * indices need to be recomputed after refinement or coarsening and may be
+   * different.
+   */
+  template <int dim, int spacedim>
+  std::map<unsigned int, types::global_vertex_index>
+  compute_local_to_global_vertex_index_map(
+    const parallel::distributed::Triangulation<dim,spacedim> &triangulation);
+
   /*@}*/
 
   /**
diff --git a/source/grid/grid_tools.cc b/source/grid/grid_tools.cc
index 71f0dadbeb..1842b58f5e 100644
--- a/source/grid/grid_tools.cc
+++ b/source/grid/grid_tools.cc
@@ -1638,6 +1638,304 @@ next_cell:
 
 
+  template <int dim, int spacedim>
+  std::map<unsigned int, types::global_vertex_index>
+  compute_local_to_global_vertex_index_map(
+    const parallel::distributed::Triangulation<dim,spacedim> &triangulation)
+  {
+    std::map<unsigned int, types::global_vertex_index> local_to_global_vertex_index;
+
+    typedef typename Triangulation<dim,spacedim>::active_cell_iterator active_cell_iterator;
+    std::vector<std::set<active_cell_iterator> > vertex_to_cell = vertex_to_cell_map(triangulation);
+
+    // Create a local index for the locally "owned" vertices
+    types::global_vertex_index next_index = 0;
+    unsigned int max_cellid_size = 0;
+    std::set<std::tuple<types::subdomain_id, types::global_vertex_index> > vertices_added;
+    std::map<types::subdomain_id, std::set<unsigned int> > vertices_to_recv;
+    std::map<types::subdomain_id, std::vector<std::tuple<types::global_vertex_index,
+      types::global_vertex_index, std::string> > > vertices_to_send;
+    active_cell_iterator cell = triangulation.begin_active(),
+                         endc = triangulation.end();
+    std::set<active_cell_iterator> missing_vert_cells;
+    std::set<unsigned int> used_vertex_index;
+    for (; cell!=endc; ++cell)
+      {
+        if (cell->is_locally_owned())
+          {
+            for (unsigned int i=0; i<GeometryInfo<dim>::vertices_per_cell; ++i)
+              {
+                types::subdomain_id lowest_subdomain_id = cell->subdomain_id();
+                typename std::set<active_cell_iterator>::iterator
+                adjacent_cell = vertex_to_cell[cell->vertex_index(i)].begin(),
+                end_adj_cell = vertex_to_cell[cell->vertex_index(i)].end();
+                bool can_be_recv = false;
+                for (; adjacent_cell!=end_adj_cell; ++adjacent_cell)
+                  lowest_subdomain_id = std::min(lowest_subdomain_id,
+                                                 (*adjacent_cell)->subdomain_id());
+
+                // See if I "own" this vertex
+                if (lowest_subdomain_id==cell->subdomain_id())
+                  {
+                    // Check that the vertex we are working on is a vertex that
+                    // has not been dealt with yet
+                    if (used_vertex_index.count(cell->vertex_index(i))==0)
+                      {
+                        // Set the local index
+                        local_to_global_vertex_index[cell->vertex_index(i)] = next_index++;
+
+                        // Store the information that will be sent to the
+                        // adjacent cells on other subdomains
+                        adjacent_cell = vertex_to_cell[cell->vertex_index(i)].begin();
+                        std::vector<types::subdomain_id> subdomain_ids;
+                        for (; adjacent_cell!=end_adj_cell; ++adjacent_cell)
+                          if ((*adjacent_cell)->subdomain_id()!=cell->subdomain_id())
+                            {
+                              std::tuple<types::subdomain_id, types::global_vertex_index>
+                              tmp((*adjacent_cell)->subdomain_id(), cell->vertex_index(i));
+                              if (vertices_added.count(tmp)==0)
+                                {
+                                  vertices_to_send[(*adjacent_cell)->subdomain_id()].push_back(
+                                    std::tuple<types::global_vertex_index, types::global_vertex_index,
+                                    std::string> (i, cell->vertex_index(i), cell->id().to_string()));
+                                  if (cell->id().to_string().size() > max_cellid_size)
+                                    max_cellid_size = cell->id().to_string().size();
+                                  vertices_added.insert(tmp);
+                                }
+                            }
+                        used_vertex_index.insert(cell->vertex_index(i));
+                      }
+                  }
+                else
+                  {
+                    // We don't own the vertex so we will receive its global index
+                    vertices_to_recv[lowest_subdomain_id].insert(cell->vertex_index(i));
+                    missing_vert_cells.insert(cell);
+                  }
+              }
+          }
+
+        // Some hanging nodes are vertices of ghost cells. They need to be
+        // received.
+        if (cell->is_ghost())
+          {
+            for (unsigned int i=0; i<GeometryInfo<dim>::faces_per_cell; ++i)
+              {
+                if (cell->at_boundary(i)==false)
+                  {
+                    if (cell->neighbor(i)->active())
+                      {
+                        typename Triangulation<dim,spacedim>::active_cell_iterator adjacent_cell =
+                          cell->neighbor(i);
+                        if ((adjacent_cell->is_locally_owned()))
+                          {
+                            types::subdomain_id adj_subdomain_id = adjacent_cell->subdomain_id();
+                            if (cell->subdomain_id()<adj_subdomain_id)
+                              for (unsigned int j=0; j<GeometryInfo<dim>::vertices_per_face; ++j)
+                                {
+                                  vertices_to_recv[cell->subdomain_id()].insert(cell->face(i)->vertex_index(j));
+                                  missing_vert_cells.insert(cell);
+                                }
+                          }
+                      }
+                  }
+              }
+          }
+      }
+
+    // Get the size of the largest CellID string
+    MPI_Allreduce(MPI_IN_PLACE, &max_cellid_size, 1, MPI_UNSIGNED, MPI_MAX,
+                  triangulation.get_communicator());
+
+    // Make indices global by getting the number of vertices owned by each
+    // processor and shifting the indices accordingly
+    const unsigned int n_cpu = Utilities::MPI::n_mpi_processes(triangulation.get_communicator());
+    std::vector<types::global_vertex_index> indices(n_cpu);
+    MPI_Allgather(&next_index, 1, MPI_UNSIGNED_LONG_LONG, &indices[0],
+                  1, MPI_UNSIGNED_LONG_LONG, triangulation.get_communicator());
+    const types::global_vertex_index shift = std::accumulate(&indices[0],
+                                                              &indices[0]+triangulation.locally_owned_subdomain(),
+                                                              types::global_vertex_index(0));
+
+    std::map<unsigned int, types::global_vertex_index>::iterator
+    global_index_it = local_to_global_vertex_index.begin(),
+    global_index_end = local_to_global_vertex_index.end();
+    for (; global_index_it!=global_index_end; ++global_index_it)
+      global_index_it->second += shift;
+
+    // In a first message, send the global ID of the vertices and the local
+    // positions in the cells. In a second message, send the cell ID as a
+    // resized string. This is done in two messages so that types are not mixed.
+
+    // Send the first message
+    types::global_vertex_index **vertices_send_buffers;
+    vertices_send_buffers = new types::global_vertex_index* [vertices_to_send.size()];
+    MPI_Request *first_requests;
+    first_requests = new MPI_Request[vertices_to_send.size()];
+    typename std::map<types::subdomain_id, std::vector<std::tuple<types::global_vertex_index,
+             types::global_vertex_index, std::string> > >::iterator
+    vert_to_send_it = vertices_to_send.begin(),
+    vert_to_send_end = vertices_to_send.end();
+    for (unsigned int i=0; vert_to_send_it!=vert_to_send_end;
+         ++vert_to_send_it, ++i)
+      {
+        int destination = vert_to_send_it->first;
+        const unsigned int n_vertices = vert_to_send_it->second.size();
+        const int buffer_size = 2*n_vertices;
+        vertices_send_buffers[i] = new types::global_vertex_index [buffer_size];
+
+        // fill the buffer
+        for (unsigned int j=0; j<n_vertices; ++j)
+          {
+            vertices_send_buffers[i][2*j] = std::get<0>(vert_to_send_it->second[j]);
+            vertices_send_buffers[i][2*j+1] =
+              local_to_global_vertex_index[std::get<1>(vert_to_send_it->second[j])];
+          }
+
+        // Send the message
+        MPI_Isend(vertices_send_buffers[i], buffer_size, DEAL_II_VERTEX_INDEX_MPI_TYPE,
+                  destination, 0, triangulation.get_communicator(), &first_requests[i]);
+      }
+
+    // Receive the first message
+    types::global_vertex_index **vertices_recv_buffers;
+    vertices_recv_buffers = new types::global_vertex_index* [vertices_to_recv.size()];
+    typename std::map<types::subdomain_id, std::set<unsigned int> >::iterator
+    vert_to_recv_it = vertices_to_recv.begin(),
+    vert_to_recv_end = vertices_to_recv.end();
+    for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++vert_to_recv_it, ++i)
+      {
+        int source = vert_to_recv_it->first;
+        const unsigned int n_vertices = vert_to_recv_it->second.size();
+        const int buffer_size = 2*n_vertices;
+        vertices_recv_buffers[i] = new types::global_vertex_index[buffer_size];
+
+        // Receive the message
+        MPI_Recv(vertices_recv_buffers[i], buffer_size, DEAL_II_VERTEX_INDEX_MPI_TYPE,
+                 source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+      }
+
+    // Send second message
+    char **cellids_send_buffers = new char *[vertices_to_send.size()];
+    MPI_Request *second_requests;
+    second_requests = new MPI_Request[vertices_to_send.size()];
+    vert_to_send_it = vertices_to_send.begin();
+    for (unsigned int i=0; vert_to_send_it!=vert_to_send_end;
+         ++vert_to_send_it, ++i)
+      {
+        int destination = vert_to_send_it->first;
+        const unsigned int n_vertices = vert_to_send_it->second.size();
+        const int buffer_size = max_cellid_size*n_vertices;
+        cellids_send_buffers[i] = new char [buffer_size];
+
+        // fill the buffer
+        unsigned int pos = 0;
+        for (unsigned int j=0; j<n_vertices; ++j)
+          {
+            std::string cell_id = std::get<2>(vert_to_send_it->second[j]);
+            for (unsigned int k=0; k<max_cellid_size; ++k, ++pos)
+              {
+                if (k<cell_id.size())
+                  cellids_send_buffers[i][pos] = cell_id[k];
+                // Pad the string to max_cellid_size
+                else
+                  cellids_send_buffers[i][pos] = '-';
+              }
+          }
+
+        // Send the message
+        MPI_Isend(cellids_send_buffers[i], buffer_size, MPI_CHAR,
+                  destination, 0, triangulation.get_communicator(), &second_requests[i]);
+      }
+
+    // Receive the second message
+    char **cellids_recv_buffers = new char *[vertices_to_recv.size()];
+    vert_to_recv_it = vertices_to_recv.begin();
+    for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++vert_to_recv_it, ++i)
+      {
+        int source = vert_to_recv_it->first;
+        const unsigned int n_vertices = vert_to_recv_it->second.size();
+        const int buffer_size = max_cellid_size*n_vertices;
+        cellids_recv_buffers[i] = new char[buffer_size];
+
+        // Receive the message
+        MPI_Recv(cellids_recv_buffers[i], buffer_size, MPI_CHAR,
+                 source, 0, triangulation.get_communicator(), MPI_STATUS_IGNORE);
+      }
+
+    // Match the data received with the required vertices
+    vert_to_recv_it = vertices_to_recv.begin();
+    for (unsigned int i=0; vert_to_recv_it!=vert_to_recv_end; ++i, ++vert_to_recv_it)
+      {
+        for (unsigned int j=0; j<vert_to_recv_it->second.size(); ++j)
+          {
+            const unsigned int local_pos_recv = vertices_recv_buffers[i][2*j];
+            const types::global_vertex_index global_id_recv = vertices_recv_buffers[i][2*j+1];
+            const std::string cellid_recv(&cellids_recv_buffers[i][max_cellid_size*j],
+                                          &cellids_recv_buffers[i][max_cellid_size*(j+1)]);
+            bool found = false;
+            typename std::set<active_cell_iterator>::iterator
+            cell_set_it = missing_vert_cells.begin(),
+            end_cell_set = missing_vert_cells.end();
+            for (; (found==false) && (cell_set_it!=end_cell_set); ++cell_set_it)
+              {
+                typename std::set<active_cell_iterator>::iterator
+                candidate_cell = vertex_to_cell[(*cell_set_it)->vertex_index(i)].begin(),
+                end_cell = vertex_to_cell[(*cell_set_it)->vertex_index(i)].end();
+                for (; candidate_cell!=end_cell; ++candidate_cell)
+                  {
+                    std::string current_cellid = (*candidate_cell)->id().to_string();
+                    current_cellid.resize(max_cellid_size, '-');
+                    if (current_cellid.compare(cellid_recv)==0)
+                      {
+                        local_to_global_vertex_index[(*candidate_cell)->vertex_index(local_pos_recv)] =
+                          global_id_recv;
+                        found = true;
+
+                        break;
+                      }
+                  }
+              }
+          }
+      }
+
+    // Free the buffers used to send the messages
+    for (unsigned int i=0; i<vertices_to_send.size(); ++i)
+      {
+        delete [] vertices_send_buffers[i];
+        delete [] cellids_send_buffers[i];
+      }
+    delete [] vertices_send_buffers;
+    delete [] cellids_send_buffers;
+    delete [] first_requests;
+    delete [] second_requests;
+
+    // Free the buffers used to receive the messages
+    for (unsigned int i=0; i<vertices_to_recv.size(); ++i)
+      {
+        delete [] vertices_recv_buffers[i];
+        delete [] cellids_recv_buffers[i];
+      }
+    delete [] vertices_recv_buffers;
+    delete [] cellids_recv_buffers;
+
+    return local_to_global_vertex_index;
+  }
+
+
+
   template <int dim, int spacedim>
   void
   get_face_connectivity_of_cells (const Triangulation<dim,spacedim> &triangulation,
diff --git a/source/grid/grid_tools.inst.in b/source/grid/grid_tools.inst.in
index 5e63a31814..1f7ce2649a 100644
--- a/source/grid/grid_tools.inst.in
+++ b/source/grid/grid_tools.inst.in
@@ -103,6 +103,10 @@ for (deal_II_dimension : DIMENSIONS ; deal_II_space_dimension : SPACE_DIMENSIONS
     std::vector<dealii::internal::ActiveCellIterator<deal_II_dimension, deal_II_space_dimension, parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension> >::type>
     compute_ghost_cell_halo_layer (const parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension> &);
 
+    template
+    std::map<unsigned int, types::global_vertex_index>
+    compute_local_to_global_vertex_index_map(const parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension> &triangulation);
+
     template
     std::list<std::pair<parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension>::cell_iterator, parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension>::cell_iterator> >
     get_finest_common_cells (const parallel::distributed::Triangulation<deal_II_dimension, deal_II_space_dimension> &mesh_1,
diff --git a/tests/grid/grid_tools_local_to_global_vertex_id.cc b/tests/grid/grid_tools_local_to_global_vertex_id.cc
new file mode 100644
index 0000000000..22b52166c1
--- /dev/null
+++ b/tests/grid/grid_tools_local_to_global_vertex_id.cc
@@ -0,0 +1,110 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2015 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/mpi.h>
+#include <deal.II/base/geometry_info.h>
+#include <deal.II/distributed/tria.h>
+#include <deal.II/grid/grid_generator.h>
+#include <deal.II/grid/grid_tools.h>
+
+#include <fstream>
+#include <vector>
+
+
+void test()
+{
+  const MPI_Comm mpi_comm = MPI_COMM_WORLD;
+
+  parallel::distributed::Triangulation<2> tria(mpi_comm);
+  GridGenerator::hyper_cube(tria);
+  tria.refine_global(2);
+  unsigned int rank = Utilities::MPI::this_mpi_process(mpi_comm);
+  if (rank==0)
+    {
+      typename Triangulation<2>::active_cell_iterator cell = tria.begin_active(),
+                                                      end_cell = tria.end();
+      for (; cell!=end_cell; ++cell)
+        if (cell->is_locally_owned())
+          cell->set_refine_flag();
+    }
+
+  tria.execute_coarsening_and_refinement();
+
+
+  std::map<unsigned int, types::global_vertex_index> local_to_global_id =
+    GridTools::compute_local_to_global_vertex_index_map(tria);
+
+  std::vector<types::global_vertex_index> vertex_global_index;
+  typename Triangulation<2>::active_cell_iterator cell = tria.begin_active(),
+                                                  endc = tria.end();
+  for (; cell!=endc; ++cell)
+    {
+      if (cell->is_locally_owned())
+        {
+          for (unsigned int i=0; i<GeometryInfo<2>::lines_per_cell; ++i)
+            {
+              vertex_global_index.push_back(local_to_global_id[cell->line(i)->vertex_index(0)]);
+              vertex_global_index.push_back(local_to_global_id[cell->line(i)->vertex_index(1)]);
+              if (cell->line(i)->has_children())
+                vertex_global_index.push_back(local_to_global_id[cell->line(i)->child(0)->vertex_index(1)]);
+            }
+        }
+    }
+  std::sort(vertex_global_index.begin(), vertex_global_index.end());
+  std::vector<types::global_vertex_index>::iterator it;
+  it = std::unique(vertex_global_index.begin(), vertex_global_index.end());
+  vertex_global_index.resize(it-vertex_global_index.begin());
+
+  if (rank==0)
+    {
+      std::vector<types::global_vertex_index> reference;
+      for (unsigned int i=0; i<31; ++i)
+        reference.push_back(i);
+      for (unsigned int i=0; i<vertex_global_index.size(); ++i)
+        AssertThrow(vertex_global_index[i]==reference[i], ExcInternalError());
+    }
+  if (rank==1)
+    {
+      std::vector<types::global_vertex_index> reference;
+      reference.push_back(14);
+      reference.push_back(18);
+      reference.push_back(19);
+      reference.push_back(20);
+      reference.push_back(22);
+      reference.push_back(23);
+      reference.push_back(24);
+      for (unsigned int i=27; i<55; ++i)
+        reference.push_back(i);
+      for (unsigned int i=0; i<vertex_global_index.size(); ++i)
+        AssertThrow(vertex_global_index[i]==reference[i], ExcInternalError());
+    }
+}
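
Editor's note, not part of the patch: the sketch below illustrates how the new GridTools::compute_local_to_global_vertex_index_map() might be called from a user program, assuming a deal.II build configured with MPI and p4est. The grid setup and the printing loop are illustrative choices, not part of the commit; only the function call and the returned map type follow the declaration added to grid_tools.h above.

#include <deal.II/base/mpi.h>
#include <deal.II/base/types.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>

#include <iostream>
#include <map>

int main (int argc, char *argv[])
{
  using namespace dealii;

  // The function is collective, so every MPI rank has to call it.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);

  parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  GridGenerator::hyper_cube(tria);
  tria.refine_global(3);

  // Map from the local vertex index to a globally unique vertex index.
  // The indices become invalid after refinement or coarsening and then
  // need to be recomputed.
  const std::map<unsigned int, types::global_vertex_index> local_to_global =
    GridTools::compute_local_to_global_vertex_index_map(tria);

  // Print the pairs known to this rank.
  std::map<unsigned int, types::global_vertex_index>::const_iterator
  it = local_to_global.begin();
  for (; it!=local_to_global.end(); ++it)
    std::cout << "local vertex " << it->first
              << " -> global vertex " << it->second << std::endl;

  return 0;
}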