const int mpi_tag_reply =
Utilities::MPI::internal::Tags::exchange_cell_data_reply;
- // send our requests:
+ // send our requests
std::vector<MPI_Request> requests(ghost_owners.size());
{
unsigned int idx = 0;
}
}
- using DestinationToBufferMap =
- std::map<dealii::types::subdomain_id,
- GridTools::CellDataTransferBuffer<dim, DataType>>;
- DestinationToBufferMap destination_to_data_buffer_map;
-
- // receive requests and reply with the ghost indices
- std::vector<std::vector<typename CellId::binary_type>> cell_data_to_send(
- ghost_owners.size());
- std::vector<std::vector<types::global_dof_index>>
- send_dof_numbers_and_indices(ghost_owners.size());
+ // receive requests and reply with the results
std::vector<MPI_Request> reply_requests(ghost_owners.size());
std::vector<std::vector<char>> sendbuffers(ghost_owners.size());
int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);
AssertThrowMPI(ierr);
- Assert(len % sizeof(cell_data_to_send[idx][0]) == 0,
+ Assert(len % sizeof(typename CellId::binary_type) == 0,
ExcInternalError());
const unsigned int n_cells =
len / sizeof(typename CellId::binary_type);
- cell_data_to_send[idx].resize(n_cells);
+ std::vector<typename CellId::binary_type> cells_with_requests(
+ n_cells);
+ std::vector<DataType> data_to_send;
+ data_to_send.reserve(n_cells);
+ std::vector<bool> cell_carries_data(n_cells, false);
- ierr = MPI_Recv(cell_data_to_send[idx].data(),
+ ierr = MPI_Recv(cells_with_requests.data(),
len,
MPI_BYTE,
status.MPI_SOURCE,
for (unsigned int c = 0; c < static_cast<unsigned int>(n_cells); ++c)
{
const auto cell =
- tria->create_cell_iterator(CellId(cell_data_to_send[idx][c]));
+ tria->create_cell_iterator(CellId(cells_with_requests[c]));
- MeshCellIteratorType mesh_it(tria,
+ MeshCellIteratorType mesh_it(tria,
cell->level(),
cell->index(),
&mesh);
- const std_cxx17::optional<DataType> data = pack(mesh_it);
+ const std_cxx17::optional<DataType> data = pack(mesh_it);
if (data)
{
- typename DestinationToBufferMap::iterator p =
- destination_to_data_buffer_map
- .insert(std::make_pair(
- idx,
- GridTools::CellDataTransferBuffer<dim, DataType>()))
- .first;
-
- p->second.cell_ids.emplace_back(cell->id());
- p->second.data.emplace_back(*data);
+ data_to_send.emplace_back(*data);
+ cell_carries_data[c] = true;
}
}
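For context on the optional returned by pack() above (the mechanism that drives the cell_carries_data mask), a hypothetical caller of GridTools::exchange_cell_data_to_ghosts could skip cells like this; the triangulation name and both lambda bodies are illustrative only and not part of this patch:

// Illustration only: 'triangulation' and both lambdas are placeholders.
GridTools::exchange_cell_data_to_ghosts<double>(
  triangulation,
  // pack: called on locally owned cells; an empty optional means
  // "no data for this cell", so the reply leaves it out
  [](const auto &cell) -> std_cxx17::optional<double> {
    if (cell->user_flag_set())
      return cell->diameter();
    else
      return std_cxx17::optional<double>();
  },
  // unpack: called on the receiving side only for cells that carried data
  [](const auto &cell, const double &value) {
    // store 'value' for this ghost cell, e.g. keyed by cell->id()
  });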
- // send reply
- GridTools::CellDataTransferBuffer<dim, DataType> &data =
- destination_to_data_buffer_map[idx];
-
- sendbuffers[idx] =
- Utilities::pack(data, /*enable_compression*/ false);
+ // collect data for sending the reply in a buffer
+
+ // (a) reserve space at the front of the buffer for the size of the packed
+ // data section, so the receiver can tell where that section ends in case
+ // more information follows
+ sendbuffers[idx].resize(sizeof(std::size_t));
+
+ // (b) append the actual data and store how much memory it
+ // corresponds to, which we then insert into the leading position of
+ // the sendbuffer
+ std::size_t size_of_send =
+ Utilities::pack(data_to_send,
+ sendbuffers[idx],
+ /*enable_compression*/ false);
+ std::memcpy(sendbuffers[idx].data(),
+ &size_of_send,
+ sizeof(std::size_t));
+
+ // (c) if some cells did not provide data, append the boolean mask that
+ // tells the receiver which of the requested cells actually carry data
+ if (data_to_send.size() < n_cells)
+ Utilities::pack(cell_carries_data,
+ sendbuffers[idx],
+ /*enable_compression*/ false);
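Steps (a)-(c) amount to a small framing protocol for the reply buffer: a std::size_t header, then the packed data, then (only when some cells sent nothing) a packed bool mask. A self-contained round trip of that framing, as an illustration only, with DataType taken to be double and using Utilities::pack/unpack from <deal.II/base/utilities.h> plus std::memcpy from <cstring>:

// Illustration only: mirrors the buffer layout produced above.
std::vector<double> payload = {0.5, 1.5};
std::vector<bool>   mask    = {true, false, true};

std::vector<char> buffer(sizeof(std::size_t));               // (a) header placeholder
const std::size_t data_size =
  Utilities::pack(payload, buffer, /*enable_compression*/ false);
std::memcpy(buffer.data(), &data_size, sizeof(std::size_t)); // (b) fill in the header
Utilities::pack(mask, buffer, /*enable_compression*/ false); // (c) optional mask

// receiving side: header first, then the data section, then the mask
auto it = buffer.cbegin();
const auto n_data_bytes =
  Utilities::unpack<std::size_t>(it, it + sizeof(std::size_t));
it += sizeof(std::size_t);
const auto payload_back = Utilities::unpack<std::vector<double>>(
  it, it + n_data_bytes, /*enable_compression*/ false);
it += n_data_bytes;
const auto mask_back = Utilities::unpack<std::vector<bool>>(
  it, buffer.cend(), /*enable_compression*/ false);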
+
+ // send data
ierr = MPI_Isend(sendbuffers[idx].data(),
sendbuffers[idx].size(),
MPI_BYTE,
// finally receive the replies
std::vector<char> receive;
- for (unsigned int idx = 0; idx < ghost_owners.size(); ++idx)
+ for (unsigned int id = 0; id < neighbor_cell_list.size(); ++id)
{
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE,
receive.resize(len);
- char *ptr = receive.data();
- ierr = MPI_Recv(ptr,
+ ierr = MPI_Recv(receive.data(),
len,
MPI_BYTE,
status.MPI_SOURCE,
&status);
AssertThrowMPI(ierr);
- auto cellinfo =
- Utilities::unpack<CellDataTransferBuffer<dim, DataType>>(
- receive, /*enable_compression*/ false);
-
- DataType *data = cellinfo.data.data();
- for (unsigned int c = 0; c < cellinfo.cell_ids.size(); ++c, ++data)
+ // (a) first determine the length of the data section in the
+ // received buffer
+ auto data_iterator = receive.begin();
+ std::size_t size_of_received_data =
+ Utilities::unpack<std::size_t>(data_iterator,
+ data_iterator + sizeof(std::size_t));
+ data_iterator += sizeof(std::size_t);
+
+ // (b) unpack the data section in the indicated region
+ auto received_data = Utilities::unpack<std::vector<DataType>>(
+ data_iterator,
+ data_iterator + size_of_received_data,
+ /*enable_compression*/ false);
+ data_iterator += size_of_received_data;
+
+ // (c) if the received data contains fewer entries than the number of cells
+ // we requested, extract the boolean vector that tells us which cells
+ // actually carry data
+ const std::vector<typename CellId::binary_type> &this_cell_list =
+ neighbor_cell_list[status.MPI_SOURCE];
+ AssertIndexRange(received_data.size(), this_cell_list.size() + 1);
+ std::vector<bool> cells_with_data;
+ if (received_data.size() < this_cell_list.size())
{
- const typename Triangulation<dim, spacedim>::cell_iterator
- tria_cell = tria->create_cell_iterator(cellinfo.cell_ids[c]);
+ cells_with_data = Utilities::unpack<std::vector<bool>>(
+ data_iterator, receive.end(), /*enable_compression*/ false);
+ AssertDimension(cells_with_data.size(), this_cell_list.size());
+ }
- MeshCellIteratorType cell(tria,
- tria_cell->level(),
- tria_cell->index(),
- &mesh);
+ // (d) go through the received data and call the user-provided
+ // unpack function
+ auto received_data_iterator = received_data.begin();
+ for (unsigned int c = 0; c < this_cell_list.size(); ++c)
+ if (cells_with_data.empty() || cells_with_data[c])
+ {
+ const typename Triangulation<dim, spacedim>::cell_iterator
+ tria_cell = tria->create_cell_iterator(this_cell_list[c]);
- unpack(cell, *data);
- }
+ MeshCellIteratorType cell(tria,
+ tria_cell->level(),
+ tria_cell->index(),
+ &mesh);
+
+ unpack(cell, *received_data_iterator);
+ ++received_data_iterator;
+ }
}
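Both the request and the reply messages above are received with the probe-then-receive idiom for messages whose length is not known in advance; the probe and receive calls are only partially visible in this excerpt. A minimal standalone sketch of the idiom, with 'tag' and 'comm' as placeholder names:

// Illustration only: receive a message of a priori unknown length.
MPI_Status status;
int ierr = MPI_Probe(MPI_ANY_SOURCE, tag, comm, &status); // wait for a matching message
AssertThrowMPI(ierr);

int len;
ierr = MPI_Get_count(&status, MPI_BYTE, &len);            // how many bytes are pending?
AssertThrowMPI(ierr);

std::vector<char> buffer(len);
ierr = MPI_Recv(buffer.data(),                            // post the matching receive
                len,
                MPI_BYTE,
                status.MPI_SOURCE,
                tag,
                comm,
                MPI_STATUS_IGNORE);
AssertThrowMPI(ierr);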
// make sure that all communication is finished