From: Marc Fehling Date: Fri, 6 Jul 2018 21:11:33 +0000 (-0600) Subject: Support for transfer of variable size data. X-Git-Tag: v9.1.0-rc1~890^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=refs%2Fpull%2F6920%2Fhead;p=dealii.git Support for transfer of variable size data. --- diff --git a/doc/news/changes/incompatibilities/20180707Fehling b/doc/news/changes/incompatibilities/20180720Fehling similarity index 65% rename from doc/news/changes/incompatibilities/20180707Fehling rename to doc/news/changes/incompatibilities/20180720Fehling index 3cce8be13a..98d1ebd976 100644 --- a/doc/news/changes/incompatibilities/20180707Fehling +++ b/doc/news/changes/incompatibilities/20180720Fehling @@ -8,4 +8,9 @@ const boost::iterator_range::const_iterator &>`, where the last argument describes an iterator range, from which the callback function is allowed to read.
-(Marc Fehling, 2018/07/07) +Further, the `register_data_attach()` function now requires a boolean +argument `returns_variable_size_data`, which denotes whether the registered +pack_callback function interacts with the fixed size (`=false`) or +variable size (`=true`) buffer for data transfer. +
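As a minimal illustration of the changed interface (a sketch only: `tria` stands for a parallel::distributed::Triangulation<dim, spacedim> object, and `pack_fixed` / `pack_variable` are hypothetical callbacks that take a cell_iterator and a CellStatus and return std::vector<char>), registering the two kinds of callbacks now reads:

    // The new boolean selects which buffer the callback writes to.
    const unsigned int fixed_handle =
      tria.register_data_attach(pack_fixed,
                                /*returns_variable_size_data=*/false);
    const unsigned int variable_handle =
      tria.register_data_attach(pack_variable,
                                /*returns_variable_size_data=*/true);

Each returned handle is later passed to notify_ready_to_unpack() to retrieve the corresponding data after refinement, repartitioning, or serialization.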
+(Marc Fehling, 2018/07/20) diff --git a/include/deal.II/base/quadrature_point_data.h b/include/deal.II/base/quadrature_point_data.h index f30fbc79b0..71ae9104f7 100644 --- a/include/deal.II/base/quadrature_point_data.h +++ b/include/deal.II/base/quadrature_point_data.h @@ -794,11 +794,13 @@ namespace parallel matrix_dofs_child.reinit(dofs_per_cell, number_of_values); matrix_quadrature.reinit(n_q_points, number_of_values); - handle = triangulation->register_data_attach(std::bind( - &ContinuousQuadratureDataTransfer::pack_function, - this, - std::placeholders::_1, - std::placeholders::_2)); + handle = triangulation->register_data_attach( + std::bind( + &ContinuousQuadratureDataTransfer::pack_function, + this, + std::placeholders::_1, + std::placeholders::_2), + /*returns_variable_size_data=*/false); } diff --git a/include/deal.II/distributed/tria.h b/include/deal.II/distributed/tria.h index 8c0d8a3850..b093783fde 100644 --- a/include/deal.II/distributed/tria.h +++ b/include/deal.II/distributed/tria.h @@ -721,6 +721,11 @@ namespace parallel * format `std::vector`, representing the packed data on a * certain cell. * + * The second parameter @p returns_variable_size_data indicates whether + * the returned size of the memory region from the callback function + * varies by cell (=true) or stays constant on each one + * throughout the whole domain (=false). + * * @note The purpose of this function is to register intent to * attach data for a single, subsequent call to * execute_coarsening_and_refinement() and notify_ready_to_unpack(), @@ -733,8 +738,8 @@ namespace parallel unsigned int register_data_attach( const std::function(const cell_iterator &, - const CellStatus)> - &pack_callback); + const CellStatus)> &pack_callback, + const bool returns_variable_size_data); /** * This function is the opposite of register_data_attach(). It is called @@ -896,10 +901,11 @@ namespace parallel CellStatus)>; /** - * These callback functions will be stored in the order on how they have - * been registered with the register_data_attach() function. + * These callback functions will be stored in the order in which they + * have been registered with the register_data_attach() function. */ - std::vector pack_callbacks; + std::vector pack_callbacks_fixed; + std::vector pack_callbacks_variable; }; CellAttachedData cell_attached_data; @@ -952,24 +958,30 @@ namespace parallel DataTransfer(MPI_Comm mpi_communicator); /** - * Prepare any data transfer by calling the pack callback function of - * each entry of @p pack_callback_sets on each cell registered in - * @p quad_cell_relations. + * Prepare data transfer by calling the pack callback functions on each + * cell + * in @p quad_cell_relations. + * + * All registered callback functions in @p pack_callbacks_fixed will write + * into the fixed size buffer, whereas each entry of @p pack_callbacks_variable + * will write its data into the variable size buffer. */ void pack_data( const std::vector &quad_cell_relations, const std::vector - &pack_callbacks); + &pack_callbacks_fixed, + const std::vector + &pack_callbacks_variable); /** * Transfer data across forests. * - * Besides the actual @parallel_forest, which has been already refined + * Besides the actual @p parallel_forest, which has been already refined * and repartitioned, this function also needs information about its * previous state, i.e. the locally owned intervals in p4est's - * sc_array of each processor. 
These information need to be memcopyied - * out of the old p4est object and provided via the parameter + * sc_array of each processor. This information needs to be memcopyied + * out of the old p4est object and has to be provided via the parameter * @p previous_global_first_quadrant. * * Data has to be previously packed with pack_data(). @@ -1066,6 +1078,11 @@ namespace parallel private: MPI_Comm mpi_communicator; + /** + * Flag that denotes if variable size data has been packed. + */ + bool variable_size_data_stored; + /** * Cumulative size in bytes that those functions that have called * register_data_attach() want to attach to each cell. This number @@ -1074,9 +1091,9 @@ namespace parallel * * The last entry of this container corresponds to the data size * packed per cell in the fixed size buffer (which can be accessed - * calling cumulative_sizes_fixed.back()). + * calling sizes_fixed_cumulative.back()). */ - std::vector cumulative_sizes_fixed; + std::vector sizes_fixed_cumulative; /** * Consecutive buffers designed for the fixed size transfer @@ -1085,10 +1102,14 @@ namespace parallel std::vector src_data_fixed; std::vector dest_data_fixed; - // TODO: buffers for p4est variable size transfer - // std::vector> cumulative_sizes_var_per_cell; - // std::vector src_sizes_var, dest_sizes_var; - // std::vector src_data_var, dest_data_var; + /** + * Consecutive buffers designed for the variable size transfer + * functions of p4est. + */ + std::vector src_sizes_variable; + std::vector dest_sizes_variable; + std::vector src_data_variable; + std::vector dest_data_variable; }; DataTransfer data_transfer; @@ -1294,7 +1315,8 @@ namespace parallel const std::function( const typename dealii::Triangulation<1, spacedim>::cell_iterator &, const typename dealii::Triangulation<1, spacedim>::CellStatus)> - &pack_callback); + & pack_callback, + const bool returns_variable_size_data); /** * This function is not implemented, but needs to be present for the diff --git a/source/distributed/solution_transfer.cc b/source/distributed/solution_transfer.cc index 7341cb6aaf..fc255e8bdb 100644 --- a/source/distributed/solution_transfer.cc +++ b/source/distributed/solution_transfer.cc @@ -85,11 +85,13 @@ namespace parallel *>(&dof_handler->get_triangulation()))); Assert(tria != nullptr, ExcInternalError()); - handle = tria->register_data_attach(std::bind( - &SolutionTransfer::pack_callback, - this, - std::placeholders::_1, - std::placeholders::_2)); + handle = tria->register_data_attach( + std::bind( + &SolutionTransfer::pack_callback, + this, + std::placeholders::_1, + std::placeholders::_2), + /*returns_variable_size_data=*/false); } diff --git a/source/distributed/tria.cc b/source/distributed/tria.cc index 143ad41e40..2609b936de 100644 --- a/source/distributed/tria.cc +++ b/source/distributed/tria.cc @@ -1136,6 +1136,7 @@ namespace parallel Triangulation::DataTransfer::DataTransfer( MPI_Comm mpi_communicator) : mpi_communicator(mpi_communicator) + , variable_size_data_stored(false) {} @@ -1145,10 +1146,27 @@ namespace parallel Triangulation::DataTransfer::pack_data( const std::vector &quad_cell_relations, const std::vector - &pack_callbacks) + &pack_callbacks_fixed, + const std::vector + &pack_callbacks_variable) { Assert(src_data_fixed.size() == 0, ExcMessage("Previously packed data has not been released yet!")); + Assert(src_sizes_variable.size() == 0, ExcInternalError()); + + const unsigned int n_callbacks_fixed = pack_callbacks_fixed.size(); + const unsigned int n_callbacks_variable = 
pack_callbacks_variable.size(); + + // Store information that we packed variable size data in + // a member variable for later. + variable_size_data_stored = (n_callbacks_variable > 0); + + // If variable transfer is scheduled, we will store the data size that + // each variable size callback function writes in this auxiliary + // container. The information will be stored by each cell in this vector + // temporarily. + std::vector cell_sizes_variable_cumulative( + n_callbacks_variable); // Prepare the buffer structure, in which each callback function will // store its data for each active cell. @@ -1162,16 +1180,22 @@ namespace parallel // || callback_1 || callback_2 |...| || callback_1 || callback_2 |...| ... // |||char|char|...|||char|char|...|...| |||char|char|...|||char|char|...|...| ... /* clang-format on */ - std::vector>> packed_data( + std::vector>> packed_fixed_size_data( quad_cell_relations.size()); + std::vector>> packed_variable_size_data( + variable_size_data_stored ? quad_cell_relations.size() : 0); + // + // --------- Pack data for fixed and variable size transfer --------- + // // Iterate over all cells, call all callback functions on each cell, // and store their data in the corresponding buffer scope. { - auto quad_cell_rel_it = quad_cell_relations.cbegin(); - auto data_cell_it = packed_data.begin(); + auto quad_cell_rel_it = quad_cell_relations.cbegin(); + auto data_cell_fixed_it = packed_fixed_size_data.begin(); + auto data_cell_variable_it = packed_variable_size_data.begin(); for (; quad_cell_rel_it != quad_cell_relations.cend(); - ++quad_cell_rel_it, ++data_cell_it) + ++quad_cell_rel_it, ++data_cell_fixed_it) { const auto &cell_status = std::get<1>(*quad_cell_rel_it); const auto &dealii_cell = std::get<2>(*quad_cell_rel_it); @@ -1213,25 +1237,30 @@ namespace parallel // Reserve memory corresponding to the number of callback // functions that will be called. + // If variable size transfer is scheduled, we need to leave + // room for an array that holds information about how many + // bytes each of the variable size callback functions will + // write. // On cells flagged with CELL_INVALID, only its CellStatus // will be stored. - const unsigned int n_callbacks_on_cell = + const unsigned int n_fixed_size_data_sets_on_cell = 1 + ((cell_status == parallel::distributed::Triangulation::CELL_INVALID) ? 0 : - pack_callbacks.size()); - data_cell_it->resize(n_callbacks_on_cell); + ((variable_size_data_stored ? 1 : 0) + n_callbacks_fixed)); + data_cell_fixed_it->resize(n_fixed_size_data_sets_on_cell); // We continue with packing all data on this specific cell. + auto data_fixed_it = data_cell_fixed_it->begin(); + // First, we pack the CellStatus information. - auto data_it = data_cell_it->begin(); // to get consistent data sizes on each cell for the fixed size // transfer, we won't allow compression - *data_it = + *data_fixed_it = Utilities::pack(cell_status, /*allow_compression=*/false); - ++data_it; + ++data_fixed_it; // Proceed with all registered callback functions. // Skip cells with the CELL_INVALID flag. @@ -1239,16 +1268,81 @@ namespace parallel parallel::distributed::Triangulation::CELL_INVALID) { - for (auto callback_it = pack_callbacks.cbegin(); - callback_it != pack_callbacks.cend(); - ++callback_it, ++data_it) + // Pack fixed size data. 
+ for (auto callback_it = pack_callbacks_fixed.cbegin(); + callback_it != pack_callbacks_fixed.cend(); + ++callback_it, ++data_fixed_it) + { + *data_fixed_it = (*callback_it)(dealii_cell, cell_status); + } + + // Pack variable size data. + // If we store variable size data, we need to transfer + // the sizes of each corresponding callback function + // via fixed size transfer as well. + if (variable_size_data_stored) { - *data_it = (*callback_it)(dealii_cell, cell_status); + const unsigned int n_variable_size_data_sets_on_cell = + ((cell_status == + parallel::distributed::Triangulation:: + CELL_INVALID) ? + 0 : + n_callbacks_variable); + data_cell_variable_it->resize( + n_variable_size_data_sets_on_cell); + + auto callback_it = pack_callbacks_variable.cbegin(); + auto data_variable_it = data_cell_variable_it->begin(); + auto sizes_variable_it = + cell_sizes_variable_cumulative.begin(); + for (; callback_it != pack_callbacks_variable.cend(); + ++callback_it, ++data_variable_it, ++sizes_variable_it) + { + *data_variable_it = + (*callback_it)(dealii_cell, cell_status); + + // Store data sizes for each callback function first. + // Make it cumulative below. + *sizes_variable_it = data_variable_it->size(); + } + + // Turn size vector into its cumulative representation. + std::partial_sum(cell_sizes_variable_cumulative.begin(), + cell_sizes_variable_cumulative.end(), + cell_sizes_variable_cumulative.begin()); + + // Serialize cumulative variable size vector value-by-value. + // This way we can circumvent the overhead of storing the + // container object as a whole, since we know its size by + // the number of registered callback functions. + data_fixed_it->resize(n_callbacks_variable * + sizeof(unsigned int)); + for (unsigned int i = 0; i < n_callbacks_variable; ++i) + std::memcpy(&(data_fixed_it->at(i * + sizeof(unsigned int))), + &(cell_sizes_variable_cumulative.at(i)), + sizeof(unsigned int)); + + ++data_fixed_it; } + + // Double check that we packed everything we wanted + // in the fixed size buffers. + Assert(data_fixed_it == data_cell_fixed_it->end(), + ExcInternalError()); } + + // Increment the variable size data iterator + // only if we actually pack this kind of data + // to avoid getting out of bounds. + if (variable_size_data_stored) + ++data_cell_variable_it; } // loop over quad_cell_relations } + // + // ----------- Gather data sizes for fixed size transfer ------------ + // // Generate a vector which stores the sizes of each callback function, // including the packed CellStatus transfer. // Find the very first cell that we wrote to with all callback @@ -1259,19 +1353,20 @@ namespace parallel // any cell at all, we will exchange the information about the data sizes // among them later. The code inbetween is still well-defined, since the // following loops will be skipped. - std::vector local_sizes_fixed(1 + pack_callbacks.size()); - for (auto data_cell_it = packed_data.cbegin(); - data_cell_it != packed_data.cend(); - ++data_cell_it) + std::vector local_sizes_fixed( + 1 + n_callbacks_fixed + (variable_size_data_stored ? 
1 : 0)); + for (auto data_cell_fixed_it = packed_fixed_size_data.cbegin(); + data_cell_fixed_it != packed_fixed_size_data.cend(); + ++data_cell_fixed_it) { - if (data_cell_it->size() == local_sizes_fixed.size()) + if (data_cell_fixed_it->size() == local_sizes_fixed.size()) { auto sizes_fixed_it = local_sizes_fixed.begin(); - auto data_it = data_cell_it->cbegin(); - for (; data_it != data_cell_it->cend(); - ++data_it, ++sizes_fixed_it) + auto data_fixed_it = data_cell_fixed_it->cbegin(); + for (; data_fixed_it != data_cell_fixed_it->cend(); + ++data_fixed_it, ++sizes_fixed_it) { - *sizes_fixed_it = data_it->size(); + *sizes_fixed_it = data_fixed_it->size(); } break; @@ -1279,12 +1374,12 @@ namespace parallel } // Check if all cells have valid sizes. - for (auto data_cell_it = packed_data.cbegin(); - data_cell_it != packed_data.cend(); - ++data_cell_it) + for (auto data_cell_fixed_it = packed_fixed_size_data.cbegin(); + data_cell_fixed_it != packed_fixed_size_data.cend(); + ++data_cell_fixed_it) { - Assert(data_cell_it->size() == 1 || - data_cell_it->size() == local_sizes_fixed.size(), + Assert((data_cell_fixed_it->size() == 1) || + (data_cell_fixed_it->size() == local_sizes_fixed.size()), ExcInternalError()); } @@ -1298,34 +1393,66 @@ namespace parallel // Construct cumulative sizes, since this is the only information // we need from now on. - cumulative_sizes_fixed.resize(global_sizes_fixed.size()); + sizes_fixed_cumulative.resize(global_sizes_fixed.size()); std::partial_sum(global_sizes_fixed.begin(), global_sizes_fixed.end(), - cumulative_sizes_fixed.begin()); - - // Move every piece of packed data into the consecutive buffer. - src_data_fixed.reserve(quad_cell_relations.size() * - cumulative_sizes_fixed.back()); - for (auto data_cell_it = packed_data.begin(); - data_cell_it != packed_data.end(); - ++data_cell_it) + sizes_fixed_cumulative.begin()); + + // + // ---------- Gather data sizes for variable size transfer ---------- + // + if (variable_size_data_stored) + { + src_sizes_variable.reserve(packed_variable_size_data.size()); + for (auto data_cell_variable_it = packed_variable_size_data.cbegin(); + data_cell_variable_it != packed_variable_size_data.cend(); + ++data_cell_variable_it) + { + int variable_data_size_on_cell = 0; + + for (auto data_variable_it = data_cell_variable_it->cbegin(); + data_variable_it != data_cell_variable_it->cend(); + ++data_variable_it) + variable_data_size_on_cell += data_variable_it->size(); + + src_sizes_variable.push_back(variable_data_size_on_cell); + } + } + + // + // ------------------------ Build buffers --------------------------- + // + const unsigned int expected_size_fixed = + quad_cell_relations.size() * sizes_fixed_cumulative.back(); + const unsigned int expected_size_variable = + std::accumulate(src_sizes_variable.begin(), + src_sizes_variable.end(), + std::vector::size_type(0)); + + // Move every piece of packed fixed size data into the consecutive buffer. + src_data_fixed.reserve(expected_size_fixed); + for (auto data_cell_fixed_it = packed_fixed_size_data.begin(); + data_cell_fixed_it != packed_fixed_size_data.end(); + ++data_cell_fixed_it) { // Move every fraction of packed data into the buffer // reserved for this particular cell. 
- for (auto data_it = data_cell_it->begin(); - data_it != data_cell_it->end(); - ++data_it) - std::move(data_it->begin(), - data_it->end(), + for (auto data_fixed_it = data_cell_fixed_it->begin(); + data_fixed_it != data_cell_fixed_it->end(); + ++data_fixed_it) + std::move(data_fixed_it->begin(), + data_fixed_it->end(), std::back_inserter(src_data_fixed)); // If we only packed the CellStatus information // (i.e. encountered a cell flagged CELL_INVALID), // fill the remaining space with invalid entries. - if (data_cell_it->size() == 1) + // We can skip this if there is nothing else to pack. + if ((data_cell_fixed_it->size() == 1) && + (sizes_fixed_cumulative.size() > 1)) { const std::size_t bytes_skipped = - cumulative_sizes_fixed.back() - cumulative_sizes_fixed.front(); + sizes_fixed_cumulative.back() - sizes_fixed_cumulative.front(); src_data_fixed.insert(src_data_fixed.end(), bytes_skipped, @@ -1333,8 +1460,29 @@ namespace parallel } } + // Move every piece of packed variable size data into the consecutive + // buffer. + if (variable_size_data_stored) + { + src_data_variable.reserve(expected_size_variable); + for (auto data_cell_variable_it = packed_variable_size_data.begin(); + data_cell_variable_it != packed_variable_size_data.end(); + ++data_cell_variable_it) + { + // Move every fraction of packed data into the buffer + // reserved for this particular cell. + for (auto data_variable_it = data_cell_variable_it->begin(); + data_variable_it != data_cell_variable_it->end(); + ++data_variable_it) + std::move(data_variable_it->begin(), + data_variable_it->end(), + std::back_inserter(src_data_variable)); + } + } + // Double check that we packed everything correctly. - Assert(src_data_fixed.size() == src_data_fixed.capacity(), + Assert(src_data_fixed.size() == expected_size_fixed, ExcInternalError()); + Assert(src_data_variable.size() == expected_size_variable, ExcInternalError()); } @@ -1348,31 +1496,84 @@ namespace parallel const typename dealii::internal::p4est::types::gloidx *previous_global_first_quadrant) { - Assert(cumulative_sizes_fixed.size() > 0, + Assert(sizes_fixed_cumulative.size() > 0, ExcMessage("No data has been packed!")); // Resize memory according to the data that we will receive. dest_data_fixed.resize(parallel_forest->local_num_quadrants * - cumulative_sizes_fixed.back()); + sizes_fixed_cumulative.back()); + + // Execute non-blocking fixed size transfer. + typename dealii::internal::p4est::types::transfer_context + *tf_context; + tf_context = + dealii::internal::p4est::functions::transfer_fixed_begin( + parallel_forest->global_first_quadrant, + previous_global_first_quadrant, + parallel_forest->mpicomm, + 0, + dest_data_fixed.data(), + src_data_fixed.data(), + sizes_fixed_cumulative.back()); + + if (variable_size_data_stored) + { + // Resize memory according to the data that we will receive. + dest_sizes_variable.resize(parallel_forest->local_num_quadrants); + + // Execute fixed size transfer of data sizes for variable size + // transfer. + dealii::internal::p4est::functions::transfer_fixed( + parallel_forest->global_first_quadrant, + previous_global_first_quadrant, + parallel_forest->mpicomm, + 1, + dest_sizes_variable.data(), + src_sizes_variable.data(), + sizeof(int)); + } - // Execute fixed size transfer. 
- dealii::internal::p4est::functions::transfer_fixed( - parallel_forest->global_first_quadrant, - previous_global_first_quadrant, - parallel_forest->mpicomm, - 0, - dest_data_fixed.data(), - src_data_fixed.data(), - cumulative_sizes_fixed.back()); + dealii::internal::p4est::functions::transfer_fixed_end(tf_context); // Release memory of previously packed data. src_data_fixed.clear(); src_data_fixed.shrink_to_fit(); - // TODO: for variable transfer - // allocate memory for transfer - // p4est transfer fixed_begin for transfer of var data sizes - // p4est transfer custom for transfer of var data + if (variable_size_data_stored) + { + // Resize memory according to the data that we will receive. + dest_data_variable.resize( + std::accumulate(dest_sizes_variable.begin(), + dest_sizes_variable.end(), + std::vector::size_type(0))); + + // ----- WORKAROUND ----- + // An assertion in p4est prevents us from sending/receiving no data + // at all, which is mandatory if one of our processes does not own + // any quadrant. This bypasses the assertion from being triggered. + // - see: https://github.com/cburstedde/p4est/issues/48 + if (src_sizes_variable.size() == 0) + src_sizes_variable.resize(1); + if (dest_sizes_variable.size() == 0) + dest_sizes_variable.resize(1); + + // Execute variable size transfer. + dealii::internal::p4est::functions::transfer_custom( + parallel_forest->global_first_quadrant, + previous_global_first_quadrant, + parallel_forest->mpicomm, + 1, + dest_data_variable.data(), + dest_sizes_variable.data(), + src_data_variable.data(), + src_sizes_variable.data()); + + // Release memory of previously packed data. + src_sizes_variable.clear(); + src_sizes_variable.shrink_to_fit(); + src_data_variable.clear(); + src_data_variable.shrink_to_fit(); + } } @@ -1382,15 +1583,16 @@ namespace parallel Triangulation::DataTransfer::unpack_cell_status( std::vector &quad_cell_relations) const { - Assert(cumulative_sizes_fixed.size() > 0, + Assert(sizes_fixed_cumulative.size() > 0, ExcMessage("No data has been packed!")); - Assert((quad_cell_relations.size() == 0) || (dest_data_fixed.size() > 0), - ExcMessage("No data has been received!")); + if (quad_cell_relations.size() > 0) + { + Assert(dest_data_fixed.size() > 0, + ExcMessage("No data has been received!")); + } // Size of CellStatus object that will be unpacked on each cell. - const std::size_t size = sizeof( - typename parallel::distributed::Triangulation::CellStatus); + const unsigned int size = sizes_fixed_cumulative.front(); // Iterate over all cells and overwrite the CellStatus // information from the transferred data. @@ -1399,7 +1601,7 @@ namespace parallel auto quad_cell_rel_it = quad_cell_relations.begin(); auto dest_fixed_it = dest_data_fixed.cbegin(); for (; quad_cell_rel_it != quad_cell_relations.end(); - ++quad_cell_rel_it, dest_fixed_it += cumulative_sizes_fixed.back()) + ++quad_cell_rel_it, dest_fixed_it += sizes_fixed_cumulative.back()) { std::get<1>(*quad_cell_rel_it) = // cell_status Utilities::unpack::const_iterator> &)> &unpack_callback) const { - Assert(cumulative_sizes_fixed.size() > 0, + // We decode the handle returned by register_data_attach() back into + // a format we can use. All even handles belong to those callback + // functions which write/read variable size data, all odd handles interact + // with fixed size buffers. 
+ const bool callback_variable_transfer = (handle % 2 == 0); + const unsigned int callback_index = handle / 2; + + Assert(sizes_fixed_cumulative.size() > 0, ExcMessage("No data has been packed!")); - Assert((quad_cell_relations.size() == 0) || (dest_data_fixed.size() > 0), - ExcMessage("No data has been received!")); + if (quad_cell_relations.size() > 0) + { + Assert(dest_data_fixed.size() > 0, + ExcMessage("No data has been received!")); + + if (callback_variable_transfer) + Assert(dest_data_variable.size() > 0, + ExcMessage("No data has been received!")); + } + + std::vector::const_iterator dest_data_it; + std::vector::const_iterator dest_sizes_cell_it; - // Calculate offset and size from cumulated sizes. - const unsigned int offset = cumulative_sizes_fixed[handle]; - const unsigned int size = cumulative_sizes_fixed[handle + 1] - offset; + // Depending on whether our callback function unpacks fixed or + // variable size data, we have to pursue different approaches + // to localize the correct fraction of the buffer from which + // we are allowed to read. + unsigned int offset = numbers::invalid_unsigned_int; + unsigned int size = numbers::invalid_unsigned_int; + unsigned int data_increment = numbers::invalid_unsigned_int; + + if (callback_variable_transfer) + { + // For the variable size data, we need to extract the + // data size from the fixed size buffer on each cell. + // + // We packed this information last, so the last packed + // object in the fixed size buffer corresponds to the + // variable data sizes. + // + // The last entry of sizes_fixed_cumulative corresponds + // to the size of all fixed size data packed on the cell. + // To get the offset for the last packed object, we need + // to get the next-to-last entry. + const unsigned int offset_variable_data_sizes = + sizes_fixed_cumulative[sizes_fixed_cumulative.size() - 2]; + + // This iterator points to the data size that the + // callback_function packed for each specific cell. + // Adjust buffer iterator to the offset of the callback + // function so that we only have to advance its position + // to the next cell after each iteration. + dest_sizes_cell_it = dest_data_fixed.cbegin() + + offset_variable_data_sizes + + callback_index * sizeof(unsigned int); + + // Let the data iterator point to the correct buffer. + dest_data_it = dest_data_variable.cbegin(); + } + else + { + // For the fixed size data, we can get the information about + // the buffer location on each cell directly from the + // sizes_fixed_cumulative vector. + offset = sizes_fixed_cumulative[callback_index]; + size = sizes_fixed_cumulative[callback_index + 1] - offset; + data_increment = sizes_fixed_cumulative.back(); + + // Let the data iterator point to the correct buffer. + // Adjust buffer iterator to the offset of the callback + // function so that we only have to advance its position + // to the next cell after each iteration. + dest_data_it = dest_data_fixed.cbegin() + offset; + } // Iterate over all cells and unpack the transferred data. - // Adjust buffer iterator to the offset of the callback - // function and advance its position to the next cell - // after each iteration. 
auto quad_cell_rel_it = quad_cell_relations.begin(); - auto dest_fixed_it = dest_data_fixed.cbegin() + offset; + auto dest_sizes_it = dest_sizes_variable.cbegin(); for (; quad_cell_rel_it != quad_cell_relations.end(); - ++quad_cell_rel_it, dest_fixed_it += cumulative_sizes_fixed.back()) + ++quad_cell_rel_it, dest_data_it += data_increment) { const auto &cell_status = std::get<1>(*quad_cell_rel_it); const auto &dealii_cell = std::get<2>(*quad_cell_rel_it); + if (callback_variable_transfer) + { + // Update the increment according to the whole data size + // of the current cell. + data_increment = *dest_sizes_it; + + if (cell_status != + parallel::distributed::Triangulation::CELL_INVALID) + { + // Extract the corresponding values for offset and size from + // the cumulative sizes array stored in the fixed size buffer. + if (callback_index == 0) + offset = 0; + else + std::memcpy(&offset, + &(*(dest_sizes_cell_it - sizeof(unsigned int))), + sizeof(unsigned int)); + + std::memcpy(&size, + &(*dest_sizes_cell_it), + sizeof(unsigned int)); + + size -= offset; + + // Move the data iterator to the corresponding position + // of the callback function and adjust the increment + // accordingly. + dest_data_it += offset; + data_increment -= offset; + } + + // Advance data size iterators to the next cell. + dest_sizes_cell_it += sizes_fixed_cumulative.back(); + ++dest_sizes_it; + } + switch (cell_status) { case parallel::distributed::Triangulation::CELL_COARSEN: unpack_callback(dealii_cell, cell_status, - boost::make_iterator_range(dest_fixed_it, - dest_fixed_it + + boost::make_iterator_range(dest_data_it, + dest_data_it + size)); break; @@ -1461,8 +1762,8 @@ namespace parallel spacedim>::CELL_REFINE: unpack_callback(dealii_cell->parent(), cell_status, - boost::make_iterator_range(dest_fixed_it, - dest_fixed_it + + boost::make_iterator_range(dest_data_it, + dest_data_it + size)); break; @@ -1487,7 +1788,9 @@ namespace parallel * parallel_forest, const char *filename) const { - Assert(cumulative_sizes_fixed.size() > 0, + Assert(variable_size_data_stored == false, ExcNotImplemented()); + + Assert(sizes_fixed_cumulative.size() > 0, ExcMessage("No data has been packed!")); const std::string fname_fixed = std::string(filename) + "_fixed.data"; @@ -1528,8 +1831,8 @@ namespace parallel { ierr = MPI_File_write_at(fh, 0, - cumulative_sizes_fixed.data(), - cumulative_sizes_fixed.size(), + sizes_fixed_cumulative.data(), + sizes_fixed_cumulative.size(), MPI_UNSIGNED, MPI_STATUS_IGNORE); AssertThrowMPI(ierr); @@ -1537,12 +1840,12 @@ namespace parallel // Write packed data to file simultaneously. const unsigned int offset = - cumulative_sizes_fixed.size() * sizeof(unsigned int); + sizes_fixed_cumulative.size() * sizeof(unsigned int); ierr = MPI_File_write_at( fh, offset + parallel_forest->global_first_quadrant[myrank] * - cumulative_sizes_fixed.back(), // global position in file + sizes_fixed_cumulative.back(), // global position in file src_data_fixed.data(), src_data_fixed.size(), // local buffer MPI_CHAR, @@ -1594,27 +1897,27 @@ namespace parallel // Read cumulative sizes from file. // Since all processors need the same information about the data sizes, // let each of them gain it by reading from the same location in the file. 
- cumulative_sizes_fixed.resize(1 + n_attached_deserialize); + sizes_fixed_cumulative.resize(1 + n_attached_deserialize); ierr = MPI_File_read_at(fh, 0, - cumulative_sizes_fixed.data(), - cumulative_sizes_fixed.size(), + sizes_fixed_cumulative.data(), + sizes_fixed_cumulative.size(), MPI_UNSIGNED, MPI_STATUS_IGNORE); AssertThrowMPI(ierr); // Allocate sufficient memory. dest_data_fixed.resize(parallel_forest->local_num_quadrants * - cumulative_sizes_fixed.back()); + sizes_fixed_cumulative.back()); // Read packed data from file simultaneously. const unsigned int offset = - cumulative_sizes_fixed.size() * sizeof(unsigned int); + sizes_fixed_cumulative.size() * sizeof(unsigned int); ierr = MPI_File_read_at( fh, offset + parallel_forest->global_first_quadrant[myrank] * - cumulative_sizes_fixed.back(), // global position in file + sizes_fixed_cumulative.back(), // global position in file dest_data_fixed.data(), dest_data_fixed.size(), // local buffer MPI_CHAR, @@ -1631,25 +1934,31 @@ namespace parallel void Triangulation::DataTransfer::clear() { - cumulative_sizes_fixed.clear(); - cumulative_sizes_fixed.shrink_to_fit(); + variable_size_data_stored = false; + // free information about data sizes + sizes_fixed_cumulative.clear(); + sizes_fixed_cumulative.shrink_to_fit(); + + // free fixed size transfer data src_data_fixed.clear(); src_data_fixed.shrink_to_fit(); dest_data_fixed.clear(); dest_data_fixed.shrink_to_fit(); - // TODO: for variable transfer - // src_sizes_var.clear(); - // src_sizes_var.shrink_to_fit(); - // src_data_var.clear(); - // src_data_var.shrink_to_fit(); - // - // dest_sizes_var.clear(); - // dest_sizes_var.shrink_to_fit(); - // dest_data_var.clear(); - // dest_data_var.shrink_to_fit(); + // free variable size transfer data + src_sizes_variable.clear(); + src_sizes_variable.shrink_to_fit(); + + src_data_variable.clear(); + src_data_variable.shrink_to_fit(); + + dest_sizes_variable.clear(); + dest_sizes_variable.shrink_to_fit(); + + dest_data_variable.clear(); + dest_data_variable.shrink_to_fit(); } @@ -1679,7 +1988,7 @@ namespace parallel , triangulation_has_content(false) , connectivity(nullptr) , parallel_forest(nullptr) - , cell_attached_data({0, 0, {}}) + , cell_attached_data({0, 0, {}, {}}) , data_transfer(mpi_communicator) { parallel_ghost = nullptr; @@ -2108,6 +2417,9 @@ namespace parallel { triangulation_has_content = false; + cell_attached_data = {0, 0, {}, {}}; + data_transfer.clear(); + if (parallel_ghost != nullptr) { dealii::internal::p4est::functions::ghost_destroy( @@ -2221,7 +2533,7 @@ namespace parallel f << "version nproc n_attached_objs n_coarse_cells" << std::endl << 3 << " " << Utilities::MPI::n_mpi_processes(this->mpi_communicator) << " " - << cell_attached_data.pack_callbacks.size() << " " + << cell_attached_data.pack_callbacks_fixed.size() << " " << this->n_cells(0) << std::endl; } @@ -2243,8 +2555,10 @@ namespace parallel this); // pack attached data first - tria->data_transfer.pack_data(local_quadrant_cell_relations, - cell_attached_data.pack_callbacks); + tria->data_transfer.pack_data( + local_quadrant_cell_relations, + cell_attached_data.pack_callbacks_fixed, + cell_attached_data.pack_callbacks_variable); // then store buffers in file tria->data_transfer.save(parallel_forest, filename); @@ -2266,7 +2580,7 @@ namespace parallel this); tria->cell_attached_data.n_attached_data_sets = 0; - tria->cell_attached_data.pack_callbacks.clear(); + tria->cell_attached_data.pack_callbacks_fixed.clear(); } } @@ -3426,7 +3740,8 @@ namespace parallel if 
(cell_attached_data.n_attached_data_sets > 0) { data_transfer.pack_data(local_quadrant_cell_relations, - cell_attached_data.pack_callbacks); + cell_attached_data.pack_callbacks_fixed, + cell_attached_data.pack_callbacks_variable); // before repartitioning the p4est object, save a copy of the // positions of the global first quadrants for data transfer later @@ -3600,7 +3915,8 @@ namespace parallel if (cell_attached_data.n_attached_data_sets > 0) { data_transfer.pack_data(local_quadrant_cell_relations, - cell_attached_data.pack_callbacks); + cell_attached_data.pack_callbacks_fixed, + cell_attached_data.pack_callbacks_variable); // before repartitioning the p4est object, save a copy of the // positions of quadrant for data transfer later @@ -3845,14 +4161,28 @@ namespace parallel unsigned int Triangulation::register_data_attach( const std::function(const cell_iterator &, - const CellStatus)> &pack_callback) + const CellStatus)> &pack_callback, + const bool returns_variable_size_data) { - // add new callback_set - cell_attached_data.pack_callbacks.push_back(pack_callback); + unsigned int handle = numbers::invalid_unsigned_int; + + // Add new callback function to the corresponding register. + // Encode handles according to returns_variable_size_data. + if (returns_variable_size_data) + { + handle = 2 * cell_attached_data.pack_callbacks_variable.size(); + cell_attached_data.pack_callbacks_variable.push_back(pack_callback); + } + else + { + handle = 2 * cell_attached_data.pack_callbacks_fixed.size() + 1; + cell_attached_data.pack_callbacks_fixed.push_back(pack_callback); + } + + // Increase overall counter. ++cell_attached_data.n_attached_data_sets; - // return the position of the callback in the register vector as a handle - return cell_attached_data.pack_callbacks.size() - 1; + return handle; } @@ -3867,9 +4197,6 @@ namespace parallel const boost::iterator_range::const_iterator> &)> &unpack_callback) { -# ifdef DEBUG - Assert(handle < cell_attached_data.pack_callbacks.size(), - ExcMessage("Invalid handle.")); Assert(cell_attached_data.n_attached_data_sets > 0, ExcMessage("The notify_ready_to_unpack() has already been called " "once for each registered callback.")); @@ -3880,13 +4207,33 @@ namespace parallel static_cast(parallel_forest->local_num_quadrants), ExcInternalError()); - // deregister pack_callback function +# ifdef DEBUG + // check validity of handle and deregister pack_callback function. // first reset with invalid entries to preserve ambiguity of - // handles, then free memory when all were unpacked (see below) - // consider_cell_invalid is irrelevant in this context - Assert(cell_attached_data.pack_callbacks[handle] != nullptr, - ExcInternalError()); - cell_attached_data.pack_callbacks[handle] = nullptr; + // handles, then free memory when all were unpacked (see below). 
+ const unsigned int callback_index = handle / 2; + if (handle % 2 == 0) + { + Assert(callback_index < + cell_attached_data.pack_callbacks_variable.size(), + ExcMessage("Invalid handle.")); + + Assert(cell_attached_data.pack_callbacks_variable[callback_index] != + nullptr, + ExcInternalError()); + cell_attached_data.pack_callbacks_variable[callback_index] = nullptr; + } + else + { + Assert(callback_index < + cell_attached_data.pack_callbacks_fixed.size(), + ExcMessage("Invalid handle.")); + + Assert(cell_attached_data.pack_callbacks_fixed[callback_index] != + nullptr, + ExcInternalError()); + cell_attached_data.pack_callbacks_fixed[callback_index] = nullptr; + } # endif // perform unpacking @@ -3909,7 +4256,8 @@ namespace parallel cell_attached_data.n_attached_deserialize == 0) { // everybody got their data, time for cleanup! - cell_attached_data.pack_callbacks.clear(); + cell_attached_data.pack_callbacks_fixed.clear(); + cell_attached_data.pack_callbacks_variable.clear(); data_transfer.clear(); // reset all cell_status entries after coarsening/refinement @@ -4211,7 +4559,9 @@ namespace parallel MemoryConsumption::memory_consumption(parallel_forest) + MemoryConsumption::memory_consumption( cell_attached_data.n_attached_data_sets) + - // MemoryConsumption::memory_consumption(cell_attached_data.pack_callbacks) + // MemoryConsumption::memory_consumption(cell_attached_data.pack_callbacks_fixed) + // + + // MemoryConsumption::memory_consumption(cell_attached_data.pack_callbacks_variable) // + // TODO[TH]: how? MemoryConsumption::memory_consumption( @@ -4456,7 +4806,8 @@ namespace parallel const std::function( const typename dealii::Triangulation<1, spacedim>::cell_iterator &, const typename dealii::Triangulation<1, spacedim>::CellStatus)> - & /*pack_callback*/) + & /*pack_callback*/, + const bool /*returns_variable_size_data*/) { Assert(false, ExcNotImplemented()); return 0; diff --git a/source/particles/particle_handler.cc b/source/particles/particle_handler.cc index 2b4131ef04..5945ae07bc 100644 --- a/source/particles/particle_handler.cc +++ b/source/particles/particle_handler.cc @@ -1046,8 +1046,8 @@ namespace Particles std::placeholders::_1, std::placeholders::_2); - handle = - non_const_triangulation->register_data_attach(callback_function); + handle = non_const_triangulation->register_data_attach( + callback_function, /*returns_variable_size_data=*/false); } } @@ -1082,8 +1082,8 @@ namespace Particles std::placeholders::_1, std::placeholders::_2); - handle = - non_const_triangulation->register_data_attach(callback_function); + handle = non_const_triangulation->register_data_attach( + callback_function, /*returns_variable_size_data=*/false); } // Check if something was stored and load it diff --git a/tests/mpi/attach_data_01.cc b/tests/mpi/attach_data_01.cc index 9c556459bd..b79920add5 100644 --- a/tests/mpi/attach_data_01.cc +++ b/tests/mpi/attach_data_01.cc @@ -137,7 +137,9 @@ test() } } - unsigned int handle = tr.register_data_attach(pack_function); + unsigned int handle = + tr.register_data_attach(pack_function, + /*returns_variable_size_data=*/false); deallog << "handle=" << handle << std::endl; tr.execute_coarsening_and_refinement(); diff --git a/tests/mpi/attach_data_01.mpirun=1.output b/tests/mpi/attach_data_01.with_p4est=true.mpirun=1.output similarity index 99% rename from tests/mpi/attach_data_01.mpirun=1.output rename to tests/mpi/attach_data_01.with_p4est=true.mpirun=1.output index 85bce57b19..30434789b1 100644 --- a/tests/mpi/attach_data_01.mpirun=1.output +++ 
b/tests/mpi/attach_data_01.with_p4est=true.mpirun=1.output @@ -16,7 +16,7 @@ DEAL:0::myid=0 cellid=3_1:0 coarsening DEAL:0::myid=0 cellid=3_1:1 coarsening DEAL:0::myid=0 cellid=3_1:2 coarsening DEAL:0::myid=0 cellid=3_1:3 coarsening -DEAL:0::handle=0 +DEAL:0::handle=1 DEAL:0::packing cell 0_1:0 with data=0 status=REFINE DEAL:0::packing cell 0_1:1 with data=1 status=PERSIST DEAL:0::packing cell 0_1:2 with data=2 status=PERSIST diff --git a/tests/mpi/attach_data_01.mpirun=3.output b/tests/mpi/attach_data_01.with_p4est=true.mpirun=3.output similarity index 97% rename from tests/mpi/attach_data_01.mpirun=3.output rename to tests/mpi/attach_data_01.with_p4est=true.mpirun=3.output index 791e44f0f4..3fa972a4bc 100644 --- a/tests/mpi/attach_data_01.mpirun=3.output +++ b/tests/mpi/attach_data_01.with_p4est=true.mpirun=3.output @@ -4,7 +4,7 @@ DEAL:0::myid=0 cellid=0_1:0 refining DEAL:0::myid=0 cellid=0_1:1 DEAL:0::myid=0 cellid=0_1:2 DEAL:0::myid=0 cellid=0_1:3 -DEAL:0::handle=0 +DEAL:0::handle=1 DEAL:0::packing cell 0_1:0 with data=0 status=REFINE DEAL:0::packing cell 0_1:1 with data=1 status=PERSIST DEAL:0::packing cell 0_1:2 with data=2 status=PERSIST @@ -24,7 +24,7 @@ DEAL:1::myid=1 cellid=2_1:0 DEAL:1::myid=1 cellid=2_1:1 DEAL:1::myid=1 cellid=2_1:2 DEAL:1::myid=1 cellid=2_1:3 -DEAL:1::handle=0 +DEAL:1::handle=1 DEAL:1::packing cell 1_1:0 with data=0 status=PERSIST DEAL:1::packing cell 1_1:1 with data=1 status=PERSIST DEAL:1::packing cell 1_1:2 with data=2 status=PERSIST @@ -49,7 +49,7 @@ DEAL:2::myid=2 cellid=3_1:0 coarsening DEAL:2::myid=2 cellid=3_1:1 coarsening DEAL:2::myid=2 cellid=3_1:2 coarsening DEAL:2::myid=2 cellid=3_1:3 coarsening -DEAL:2::handle=0 +DEAL:2::handle=1 DEAL:2::packing cell 3_0: with data=0 status=COARSEN DEAL:2::cells after: 16 DEAL:2::unpacking cell 2_1:0 with data=4 status=PERSIST diff --git a/tests/mpi/attach_data_01.mpirun=8.output b/tests/mpi/attach_data_01.with_p4est=true.mpirun=8.output similarity index 94% rename from tests/mpi/attach_data_01.mpirun=8.output rename to tests/mpi/attach_data_01.with_p4est=true.mpirun=8.output index 620362e819..195cf8b8e2 100644 --- a/tests/mpi/attach_data_01.mpirun=8.output +++ b/tests/mpi/attach_data_01.with_p4est=true.mpirun=8.output @@ -4,7 +4,7 @@ DEAL:0::myid=0 cellid=0_1:0 refining DEAL:0::myid=0 cellid=0_1:1 DEAL:0::myid=0 cellid=0_1:2 DEAL:0::myid=0 cellid=0_1:3 -DEAL:0::handle=0 +DEAL:0::handle=1 DEAL:0::packing cell 0_1:0 with data=0 status=REFINE DEAL:0::packing cell 0_1:1 with data=1 status=PERSIST DEAL:0::packing cell 0_1:2 with data=2 status=PERSIST @@ -15,7 +15,7 @@ DEAL:0::Checksum: 2822439380 DEAL:0::OK DEAL:1::cells before: 16 -DEAL:1::handle=0 +DEAL:1::handle=1 DEAL:1::cells after: 16 DEAL:1::Checksum: 0 DEAL:1::OK @@ -26,7 +26,7 @@ DEAL:2::myid=2 cellid=1_1:0 DEAL:2::myid=2 cellid=1_1:1 DEAL:2::myid=2 cellid=1_1:2 DEAL:2::myid=2 cellid=1_1:3 -DEAL:2::handle=0 +DEAL:2::handle=1 DEAL:2::packing cell 1_1:0 with data=0 status=PERSIST DEAL:2::packing cell 1_1:1 with data=1 status=PERSIST DEAL:2::packing cell 1_1:2 with data=2 status=PERSIST @@ -39,7 +39,7 @@ DEAL:2::OK DEAL:3::cells before: 16 -DEAL:3::handle=0 +DEAL:3::handle=1 DEAL:3::cells after: 16 DEAL:3::unpacking cell 0_1:3 with data=3 status=PERSIST DEAL:3::Checksum: 0 @@ -51,7 +51,7 @@ DEAL:4::myid=4 cellid=2_1:0 DEAL:4::myid=4 cellid=2_1:1 DEAL:4::myid=4 cellid=2_1:2 DEAL:4::myid=4 cellid=2_1:3 -DEAL:4::handle=0 +DEAL:4::handle=1 DEAL:4::packing cell 2_1:0 with data=0 status=PERSIST DEAL:4::packing cell 2_1:1 with data=1 status=PERSIST DEAL:4::packing 
cell 2_1:2 with data=2 status=PERSIST @@ -66,7 +66,7 @@ DEAL:4::OK DEAL:5::cells before: 16 -DEAL:5::handle=0 +DEAL:5::handle=1 DEAL:5::cells after: 16 DEAL:5::Checksum: 0 DEAL:5::OK @@ -77,7 +77,7 @@ DEAL:6::myid=6 cellid=3_1:0 coarsening DEAL:6::myid=6 cellid=3_1:1 coarsening DEAL:6::myid=6 cellid=3_1:2 coarsening DEAL:6::myid=6 cellid=3_1:3 coarsening -DEAL:6::handle=0 +DEAL:6::handle=1 DEAL:6::packing cell 3_0: with data=0 status=COARSEN DEAL:6::cells after: 16 DEAL:6::unpacking cell 2_1:0 with data=0 status=PERSIST @@ -89,7 +89,7 @@ DEAL:6::OK DEAL:7::cells before: 16 -DEAL:7::handle=0 +DEAL:7::handle=1 DEAL:7::cells after: 16 DEAL:7::unpacking cell 3_0: with data=0 status=COARSEN DEAL:7::Checksum: 0 diff --git a/tests/mpi/attach_data_02.cc b/tests/mpi/attach_data_02.cc new file mode 100644 index 0000000000..55902e4a9e --- /dev/null +++ b/tests/mpi/attach_data_02.cc @@ -0,0 +1,206 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2009 - 2017 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE.md at +// the top level directory of deal.II. +// +// --------------------------------------------------------------------- + + + +// check register_data_attach and notify_ready_to_unpack +// for variable size transfer + +#include +#include + +#include + +#include +#include +#include +#include +#include + +#include "../tests.h" + + + +template +std::vector +pack_function( + const typename parallel::distributed::Triangulation::cell_iterator + &cell, + const typename parallel::distributed::Triangulation::CellStatus + status) +{ + static int some_number = 1; + std::vector some_vector(some_number); + for (unsigned int i = 0; i < some_number; ++i) + some_vector[i] = i; + + std::vector buffer; + buffer.reserve(some_number * sizeof(unsigned int)); + for (auto vector_it = some_vector.cbegin(); vector_it != some_vector.cend(); + ++vector_it) + { + Utilities::pack(*vector_it, buffer, /*allow_compression=*/false); + } + + deallog << "packing cell " << cell->id() + << " with data size =" << buffer.size() << " accumulated data=" + << std::accumulate(some_vector.begin(), some_vector.end(), 0) + << " status="; + if (status == parallel::distributed::Triangulation::CELL_PERSIST) + deallog << "PERSIST"; + else if (status == + parallel::distributed::Triangulation::CELL_REFINE) + deallog << "REFINE"; + else if (status == + parallel::distributed::Triangulation::CELL_COARSEN) + deallog << "COARSEN"; + deallog << std::endl; + + if (status == parallel::distributed::Triangulation::CELL_COARSEN) + { + Assert(cell->has_children(), ExcInternalError()); + } + else + { + Assert(!cell->has_children(), ExcInternalError()); + } + + ++some_number; + return buffer; +} + +template +void +unpack_function( + const typename parallel::distributed::Triangulation::cell_iterator + &cell, + const typename parallel::distributed::Triangulation::CellStatus + status, + const boost::iterator_range::const_iterator> &data_range) +{ + const unsigned int data_in_bytes = + std::distance(data_range.begin(), data_range.end()); + + std::vector intdatavector(data_in_bytes / sizeof(unsigned int)); + + auto vector_it = intdatavector.begin(); 
+ auto data_it = data_range.begin(); + for (; data_it != data_range.end(); + ++vector_it, data_it += sizeof(unsigned int)) + { + *vector_it = + Utilities::unpack(data_it, + data_it + sizeof(unsigned int), + /*allow_compression=*/false); + } + + deallog << "unpacking cell " << cell->id() << " with data size=" + << std::distance(data_range.begin(), data_range.end()) + << " accumulated data=" + << std::accumulate(intdatavector.begin(), intdatavector.end(), 0) + << " status="; + if (status == parallel::distributed::Triangulation::CELL_PERSIST) + deallog << "PERSIST"; + else if (status == + parallel::distributed::Triangulation::CELL_REFINE) + deallog << "REFINE"; + else if (status == + parallel::distributed::Triangulation::CELL_COARSEN) + deallog << "COARSEN"; + deallog << std::endl; + + if (status == parallel::distributed::Triangulation::CELL_REFINE) + { + Assert(cell->has_children(), ExcInternalError()); + } + else + { + Assert(!cell->has_children(), ExcInternalError()); + } +} + + +template +void +test() +{ + unsigned int myid = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); + unsigned int numprocs = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); + + if (true) + { + parallel::distributed::Triangulation tr(MPI_COMM_WORLD); + + GridGenerator::subdivided_hyper_cube(tr, 2); + tr.refine_global(1); + deallog << "cells before: " << tr.n_global_active_cells() << std::endl; + + typename Triangulation::active_cell_iterator cell; + + for (cell = tr.begin_active(); cell != tr.end(); ++cell) + { + if (cell->is_locally_owned()) + { + if (cell->id().to_string() == "0_1:0") + cell->set_refine_flag(); + else if (cell->parent()->id().to_string() == "3_0:") + cell->set_coarsen_flag(); + + deallog << "myid=" << myid << " cellid=" << cell->id(); + if (cell->coarsen_flag_set()) + deallog << " coarsening" << std::endl; + else if (cell->refine_flag_set()) + deallog << " refining" << std::endl; + else + deallog << std::endl; + } + } + + unsigned int handle = + tr.register_data_attach(pack_function, + /*packs_variable_size_data=*/true); + + deallog << "handle=" << handle << std::endl; + tr.execute_coarsening_and_refinement(); + + deallog << "cells after: " << tr.n_global_active_cells() << std::endl; + + /* + for (cell = tr.begin_active(); + cell != tr.end(); + ++cell) + { + if (cell->is_locally_owned()) + deallog << "myid=" << myid << " cellid=" << cell->id() << std::endl; + }*/ + + tr.notify_ready_to_unpack(handle, unpack_function); + + const unsigned int checksum = tr.get_checksum(); + deallog << "Checksum: " << checksum << std::endl; + } + + deallog << "OK" << std::endl; +} + + +int +main(int argc, char *argv[]) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1); + MPILogInitAll log; + + test<2>(); +} diff --git a/tests/mpi/attach_data_02.with_p4est=true.mpirun=1.output b/tests/mpi/attach_data_02.with_p4est=true.mpirun=1.output new file mode 100644 index 0000000000..1300045ff9 --- /dev/null +++ b/tests/mpi/attach_data_02.with_p4est=true.mpirun=1.output @@ -0,0 +1,48 @@ + +DEAL:0::cells before: 16 +DEAL:0::myid=0 cellid=0_1:0 refining +DEAL:0::myid=0 cellid=0_1:1 +DEAL:0::myid=0 cellid=0_1:2 +DEAL:0::myid=0 cellid=0_1:3 +DEAL:0::myid=0 cellid=1_1:0 +DEAL:0::myid=0 cellid=1_1:1 +DEAL:0::myid=0 cellid=1_1:2 +DEAL:0::myid=0 cellid=1_1:3 +DEAL:0::myid=0 cellid=2_1:0 +DEAL:0::myid=0 cellid=2_1:1 +DEAL:0::myid=0 cellid=2_1:2 +DEAL:0::myid=0 cellid=2_1:3 +DEAL:0::myid=0 cellid=3_1:0 coarsening +DEAL:0::myid=0 cellid=3_1:1 coarsening +DEAL:0::myid=0 cellid=3_1:2 coarsening +DEAL:0::myid=0 
cellid=3_1:3 coarsening +DEAL:0::handle=0 +DEAL:0::packing cell 0_1:0 with data size=4 accumulated data=0 status=REFINE +DEAL:0::packing cell 0_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:0::packing cell 0_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:0::packing cell 0_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:0::packing cell 1_1:0 with data size=20 accumulated data=10 status=PERSIST +DEAL:0::packing cell 1_1:1 with data size=24 accumulated data=15 status=PERSIST +DEAL:0::packing cell 1_1:2 with data size=28 accumulated data=21 status=PERSIST +DEAL:0::packing cell 1_1:3 with data size=32 accumulated data=28 status=PERSIST +DEAL:0::packing cell 2_1:0 with data size=36 accumulated data=36 status=PERSIST +DEAL:0::packing cell 2_1:1 with data size=40 accumulated data=45 status=PERSIST +DEAL:0::packing cell 2_1:2 with data size=44 accumulated data=55 status=PERSIST +DEAL:0::packing cell 2_1:3 with data size=48 accumulated data=66 status=PERSIST +DEAL:0::packing cell 3_0: with data size=52 accumulated data=78 status=COARSEN +DEAL:0::cells after: 16 +DEAL:0::unpacking cell 0_1:0 with data size=4 accumulated data=0 status=REFINE +DEAL:0::unpacking cell 0_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:0::unpacking cell 0_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:0::unpacking cell 0_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:0::unpacking cell 1_1:0 with data size=20 accumulated data=10 status=PERSIST +DEAL:0::unpacking cell 1_1:1 with data size=24 accumulated data=15 status=PERSIST +DEAL:0::unpacking cell 1_1:2 with data size=28 accumulated data=21 status=PERSIST +DEAL:0::unpacking cell 1_1:3 with data size=32 accumulated data=28 status=PERSIST +DEAL:0::unpacking cell 2_1:0 with data size=36 accumulated data=36 status=PERSIST +DEAL:0::unpacking cell 2_1:1 with data size=40 accumulated data=45 status=PERSIST +DEAL:0::unpacking cell 2_1:2 with data size=44 accumulated data=55 status=PERSIST +DEAL:0::unpacking cell 2_1:3 with data size=48 accumulated data=66 status=PERSIST +DEAL:0::unpacking cell 3_0: with data size=52 accumulated data=78 status=COARSEN +DEAL:0::Checksum: 2822439380 +DEAL:0::OK diff --git a/tests/mpi/attach_data_02.with_p4est=true.mpirun=3.output b/tests/mpi/attach_data_02.with_p4est=true.mpirun=3.output new file mode 100644 index 0000000000..7c2adabb9f --- /dev/null +++ b/tests/mpi/attach_data_02.with_p4est=true.mpirun=3.output @@ -0,0 +1,62 @@ + +DEAL:0::cells before: 16 +DEAL:0::myid=0 cellid=0_1:0 refining +DEAL:0::myid=0 cellid=0_1:1 +DEAL:0::myid=0 cellid=0_1:2 +DEAL:0::myid=0 cellid=0_1:3 +DEAL:0::handle=0 +DEAL:0::packing cell 0_1:0 with data size =4 accumulated data=0 status=REFINE +DEAL:0::packing cell 0_1:1 with data size =8 accumulated data=1 status=PERSIST +DEAL:0::packing cell 0_1:2 with data size =12 accumulated data=3 status=PERSIST +DEAL:0::packing cell 0_1:3 with data size =16 accumulated data=6 status=PERSIST +DEAL:0::cells after: 16 +DEAL:0::unpacking cell 0_1:0 with data size=4 accumulated data=0 status=REFINE +DEAL:0::unpacking cell 0_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:0::Checksum: 2822439380 +DEAL:0::OK + +DEAL:1::cells before: 16 +DEAL:1::myid=1 cellid=1_1:0 +DEAL:1::myid=1 cellid=1_1:1 +DEAL:1::myid=1 cellid=1_1:2 +DEAL:1::myid=1 cellid=1_1:3 +DEAL:1::myid=1 cellid=2_1:0 +DEAL:1::myid=1 cellid=2_1:1 +DEAL:1::myid=1 cellid=2_1:2 +DEAL:1::myid=1 cellid=2_1:3 +DEAL:1::handle=0 +DEAL:1::packing cell 1_1:0 with data size =4 accumulated 
data=0 status=PERSIST +DEAL:1::packing cell 1_1:1 with data size =8 accumulated data=1 status=PERSIST +DEAL:1::packing cell 1_1:2 with data size =12 accumulated data=3 status=PERSIST +DEAL:1::packing cell 1_1:3 with data size =16 accumulated data=6 status=PERSIST +DEAL:1::packing cell 2_1:0 with data size =20 accumulated data=10 status=PERSIST +DEAL:1::packing cell 2_1:1 with data size =24 accumulated data=15 status=PERSIST +DEAL:1::packing cell 2_1:2 with data size =28 accumulated data=21 status=PERSIST +DEAL:1::packing cell 2_1:3 with data size =32 accumulated data=28 status=PERSIST +DEAL:1::cells after: 16 +DEAL:1::unpacking cell 0_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:1::unpacking cell 0_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:1::unpacking cell 1_1:0 with data size=4 accumulated data=0 status=PERSIST +DEAL:1::unpacking cell 1_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:1::unpacking cell 1_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:1::unpacking cell 1_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:1::Checksum: 0 +DEAL:1::OK + + +DEAL:2::cells before: 16 +DEAL:2::myid=2 cellid=3_1:0 coarsening +DEAL:2::myid=2 cellid=3_1:1 coarsening +DEAL:2::myid=2 cellid=3_1:2 coarsening +DEAL:2::myid=2 cellid=3_1:3 coarsening +DEAL:2::handle=0 +DEAL:2::packing cell 3_0: with data size =4 accumulated data=0 status=COARSEN +DEAL:2::cells after: 16 +DEAL:2::unpacking cell 2_1:0 with data size=20 accumulated data=10 status=PERSIST +DEAL:2::unpacking cell 2_1:1 with data size=24 accumulated data=15 status=PERSIST +DEAL:2::unpacking cell 2_1:2 with data size=28 accumulated data=21 status=PERSIST +DEAL:2::unpacking cell 2_1:3 with data size=32 accumulated data=28 status=PERSIST +DEAL:2::unpacking cell 3_0: with data size=4 accumulated data=0 status=COARSEN +DEAL:2::Checksum: 0 +DEAL:2::OK + diff --git a/tests/mpi/attach_data_02.with_p4est=true.mpirun=8.output b/tests/mpi/attach_data_02.with_p4est=true.mpirun=8.output new file mode 100644 index 0000000000..1a36fdec69 --- /dev/null +++ b/tests/mpi/attach_data_02.with_p4est=true.mpirun=8.output @@ -0,0 +1,97 @@ + +DEAL:0::cells before: 16 +DEAL:0::myid=0 cellid=0_1:0 refining +DEAL:0::myid=0 cellid=0_1:1 +DEAL:0::myid=0 cellid=0_1:2 +DEAL:0::myid=0 cellid=0_1:3 +DEAL:0::handle=0 +DEAL:0::packing cell 0_1:0 with data size =4 accumulated data=0 status=REFINE +DEAL:0::packing cell 0_1:1 with data size =8 accumulated data=1 status=PERSIST +DEAL:0::packing cell 0_1:2 with data size =12 accumulated data=3 status=PERSIST +DEAL:0::packing cell 0_1:3 with data size =16 accumulated data=6 status=PERSIST +DEAL:0::cells after: 16 +DEAL:0::unpacking cell 0_1:0 with data size=4 accumulated data=0 status=REFINE +DEAL:0::Checksum: 2822439380 +DEAL:0::OK + +DEAL:1::cells before: 16 +DEAL:1::handle=0 +DEAL:1::cells after: 16 +DEAL:1::Checksum: 0 +DEAL:1::OK + + +DEAL:2::cells before: 16 +DEAL:2::myid=2 cellid=1_1:0 +DEAL:2::myid=2 cellid=1_1:1 +DEAL:2::myid=2 cellid=1_1:2 +DEAL:2::myid=2 cellid=1_1:3 +DEAL:2::handle=0 +DEAL:2::packing cell 1_1:0 with data size =4 accumulated data=0 status=PERSIST +DEAL:2::packing cell 1_1:1 with data size =8 accumulated data=1 status=PERSIST +DEAL:2::packing cell 1_1:2 with data size =12 accumulated data=3 status=PERSIST +DEAL:2::packing cell 1_1:3 with data size =16 accumulated data=6 status=PERSIST +DEAL:2::cells after: 16 +DEAL:2::unpacking cell 0_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:2::unpacking cell 0_1:2 with data size=12 
accumulated data=3 status=PERSIST +DEAL:2::Checksum: 0 +DEAL:2::OK + + +DEAL:3::cells before: 16 +DEAL:3::handle=0 +DEAL:3::cells after: 16 +DEAL:3::unpacking cell 0_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:3::Checksum: 0 +DEAL:3::OK + + +DEAL:4::cells before: 16 +DEAL:4::myid=4 cellid=2_1:0 +DEAL:4::myid=4 cellid=2_1:1 +DEAL:4::myid=4 cellid=2_1:2 +DEAL:4::myid=4 cellid=2_1:3 +DEAL:4::handle=0 +DEAL:4::packing cell 2_1:0 with data size =4 accumulated data=0 status=PERSIST +DEAL:4::packing cell 2_1:1 with data size =8 accumulated data=1 status=PERSIST +DEAL:4::packing cell 2_1:2 with data size =12 accumulated data=3 status=PERSIST +DEAL:4::packing cell 2_1:3 with data size =16 accumulated data=6 status=PERSIST +DEAL:4::cells after: 16 +DEAL:4::unpacking cell 1_1:0 with data size=4 accumulated data=0 status=PERSIST +DEAL:4::unpacking cell 1_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:4::unpacking cell 1_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:4::unpacking cell 1_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:4::Checksum: 0 +DEAL:4::OK + + +DEAL:5::cells before: 16 +DEAL:5::handle=0 +DEAL:5::cells after: 16 +DEAL:5::Checksum: 0 +DEAL:5::OK + + +DEAL:6::cells before: 16 +DEAL:6::myid=6 cellid=3_1:0 coarsening +DEAL:6::myid=6 cellid=3_1:1 coarsening +DEAL:6::myid=6 cellid=3_1:2 coarsening +DEAL:6::myid=6 cellid=3_1:3 coarsening +DEAL:6::handle=0 +DEAL:6::packing cell 3_0: with data size =4 accumulated data=0 status=COARSEN +DEAL:6::cells after: 16 +DEAL:6::unpacking cell 2_1:0 with data size=4 accumulated data=0 status=PERSIST +DEAL:6::unpacking cell 2_1:1 with data size=8 accumulated data=1 status=PERSIST +DEAL:6::unpacking cell 2_1:2 with data size=12 accumulated data=3 status=PERSIST +DEAL:6::unpacking cell 2_1:3 with data size=16 accumulated data=6 status=PERSIST +DEAL:6::Checksum: 0 +DEAL:6::OK + + +DEAL:7::cells before: 16 +DEAL:7::handle=0 +DEAL:7::cells after: 16 +DEAL:7::unpacking cell 3_0: with data size=4 accumulated data=0 status=COARSEN +DEAL:7::Checksum: 0 +DEAL:7::OK + diff --git a/tests/mpi/repartition_02.cc b/tests/mpi/repartition_02.cc index ce953cc88e..d0fc26cccf 100644 --- a/tests/mpi/repartition_02.cc +++ b/tests/mpi/repartition_02.cc @@ -121,7 +121,9 @@ test() deallog << "* global refine:" << std::endl; - unsigned int handle = tr.register_data_attach(pack_function); + unsigned int handle = + tr.register_data_attach(pack_function, + /*returns_variable_size_data=*/false); tr.refine_global(1); @@ -136,7 +138,8 @@ test() deallog << "* repartition:" << std::endl; - handle = tr.register_data_attach(pack_function); + handle = tr.register_data_attach(pack_function, + /*returns_variable_size_data=*/false); tr.repartition();
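To make the new handle convention explicit: register_data_attach() now encodes both the callback register (fixed or variable size) and the position within that register into the returned handle, and notify_ready_to_unpack() decodes it again. The following standalone sketch (plain C++, not deal.II code) mirrors the arithmetic used in this patch:

    #include <cassert>

    // Variable size callbacks receive even handles (2 * index),
    // fixed size callbacks receive odd handles (2 * index + 1),
    // mirroring register_data_attach() in this patch.
    unsigned int encode_handle(const unsigned int index,
                               const bool         returns_variable_size_data)
    {
      return returns_variable_size_data ? 2 * index : 2 * index + 1;
    }

    // notify_ready_to_unpack() recovers both pieces of information.
    void decode_handle(const unsigned int handle,
                       bool &             is_variable,
                       unsigned int &     index)
    {
      is_variable = (handle % 2 == 0);
      index       = handle / 2;
    }

    int main()
    {
      bool         is_variable = false;
      unsigned int index       = 0;

      // First registered variable size callback -> handle 0,
      // as seen in the new attach_data_02 output files.
      decode_handle(encode_handle(0, true), is_variable, index);
      assert(is_variable && index == 0);

      // First registered fixed size callback -> handle 1,
      // which is why the attach_data_01 outputs change from 0 to 1.
      decode_handle(encode_handle(0, false), is_variable, index);
      assert(!is_variable && index == 0);

      return 0;
    }

The even/odd split lets the two callback registers grow independently while a single integer handle still identifies both the register and the slot within it.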