/// ProcessGrid::ProcessGrid
process_grid_constructor,
+ /// 200 tags for Partitioner::import_from_ghosted_array_start
+ partitioner_import_start,
+ partitioner_import_end = partitioner_import_start + 200,
+
+ /// 200 tags for Partitioner::export_to_ghosted_array_start
+ partitioner_export_start,
+ partitioner_export_end = partitioner_export_start + 200,
+
};
} // namespace Tags
} // namespace internal
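As a minimal sketch of how these reserved ranges are consumed (the Tags enumerators are the ones defined above; the free function is hypothetical and only for illustration), a communication channel is turned into a concrete MPI tag by offsetting into the range reserved for the respective operation:

// Hypothetical helper, not part of the library: map a user-provided
// channel to an MPI tag inside the range reserved for
// export_to_ghosted_array_start() above.
unsigned int
tag_for_export(const unsigned int communication_channel)
{
  // channels 0..199 are valid, matching the 200 reserved tags
  AssertIndexRange(communication_channel, 200);
  const unsigned int mpi_tag =
    Utilities::MPI::internal::Tags::partitioner_export_start +
    communication_channel;
  Assert(mpi_tag <= Utilities::MPI::internal::Tags::partitioner_export_end,
         ExcInternalError());
  return mpi_tag;
}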
* @param communication_channel Sets an offset to the MPI_Isend and
* MPI_Irecv calls that avoids interference with other ongoing
* export_to_ghosted_array_start() calls on different entries. Typically
- * handled within the blocks of a block vector.
+ * handled within the blocks of a block vector. Any value less than 200
+ * is a valid value.
*
* @param locally_owned_array The array of data from which the data is
* extracted and sent to the ghost entries on a remote processor.
* MPI_Irecv calls that avoids interference with other ongoing
* import_from_ghosted_array_start() calls on different
* entries. Typically handled within the blocks of a block vector.
+ * Any value less than 200 is a valid value.
*
* @param ghost_array The array of ghost data that is sent to a remote
* owner of the respective index in a vector. Its size must either be
std::vector<MPI_Request> & requests) const
{
AssertDimension(temporary_storage.size(), n_import_indices());
+ AssertIndexRange(communication_channel, 200);
Assert(ghost_array.size() == n_ghost_indices() ||
ghost_array.size() == n_ghost_indices_in_larger_set,
ExcGhostIndexArrayHasWrongSize(ghost_array.size(),
ExcMessage("Another operation seems to still be running. "
"Call update_ghost_values_finish() first."));
+ const unsigned int mpi_tag =
+ Utilities::MPI::internal::Tags::partitioner_export_start +
+ communication_channel;
+ Assert(mpi_tag <= Utilities::MPI::internal::Tags::partitioner_export_end,
+ ExcInternalError());
+
// Need to send and receive the data. Use non-blocking communication,
// where it is usually less overhead to first initiate the receive and
// then actually send the data
ghost_targets_data[i].second * sizeof(Number),
MPI_BYTE,
ghost_targets_data[i].first,
- ghost_targets_data[i].first + communication_channel,
+ mpi_tag,
communicator,
&requests[i]);
AssertThrowMPI(ierr);
import_targets_data[i].second * sizeof(Number),
MPI_BYTE,
import_targets_data[i].first,
- my_pid + communication_channel,
+ mpi_tag,
communicator,
&requests[n_ghost_targets + i]);
AssertThrowMPI(ierr);
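Stripped of the Partitioner bookkeeping, the receive-before-send ordering the comment above refers to reduces to the following plain-MPI pattern; every name here (n_recvs, recv_buffers, and so on) is a placeholder, not a library symbol. Posting the receives first lets the MPI library deliver incoming messages straight into the user buffers instead of staging them internally:

// Sketch of the non-blocking exchange: post all receives first, then
// all sends, then wait on everything at once (typically in _finish()).
std::vector<MPI_Request> requests(n_recvs + n_sends);
for (unsigned int i = 0; i < n_recvs; ++i)
  {
    const int ierr = MPI_Irecv(recv_buffers[i], recv_sizes[i], MPI_BYTE,
                               recv_ranks[i], mpi_tag, communicator,
                               &requests[i]);
    AssertThrowMPI(ierr);
  }
for (unsigned int i = 0; i < n_sends; ++i)
  {
    const int ierr = MPI_Isend(send_buffers[i], send_sizes[i], MPI_BYTE,
                               send_ranks[i], mpi_tag, communicator,
                               &requests[n_recvs + i]);
    AssertThrowMPI(ierr);
  }
// ... later, in the corresponding _finish() call:
const int ierr =
  MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
AssertThrowMPI(ierr);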
std::vector<MPI_Request> & requests) const
{
AssertDimension(temporary_storage.size(), n_import_indices());
+ AssertIndexRange(communication_channel, 200);
Assert(ghost_array.size() == n_ghost_indices() ||
ghost_array.size() == n_ghost_indices_in_larger_set,
ExcGhostIndexArrayHasWrongSize(ghost_array.size(),
// where it is generally less overhead to first initiate the receive and
// then actually send the data
- // set channels in different range from update_ghost_values channels
- const unsigned int channel = communication_channel + 401;
+ const unsigned int mpi_tag =
+ Utilities::MPI::internal::Tags::partitioner_import_start +
+ communication_channel;
+ Assert(mpi_tag <= Utilities::MPI::internal::Tags::partitioner_import_end,
+ ExcInternalError());
requests.resize(n_import_targets + n_ghost_targets);
// initiate the receive operations
import_targets_data[i].second * sizeof(Number),
MPI_BYTE,
import_targets_data[i].first,
- import_targets_data[i].first + channel,
+ mpi_tag,
communicator,
&requests[i]);
AssertThrowMPI(ierr);
ghost_targets_data[i].second * sizeof(Number),
MPI_BYTE,
ghost_targets_data[i].first,
- this_mpi_process() + channel,
+ mpi_tag,
communicator,
&requests[n_import_targets + i]);
AssertThrowMPI(ierr);
// start all requests for all blocks before finishing the transfers as
// this saves repeated synchronizations. In order to avoid conflict
// with possible other ongoing communication requests (from
- // LA::distributed::Vector that supports unfinished requests), add an
- // arbitrary number 8273 to the communication tag
+ // LA::distributed::Vector that supports unfinished requests), add
+ // 100 to the communication channel (channels 0-99 remain available
+ // for plain, non-block vectors).
for (unsigned int block = start; block < end; ++block)
- this->block(block).compress_start(block + 8273 - start, operation);
+ this->block(block).compress_start(block - start + 100, operation);
for (unsigned int block = start; block < end; ++block)
this->block(block).compress_finish(operation);
}
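Putting the two pieces together, the channel layout for the vector classes works out as follows; the helper below is a hypothetical illustration, assuming a block vector with at most 100 blocks, and mirrors the arithmetic in compress_start() above:

// Hypothetical sketch: the MPI tag that block 'b' of a block vector
// ends up using during compress(). Plain vectors pass channels 0..99
// directly; blocks are shifted into 100..199, so the two never collide.
unsigned int
compress_tag_for_block(const unsigned int b)
{
  const unsigned int communication_channel = b + 100; // requires b < 100
  AssertIndexRange(communication_channel, 200);
  return Utilities::MPI::internal::Tags::partitioner_import_start +
         communication_channel;
}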
* In case this function is called for more than one vector before @p
* compress_finish() is invoked, it is mandatory to specify a unique
* communication channel to each such call, in order to avoid several
- * messages with the same ID that will corrupt this operation.
+ * messages with the same ID that will corrupt this operation. Any
+ * communication channel less than 100 is a valid value.
*/
void
compress_start(
* update_ghost_values_finish() is invoked, it is mandatory to specify a
* unique communication channel to each such call, in order to avoid
* several messages with the same ID that will corrupt this operation.
+ * Any communication channel less than 100 is a valid value.
*/
void
update_ghost_values_start(
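A usage sketch for the requirement stated above: when several vectors have unfinished compress operations at the same time, each call needs its own channel below 100 (the two vectors here are assumed to be initialized elsewhere):

// Sketch: two compress operations in flight at once. Distinct channels
// keep their MPI messages on distinct tags; reusing a channel here
// could mix up messages between the two exchanges.
LinearAlgebra::distributed::Vector<double> v0, v1; // assumed set up
v0.compress_start(0, VectorOperation::add); // channel 0
v1.compress_start(1, VectorOperation::add); // channel 1, must differ
v0.compress_finish(VectorOperation::add);
v1.compress_finish(VectorOperation::add);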
template <typename Number, typename MemorySpaceType>
void
Vector<Number, MemorySpaceType>::compress_start(
- const unsigned int counter,
+ const unsigned int communication_channel,
::dealii::VectorOperation::values operation)
{
- (void)counter;
+ (void)communication_channel;
(void)operation;
Assert(vector_is_ghosted == false,
ExcMessage("Cannot call compress() on a ghosted vector"));
{
partitioner->import_from_ghosted_array_start(
operation,
- counter,
+ communication_channel,
ArrayView<Number, MemorySpace::CUDA>(
data.values_dev.get() + partitioner->local_size(),
partitioner->n_ghost_indices()),
{
partitioner->import_from_ghosted_array_start(
operation,
- counter,
+ communication_channel,
ArrayView<Number, MemorySpace::Host>(
data.values.get() + partitioner->local_size(),
partitioner->n_ghost_indices()),
template <typename Number, typename MemorySpaceType>
void
Vector<Number, MemorySpaceType>::update_ghost_values_start(
- const unsigned int counter) const
+ const unsigned int communication_channel) const
{
#ifdef DEAL_II_WITH_MPI
// nothing to do when we neither have import nor ghost indices.
# if !(defined(DEAL_II_COMPILER_CUDA_AWARE) && \
defined(DEAL_II_MPI_WITH_CUDA_SUPPORT))
partitioner->export_to_ghosted_array_start<Number, MemorySpace::Host>(
- counter,
+ communication_channel,
ArrayView<const Number, MemorySpace::Host>(data.values.get(),
partitioner->local_size()),
ArrayView<Number, MemorySpace::Host>(import_data.values.get(),
update_ghost_values_requests);
# else
partitioner->export_to_ghosted_array_start<Number, MemorySpace::CUDA>(
- counter,
+ communication_channel,
ArrayView<const Number, MemorySpace::CUDA>(data.values_dev.get(),
partitioner->local_size()),
ArrayView<Number, MemorySpace::CUDA>(import_data.values_dev.get(),
# endif
#else
- (void)counter;
+ (void)communication_channel;
#endif
}
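The split into _start() and _finish() exists so that the ghost exchange can be overlapped with local work; a minimal usage sketch (do_local_work() is a hypothetical stand-in for computation that does not touch the ghost entries):

// Sketch: overlap the ghost-value exchange with independent local work.
v.update_ghost_values_start(0); // any channel below 100 is valid
do_local_work(v);               // hypothetical, must not read ghosts yet
v.update_ghost_values_finish(); // ghost entries of v are now up to date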