* The user has to ensure that all processes call this function with the same @p rank.
* The @p rank refers to a process of the MPI communicator used to create the process grid
* of the distributed matrix.
+ *
+ * @note This function requires MPI-3.0 support.
*/
void
copy_from(const LAPACKFullMatrix<NumberType> &matrix,
*
* @note This function should only be used for relatively small matrix
* dimensions. It is primarily intended for debugging purposes.
+ * This function requires MPI-3.0 support.
*/
void
copy_to(FullMatrix<NumberType> &matrix) const;
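// A minimal usage sketch (not part of this patch) for the gather/scatter pair
// above. The matrix size and block size are hypothetical; any valid process
// grid works, provided every process passes the same rank:
//
//   const unsigned int n = 64, nb = 32;
//   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
//     MPI_COMM_WORLD, n, n, nb, nb);
//   ScaLAPACKMatrix<double> dist(n, n, grid, nb, nb);
//
//   LAPACKFullMatrix<double> serial(n, n);
//   dist.copy_to(serial, 0);   // gather onto process 0; all processes pass 0
//   dist.copy_from(serial, 0); // scatter back from process 0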
ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
const unsigned int rank)
{
+# if DEAL_II_MPI_VERSION_GTE(3, 0)
if (n_rows * n_columns == 0)
return;
const unsigned int this_mpi_process(
Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
-# ifdef DEBUG
+#  ifdef DEBUG
Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
ExcMessage("All processes have to call routine with identical rank"));
Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
ExcMessage("All processes have to call routine with identical rank"));
-# endif
+#  endif
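// Note for readers: the max/min pair above is a cheap collective consistency
// check, since max(rank) == rank == min(rank) over the communicator can only
// hold if every process passed the same value for @p rank.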
// root process has to be active in the grid of A
if (this_mpi_process == rank)
MPI_Comm_free(&communicator_B);
state = LAPACKSupport::matrix;
+# else
+ (void)B;
+ (void)rank;
+ AssertThrow(false, ExcNotImplemented());
+# endif
}
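// The pattern above compiles the body only when the MPI library is new
// enough; otherwise the (void) casts silence unused-parameter warnings and
// AssertThrow aborts with ExcNotImplemented() at run time. As a sketch, a
// version check like this can be built from the standard MPI_VERSION and
// MPI_SUBVERSION macros in <mpi.h> (MPI_VERSION_GTE_SKETCH is a hypothetical
// name; deal.II's DEAL_II_MPI_VERSION_GTE may be defined differently):
//
//   #define MPI_VERSION_GTE_SKETCH(major, minor) \
//     ((MPI_VERSION * 100 + MPI_SUBVERSION) >=   \
//      ((major)*100 + (minor)))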
ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
const unsigned int rank) const
{
+# if DEAL_II_MPI_VERSION_GTE(3, 0)
if (n_rows * n_columns == 0)
return;
const unsigned int this_mpi_process(
Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
-# ifdef DEBUG
+#  ifdef DEBUG
Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
ExcMessage("All processes have to call routine with identical rank"));
Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
ExcMessage("All processes have to call routine with identical rank"));
-# endif
+#  endif
if (this_mpi_process == rank)
{
MPI_Group_free(&group_B);
if (MPI_COMM_NULL != communicator_B)
MPI_Comm_free(&communicator_B);
+# else
+ (void)B;
+ (void)rank;
+ AssertThrow(false, ExcNotImplemented());
+# endif
}
unsigned int idx = 0;
for (const auto &sparsity_line : send_data)
{
- const int ierr = MPI_Isend(sparsity_line.second.data(),
- sparsity_line.second.size(),
- DEAL_II_DOF_INDEX_MPI_TYPE,
- sparsity_line.first,
- 124,
- mpi_comm,
- &requests[idx++]);
+ const int ierr =
+ MPI_Isend(DEAL_II_MPI_CONST_CAST(sparsity_line.second.data()),
+ sparsity_line.second.size(),
+ DEAL_II_DOF_INDEX_MPI_TYPE,
+ sparsity_line.first,
+ 124,
+ mpi_comm,
+ &requests[idx++]);
AssertThrowMPI(ierr);
}
}
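// Background for the change above: MPI-2 declares the MPI_Isend buffer as a
// plain "void *", so the const pointer returned by data() on a const
// container does not compile there; MPI-3.0 changed the parameter to
// "const void *". A sketch of the mechanics such a macro can use
// (CONST_CAST_SKETCH is a hypothetical name; the actual definition of
// DEAL_II_MPI_CONST_CAST may differ):
//
//   #include <type_traits>
//   #if MPI_VERSION >= 3
//   #  define CONST_CAST_SKETCH(expr) (expr)
//   #else
//   #  define CONST_CAST_SKETCH(expr)                                   \
//        const_cast<typename std::remove_const<                         \
//          typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
//   #endif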