From: Daniel Arndt
Date: Thu, 7 Feb 2019 17:04:32 +0000 (+0100)
Subject: Unrestrict ScaLAPACKMatrix::copy_to/from
X-Git-Tag: v9.1.0-rc1~361^2
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=613673a1c0a1470ff7ad2e486b4cf8242636ae5e;p=dealii.git

Unrestrict ScaLAPACKMatrix::copy_to/from
---

diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h
index ebc212f622..e44ef1ffe4 100644
--- a/include/deal.II/lac/scalapack.h
+++ b/include/deal.II/lac/scalapack.h
@@ -203,8 +203,6 @@ public:
    * The user has to ensure that all processes call this with identical @p rank.
    * The @p rank refers to a process of the MPI communicator used to create the process grid
    * of the distributed matrix.
-   *
-   * @note This function requires MPI-3.0 support
    */
   void
   copy_from(const LAPACKFullMatrix<NumberType> &matrix,
@@ -215,7 +213,6 @@ public:
    *
    * @note This function should only be used for relatively small matrix
    * dimensions. It is primarily intended for debugging purposes.
-   * This function requires MPI-3.0 support
    */
   void
   copy_to(FullMatrix<NumberType> &matrix) const;

diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index 8651557833..89d98c930c 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -361,19 +361,18 @@ void
 ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
                                        const unsigned int                  rank)
 {
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
   if (n_rows * n_columns == 0)
     return;
 
   const unsigned int this_mpi_process(
     Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
 
-#    ifdef DEBUG
+#  ifdef DEBUG
   Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
          ExcMessage("All processes have to call routine with identical rank"));
   Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
          ExcMessage("All processes have to call routine with identical rank"));
-#    endif
+#  endif
 
   // root process has to be active in the grid of A
   if (this_mpi_process == rank)
@@ -391,12 +390,12 @@ ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
       const int              n = 1;
       const std::vector<int> ranks(n, rank);
       MPI_Group              group_B;
-      MPI_Group_incl(group_A, n, ranks.data(), &group_B);
+      MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
       MPI_Comm communicator_B;
-      MPI_Comm_create_group(this->grid->mpi_communicator,
-                            group_B,
-                            0,
-                            &communicator_B);
+      Utilities::MPI::create_group(this->grid->mpi_communicator,
+                                   group_B,
+                                   0,
+                                   &communicator_B);
       int n_proc_rows_B = 1, n_proc_cols_B = 1;
       int this_process_row_B = -1, this_process_column_B = -1;
       int blacs_context_B    = -1;
@@ -485,11 +484,6 @@ ScaLAPACKMatrix<NumberType>::copy_from(const LAPACKFullMatrix<NumberType> &B,
     MPI_Comm_free(&communicator_B);
 
   state = LAPACKSupport::matrix;
-#  else
-  (void)B;
-  (void)rank;
-  AssertThrow(false, ExcNotImplemented());
-#  endif
 }
@@ -534,19 +528,18 @@ void
 ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
                                      const unsigned int            rank) const
 {
-#  if DEAL_II_MPI_VERSION_GTE(3, 0)
   if (n_rows * n_columns == 0)
     return;
 
   const unsigned int this_mpi_process(
     Utilities::MPI::this_mpi_process(this->grid->mpi_communicator));
 
-#    ifdef DEBUG
+#  ifdef DEBUG
   Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank,
          ExcMessage("All processes have to call routine with identical rank"));
   Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank,
          ExcMessage("All processes have to call routine with identical rank"));
-#    endif
+#  endif
 
   if (this_mpi_process == rank)
     {
@@ -565,12 +558,12 @@ ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
       const int              n = 1;
       const std::vector<int> ranks(n, rank);
       MPI_Group              group_B;
-      MPI_Group_incl(group_A, n, ranks.data(), &group_B);
+      MPI_Group_incl(group_A, n, DEAL_II_MPI_CONST_CAST(ranks.data()), &group_B);
       MPI_Comm communicator_B;
-      MPI_Comm_create_group(this->grid->mpi_communicator,
-                            group_B,
-                            0,
-                            &communicator_B);
+      Utilities::MPI::create_group(this->grid->mpi_communicator,
+                                   group_B,
+                                   0,
+                                   &communicator_B);
       int n_proc_rows_B = 1, n_proc_cols_B = 1;
       int this_process_row_B = -1, this_process_column_B = -1;
       int blacs_context_B    = -1;
@@ -657,11 +650,6 @@ ScaLAPACKMatrix<NumberType>::copy_to(LAPACKFullMatrix<NumberType> &B,
   MPI_Group_free(&group_B);
   if (MPI_COMM_NULL != communicator_B)
     MPI_Comm_free(&communicator_B);
-#  else
-  (void)B;
-  (void)rank;
-  AssertThrow(false, ExcNotImplemented());
-#  endif
 }
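---

Note on why the guards can go away: MPI_Comm_create_group is an MPI-3.0
addition, whereas Utilities::MPI::create_group offers the same call shape
without the MPI-3.0 requirement, which is what this patch relies on; the
added DEAL_II_MPI_CONST_CAST covers pre-MPI-3.0 bindings of
MPI_Group_incl, whose ranks argument only became const in MPI-3.0. As a
minimal sketch of how a group-restricted communicator can be built from
pre-3.0 primitives (an illustration only, not deal.II's actual
implementation of Utilities::MPI::create_group; create_group_fallback is
a hypothetical name), one can use MPI_Comm_split:

  #include <mpi.h>

  #include <algorithm>
  #include <vector>

  // Build a communicator containing only the processes in `ranks`,
  // collectively over all of `comm`. Non-members receive MPI_COMM_NULL,
  // mimicking the result of MPI_Comm_create_group.
  int
  create_group_fallback(MPI_Comm                comm,
                        const std::vector<int> &ranks,
                        MPI_Comm *              new_comm)
  {
    int my_rank;
    MPI_Comm_rank(comm, &my_rank);

    // Group members get color 0; everyone else passes MPI_UNDEFINED and
    // is handed MPI_COMM_NULL by MPI_Comm_split (available since MPI-1).
    const bool in_group =
      std::find(ranks.begin(), ranks.end(), my_rank) != ranks.end();
    return MPI_Comm_split(comm,
                          in_group ? 0 : MPI_UNDEFINED,
                          my_rank,
                          new_comm);
  }

The trade-off is that MPI_Comm_split is collective over all of `comm`,
while MPI_Comm_create_group only involves the group members. For
copy_to()/copy_from(), which every process of the grid communicator has
to call with an identical @p rank anyway, that difference does not
matter.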