From: Daniel Arndt
Date: Wed, 22 Apr 2020 02:48:29 +0000 (-0400)
Subject: Remove a few deprecated PETScWrappers matrix functions
X-Git-Tag: v9.5.0-rc1~1061^2~3
X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=cf509e856453c0af5231143c7321d26357bdf522;p=dealii.git

Remove a few deprecated PETScWrappers matrix functions
---

diff --git a/include/deal.II/lac/petsc_matrix_base.h b/include/deal.II/lac/petsc_matrix_base.h
index 91457c9fd7..ad57f1cb26 100644
--- a/include/deal.II/lac/petsc_matrix_base.h
+++ b/include/deal.II/lac/petsc_matrix_base.h
@@ -757,15 +757,6 @@ namespace PETScWrappers
     add(const PetscScalar factor, const MatrixBase &other);
 
-    /**
-     * Add the matrix @p other scaled by the factor @p factor to the current
-     * matrix.
-     * @deprecated Use the function with order of arguments reversed instead.
-     */
-    DEAL_II_DEPRECATED
-    MatrixBase &
-    add(const MatrixBase &other, const PetscScalar factor);
-
     /**
      * Matrix-vector multiplication: let dst = M*src with M
      * being this matrix.

diff --git a/include/deal.II/lac/petsc_sparse_matrix.h b/include/deal.II/lac/petsc_sparse_matrix.h
index 2c86f0c5b6..0cbba1bae4 100644
--- a/include/deal.II/lac/petsc_sparse_matrix.h
+++ b/include/deal.II/lac/petsc_sparse_matrix.h
@@ -401,72 +401,6 @@ namespace PETScWrappers
        */
      ~SparseMatrix() override;
 
-      /**
-       * Create a sparse matrix of dimensions @p m times @p n, with an initial
-       * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
-       * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
-       * function for more information about these parameters). PETSc is able
-       * to cope with the situation that more than this number of elements are
-       * later allocated for a row, but this involves copying data, and is
-       * thus expensive.
-       *
-       * For the meaning of the @p local_row and @p local_columns parameters,
-       * see the class documentation.
-       *
-       * The @p is_symmetric flag determines whether we should tell PETSc that
-       * the matrix is going to be symmetric (as indicated by the call
-       * MatSetOption(mat, MAT_SYMMETRIC). Note that the PETSc
-       * documentation states that one cannot form an ILU decomposition of a
-       * matrix for which this flag has been set to @p true, only an ICC. The
-       * default value of this flag is @p false.
-       *
-       * @deprecated This constructor is deprecated: please use the
-       * constructor with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      SparseMatrix(const MPI_Comm &communicator,
-                   const size_type m,
-                   const size_type n,
-                   const size_type local_rows,
-                   const size_type local_columns,
-                   const size_type n_nonzero_per_row,
-                   const bool is_symmetric = false,
-                   const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Initialize a rectangular matrix with @p m rows and @p n columns. The
-       * maximal number of nonzero entries for diagonal and off-diagonal
-       * blocks of each row is given by the @p row_lengths and @p
-       * offdiag_row_lengths arrays.
-       *
-       * For the meaning of the @p local_row and @p local_columns parameters,
-       * see the class documentation.
-       *
-       * Just as for the other constructors: PETSc is able to cope with the
-       * situation that more than this number of elements are later allocated
-       * for a row, but this involves copying data, and is thus expensive.
-       *
-       * The @p is_symmetric flag determines whether we should tell PETSc that
-       * the matrix is going to be symmetric (as indicated by the call
-       * MatSetOption(mat, MAT_SYMMETRIC).
-       * Note that the PETSc
-       * documentation states that one cannot form an ILU decomposition of a
-       * matrix for which this flag has been set to @p true, only an ICC. The
-       * default value of this flag is @p false.
-       *
-       * @deprecated This constructor is deprecated: please use the
-       * constructor with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      SparseMatrix(const MPI_Comm &communicator,
-                   const size_type m,
-                   const size_type n,
-                   const size_type local_rows,
-                   const size_type local_columns,
-                   const std::vector<size_type> &row_lengths,
-                   const bool is_symmetric = false,
-                   const std::vector<size_type> &offdiag_row_lengths =
-                     std::vector<size_type>());
-
       /**
        * Initialize using the given sparsity pattern with communication
        * happening over the provided @p communicator.
@@ -517,45 +451,6 @@ namespace PETScWrappers
       void
       copy_from(const SparseMatrix &other);
 
-      /**
-       * Throw away the present matrix and generate one that has the same
-       * properties as if it were created by the constructor of this class
-       * with the same argument list as the present function.
-       *
-       * @deprecated This overload of reinit is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      reinit(const MPI_Comm &communicator,
-             const size_type m,
-             const size_type n,
-             const size_type local_rows,
-             const size_type local_columns,
-             const size_type n_nonzero_per_row,
-             const bool is_symmetric = false,
-             const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Throw away the present matrix and generate one that has the same
-       * properties as if it were created by the constructor of this class
-       * with the same argument list as the present function.
-       *
-       * @deprecated This overload of reinit is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      reinit(const MPI_Comm &communicator,
-             const size_type m,
-             const size_type n,
-             const size_type local_rows,
-             const size_type local_columns,
-             const std::vector<size_type> &row_lengths,
-             const bool is_symmetric = false,
-             const std::vector<size_type> &offdiag_row_lengths =
-               std::vector<size_type>());
-
       /**
        * Initialize using the given sparsity pattern with communication
        * happening over the provided @p communicator.
@@ -700,41 +595,6 @@ namespace PETScWrappers
        */
       MPI_Comm communicator;
 
-      /**
-       * Do the actual work for the respective reinit() function and the
-       * matching constructor, i.e. create a matrix. Getting rid of the
-       * previous matrix is left to the caller.
-       *
-       * @deprecated This overload of do_reinit is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      do_reinit(const size_type m,
-                const size_type n,
-                const size_type local_rows,
-                const size_type local_columns,
-                const size_type n_nonzero_per_row,
-                const bool is_symmetric = false,
-                const size_type n_offdiag_nonzero_per_row = 0);
-
-      /**
-       * Same as previous function.
-       *
-       * @deprecated This overload of do_reinit is deprecated:
-       * please use the overload with a sparsity pattern argument instead.
-       */
-      DEAL_II_DEPRECATED
-      void
-      do_reinit(const size_type m,
-                const size_type n,
-                const size_type local_rows,
-                const size_type local_columns,
-                const std::vector<size_type> &row_lengths,
-                const bool is_symmetric = false,
-                const std::vector<size_type> &offdiag_row_lengths =
-                  std::vector<size_type>());
-
       /**
        * Same as previous functions.
       */

diff --git a/source/lac/petsc_matrix_base.cc b/source/lac/petsc_matrix_base.cc
index e904755d2b..bd1dc42f2b 100644
--- a/source/lac/petsc_matrix_base.cc
+++ b/source/lac/petsc_matrix_base.cc
@@ -426,6 +426,7 @@ namespace PETScWrappers
   }
 
 
+
   MatrixBase &
   MatrixBase::add(const PetscScalar factor, const MatrixBase &other)
   {
@@ -438,13 +439,6 @@ namespace PETScWrappers
 
 
 
-  MatrixBase &
-  MatrixBase::add(const MatrixBase &other, const PetscScalar factor)
-  {
-    return add(factor, other);
-  }
-
-
   void
   MatrixBase::vmult(VectorBase &dst, const VectorBase &src) const
   {

diff --git a/source/lac/petsc_parallel_sparse_matrix.cc b/source/lac/petsc_parallel_sparse_matrix.cc
index 3af36ce1c0..5229076f30 100644
--- a/source/lac/petsc_parallel_sparse_matrix.cc
+++ b/source/lac/petsc_parallel_sparse_matrix.cc
@@ -49,47 +49,6 @@ namespace PETScWrappers
       destroy_matrix(matrix);
     }
 
-    SparseMatrix::SparseMatrix(const MPI_Comm &communicator,
-                               const size_type m,
-                               const size_type n,
-                               const size_type local_rows,
-                               const size_type local_columns,
-                               const size_type n_nonzero_per_row,
-                               const bool is_symmetric,
-                               const size_type n_offdiag_nonzero_per_row)
-      : communicator(communicator)
-    {
-      do_reinit(m,
-                n,
-                local_rows,
-                local_columns,
-                n_nonzero_per_row,
-                is_symmetric,
-                n_offdiag_nonzero_per_row);
-    }
-
-
-
-    SparseMatrix::SparseMatrix(
-      const MPI_Comm &communicator,
-      const size_type m,
-      const size_type n,
-      const size_type local_rows,
-      const size_type local_columns,
-      const std::vector<size_type> &row_lengths,
-      const bool is_symmetric,
-      const std::vector<size_type> &offdiag_row_lengths)
-      : communicator(communicator)
-    {
-      do_reinit(m,
-                n,
-                local_rows,
-                local_columns,
-                row_lengths,
-                is_symmetric,
-                offdiag_row_lengths);
-    }
-
 
 
     template <typename SparsityPatternType>
@@ -146,59 +105,6 @@ namespace PETScWrappers
       AssertThrow(ierr == 0, ExcPETScError(ierr));
     }
 
-    void
-    SparseMatrix::reinit(const MPI_Comm &communicator,
-                         const size_type m,
-                         const size_type n,
-                         const size_type local_rows,
-                         const size_type local_columns,
-                         const size_type n_nonzero_per_row,
-                         const bool is_symmetric,
-                         const size_type n_offdiag_nonzero_per_row)
-    {
-      this->communicator = communicator;
-
-      // get rid of old matrix and generate a new one
-      const PetscErrorCode ierr = destroy_matrix(matrix);
-      AssertThrow(ierr == 0, ExcPETScError(ierr));
-
-      do_reinit(m,
-                n,
-                local_rows,
-                local_columns,
-                n_nonzero_per_row,
-                is_symmetric,
-                n_offdiag_nonzero_per_row);
-    }
-
-
-
-    void
-    SparseMatrix::reinit(const MPI_Comm &communicator,
-                         const size_type m,
-                         const size_type n,
-                         const size_type local_rows,
-                         const size_type local_columns,
-                         const std::vector<size_type> &row_lengths,
-                         const bool is_symmetric,
-                         const std::vector<size_type> &offdiag_row_lengths)
-    {
-      this->communicator = communicator;
-
-      // get rid of old matrix and generate a
-      // new one
-      const PetscErrorCode ierr = destroy_matrix(matrix);
-      AssertThrow(ierr == 0, ExcPETScError(ierr));
-
-      do_reinit(m,
-                n,
-                local_rows,
-                local_columns,
-                row_lengths,
-                is_symmetric,
-                offdiag_row_lengths);
-    }
-
 
 
     template <typename SparsityPatternType>
@@ -225,6 +131,8 @@ namespace PETScWrappers
                 preset_nonzero_locations);
     }
 
+
+
     template <typename SparsityPatternType>
     void
     SparseMatrix::reinit(const IndexSet &local_rows,
@@ -241,110 +149,6 @@ namespace PETScWrappers
       do_reinit(local_rows, local_columns, sparsity_pattern);
     }
 
-    void
-    SparseMatrix::do_reinit(const size_type m,
-                            const size_type n,
-                            const size_type local_rows,
-                            const size_type local_columns,
-                            const size_type n_nonzero_per_row,
-                            const bool is_symmetric,
-                            const size_type n_offdiag_nonzero_per_row)
-    {
-      Assert(local_rows <= m,
-             ExcLocalRowsTooLarge(local_rows, m));
-
-      // use the call sequence indicating only
-      // a maximal number of elements per row
-      // for all rows globally
-      const PetscErrorCode ierr = MatCreateAIJ(communicator,
-                                               local_rows,
-                                               local_columns,
-                                               m,
-                                               n,
-                                               n_nonzero_per_row,
-                                               nullptr,
-                                               n_offdiag_nonzero_per_row,
-                                               nullptr,
-                                               &matrix);
-      set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-      AssertThrow(ierr == 0, ExcPETScError(ierr));
-
-      // set symmetric flag, if so requested
-      if (is_symmetric == true)
-        {
-          set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
-        }
-    }
-
-
-
-    void
-    SparseMatrix::do_reinit(const size_type m,
-                            const size_type n,
-                            const size_type local_rows,
-                            const size_type local_columns,
-                            const std::vector<size_type> &row_lengths,
-                            const bool is_symmetric,
-                            const std::vector<size_type> &offdiag_row_lengths)
-    {
-      Assert(local_rows <= m, ExcLocalRowsTooLarge(local_rows, m));
-
-      Assert(row_lengths.size() == m,
-             ExcDimensionMismatch(row_lengths.size(), m));
-
-      // For the case that local_columns is smaller than one of the row lengths
-      // MatCreateMPIAIJ throws an error. In this case use a
-      // PETScWrappers::SparseMatrix
-      for (const size_type row_length : row_lengths)
-        {
-          (void)row_length;
-          Assert(row_length <= local_columns,
-                 ExcIndexRange(row_length, 1, local_columns + 1));
-        }
-
-      // use the call sequence indicating a
-      // maximal number of elements for each
-      // row individually. annoyingly, we
-      // always use unsigned ints for cases
-      // like this, while PETSc wants to see
-      // signed integers. so we have to
-      // convert, unless we want to play dirty
-      // tricks with conversions of pointers
-      const std::vector<PetscInt> int_row_lengths(row_lengths.begin(),
-                                                  row_lengths.end());
-      const std::vector<PetscInt> int_offdiag_row_lengths(
-        offdiag_row_lengths.begin(), offdiag_row_lengths.end());
-
-      // TODO: There must be a significantly better way to provide information
-      // about the off-diagonal blocks of the matrix. this way, petsc keeps
-      // allocating tiny chunks of memory, and gets completely hung up over this
-      const PetscErrorCode ierr =
-        MatCreateAIJ(communicator,
-                     local_rows,
-                     local_columns,
-                     m,
-                     n,
-                     0,
-                     int_row_lengths.data(),
-                     0,
-                     offdiag_row_lengths.size() ?
-                       int_offdiag_row_lengths.data() :
-                       nullptr,
-                     &matrix);
-
-      // TODO: Sometimes the actual number of nonzero entries allocated is
-      // greater than the number of nonzero entries, which petsc will complain
-      // about unless explicitly disabled with MatSetOption. There is probably a
-      // way to prevent a different number nonzero elements being allocated in
-      // the first place. (See also previous TODO).
-      set_matrix_option(matrix, MAT_NEW_NONZERO_ALLOCATION_ERR, PETSC_FALSE);
-      AssertThrow(ierr == 0, ExcPETScError(ierr));
-
-      // set symmetric flag, if so requested
-      if (is_symmetric == true)
-        {
-          set_matrix_option(matrix, MAT_SYMMETRIC, PETSC_TRUE);
-        }
-    }
 
 
     template <typename SparsityPatternType>

diff --git a/tests/petsc/64.cc b/tests/petsc/64.cc
index 9cf1a0db5c..88c60427e3 100644
--- a/tests/petsc/64.cc
+++ b/tests/petsc/64.cc
@@ -20,6 +20,7 @@
 // PETScWrappers::MatrixBase::operator=
 
+#include
 #include
 #include
 
@@ -61,14 +62,24 @@ main(int argc, char **argv)
       // check
       // PETScWrappers::MPI::SparseMatrix
-      MPI_Comm mpi_communicator(MPI_COMM_WORLD);
-      int n_jobs = 1;
-      MPI_Comm_size(mpi_communicator, &n_jobs);
-      const unsigned int n_mpi_processes = static_cast<unsigned int>(n_jobs);
+      MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+      const unsigned int n_mpi_processes =
+        Utilities::MPI::n_mpi_processes(mpi_communicator);
+      const unsigned int my_id =
+        Utilities::MPI::this_mpi_process(mpi_communicator);
       Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
       const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
-      PETScWrappers::MPI::SparseMatrix v2(
-        mpi_communicator, n_dofs, n_dofs, n_local_dofs, n_local_dofs, 5);
+      IndexSet locally_owned_dofs(n_dofs);
+      locally_owned_dofs.add_range(my_id * n_dofs / n_mpi_processes,
+                                   (my_id + 1) * n_dofs / n_mpi_processes);
+      locally_relevant_dofs.add_index(0);
+      DynamicSparsityPattern dsp(n_dofs);
+      dsp.add(0, 0);
+      PETScWrappers::MPI::SparseMatrix v2;
+      v2.reinit(locally_owned_dofs,
+                locally_owned_dofs,
+                dsp,
+                mpi_communicator);
       test(v2);
     }
 }

diff --git a/tests/petsc/slowness_03.cc b/tests/petsc/slowness_03.cc
index b44aa82ea0..bac1d15a45 100644
--- a/tests/petsc/slowness_03.cc
+++ b/tests/petsc/slowness_03.cc
@@ -22,6 +22,7 @@
 //
 // the tests build the 5-point stencil matrix for a uniform grid of size N*N
 
+#include
 #include
 #include
 #include
 
@@ -34,11 +35,50 @@
 void
 test()
 {
-  const unsigned int N = 200;
+  const unsigned int N      = 200;
+  const unsigned int n_dofs = N * N;
 
   // build the sparse matrix
-  PETScWrappers::MPI::SparseMatrix matrix(
-    PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
+  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes =
+    Utilities::MPI::n_mpi_processes(mpi_communicator);
+  const unsigned int my_id = Utilities::MPI::this_mpi_process(mpi_communicator);
+  Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
+  const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
+  IndexSet locally_owned_dofs(n_dofs);
+  locally_owned_dofs.add_range(my_id * n_dofs / n_mpi_processes,
+                               (my_id + 1) * n_dofs / n_mpi_processes);
+  IndexSet locally_relevant_dofs = locally_owned_dofs;
+  DynamicSparsityPattern dsp(n_dofs);
+  for (unsigned int i = 0; i < N; i++)
+    for (unsigned int j = 0; j < N; j++)
+      {
+        const unsigned int global = i * N + j;
+        dsp.add(global, global);
+        if (j > 0)
+          {
+            dsp.add(global - 1, global);
+            dsp.add(global, global - 1);
+          }
+        if (j < N - 1)
+          {
+            dsp.add(global + 1, global);
+            dsp.add(global, global + 1);
+          }
+        if (i > 0)
+          {
+            dsp.add(global - N, global);
+            dsp.add(global, global - N);
+          }
+        if (i < N - 1)
+          {
+            dsp.add(global + N, global);
+            dsp.add(global, global + N);
+          }
+      }
+
+  PETScWrappers::MPI::SparseMatrix matrix;
+  matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
   for (unsigned int i = 0; i < N; i++)
     for (unsigned int j = 0; j < N; j++)
      {
@@ -70,9 +110,9 @@ test()
 
   // then do a single matrix-vector
   // multiplication with subsequent formation
   // of the matrix norm
-  PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, N * N, N * N);
-  PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, N * N, N * N);
-  for (unsigned int i = 0; i < N * N; ++i)
+  PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, n_dofs, n_dofs);
+  PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, n_dofs, n_dofs);
+  for (unsigned int i = 0; i < n_dofs; ++i)
     v1(i) = i;
   matrix.vmult(v2, v1);

diff --git a/tests/petsc/slowness_04.cc b/tests/petsc/slowness_04.cc
index 51d212f006..b555290df4 100644
--- a/tests/petsc/slowness_04.cc
+++ b/tests/petsc/slowness_04.cc
@@ -28,6 +28,7 @@
 // matrix in a consecutive fashion, but rather according to the order of
 // degrees of freedom in the sequence of cells that we traverse
 
+#include
 #include
 #include
 #include
 
@@ -40,7 +41,8 @@
 void
 test()
 {
-  const unsigned int N = 200;
+  const unsigned int N      = 200;
+  const unsigned int n_dofs = N * N;
 
   // first find a random permutation of the
   // indices
@@ -65,8 +67,49 @@ test()
     }
 
   // build the sparse matrix
-  PETScWrappers::MPI::SparseMatrix matrix(
-    PETSC_COMM_WORLD, N * N, N * N, N * N, N * N, 5);
+  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes =
+    Utilities::MPI::n_mpi_processes(mpi_communicator);
+  const unsigned int my_id = Utilities::MPI::this_mpi_process(mpi_communicator);
+  Assert(n_dofs % n_mpi_processes == 0, ExcInternalError());
+  const unsigned int n_local_dofs = n_dofs / n_mpi_processes;
+  IndexSet locally_owned_dofs(n_dofs);
+  locally_owned_dofs.add_range(my_id * n_dofs / n_mpi_processes,
+                               (my_id + 1) * n_dofs / n_mpi_processes);
+  IndexSet locally_relevant_dofs = locally_owned_dofs;
+  DynamicSparsityPattern dsp(n_dofs);
+  for (unsigned int i_ = 0; i_ < N; i_++)
+    for (unsigned int j_ = 0; j_ < N; j_++)
+      {
+        const unsigned int i = permutation[i_];
+        const unsigned int j = permutation[j_];
+
+        const unsigned int global = i * N + j;
+        dsp.add(global, global);
+        if (j > 0)
+          {
+            dsp.add(global - 1, global);
+            dsp.add(global, global - 1);
+          }
+        if (j < N - 1)
+          {
+            dsp.add(global + 1, global);
+            dsp.add(global, global + 1);
+          }
+        if (i > 0)
+          {
+            dsp.add(global - N, global);
+            dsp.add(global, global - N);
+          }
+        if (i < N - 1)
+          {
+            dsp.add(global + N, global);
+            dsp.add(global, global + N);
+          }
+      }
+
+  PETScWrappers::MPI::SparseMatrix matrix;
+  matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
   for (unsigned int i_ = 0; i_ < N; i_++)
     for (unsigned int j_ = 0; j_ < N; j_++)
       {
@@ -101,9 +144,9 @@ test()
 
   // then do a single matrix-vector
   // multiplication with subsequent formation
   // of the matrix norm
-  PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, N * N, N * N);
-  PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, N * N, N * N);
-  for (unsigned int i = 0; i < N * N; ++i)
+  PETScWrappers::MPI::Vector v1(PETSC_COMM_WORLD, n_dofs, n_dofs);
+  PETScWrappers::MPI::Vector v2(PETSC_COMM_WORLD, n_dofs, n_dofs);
+  for (unsigned int i = 0; i < n_dofs; ++i)
     v1(i) = i;
   matrix.vmult(v2, v1);

diff --git a/tests/petsc/sparse_matrix_01.cc b/tests/petsc/sparse_matrix_01.cc
index 87b4225818..9dcea8a6c7 100644
--- a/tests/petsc/sparse_matrix_01.cc
+++ b/tests/petsc/sparse_matrix_01.cc
@@ -51,14 +51,14 @@ test()
     deallog << m(i, i) << " ";
   deallog << std::endl;
 
-  m.add(m2, 1.0);
+  m.add(1.0, m2);
 
   deallog << "after: " << m(0, 1) << std::endl;
   for (unsigned int i = 0; i < s; ++i)
     deallog << m(i, i) << " ";
   deallog << std::endl;
 
-  m.add(m2, -1.0);
+  m.add(-1.0, m2);
 
   deallog << "back to original: " << m(0, 1) << std::endl;
   for (unsigned int i = 0; i < s; ++i)
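
Porting note (commentary, not part of the patch above): the removed MatrixBase::add(other, factor) has a drop-in replacement with the argument order reversed, which is exactly the change applied to sparse_matrix_01.cc. A minimal sketch, assuming m and m2 are already-initialized PETScWrappers matrices with identical sparsity patterns:

  // before this commit (deprecated overload, now removed):
  //   m.add(m2, 2.0);
  // after: the scalar factor comes first, then the matrix to add
  m.add(2.0, m2); // computes m += 2.0 * m2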
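Likewise, code that still relies on the removed n_nonzero_per_row / row_lengths constructors and reinit() overloads of PETScWrappers::MPI::SparseMatrix can switch to the sparsity-pattern interface kept by this patch, in the same way the tests above were converted: describe the locally owned rows with an IndexSet, record the couplings in a DynamicSparsityPattern, and hand both to reinit(). The sketch below is illustrative only, not verbatim test code: the function name setup_tridiagonal_matrix and the 1-D three-point coupling are placeholders, and it assumes n_dofs is divisible by the number of MPI ranks (the tests assert the same).

  #include <deal.II/base/index_set.h>
  #include <deal.II/base/mpi.h>
  #include <deal.II/lac/dynamic_sparsity_pattern.h>
  #include <deal.II/lac/petsc_sparse_matrix.h>

  using namespace dealii;

  void setup_tridiagonal_matrix(PETScWrappers::MPI::SparseMatrix &matrix,
                                const MPI_Comm &communicator,
                                const unsigned int n_dofs)
  {
    const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(communicator);
    const unsigned int my_id   = Utilities::MPI::this_mpi_process(communicator);

    // Each rank owns a contiguous block of rows, as in the updated tests.
    IndexSet locally_owned_dofs(n_dofs);
    locally_owned_dofs.add_range(my_id * n_dofs / n_ranks,
                                 (my_id + 1) * n_dofs / n_ranks);

    // State the couplings explicitly instead of guessing a per-row count.
    DynamicSparsityPattern dsp(n_dofs);
    for (unsigned int i = 0; i < n_dofs; ++i)
      {
        dsp.add(i, i);
        if (i > 0)
          dsp.add(i, i - 1);
        if (i < n_dofs - 1)
          dsp.add(i, i + 1);
      }

    // Replaces SparseMatrix(communicator, m, n, local_rows, local_columns,
    // n_nonzero_per_row, ...) and the matching reinit() overloads removed here.
    matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, communicator);
  }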