From: Benjamin Brands Date: Fri, 13 Apr 2018 14:50:33 +0000 (+0200) Subject: ScaLAPACKMatrix::copy_to(LAPACKFullMatrix) and ScaLAPACKMatrix::copy_from(LAPACKFullM... X-Git-Tag: v9.1.0-rc1~845^2 X-Git-Url: https://gitweb.dealii.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=09a1adf5f04e84cf4e6af8ea4da5f0807ce05a2f;p=dealii.git ScaLAPACKMatrix::copy_to(LAPACKFullMatrix) and ScaLAPACKMatrix::copy_from(LAPACKFullMatrix) --- diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h index 721dad3f80..3198d1a34f 100644 --- a/include/deal.II/lac/scalapack.h +++ b/include/deal.II/lac/scalapack.h @@ -26,6 +26,7 @@ # include # include +# include # include # include @@ -190,6 +191,19 @@ public: ScaLAPACKMatrix & operator=(const FullMatrix &); + /** + * Copies the content of the locally owned @p matrix to the distributed matrix. + * The distributed matrix and @p matrix on process @p rank must have matching dimensions. + * + * For all processes except the process with rank @p rank the serial @p matrix is not referenced. + * The user has to ensure that all processes call this with identical @p rank. + * The @p rank refers to a process of the MPI communicator used to create the process grid + * of the distributed matrix. + */ + void + copy_from(const LAPACKFullMatrix &matrix, + const unsigned int rank); + /** * Copy the contents of the distributed matrix into @p matrix. * + */ void copy_to(FullMatrix &matrix) const; + /** + * Copies the content of the distributed matrix into the locally replicated @p matrix + * on the process with rank @p rank. For all processes except the process with rank @p rank the @p matrix is not referenced. + * The distributed matrix and @p matrix on the process @p rank must have matching dimensions. + * + * The user has to ensure that all processes call this with identical @p rank. + * The @p rank refers to a process of the MPI communicator used to create the process grid + * of the distributed matrix. + */ + void + copy_to(LAPACKFullMatrix &matrix, const unsigned int rank) const; + /** + * Copy the contents of the distributed matrix into a differently distributed matrix @p dest.
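For orientation, the two member functions declared above are used as follows (an illustrative sketch, not part of the patch; it assumes a square ScaLAPACKMatrix<double> named A that was built on a process grid over MPI_COMM_WORLD):

    // All processes must pass the same rank; the LAPACKFullMatrix is read
    // (copy_from) or written (copy_to) only on that process, and that process
    // has to belong to the process grid of A.
    LAPACKFullMatrix<double> serial;
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      {
        serial.reinit(A.m());   // allocate with matching size on rank 0 only
        // ... fill serial ...
      }
    A.copy_from(serial, /*rank=*/0);   // serial on rank 0  ->  distributed A
    A.copy_to(serial, /*rank=*/0);     // distributed A     ->  serial on rank 0

On all other processes the serial matrix can stay empty; only the call itself has to be made by everyone.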
* The function also works for matrices with different process grids diff --git a/include/deal.II/lac/scalapack.templates.h b/include/deal.II/lac/scalapack.templates.h index 86db46b762..01a9645a3b 100644 --- a/include/deal.II/lac/scalapack.templates.h +++ b/include/deal.II/lac/scalapack.templates.h @@ -580,29 +580,29 @@ extern "C" * A(ia:ia+m-1,ja:ja+n-1) and sub(B) denotes B(ib:ib+m-1,jb:jb+n-1) */ void - pdlacpy_(const char *uplo, - const int * m, - const int * n, - double * A, - const int * ia, - const int * ja, - int * desca, - double * B, - const int * ib, - const int * jb, - int * descb); + pdlacpy_(const char * uplo, + const int * m, + const int * n, + const double *A, + const int * ia, + const int * ja, + const int * desca, + double * B, + const int * ib, + const int * jb, + const int * descb); void - pslacpy_(const char *uplo, - const int * m, - const int * n, - float * A, - const int * ia, - const int * ja, - int * desca, - float * B, - const int * ib, - const int * jb, - int * descb); + pslacpy_(const char * uplo, + const int * m, + const int * n, + const float *A, + const int * ia, + const int * ja, + const int * desca, + float * B, + const int * ib, + const int * jb, + const int * descb); /** * Copies the content of a general rectangular distributed matrix @p A to another distributed matrix @p B @@ -1558,46 +1558,46 @@ inline void placpy(const char * /*uplo*/, const int * /*m*/, const int * /*n*/, - number * /*A*/, + const number * /*A*/, const int * /*ia*/, const int * /*ja*/, - int * /*desca*/, + const int * /*desca*/, number * /*B*/, const int * /*ib*/, const int * /*jb*/, - int * /*descb*/) + const int * /*descb*/) { Assert(false, dealii::ExcNotImplemented()); } inline void -placpy(const char *uplo, - const int * m, - const int * n, - double * A, - const int * ia, - const int * ja, - int * desca, - double * B, - const int * ib, - const int * jb, - int * descb) +placpy(const char * uplo, + const int * m, + const int * n, + const double *A, + const int * ia, + const int * ja, + const int * desca, + double * B, + const int * ib, + const int * jb, + const int * descb) { pdlacpy_(uplo, m, n, A, ia, ja, desca, B, ib, jb, descb); } inline void -placpy(const char *uplo, - const int * m, - const int * n, - float * A, - const int * ia, - const int * ja, - int * desca, - float * B, - const int * ib, - const int * jb, - int * descb) +placpy(const char * uplo, + const int * m, + const int * n, + const float *A, + const int * ia, + const int * ja, + const int * desca, + float * B, + const int * ib, + const int * jb, + const int * descb) { pslacpy_(uplo, m, n, A, ia, ja, desca, B, ib, jb, descb); } diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc index 9baf808aa7..6a41d47a22 100644 --- a/source/lac/scalapack.cc +++ b/source/lac/scalapack.cc @@ -263,6 +263,138 @@ ScaLAPACKMatrix::operator=(const FullMatrix &matrix) +template +void +ScaLAPACKMatrix::copy_from(const LAPACKFullMatrix &B, + const unsigned int rank) +{ + if (n_rows * n_columns == 0) + return; + + const unsigned int this_mpi_process( + Utilities::MPI::this_mpi_process(this->grid->mpi_communicator)); + +# ifdef DEBUG + Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank, + ExcMessage("All processes have to call routine with identical rank")); + Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank, + ExcMessage("All processes have to call routine with identical rank")); +# endif + + // root process has to be active in the grid of A + if (this_mpi_process == rank) + { + 
Assert(grid->mpi_process_is_active, ExcInternalError()); + Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m())); + Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n())); + } + // Create 1x1 grid for matrix B. + // The underlying grid for matrix B only contains the process #rank. + // This grid will be used to copy the serial matrix B to the distributed + // matrix using the ScaLAPACK routine pgemr2d. + MPI_Group group_A; + MPI_Comm_group(this->grid->mpi_communicator, &group_A); + const int n = 1; + const std::vector ranks(n, rank); + MPI_Group group_B; + MPI_Group_incl(group_A, n, ranks.data(), &group_B); + MPI_Comm communicator_B; + MPI_Comm_create_group(this->grid->mpi_communicator, + group_B, + 0, + &communicator_B); + int n_proc_rows_B = 1, n_proc_cols_B = 1; + int this_process_row_B = -1, this_process_column_B = -1; + int blacs_context_B = -1; + if (MPI_COMM_NULL != communicator_B) + { + // Initialize BLACS context from the provided communicator + blacs_context_B = Csys2blacs_handle(communicator_B); + const char *order = "Col"; + Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B); + Cblacs_gridinfo(blacs_context_B, + &n_proc_rows_B, + &n_proc_cols_B, + &this_process_row_B, + &this_process_column_B); + Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError()); + // the active process of grid B has to be process #rank of the + // communicator attached to A + Assert(this_mpi_process == rank, ExcInternalError()); + } + const bool mpi_process_is_active_B = + (this_process_row_B >= 0 && this_process_column_B >= 0); + + // create descriptor for matrix B + std::vector descriptor_B(9, -1); + const int first_process_row_B = 0, first_process_col_B = 0; + + if (mpi_process_is_active_B) + { + // Get local sizes: + int n_local_rows_B = numroc_(&n_rows, + &n_rows, + &this_process_row_B, + &first_process_row_B, + &n_proc_rows_B); + int n_local_cols_B = numroc_(&n_columns, + &n_columns, + &this_process_column_B, + &first_process_col_B, + &n_proc_cols_B); + Assert(n_local_rows_B == n_rows, ExcInternalError()); + Assert(n_local_cols_B == n_columns, ExcInternalError()); + (void)n_local_cols_B; + + int lda = std::max(1, n_local_rows_B); + int info = 0; + descinit_(descriptor_B.data(), + &n_rows, + &n_columns, + &n_rows, + &n_columns, + &first_process_row_B, + &first_process_col_B, + &blacs_context_B, + &lda, + &info); + AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info)); + } + if (this->grid->mpi_process_is_active) + { + const int ii = 1; + NumberType *loc_vals_A = + this->values.size() > 0 ? &(this->values[0]) : nullptr; + const NumberType *loc_vals_B = + mpi_process_is_active_B ?
&(B(0, 0)) : nullptr; + + // pgemr2d has to be called only for processes active on grid attached to + // matrix A + pgemr2d(&n_rows, + &n_columns, + loc_vals_B, + &ii, + &ii, + descriptor_B.data(), + loc_vals_A, + &ii, + &ii, + this->descriptor, + &(this->grid->blacs_context)); + } + if (mpi_process_is_active_B) + Cblacs_gridexit(blacs_context_B); + + MPI_Group_free(&group_A); + MPI_Group_free(&group_B); + if (MPI_COMM_NULL != communicator_B) + MPI_Comm_free(&communicator_B); + + state = LAPACKSupport::matrix; +} + + + template unsigned int ScaLAPACKMatrix::global_row(const unsigned int loc_row) const @@ -298,6 +430,137 @@ ScaLAPACKMatrix::global_column(const unsigned int loc_column) const + + +template +void +ScaLAPACKMatrix::copy_to(LAPACKFullMatrix &B, + const unsigned int rank) const +{ + if (n_rows * n_columns == 0) + return; + + const unsigned int this_mpi_process( + Utilities::MPI::this_mpi_process(this->grid->mpi_communicator)); + +# ifdef DEBUG + Assert(Utilities::MPI::max(rank, this->grid->mpi_communicator) == rank, + ExcMessage("All processes have to call routine with identical rank")); + Assert(Utilities::MPI::min(rank, this->grid->mpi_communicator) == rank, + ExcMessage("All processes have to call routine with identical rank")); +# endif + + if (this_mpi_process == rank) + { + // the process which gets the serial copy has to be in the process grid + Assert(this->grid->is_process_active(), ExcInternalError()); + Assert(n_rows == int(B.m()), ExcDimensionMismatch(n_rows, B.m())); + Assert(n_columns == int(B.n()), ExcDimensionMismatch(n_columns, B.n())); + } + + // Create 1x1 grid for matrix B. + // The underlying grid for matrix B only contains the process #rank. + // This grid will be used to copy the distributed matrix to the serial + // matrix B using the ScaLAPACK routine pgemr2d.
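The comment above, like the matching one in copy_from() earlier, describes one MPI idiom: derive from the grid's communicator a communicator that contains only process #rank, so that a 1x1 BLACS grid can be attached to it. The statements that follow implement exactly this with group_A, group_B and communicator_B; as a standalone sketch of just the MPI part (the helper name is made up for illustration):

    #include <mpi.h>

    // Build, out of `parent`, a communicator that contains only `rank`.
    // For every other process the call is a purely local operation and
    // MPI_COMM_NULL is returned.
    MPI_Comm make_single_rank_comm(const MPI_Comm parent, const int rank)
    {
      MPI_Group parent_group, sub_group;
      MPI_Comm_group(parent, &parent_group);
      MPI_Group_incl(parent_group, 1, &rank, &sub_group);

      MPI_Comm sub_comm = MPI_COMM_NULL;
      MPI_Comm_create_group(parent, sub_group, /*tag=*/0, &sub_comm);

      MPI_Group_free(&parent_group);
      MPI_Group_free(&sub_group);
      return sub_comm; // != MPI_COMM_NULL only on process `rank`
    }

The resulting communicator is what the patch hands to Csys2blacs_handle() and Cblacs_gridinit() to obtain the single-process BLACS context on which descriptor_B is defined.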
+ MPI_Group group_A; + MPI_Comm_group(this->grid->mpi_communicator, &group_A); + const int n = 1; + const std::vector ranks(n, rank); + MPI_Group group_B; + MPI_Group_incl(group_A, n, ranks.data(), &group_B); + MPI_Comm communicator_B; + MPI_Comm_create_group(this->grid->mpi_communicator, + group_B, + 0, + &communicator_B); + int n_proc_rows_B = 1, n_proc_cols_B = 1; + int this_process_row_B = -1, this_process_column_B = -1; + int blacs_context_B = -1; + if (MPI_COMM_NULL != communicator_B) + { + // Initialize BLACS context from the provided communicator + blacs_context_B = Csys2blacs_handle(communicator_B); + const char *order = "Col"; + Cblacs_gridinit(&blacs_context_B, order, n_proc_rows_B, n_proc_cols_B); + Cblacs_gridinfo(blacs_context_B, + &n_proc_rows_B, + &n_proc_cols_B, + &this_process_row_B, + &this_process_column_B); + Assert(n_proc_rows_B * n_proc_cols_B == 1, ExcInternalError()); + // the active process of grid B has to be process #rank of the + // communicator attached to A + Assert(this_mpi_process == rank, ExcInternalError()); + } + const bool mpi_process_is_active_B = + (this_process_row_B >= 0 && this_process_column_B >= 0); + + // create descriptor for matrix B + std::vector descriptor_B(9, -1); + const int first_process_row_B = 0, first_process_col_B = 0; + + if (mpi_process_is_active_B) + { + // Get local sizes: + int n_local_rows_B = numroc_(&n_rows, + &n_rows, + &this_process_row_B, + &first_process_row_B, + &n_proc_rows_B); + int n_local_cols_B = numroc_(&n_columns, + &n_columns, + &this_process_column_B, + &first_process_col_B, + &n_proc_cols_B); + Assert(n_local_rows_B == n_rows, ExcInternalError()); + Assert(n_local_cols_B == n_columns, ExcInternalError()); + (void)n_local_cols_B; + + int lda = std::max(1, n_local_rows_B); + int info = 0; + // fill descriptor for matrix B + descinit_(descriptor_B.data(), + &n_rows, + &n_columns, + &n_rows, + &n_columns, + &first_process_row_B, + &first_process_col_B, + &blacs_context_B, + &lda, + &info); + AssertThrow(info == 0, LAPACKSupport::ExcErrorCode("descinit_", info)); + } + // pgemr2d has to be called only for processes active on grid attached to + // matrix A + if (this->grid->mpi_process_is_active) + { + const int ii = 1; + const NumberType *loc_vals_A = + this->values.size() > 0 ? &(this->values[0]) : nullptr; + NumberType *loc_vals_B = mpi_process_is_active_B ? &(B(0, 0)) : nullptr; + + pgemr2d(&n_rows, + &n_columns, + loc_vals_A, + &ii, + &ii, + this->descriptor, + loc_vals_B, + &ii, + &ii, + descriptor_B.data(), + &(this->grid->blacs_context)); + } + if (mpi_process_is_active_B) + Cblacs_gridexit(blacs_context_B); + + MPI_Group_free(&group_A); + MPI_Group_free(&group_B); + if (MPI_COMM_NULL != communicator_B) + MPI_Comm_free(&communicator_B); +} + + + template void ScaLAPACKMatrix::copy_to(FullMatrix &matrix) const @@ -419,7 +682,6 @@ ScaLAPACKMatrix::copy_to( (my_row_A >= 0 && my_row_A < n_grid_rows_A) && (my_column_A >= 0 && my_column_A < n_grid_columns_A); - int n_grid_rows_B, n_grid_columns_B, my_row_B, my_column_B; Cblacs_gridinfo(B.grid->blacs_context, &n_grid_rows_B, diff --git a/tests/scalapack/scalapack_17_a.cc b/tests/scalapack/scalapack_17_a.cc new file mode 100644 index 0000000000..1a82e15b29 --- /dev/null +++ b/tests/scalapack/scalapack_17_a.cc @@ -0,0 +1,130 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 - 2018 by the deal.II authors +// +// This file is part of the deal.II library.
+// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#include "../lapack/create_matrix.h" +#include "../tests.h" + +// test ScaLAPACKMatrix::copy_from(const LAPACKFullMatrix &, const unsigned int rank) + +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + + +template +void +test(const unsigned int block_size_i, const unsigned int block_size_j) +{ + MPI_Comm mpi_communicator(MPI_COMM_WORLD); + const unsigned int n_mpi_processes( + Utilities::MPI::n_mpi_processes(mpi_communicator)); + const unsigned int this_mpi_process( + Utilities::MPI::this_mpi_process(mpi_communicator)); + + ConditionalOStream pcout(std::cout, (this_mpi_process == 0)); + + const unsigned int size = 500; + + // create 2d process grid + std::shared_ptr grid = + std::make_shared( + mpi_communicator, size, size, block_size_i, block_size_i); + ScaLAPACKMatrix scalapack_matrix( + size, size, grid, block_size_i, block_size_i); + + pcout << "2D grid matrix: dim=" << scalapack_matrix.m() << "x" + << scalapack_matrix.n() << "; blocks=" << block_size_i << "x" + << block_size_i << "; grid=" << grid->get_process_grid_rows() << "x" + << grid->get_process_grid_columns() << std::endl + << std::endl; + + const bool process_is_in_grid = grid->is_process_active(); + const int entry = process_is_in_grid ? this_mpi_process : -1; + std::vector valid_process_ranks = + Utilities::MPI::gather(mpi_communicator, entry, 0); + int random_rank = 0; + + if (this_mpi_process == 0) + { + std::vector valid_ranks; + + for (unsigned int i = 0; i < valid_process_ranks.size(); ++i) + if (valid_process_ranks[i] >= 0) + valid_ranks.push_back(valid_process_ranks[i]); + + const int entry = random_value(0, valid_ranks.size() - 1); + random_rank = valid_ranks[entry]; + } + MPI_Bcast(&random_rank, 1, MPI_INT, 0, mpi_communicator); + const unsigned int rank = random_rank; + + // create LAPACKFullMatrix and fill it + LAPACKFullMatrix full; + + if (this_mpi_process == rank) + { + full.reinit(size); + + unsigned int count = 0; + for (unsigned int i = 0; i < size; ++i) + for (unsigned int j = 0; j < size; ++j, ++count) + full(i, j) = count; + } + + scalapack_matrix.copy_from(full, rank); + FullMatrix comparison(size); + scalapack_matrix.copy_to(comparison); + + if (this_mpi_process == rank) + { + NumberType diff = 0; + for (unsigned int i = 0; i < size; ++i) + for (unsigned int j = 0; j < size; ++j) + diff += + (full(i, j) - comparison(i, j)) * (full(i, j) - comparison(i, j)); + diff = sqrt(diff); + + AssertThrow(diff < 1e-12, ExcInternalError()); + } +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization( + argc, argv, numbers::invalid_unsigned_int); + + const std::vector blocks_i = {{16, 32, 64}}; + const std::vector blocks_j = {{16, 32, 64}}; + + for (const auto &s : blocks_i) + for (const auto &b : blocks_j) + test(s, b); + + for (const auto &s : blocks_i) + for (const auto &b : blocks_j) + test(s, b); +} diff --git a/tests/scalapack/scalapack_17_a.mpirun=1.output b/tests/scalapack/scalapack_17_a.mpirun=1.output new file mode 100644 index 0000000000..b483cc9d3e ---
/dev/null +++ b/tests/scalapack/scalapack_17_a.mpirun=1.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + diff --git a/tests/scalapack/scalapack_17_a.mpirun=11.output b/tests/scalapack/scalapack_17_a.mpirun=11.output new file mode 100644 index 0000000000..73f104ff40 --- /dev/null +++ b/tests/scalapack/scalapack_17_a.mpirun=11.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + diff --git a/tests/scalapack/scalapack_17_a.mpirun=9.output b/tests/scalapack/scalapack_17_a.mpirun=9.output new file mode 100644 index 0000000000..73f104ff40 --- /dev/null +++ b/tests/scalapack/scalapack_17_a.mpirun=9.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + diff --git 
a/tests/scalapack/scalapack_17_b.cc b/tests/scalapack/scalapack_17_b.cc new file mode 100644 index 0000000000..28aa246cfa --- /dev/null +++ b/tests/scalapack/scalapack_17_b.cc @@ -0,0 +1,126 @@ +// --------------------------------------------------------------------- +// +// Copyright (C) 2017 - 2018 by the deal.II authors +// +// This file is part of the deal.II library. +// +// The deal.II library is free software; you can use it, redistribute +// it, and/or modify it under the terms of the GNU Lesser General +// Public License as published by the Free Software Foundation; either +// version 2.1 of the License, or (at your option) any later version. +// The full text of the license can be found in the file LICENSE at +// the top level of the deal.II distribution. +// +// --------------------------------------------------------------------- + +#include "../lapack/create_matrix.h" +#include "../tests.h" + +// test ScaLAPACKMatrix::copy_to (LAPACKFullMatrix &matrix) + +#include +#include +#include +#include +#include + +#include + +#include +#include + + +template +void +test(const unsigned int block_size_i, const unsigned int block_size_j) +{ + MPI_Comm mpi_communicator(MPI_COMM_WORLD); + const unsigned int n_mpi_processes( + Utilities::MPI::n_mpi_processes(mpi_communicator)); + const unsigned int this_mpi_process( + Utilities::MPI::this_mpi_process(mpi_communicator)); + + ConditionalOStream pcout(std::cout, (this_mpi_process == 0)); + + const unsigned int size = 500; + // create FullMatrix and fill it + FullMatrix full(size); + + unsigned int count = 0; + for (unsigned int i = 0; i < size; ++i) + for (unsigned int j = 0; j < size; ++j, ++count) + full(i, j) = count; + + // create 2d process grid + std::shared_ptr grid = + std::make_shared( + mpi_communicator, size, size, block_size_i, block_size_i); + ScaLAPACKMatrix scalapack_matrix( + size, size, grid, block_size_i, block_size_i); + + pcout << "2D grid matrix: dim=" << scalapack_matrix.m() << "x" + << scalapack_matrix.n() << "; blocks=" << block_size_i << "x" + << block_size_i << "; grid=" << grid->get_process_grid_rows() << "x" + << grid->get_process_grid_columns() << std::endl + << std::endl; + + scalapack_matrix = full; + LAPACKFullMatrix comparison; + + const bool process_is_in_grid = grid->is_process_active(); + const int entry = process_is_in_grid ? 
this_mpi_process : -1; + std::vector valid_process_ranks = + Utilities::MPI::gather(mpi_communicator, entry, 0); + int random_rank = 0; + + if (this_mpi_process == 0) + { + std::vector valid_ranks; + + for (unsigned int i = 0; i < valid_process_ranks.size(); ++i) + if (valid_process_ranks[i] >= 0) + valid_ranks.push_back(valid_process_ranks[i]); + + const int entry = random_value(0, valid_ranks.size() - 1); + random_rank = valid_ranks[entry]; + } + MPI_Bcast(&random_rank, 1, MPI_INT, 0, mpi_communicator); + const unsigned int rank = random_rank; + + if (this_mpi_process == rank) + comparison.reinit(size); + + scalapack_matrix.copy_to(comparison, rank); + + if (this_mpi_process == rank) + { + NumberType diff = 0; + for (unsigned int i = 0; i < size; ++i) + for (unsigned int j = 0; j < size; ++j) + diff += + (full(i, j) - comparison(i, j)) * (full(i, j) - comparison(i, j)); + diff = sqrt(diff); + + AssertThrow(diff < 1e-12, ExcInternalError()); + } +} + + + +int +main(int argc, char **argv) +{ + Utilities::MPI::MPI_InitFinalize mpi_initialization( + argc, argv, numbers::invalid_unsigned_int); + + const std::vector blocks_i = {{16, 32, 64}}; + const std::vector blocks_j = {{16, 32, 64}}; + + for (const auto &s : blocks_i) + for (const auto &b : blocks_j) + test(s, b); + + for (const auto &s : blocks_i) + for (const auto &b : blocks_j) + test(s, b); +} diff --git a/tests/scalapack/scalapack_17_b.mpirun=1.output b/tests/scalapack/scalapack_17_b.mpirun=1.output new file mode 100644 index 0000000000..b483cc9d3e --- /dev/null +++ b/tests/scalapack/scalapack_17_b.mpirun=1.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=16x16; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=32x32; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + +2D grid matrix: dim=500x500; blocks=64x64; grid=1x1 + diff --git a/tests/scalapack/scalapack_17_b.mpirun=11.output b/tests/scalapack/scalapack_17_b.mpirun=11.output new file mode 100644 index 0000000000..73f104ff40 --- /dev/null +++ b/tests/scalapack/scalapack_17_b.mpirun=11.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; 
grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + diff --git a/tests/scalapack/scalapack_17_b.mpirun=9.output b/tests/scalapack/scalapack_17_b.mpirun=9.output new file mode 100644 index 0000000000..73f104ff40 --- /dev/null +++ b/tests/scalapack/scalapack_17_b.mpirun=9.output @@ -0,0 +1,36 @@ +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=16x16; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=32x32; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 + +2D grid matrix: dim=500x500; blocks=64x64; grid=3x3 +
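To close, the two new member functions can be combined into a single round trip; the following is a sketch only (not part of the patch), with an arbitrary matrix size and block size, header paths as assumed for deal.II 9.x, and rank 0 chosen on the assumption that it is always part of the process grid:

    #include <deal.II/base/exceptions.h>
    #include <deal.II/base/mpi.h>
    #include <deal.II/base/process_grid.h>
    #include <deal.II/lac/lapack_full_matrix.h>
    #include <deal.II/lac/scalapack.h>

    #include <memory>

    using namespace dealii;

    int main(int argc, char **argv)
    {
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
      const MPI_Comm comm = MPI_COMM_WORLD;
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

      const unsigned int size = 100, block = 16, rank = 0;

      // 2d process grid and a distributed matrix living on it
      const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
        comm, size, size, block, block);
      ScaLAPACKMatrix<double> dist(size, size, grid, block, block);

      // the serial matrices are only allocated on process `rank`
      LAPACKFullMatrix<double> in, out;
      if (my_rank == rank)
        {
          in.reinit(size);
          out.reinit(size);
          for (unsigned int i = 0; i < size; ++i)
            for (unsigned int j = 0; j < size; ++j)
              in(i, j) = i + j;
        }

      dist.copy_from(in, rank); // scatter: serial `in` on `rank` -> distributed
      dist.copy_to(out, rank);  // gather: distributed -> serial `out` on `rank`

      if (my_rank == rank)
        for (unsigned int i = 0; i < size; ++i)
          for (unsigned int j = 0; j < size; ++j)
            AssertThrow(in(i, j) == out(i, j), ExcInternalError());

      return 0;
    }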