From 8c204b85e679536487c3228df736aee542dab821 Mon Sep 17 00:00:00 2001
From: Benjamin Brands
Date: Wed, 10 Jan 2018 11:47:52 +0100
Subject: [PATCH] add ScaLAPACKMatrix::copy_to(ScaLAPACKMatrix &dest)

---
 include/deal.II/lac/scalapack.h               |   9 ++
 source/lac/scalapack.cc                       |  79 +++++++++++++
 tests/scalapack/scalapack_08.cc               | 110 ++++++++++++++++++
 tests/scalapack/scalapack_08.mpirun=1.output  |  72 ++++++++++++
 tests/scalapack/scalapack_08.mpirun=10.output |  72 ++++++++++++
 tests/scalapack/scalapack_08.mpirun=5.output  |  72 ++++++++++++
 6 files changed, 414 insertions(+)
 create mode 100644 tests/scalapack/scalapack_08.cc
 create mode 100644 tests/scalapack/scalapack_08.mpirun=1.output
 create mode 100644 tests/scalapack/scalapack_08.mpirun=10.output
 create mode 100644 tests/scalapack/scalapack_08.mpirun=5.output

diff --git a/include/deal.II/lac/scalapack.h b/include/deal.II/lac/scalapack.h
index 2a1002e944..7076508778 100644
--- a/include/deal.II/lac/scalapack.h
+++ b/include/deal.II/lac/scalapack.h
@@ -158,6 +158,15 @@ public:
    */
   void copy_to (FullMatrix<NumberType> &matrix) const;
 
+
+  /**
+   * Copy the contents of the distributed matrix into a differently distributed matrix @p dest.
+   * The function also works for matrices with different process grids
+   * or block-cyclic distributions.
+   */
+  void copy_to (ScaLAPACKMatrix<NumberType> &dest) const;
+
+
   /**
    * Compute the Cholesky factorization of the matrix using ScaLAPACK
    * function pXpotrf. The result of the factorization is stored in this object.
diff --git a/source/lac/scalapack.cc b/source/lac/scalapack.cc
index 87f1d776f0..2ceb0dfd3e 100644
--- a/source/lac/scalapack.cc
+++ b/source/lac/scalapack.cc
@@ -205,6 +205,85 @@ ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
 
 
+
+
+template <typename NumberType>
+void
+ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
+{
+  Assert (n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
+  Assert (n_columns == dest.n_columns, ExcDimensionMismatch(n_columns, dest.n_columns));
+
+  if (this->grid->mpi_process_is_active)
+    AssertThrow (this->descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
+  if (dest.grid->mpi_process_is_active)
+    AssertThrow (dest.descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
+
+  /*
+   * Inter-process communication is only needed for different process grids
+   * or block-cyclic distributions; if the distributed matrices use the same
+   * process grid and block sizes, local copying is enough.
+   */
+  if ( (this->grid != dest.grid) || (row_block_size != dest.row_block_size) || (column_block_size != dest.column_block_size) )
+    {
+      /*
+       * Get the MPI communicator that is the union of the source and destination MPI communicators.
+       */
+      int ierr = 0;
+      MPI_Group group_source, group_dest, group_union;
+      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_union(group_source, group_dest, &group_union);
+      AssertThrowMPI(ierr);
+      MPI_Comm mpi_communicator_union;
+      // To create a communicator representing the union of the source and destination
+      // MPI communicators we need a communicator containing all desired processes --> use MPI_COMM_WORLD.
+      ierr = MPI_Comm_create_group(MPI_COMM_WORLD, group_union, 5, &mpi_communicator_union);
+      AssertThrowMPI(ierr);
+
+      /*
+       * The routine pgemr2d requires a BLACS context covering at least the union of the process grids
+       * described by the BLACS contexts of the source and destination matrix.
+       */
+      int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
+      const char *order = "Col";
+      int union_n_process_rows = Utilities::MPI::n_mpi_processes(mpi_communicator_union);
+      int union_n_process_columns = 1;
+      Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);
+
+      const NumberType *loc_vals_source = NULL;
+      NumberType *loc_vals_dest = NULL;
+
+      if (this->grid->mpi_process_is_active && (this->values.size()>0))
+        {
+          AssertThrow(this->values.size()>0,dealii::ExcMessage("source: process is active but local matrix empty"));
+          loc_vals_source = &this->values[0];
+        }
+      if (dest.grid->mpi_process_is_active && (dest.values.size()>0))
+        {
+          AssertThrow(dest.values.size()>0,dealii::ExcMessage("destination: process is active but local matrix empty"));
+          loc_vals_dest = &dest.values[0];
+        }
+      pgemr2d(&n_rows, &n_columns, loc_vals_source, &submatrix_row, &submatrix_column, descriptor,
+              loc_vals_dest, &dest.submatrix_row, &dest.submatrix_column, dest.descriptor,
+              &union_blacs_context);
+
+      Cblacs_gridexit(union_blacs_context);
+
+      if (mpi_communicator_union != MPI_COMM_NULL)
+        MPI_Comm_free(&mpi_communicator_union);
+      MPI_Group_free(&group_source);
+      MPI_Group_free(&group_dest);
+      MPI_Group_free(&group_union);
+    }
+  else
+    //process is active in the process grid
+    if (this->grid->mpi_process_is_active)
+      dest.values = this->values;
+}
+
 
 template <typename NumberType>
 void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
 {
diff --git a/tests/scalapack/scalapack_08.cc b/tests/scalapack/scalapack_08.cc
new file mode 100644
index 0000000000..2dfb1b5e23
--- /dev/null
+++ b/tests/scalapack/scalapack_08.cc
@@ -0,0 +1,110 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test copying of distributed ScaLAPACKMatrices using ScaLAPACK routine p_gemr2d
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+  const unsigned int size = 500;
+  //create FullMatrix and fill it
+  FullMatrix<NumberType> full(size);
+  unsigned int count=0;
+  for (unsigned int i=0; i<size; ++i)
+    for (unsigned int j=0; j<size; ++j)
+      full(i,j) = count++;
+
+  //create 2d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_2d = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size_i,block_size_i);
+  //create 1d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_1d = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,n_mpi_processes,1);
+  //create process grid containing one process
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_single = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,1,1);
+
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_2d (size, size, grid_2d, block_size_i, block_size_i);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_1d (size, size, grid_1d, block_size_j, block_size_j);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_single (size, size, grid_single, block_size_i, block_size_j);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_source (size, size, grid_single, block_size_j, block_size_i);
+
+  pcout << "2D grid matrix: dim=" << scalapack_matrix_2d.m() << "x" << scalapack_matrix_2d.n()
+        << "; blocks=" << block_size_i << "x" << block_size_i
+        << "; grid=" << grid_2d->get_process_grid_rows() << "x" << grid_2d->get_process_grid_columns() << std::endl;
+
+  pcout << "1D grid matrix: " << scalapack_matrix_1d.m() << "x" << scalapack_matrix_1d.n()
+        << "; blocks=" << block_size_j << "x" << block_size_j
+        << "; grid=" << grid_1d->get_process_grid_rows() << "x" << grid_1d->get_process_grid_columns() << std::endl;
+
+  pcout << "single process matrix: " << scalapack_matrix_single.m() << "x" << scalapack_matrix_single.n()
+        << "; blocks=" << block_size_i << "x" << block_size_j
+        << "; grid=" << grid_single->get_process_grid_rows() << "x" << grid_single->get_process_grid_columns() << std::endl << std::endl;
+
+  scalapack_matrix_source = full;
+
+  scalapack_matrix_source.copy_to(scalapack_matrix_2d);
+  scalapack_matrix_source.copy_to(scalapack_matrix_1d);
+  scalapack_matrix_source.copy_to(scalapack_matrix_single);
+
+  FullMatrix<NumberType> test_2d(size), test_1d(size), test_one(size);
+  scalapack_matrix_2d.copy_to(test_2d);
+  scalapack_matrix_1d.copy_to(test_1d);
+  scalapack_matrix_single.copy_to(test_one);
+  test_2d.add (-1,full);
+  test_1d.add (-1,full);
+  test_one.add (-1,full);
+
+  AssertThrow(test_2d.frobenius_norm() < 1e-12,ExcInternalError());
+  AssertThrow(test_1d.frobenius_norm() < 1e-12,ExcInternalError());
+  AssertThrow(test_one.frobenius_norm() < 1e-12,ExcInternalError());
+}
+
+
+
+int main (int argc,char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+  const std::vector<unsigned int> blocks_i = {{16,32,64}};
+  const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<double>(s,b);
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<float>(s,b);
+}
diff --git a/tests/scalapack/scalapack_08.mpirun=1.output b/tests/scalapack/scalapack_08.mpirun=1.output
new file mode 100644
index 0000000000..5028735bb4
--- /dev/null
+++ b/tests/scalapack/scalapack_08.mpirun=1.output
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=16x16; grid=1x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=32x32; grid=1x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+1D grid matrix: 500x500; blocks=64x64; grid=1x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
diff --git a/tests/scalapack/scalapack_08.mpirun=10.output b/tests/scalapack/scalapack_08.mpirun=10.output
new file mode 100644
index 0000000000..11365188de
--- /dev/null
+++ b/tests/scalapack/scalapack_08.mpirun=10.output
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=16x16; grid=10x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=32x32; grid=10x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+1D grid matrix: 500x500; blocks=64x64; grid=10x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
diff --git a/tests/scalapack/scalapack_08.mpirun=5.output b/tests/scalapack/scalapack_08.mpirun=5.output
new file mode 100644
index 0000000000..9358ffbe32
--- /dev/null
+++ b/tests/scalapack/scalapack_08.mpirun=5.output
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=16x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=16x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=32x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=32x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=16x16; grid=5x1
+single process matrix: 500x500; blocks=64x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=32x32; grid=5x1
+single process matrix: 500x500; blocks=64x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=2x2
+1D grid matrix: 500x500; blocks=64x64; grid=5x1
+single process matrix: 500x500; blocks=64x64; grid=1x1
+
-- 
2.39.5
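
Usage sketch (not part of the patch): the program below redistributes a ScaLAPACKMatrix from a
single-process grid onto a 2D process grid through the new copy_to(ScaLAPACKMatrix &dest)
overload, mirroring what tests/scalapack/scalapack_08.cc exercises. It assumes deal.II is
configured with MPI and ScaLAPACK and that this patch is applied; the matrix size, block size
and grid layout are illustrative choices, not values prescribed by the patch.

// Minimal sketch: redistribute a matrix between two block-cyclic layouts.
#include <deal.II/base/process_grid.h>
#include <deal.II/base/utilities.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

using namespace dealii;

int main(int argc, char **argv)
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  MPI_Comm mpi_communicator = MPI_COMM_WORLD;

  const unsigned int size       = 200; // global matrix size (illustrative)
  const unsigned int block_size = 32;  // block-cyclic block size (illustrative)

  // Source layout: all data on a single process.
  const auto grid_single =
    std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator, 1, 1);
  // Destination layout: a 2D process grid chosen for this matrix/block size.
  const auto grid_2d =
    std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,
                                                  size, size,
                                                  block_size, block_size);

  ScaLAPACKMatrix<double> source(size, size, grid_single, block_size, block_size);
  ScaLAPACKMatrix<double> dest(size, size, grid_2d, block_size, block_size);

  // Fill the source through a serial FullMatrix, as the test does.
  FullMatrix<double> full(size);
  for (unsigned int i = 0; i < size; ++i)
    for (unsigned int j = 0; j < size; ++j)
      full(i, j) = i + j;
  source = full;

  // Redistribute between the two layouts; differing process grids and
  // block sizes are handled internally via pgemr2d.
  source.copy_to(dest);

  // Round-trip check: copy back into a FullMatrix and compare.
  FullMatrix<double> check(size);
  dest.copy_to(check);
  check.add(-1., full);
  AssertThrow(check.frobenius_norm() < 1e-12, ExcInternalError());
}

Note that the implementation builds the union communicator for pgemr2d through MPI_COMM_WORLD,
so both process grids are expected to consist of processes belonging to MPI_COMM_WORLD.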