* Copy a submatrix (subset) of the distributed matrix A to a submatrix of the distributed matrix @p B.
*
* - The global row and column index of the first element of the submatrix A is provided by @p offset_A
- * with row index=<code>offset_A.first</code> and column index=<code>offset_A.second</code>
+ * with row index=<code>offset_A.first</code> and column index=<code>offset_A.second</code>.
*
- * - The global row and column index of the first element of the submatrix B is provided by @p offset_B.
- * with row index=<code>offset_B.first</code> and column index=<code>offset_B.second</code>
+ * - The global row and column index of the first element of the submatrix B is provided by @p offset_B
+ * with row index=<code>offset_B.first</code> and column index=<code>offset_B.second</code>.
*
* - The dimension of the submatrix to be copied is given by @p submatrix_size
- * with number of rows=<code>submatrix_size.first</code> and number of columns=<code>submatrix_size.second</code>
- * .
+ * with number of rows=<code>submatrix_size.first</code> and number of columns=<code>submatrix_size.second</code>.
+ *
*
* If it is necessary to copy complete matrices with an identical block-cyclic distribution,
* use copy_to(ScaLAPACKMatrix<NumberType> &dest) with only one argument to avoid communication.
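*
* A minimal usage sketch (the process grid <code>grid</code> and the concrete
* sizes are placeholders):
* @code
* // copy the 100x100 block of A whose first element sits at global index
* // (49,99) into B such that its first element lands at global index (4,7)
* ScaLAPACKMatrix<double> A(500, 500, grid, 32, 32);
* ScaLAPACKMatrix<double> B(104, 107, grid, 32, 32);
* const std::pair<unsigned int, unsigned int> offset_A(49, 99);
* const std::pair<unsigned int, unsigned int> offset_B(4, 7);
* const std::pair<unsigned int, unsigned int> submatrix_size(100, 100);
* A.copy_to(B, offset_A, offset_B, submatrix_size);
* @endcode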
/**
* Stores the distributed matrix in @p filename using HDF5.
+ * If deal.II was built without HDF5,
+ * a call to this function will cause an exception to be thrown.
*
- * If HDF5 was build with MPI, parallel I/O is used to save the matrix.
+ * If HDF5 was built with MPI, parallel I/O is used to save the matrix.
* Otherwise, just one process will do the output. This means that
* internally the distributed matrix is copied to one process, which
* does the output. Therefore, the matrix has to fit into the memory
/**
* Loads the distributed matrix from file @p filename using HDF5.
+ * If deal.II was built without HDF5,
+ * a call to this function will cause an exception to be thrown.
*
* The matrix must have the same dimensions as the matrix stored in the file.
*
* $\min \Vert B - A*X\Vert$.
*
* Upon exit the rows 0 to N-1 of $B$ contain the least squares solution vectors. The residual sum of squares
- * for each column is given by the sum of squares of elements N to M-1 in that column
+ * for each column is given by the sum of squares of elements N to M-1 in that column.
*
* - 2. If <code>transpose==false</code> and $M < N$: find minimum norm solutions of underdetermined systems
* $A * X = B$.
*
- * Upon exit the columns of $B$ contain the minimum norm solution vectors
+ * Upon exit the columns of $B$ contain the minimum norm solution vectors.
*
* - 3. If <code>transpose==true</code> and $M \geq N$: find minimum norm solutions of underdetermined system
* $ A^\top X = B$.
* $\min \Vert B - A^\top X\Vert$.
*
* Upon exit the rows 0 to M-1 contain the least squares solution vectors. The residual sum of squares
- * for each column is given by the sum of squares of elements M to N-1 in that column
- * .
+ * for each column is given by the sum of squares of elements M to N-1 in that column.
+ *
* If <code>transpose==false</code> then $B \in \mathbb{R}^{M \times N_{\rm RHS}}$,
* otherwise $B \in \mathbb{R}^{N \times N_{\rm RHS}}$.
* The matrices $A$ and $B$ must have an identical block cyclic distribution for rows and columns.
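*
* Assuming the solver is exposed as a member function
* <code>least_squares(B, transpose)</code> (the name is not shown in this
* excerpt), a sketch of case 1 could look as follows; <code>m</code>,
* <code>n</code>, <code>n_rhs</code> and <code>grid</code> are placeholders:
* @code
* // overdetermined system, m >= n, with n_rhs right-hand sides
* ScaLAPACKMatrix<double> A(m, n, grid, 32, 32);
* ScaLAPACKMatrix<double> B(m, n_rhs, grid, 32, 32);
* // ... fill A and B ...
* A.least_squares(B, false);  // transpose == false
* // rows 0 to n-1 of B now hold the least squares solution vectors
* @endcode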
#include "../lapack/create_matrix.h"
// test copying of distributed ScaLAPACKMatrices using ScaLAPACK routine p_gemr2d
-// test copying submatrices of distributed ScaLAPACKMatrices using ScaLAPACK routine p_gemr2d
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
AssertThrow(test_2d.frobenius_norm() < 1e-12,ExcInternalError());
AssertThrow(test_1d.frobenius_norm() < 1e-12,ExcInternalError());
AssertThrow(test_one.frobenius_norm() < 1e-12,ExcInternalError());
-
- //copying submatrices
- unsigned int sub_size=100;
- std::pair<unsigned int,unsigned int> offset_A = std::make_pair(49,99);
- std::pair<unsigned int,unsigned int> offset_B = std::make_pair(4,7);
- std::pair<unsigned int,unsigned int> submatrix_size = std::make_pair(sub_size,sub_size);
- ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size+offset_B.first,sub_size+offset_B.second,grid_2d,block_size_j,block_size_i);
- scalapack_matrix_2d.copy_to(scalapack_matrix_dest,offset_A,offset_B,submatrix_size);
- FullMatrix<NumberType> dest (sub_size+offset_B.first,sub_size+offset_B.second);
- scalapack_matrix_dest.copy_to(dest);
- for (unsigned int i=0; i<sub_size; ++i)
- for (unsigned int j=0; j<sub_size; ++j)
- dest(i+offset_B.first,j+offset_B.second) -= full(offset_A.first+i,offset_A.second+j);
- AssertThrow(dest.frobenius_norm() < 1e-12,ExcInternalError());
}
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test copying submatrices of distributed ScaLAPACKMatrices using ScaLAPACK routine p_gemr2d
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int size = 500;
+ //create FullMatrix and fill it
+ FullMatrix<NumberType> full(size);
+ unsigned int count=0;
+ for (unsigned int i=0; i<size; ++i)
+ for (unsigned int j=0; j<size; ++j, ++count)
+ full(i,j) = count;
+
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size_i,block_size_i);
+
+ ScaLAPACKMatrix<NumberType> scalapack_matrix (size, size, grid, block_size_i, block_size_i);
+
+ pcout << "2D grid matrix: dim=" << scalapack_matrix.m() << "x" << scalapack_matrix.n()
+ << "; blocks=" << block_size_i << "x" << block_size_i
+ << "; grid=" << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ scalapack_matrix = full;
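+
+  // copy the sub_size x sub_size block of scalapack_matrix whose first element
+  // sits at global index (49,99) into scalapack_matrix_dest at offset (4,7)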
+ unsigned int sub_size=100;
+ std::pair<unsigned int,unsigned int> offset_A = std::make_pair(49,99);
+ std::pair<unsigned int,unsigned int> offset_B = std::make_pair(4,7);
+ std::pair<unsigned int,unsigned int> submatrix_size = std::make_pair(sub_size,sub_size);
+ ScaLAPACKMatrix<NumberType> scalapack_matrix_dest(sub_size+offset_B.first,sub_size+offset_B.second,grid,block_size_j,block_size_i);
+ scalapack_matrix.copy_to(scalapack_matrix_dest,offset_A,offset_B,submatrix_size);
+ FullMatrix<NumberType> dest (sub_size+offset_B.first,sub_size+offset_B.second);
+ scalapack_matrix_dest.copy_to(dest);
+
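+  // the copied block must coincide with the corresponding block of the
+  // original full matrix, so the difference has (nearly) zero Frobenius norm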
+ for (unsigned int i=0; i<sub_size; ++i)
+ for (unsigned int j=0; j<sub_size; ++j)
+ dest(i+offset_B.first,j+offset_B.second) -= full(offset_A.first+i,offset_A.second+j);
+ AssertThrow(dest.frobenius_norm() < 1e-12,ExcInternalError());
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> blocks_i = {{16,32,64}};
+ const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<float>(s,b);
+
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<double>(s,b);
+}
--- /dev/null
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=1x1
+
--- /dev/null
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
--- /dev/null
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=16x16; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=32x32; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+
+2D grid matrix: dim=500x500; blocks=64x64; grid=3x3
+