const std::pair<unsigned int,unsigned int> &submatrix_size) const;
+ /**
+ * Matrix-addition:
+ *
+ * <i>A = a A + b op(B)</i>
+ *
+ * if(transpose_B) <i>op(B) = B<sup>T</sup></i>
+ *
+ * else <i>op(B) = B</i>
+ *
+ * The matrices <tt>A</tt> and <tt>B</tt> must have the same process grid.
+ *
+ * The following alignment conditions have to be fulfilled:
+ *
+ * | transpose_B | Block Sizes |
+ * | :---------: | :--------------------------: |
+ * | false | MB_A = MB_B <br> NB_A = NB_B |
+ * | true | MB_A = NB_B <br> NB_A = MB_B |
+ */
+ void add(const ScaLAPACKMatrix<NumberType> &B,
+ const NumberType a=0.,
+ const NumberType b=1.,
+ const bool transpose_B=false);
+
+ /**
+ * Matrix-addition:
+ *
+ * <i>A += b B</i>
+ *
+ * The matrices <tt>A</tt> and <tt>B</tt> must have the same process grid.
+ *
+ * The following alignment conditions have to be fulfilled: MB_A = MB_B and NB_A = NB_B.
+ */
+ void add(const NumberType b,
+ const ScaLAPACKMatrix<NumberType> &B);
+
+ /**
+ * Matrix-addition:
+ *
+ * <i>A += b B<sup>T</sup></i>
+ *
+ * The matrices <tt>A</tt> and <tt>B</tt> must have the same process grid.
+ *
+ * The following alignment conditions have to be fulfilled: MB_A = NB_B and NB_A = MB_B.
+ */
+ void Tadd(const NumberType b,
+ const ScaLAPACKMatrix<NumberType> &B);
+
/**
* Stores the distributed matrix in @p filename using HDF5.
* In case that deal.II was built without HDF5
* a call to this function will cause an exception to be thrown.
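
For orientation, a minimal usage sketch of the new interface (not part of the patch; it assumes an MPI run with at least four processes and uses the ProcessGrid and ScaLAPACKMatrix constructors that also appear in the test below; filling the matrices is elided):

  #include <deal.II/base/mpi.h>
  #include <deal.II/lac/scalapack.h>
  #include <memory>

  int main(int argc, char **argv)
  {
    using namespace dealii;
    Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

    // 2x2 process grid shared by both matrices; 32x32 blocks satisfy the
    // alignment conditions MB_A = NB_B and NB_A = MB_B for the transposed case.
    auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);
    ScaLAPACKMatrix<double> A(400, 500, grid, 32, 32);
    ScaLAPACKMatrix<double> B(500, 400, grid, 32, 32);

    // ... fill A and B ...

    A.add(B, 1., -0.7, true);   // A = A - 0.7 * B^T
    A.Tadd(2., B);              // A += 2 * B^T
  }
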
float *work,
int *lwork,
int *info);
+
+ /**
+ * Perform matrix sum:
+ * C := beta*C + alpha*op(A),
+ * where op(A) denotes either op(A)=A or op(A)=A^T
+ */
+ void pdgeadd_(const char *transa,
+ const int *m,
+ const int *n,
+ const double *alpha,
+ const double *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const double *beta,
+ double *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC);
+ void psgeadd_(const char *transa,
+ const int *m,
+ const int *n,
+ const float *alpha,
+ const float *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const float *beta,
+ float *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC);
+
+ /**
+ * Routine to transpose a matrix:
+ * C = beta C + alpha A^T
+ */
+ void pdtran_(const int *m,
+ const int *n,
+ const double *alpha,
+ const double *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const double *beta,
+ double *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC);
+ void pstran_(const int *m,
+ const int *n,
+ const float *alpha,
+ const float *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const float *beta,
+ float *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC);
}
psgels_(trans,m,n,nrhs,A,ia,ja,desca,B,ib,jb,descb,work,lwork,info);
}
+
+template <typename number>
+inline void pgeadd(const char *transa,
+ const int *m,
+ const int *n,
+ const number *alpha,
+ const number *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const number *beta,
+ number *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ Assert (false, dealii::ExcNotImplemented());
+}
+
+inline void pgeadd(const char *transa,
+ const int *m,
+ const int *n,
+ const double *alpha,
+ const double *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const double *beta,
+ double *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ pdgeadd_(transa,m,n,alpha,A,IA,JA,DESCA,beta,C,IC,JC,DESCC);
+}
+
+inline void pgeadd(const char *transa,
+ const int *m,
+ const int *n,
+ const float *alpha,
+ const float *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const float *beta,
+ float *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ psgeadd_(transa,m,n,alpha,A,IA,JA,DESCA,beta,C,IC,JC,DESCC);
+}
+
+
+template <typename number>
+inline void ptran(const int *m,
+ const int *n,
+ const number *alpha,
+ const number *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const number *beta,
+ number *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ Assert (false, dealii::ExcNotImplemented());
+}
+
+inline void ptran(const int *m,
+ const int *n,
+ const double *alpha,
+ const double *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const double *beta,
+ double *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ pdtran_(m,n,alpha,A,IA,JA,DESCA,beta,C,IC,JC,DESCC);
+}
+
+inline void ptran(const int *m,
+ const int *n,
+ const float *alpha,
+ const float *A,
+ const int *IA,
+ const int *JA,
+ const int *DESCA,
+ const float *beta,
+ float *C,
+ const int *IC,
+ const int *JC,
+ const int *DESCC)
+{
+ pstran_(m,n,alpha,A,IA,JA,DESCA,beta,C,IC,JC,DESCC);
+}
+
#endif // DEAL_II_WITH_SCALAPACK
#endif // dealii_scalapack_templates_h
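
The pgeadd/ptran wrappers above follow the number-type dispatch pattern used throughout this file: a generic template that only raises ExcNotImplemented, plus non-template overloads for double and float that forward to the Fortran symbols. Because overload resolution prefers a non-template function over the template, double and float calls never reach the fallback. A self-contained sketch of the idiom (the name scale is illustrative only, not part of the patch):

  #include <cassert>
  #include <iostream>

  // Generic fallback: chosen only for number types without a dedicated overload.
  template <typename number>
  void scale(number *, const int *, const number *)
  {
    assert(false && "not implemented for this number type");
  }

  // Non-template overloads win for exact matches; they stand in for the
  // pd.../ps... ScaLAPACK routines.
  void scale(double *data, const int *n, const double *alpha)
  {
    for (int i = 0; i < *n; ++i)
      data[i] *= *alpha;
  }

  void scale(float *data, const int *n, const float *alpha)
  {
    for (int i = 0; i < *n; ++i)
      data[i] *= *alpha;
  }

  int main()
  {
    double v[3] = {1., 2., 3.};
    const int n = 3;
    const double alpha = 2.;
    scale(v, &n, &alpha);                                         // double overload
    std::cout << v[0] << ' ' << v[1] << ' ' << v[2] << std::endl; // prints 2 4 6
  }
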
+
+
+
+template <typename NumberType>
+void ScaLAPACKMatrix<NumberType>::add(const ScaLAPACKMatrix<NumberType> &B,
+ const NumberType alpha,
+ const NumberType beta,
+ const bool transpose_B)
+{
+ if (transpose_B)
+ {
+ Assert (n_rows == B.n_columns, ExcDimensionMismatch(n_rows,B.n_columns));
+ Assert (n_columns == B.n_rows, ExcDimensionMismatch(n_columns,B.n_rows));
+ Assert(column_block_size==B.row_block_size,ExcDimensionMismatch(column_block_size,B.row_block_size));
+ Assert(row_block_size==B.column_block_size,ExcDimensionMismatch(row_block_size,B.column_block_size));
+ }
+ else
+ {
+ Assert (n_rows == B.n_rows, ExcDimensionMismatch(n_rows,B.n_rows));
+ Assert (n_columns == B.n_columns, ExcDimensionMismatch(n_columns,B.n_columns));
+ Assert(column_block_size==B.column_block_size,ExcDimensionMismatch(column_block_size,B.column_block_size));
+ Assert(row_block_size==B.row_block_size,ExcDimensionMismatch(row_block_size,B.row_block_size));
+ }
+ Assert(this->grid==B.grid,ExcMessage("The matrices A and B need to have the same process grid"));
+
+ if (this->grid->mpi_process_is_active)
+ {
+ char trans_b = transpose_B ? 'T' : 'N';
+ NumberType *A_loc = (this->values.size()>0) ? &this->values[0] : nullptr;
+ const NumberType *B_loc = (B.values.size()>0) ? &B.values[0] : nullptr;
+
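+ // pXgeadd computes sub(C) := beta*sub(C) + alpha*op(sub(A)); here this
+ // matrix takes the role of C and B takes the role of A, so beta of this
+ // function is passed as the PBLAS alpha and alpha as the PBLAS beta.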
+ pgeadd(&trans_b,&n_rows,&n_columns,
+ &beta,B_loc,&B.submatrix_row,&B.submatrix_column,B.descriptor,
+ &alpha,A_loc,&submatrix_row,&submatrix_column,descriptor);
+ }
+}
+
+
+
+template <typename NumberType>
+void ScaLAPACKMatrix<NumberType>::add(const NumberType a,
+ const ScaLAPACKMatrix<NumberType> &B)
+{
+ add(B,1,a,false);
+}
+
+
+
+template <typename NumberType>
+void ScaLAPACKMatrix<NumberType>::Tadd(const NumberType a,
+ const ScaLAPACKMatrix<NumberType> &B)
+{
+ add(B,1,a,true);
+}
+
+
+
template <typename NumberType>
void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
{
Assert (n_columns == n_rows,
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test addition of distributed ScaLAPACKMatrices
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process == 0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{400,500}};
+
+ // test A = alpha A + beta B
+ {
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ FullMatrix<NumberType> full_B(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+
+ // conditions for block sizes: MB_A = MB_B and NB_A = NB_B
+ const unsigned int mb_A=block_size_i, nb_A=block_size_j;
+ const unsigned int mb_B=mb_A, nb_B=nb_A;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+
+ const NumberType alpha = 1.2, beta = -0.7;
+
+ full_A *= alpha;
+ full_A.add(beta,full_B);
+
+ scalapack_A.add(scalapack_B,alpha,beta,false);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
+ pcout << " computing A = alpha A + beta B with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ }
+ // test A = alpha A + beta B^T
+ {
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ FullMatrix<NumberType> full_B(sizes[1],sizes[0]);
+ create_random(full_A);
+ create_random(full_B);
+
+ // conditions for block sizes: MB_A = NB_B and NB_A = MB_B
+ const unsigned int mb_A=block_size_i, nb_A=block_size_j;
+ const unsigned int mb_B=nb_A, nb_B=mb_A;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+
+ const NumberType alpha = 1.2, beta = -0.7;
+
+ full_A *= alpha;
+ FullMatrix<NumberType> full_B_t(sizes[0],sizes[1]);
+ full_B_t.copy_transposed(full_B);
+ full_A.add(beta,full_B_t);
+
+ scalapack_A.add(scalapack_B,alpha,beta,true);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
+ pcout << " computing A = alpha A + beta B^T with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ }
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> blocks_i = {{16,32,64}};
+ const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<double>(s,b);
+}
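
The test compares the distributed and the serial result only through their Frobenius norms. A stricter variant, sketched here under the same assumptions (FullMatrix::add and frobenius_norm as already used above; the tolerance is an illustrative choice), would bound the norm of the difference directly:

  // inside test(), right after scalapack_A.copy_to(tmp_full_A):
  FullMatrix<NumberType> difference(tmp_full_A);
  difference.add(-1., full_A);   // difference = tmp_full_A - full_A
  AssertThrow(difference.frobenius_norm() <=
                1e-10 * full_A.frobenius_norm(),
              ExcInternalError());
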
--- /dev/null
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.390201 & 211.390201 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 212.0358711 & 212.0358711 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.7492073 & 210.7492073 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3884882 & 211.3884882 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.2484978 & 211.2484978 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.5479046 & 211.5479046 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5775407 & 211.5775407 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3749142 & 211.3749142 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3602887 & 211.3602887 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.390201 & 211.390201 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 212.0358711 & 212.0358711 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.7492073 & 210.7492073 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3884882 & 211.3884882 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.2484978 & 211.2484978 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.5479046 & 211.5479046 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5775407 & 211.5775407 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3749142 & 211.3749142 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3602887 & 211.3602887 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.390201 & 211.390201 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 212.0358711 & 212.0358711 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.7492073 & 210.7492073 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3884882 & 211.3884882 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.2484978 & 211.2484978 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.5479046 & 211.5479046 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5775407 & 211.5775407 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.3749142 & 211.3749142 for d
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3602887 & 211.3602887 for d
+
+