template <typename NumberType>
void scale_columns(ScaLAPACKMatrix<NumberType> &matrix,
- const ArrayView<const NumberType> &factors,
- const bool grid_mpi_process_is_active)
+ const ArrayView<const NumberType> &factors)
{
Assert(matrix.n()==factors.size(),ExcDimensionMismatch(matrix.n(),factors.size()));
- if (grid_mpi_process_is_active)
- for (unsigned int i=0; i<matrix.local_n(); ++i)
- {
- const NumberType s = factors[matrix.global_column(i)];
+ for (unsigned int i=0; i<matrix.local_n(); ++i)
+ {
+ const NumberType s = factors[matrix.global_column(i)];
- for (unsigned int j=0; j<matrix.local_m(); ++j)
- matrix.local_el(j,i) *= s;
- }
+ for (unsigned int j=0; j<matrix.local_m(); ++j)
+ matrix.local_el(j,i) *= s;
+ }
}
template <typename NumberType>
void scale_rows(ScaLAPACKMatrix<NumberType> &matrix,
- const ArrayView<const NumberType> &factors,
- const bool grid_mpi_process_is_active)
+ const ArrayView<const NumberType> &factors)
{
Assert(matrix.m()==factors.size(),ExcDimensionMismatch(matrix.m(),factors.size()));
- if (grid_mpi_process_is_active)
- for (unsigned int i=0; i<matrix.local_m(); ++i)
- {
- const NumberType s = factors[matrix.global_row(i)];
+ for (unsigned int i=0; i<matrix.local_m(); ++i)
+ {
+ const NumberType s = factors[matrix.global_row(i)];
- for (unsigned int j=0; j<matrix.local_n(); ++j)
- matrix.local_el(i,j) *= s;
- }
+ for (unsigned int j=0; j<matrix.local_n(); ++j)
+ matrix.local_el(i,j) *= s;
+ }
}
}
template <class InputVector>
void ScaLAPACKMatrix<NumberType>::scale_columns(const InputVector &factors)
{
- internal::scale_columns(*this, make_array_view(factors),
- this->grid->mpi_process_is_active);
+ if (this->grid->mpi_process_is_active)
+ internal::scale_columns(*this, make_array_view(factors));
}
template <class InputVector>
void ScaLAPACKMatrix<NumberType>::scale_rows(const InputVector &factors)
{
- internal::scale_rows(*this, make_array_view(factors),
- this->grid->mpi_process_is_active);
+ if (this->grid->mpi_process_is_active)
+ internal::scale_rows(*this, make_array_view(factors));
}
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 - 2018 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE at
-// the top level of the deal.II distribution.
-//
-// ---------------------------------------------------------------------
-
-#include "../tests.h"
-#include "../lapack/create_matrix.h"
-
-// test multiplication of distributed ScaLAPACKMatrices
-
-#include <deal.II/base/logstream.h>
-#include <deal.II/base/utilities.h>
-#include <deal.II/base/conditional_ostream.h>
-#include <deal.II/base/timer.h>
-#include <deal.II/base/multithread_info.h>
-
-#include <deal.II/lac/scalapack.h>
-
-#include <fstream>
-#include <iostream>
-#include <typeinfo>
-
-
-template <typename NumberType>
-void test()
-{
- MPI_Comm mpi_communicator(MPI_COMM_WORLD);
- const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
- const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
-
- std::cout << std::setprecision(10);
- ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
-
- const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
- const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
- //create 2d process grid
- std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
- pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
-
- const std::vector<unsigned int> sizes = {{300,400,500}};
-
- // test C = b A*B + c C
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[2]);
- FullMatrix<NumberType> full_B(sizes[2],sizes[1]);
- FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
- create_random(full_A);
- create_random(full_B);
- create_random(full_C);
-
- // conditions for block sizes: mb_A=mb_C, nb_B=nb_C, nb_A=mb_B
- const unsigned int mb_A=32, nb_A=64, nb_B=16;
- const unsigned int mb_C=mb_A, mb_B=nb_A;
- const unsigned int nb_C=nb_B;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
- scalapack_A = full_A;
- scalapack_B = full_B;
- scalapack_C = full_C;
-
- const NumberType b=1.4, c=0.1;
-
- full_A *= b;
- full_C *= c;
- full_A.mmult(full_C,full_B,true);
-
- scalapack_A.mult(b,scalapack_B,c,scalapack_C,false,false);
- FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
- scalapack_C.copy_to(tmp_full_C);
-
- pcout << " computing C = b A * B + c C with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
- << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
- << full_C.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- // test C = alpha A^T*B + beta C
- {
- FullMatrix<NumberType> full_A(sizes[2],sizes[0]);
- FullMatrix<NumberType> full_B(sizes[2],sizes[1]);
- FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
- create_random(full_A);
- create_random(full_B);
- create_random(full_C);
-
- // conditions for block sizes: nb_A=mb_C, nb_B=nb_C, mb_A=mb_B
- const unsigned int mb_A=32, nb_A=64, nb_B=16;
- const unsigned int mb_B=mb_A, mb_C=nb_A;
- const unsigned int nb_C=nb_B;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
- scalapack_A = full_A;
- scalapack_B = full_B;
- scalapack_C = full_C;
-
- const NumberType b=1.4, c=0.1;
-
- full_A *= b;
- full_C *= c;
- full_A.Tmmult(full_C,full_B,true);
-
- scalapack_A.mult(b,scalapack_B,c,scalapack_C,true,false);
- FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
- scalapack_C.copy_to(tmp_full_C);
-
- pcout << " computing C = b A^T * B + c C with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
- << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
- << full_C.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- // test C = alpha A * B^T + beta C
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[2]);
- FullMatrix<NumberType> full_B(sizes[1],sizes[2]);
- FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
- create_random(full_A);
- create_random(full_B);
- create_random(full_C);
-
- // conditions for block sizes: mb_A=mb_C, mb_B=nb_C, nb_A=nb_B
- const unsigned int mb_A=32, nb_A=64, mb_B=16;
- const unsigned int nb_B=nb_A, mb_C=mb_A;
- const unsigned int nb_C=mb_B;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
- scalapack_A = full_A;
- scalapack_B = full_B;
- scalapack_C = full_C;
-
- const NumberType b=1.4, c=0.1;
-
- full_A *= b;
- full_C *= c;
- full_A.mTmult(full_C,full_B,true);
-
- scalapack_A.mult(b,scalapack_B,c,scalapack_C,false,true);
- FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
- scalapack_C.copy_to(tmp_full_C);
-
- pcout << " computing C = b A * B^T + c C with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
- << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
- << full_C.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- // test C = alpha A^T * B^T + beta C
- {
- FullMatrix<NumberType> full_A(sizes[2],sizes[0]);
- FullMatrix<NumberType> full_B(sizes[1],sizes[2]);
- FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
- create_random(full_A);
- create_random(full_B);
- create_random(full_C);
-
- // conditions for block sizes: nb_A=mb_C, mb_B=nb_C, mb_A=nb_B
- const unsigned int mb_A=32, nb_A=64, mb_B=16;
- const unsigned int nb_B=mb_A, mb_C=nb_A;
- const unsigned int nb_C=mb_B;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
- scalapack_A = full_A;
- scalapack_B = full_B;
- scalapack_C = full_C;
-
- const NumberType b=1.4, c=0.1;
-
- full_A *= b;
- full_C *= c;
- full_A.TmTmult(full_C,full_B,true);
-
- scalapack_A.mult(b,scalapack_B,c,scalapack_C,true,true);
- FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
- scalapack_C.copy_to(tmp_full_C);
-
- pcout << " computing C = b A^T * B^T + c C with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
- << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
- << full_C.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- pcout << std::endl;
-}
-
-
-
-int main (int argc,char **argv)
-{
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- test<double>();
-}
+++ /dev/null
-2D process grid: 1x1
-
- computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
- norms: 60787.8449 & 60787.8449 for d
-
- computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
- norms: 60655.07764 & 60655.07764 for d
-
- computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
- norms: 60707.53954 & 60707.53954 for d
-
- computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
- norms: 60757.09659 & 60757.09659 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
- norms: 60787.8449 & 60787.8449 for d
-
- computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
- norms: 60655.07764 & 60655.07764 for d
-
- computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
- norms: 60707.53954 & 60707.53954 for d
-
- computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
- norms: 60757.09659 & 60757.09659 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
- norms: 60787.8449 & 60787.8449 for d
-
- computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
- norms: 60655.07764 & 60655.07764 for d
-
- computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
- norms: 60707.53954 & 60707.53954 for d
-
- computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
- norms: 60757.09659 & 60757.09659 for d
-
-
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test multiplication of distributed ScaLAPACKMatrices: C = b A*B + c C
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test()
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{300,400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[0],sizes[2]);
+ FullMatrix<NumberType> full_B(sizes[2],sizes[1]);
+ FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+ create_random(full_C);
+
+ // conditions for block sizes: mb_A=mb_C, nb_B=nb_C, nb_A=mb_B
+ const unsigned int mb_A=32, nb_A=64, nb_B=16;
+ const unsigned int mb_C=mb_A, mb_B=nb_A;
+ const unsigned int nb_C=nb_B;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+ scalapack_C = full_C;
+
+ const NumberType b=1.4, c=0.1;
+
+ full_A *= b;
+ full_C *= c;
+ full_A.mmult(full_C,full_B,true);
+
+ scalapack_A.mult(b,scalapack_B,c,scalapack_C,false,false);
+ FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
+ scalapack_C.copy_to(tmp_full_C);
+
+ pcout << " computing C = b A * B + c C with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
+ << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
+ << full_C.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ test<double>();
+}
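The block-size comment in the test above states the compatibility conditions used for C = b A*B + c C: the row block of A matches that of C, the column block of B matches that of C, and the column block of A matches the row block of B. A minimal standalone sketch of that check (hypothetical helper, not used by the test):

// Hypothetical helper expressing the block-size conditions used above
// for C = A * B: mb_A == mb_C, nb_B == nb_C, nb_A == mb_B.
inline bool
mult_blocks_compatible(const unsigned int mb_A, const unsigned int nb_A,
                       const unsigned int mb_B, const unsigned int nb_B,
                       const unsigned int mb_C, const unsigned int nb_C)
{
  return (mb_A == mb_C) && (nb_B == nb_C) && (nb_A == mb_B);
}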
--- /dev/null
+2D process grid: 1x1
+
+ computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
+ norms: 60787.8449 & 60787.8449 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
+ norms: 60787.8449 & 60787.8449 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A * B + c C with A in R^(300x500), B in R^(500x400) and C in R^(300x400)
+ norms: 60787.8449 & 60787.8449 for d
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test multiplication of distributed ScaLAPACKMatrices: C = b A^T*B + c C

+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test()
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{300,400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[2],sizes[0]);
+ FullMatrix<NumberType> full_B(sizes[2],sizes[1]);
+ FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+ create_random(full_C);
+
+ // conditions for block sizes: nb_A=mb_C, nb_B=nb_C, mb_A=mb_B
+ const unsigned int mb_A=32, nb_A=64, nb_B=16;
+ const unsigned int mb_B=mb_A, mb_C=nb_A;
+ const unsigned int nb_C=nb_B;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+ scalapack_C = full_C;
+
+ const NumberType b=1.4, c=0.1;
+
+ full_A *= b;
+ full_C *= c;
+ full_A.Tmmult(full_C,full_B,true);
+
+ scalapack_A.mult(b,scalapack_B,c,scalapack_C,true,false);
+ FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
+ scalapack_C.copy_to(tmp_full_C);
+
+ pcout << " computing C = b A^T * B + c C with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
+ << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
+ << full_C.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ test<double>();
+}
--- /dev/null
+2D process grid: 1x1
+
+ computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
+ norms: 60784.47026 & 60784.47026 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
+ norms: 60784.47026 & 60784.47026 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A^T * B + c C with A in R^(500x300), B in R^(500x400) and C in R^(300x400)
+ norms: 60784.47026 & 60784.47026 for d
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test multiplication of distributed ScaLAPACKMatrices: C = b A * B^T + c C
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test()
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{300,400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[0],sizes[2]);
+ FullMatrix<NumberType> full_B(sizes[1],sizes[2]);
+ FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+ create_random(full_C);
+
+ // conditions for block sizes: mb_A=mb_C, mb_B=nb_C, nb_A=nb_B
+ const unsigned int mb_A=32, nb_A=64, mb_B=16;
+ const unsigned int nb_B=nb_A, mb_C=mb_A;
+ const unsigned int nb_C=mb_B;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+ scalapack_C = full_C;
+
+ const NumberType b=1.4, c=0.1;
+
+ full_A *= b;
+ full_C *= c;
+ full_A.mTmult(full_C,full_B,true);
+
+ scalapack_A.mult(b,scalapack_B,c,scalapack_C,false,true);
+ FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
+ scalapack_C.copy_to(tmp_full_C);
+
+ pcout << " computing C = b A * B^T + c C with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
+ << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
+ << full_C.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ test<double>();
+}
--- /dev/null
+2D process grid: 1x1
+
+ computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.44758 & 60781.44758 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.44758 & 60781.44758 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A * B^T + c C with A in R^(300x500), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.44758 & 60781.44758 for d
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test multiplication of distributed ScaLAPACKMatrices: C = b A^T * B^T + c C
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test()
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{300,400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[2],sizes[0]);
+ FullMatrix<NumberType> full_B(sizes[1],sizes[2]);
+ FullMatrix<NumberType> full_C(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+ create_random(full_C);
+
+ // conditions for block sizes: nb_A=mb_C, mb_B=nb_C, mb_A=nb_B
+ const unsigned int mb_A=32, nb_A=64, mb_B=16;
+ const unsigned int nb_B=mb_A, mb_C=nb_A;
+ const unsigned int nb_C=mb_B;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ ScaLAPACKMatrix<NumberType> scalapack_C (full_C.m(),full_C.n(),grid,mb_C,nb_C);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+ scalapack_C = full_C;
+
+ const NumberType b=1.4, c=0.1;
+
+ full_A *= b;
+ full_C *= c;
+ full_A.TmTmult(full_C,full_B,true);
+
+ scalapack_A.mult(b,scalapack_B,c,scalapack_C,true,true);
+ FullMatrix<NumberType> tmp_full_C(full_C.m(),full_C.n());
+ scalapack_C.copy_to(tmp_full_C);
+
+ pcout << " computing C = b A^T * B^T + c C with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << "),"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ") and"
+ << " C in R^(" << scalapack_C.m() << "x" << scalapack_C.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_C.frobenius_norm()<< " & "
+ << full_C.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ test<double>();
+}
--- /dev/null
+2D process grid: 1x1
+
+ computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.20992 & 60781.20992 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.20992 & 60781.20992 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing C = b A^T * B^T + c C with A in R^(500x300), B in R^(400x500) and C in R^(300x400)
+ norms: 60781.20992 & 60781.20992 for d
+
+
+++ /dev/null
-// ---------------------------------------------------------------------
-//
-// Copyright (C) 2017 - 2018 by the deal.II authors
-//
-// This file is part of the deal.II library.
-//
-// The deal.II library is free software; you can use it, redistribute
-// it, and/or modify it under the terms of the GNU Lesser General
-// Public License as published by the Free Software Foundation; either
-// version 2.1 of the License, or (at your option) any later version.
-// The full text of the license can be found in the file LICENSE at
-// the top level of the deal.II distribution.
-//
-// ---------------------------------------------------------------------
-
-#include "../tests.h"
-#include "../lapack/create_matrix.h"
-
-// test addition of distributed ScaLAPACKMatrices
-
-#include <deal.II/base/logstream.h>
-#include <deal.II/base/utilities.h>
-#include <deal.II/base/conditional_ostream.h>
-#include <deal.II/base/timer.h>
-#include <deal.II/base/multithread_info.h>
-
-#include <deal.II/lac/scalapack.h>
-
-#include <fstream>
-#include <iostream>
-#include <typeinfo>
-
-
-template <typename NumberType>
-void test(const unsigned int block_size_i, const unsigned int block_size_j)
-{
- MPI_Comm mpi_communicator(MPI_COMM_WORLD);
- const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
- const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
-
- std::cout << std::setprecision(10);
- ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
-
- const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
- const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
- //create 2d process grid
- std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
- pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
-
- const std::vector<unsigned int> sizes = {{400,500}};
-
- // test A = alpha A + beta B
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
- FullMatrix<NumberType> full_B(sizes[0],sizes[1]);
- create_random(full_A);
- create_random(full_B);
-
- // conditions for block sizes: mb_A=mb_C, nb_B=nb_C, nb_A=mb_B
- const unsigned int mb_A=block_size_i, nb_A=block_size_j;
- const unsigned int mb_B=mb_A, nb_B=nb_A;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- scalapack_A = full_A;
- scalapack_B = full_B;
-
- const NumberType alpha = 1.2, beta = -0.7;
-
- full_A *= alpha;
- full_A.add(beta,full_B);
-
- scalapack_A.add(scalapack_B,alpha,beta,false);
- FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
- scalapack_A.copy_to(tmp_full_A);
-
- pcout << " computing A = alpha A + beta B with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
- << full_A.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- // test A = alpha A + beta B^T
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
- FullMatrix<NumberType> full_B(sizes[1],sizes[0]);
- create_random(full_A);
- create_random(full_B);
-
- // conditions for block sizes: mb_A=mb_C, nb_B=nb_C, nb_A=mb_B
- const unsigned int mb_A=block_size_i, nb_A=block_size_j;
- const unsigned int mb_B=nb_A, nb_B=mb_A;
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
- ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
- scalapack_A = full_A;
- scalapack_B = full_B;
-
- const NumberType alpha = 1.2, beta = -0.7;
-
- full_A *= alpha;
- FullMatrix<NumberType> full_B_t(sizes[0],sizes[1]);
- full_B_t.copy_transposed(full_B);
- full_A.add(beta,full_B_t);
-
- scalapack_A.add(scalapack_B,alpha,beta,true);
- FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
- scalapack_A.copy_to(tmp_full_A);
-
- pcout << " computing A = alpha A + beta B^T with"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
- << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
- << full_A.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
- pcout << std::endl;
-}
-
-
-
-int main (int argc,char **argv)
-{
- Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
-
- const std::vector<unsigned int> blocks_i = {{16,32,64}};
- const std::vector<unsigned int> blocks_j = {{16,32,64}};
-
- for (const auto &s : blocks_i)
- for (const auto &b : blocks_j)
- test<double>(s,b);
-}
+++ /dev/null
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 210.8890477 & 210.8890477 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3218642 & 211.3218642 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.0415011 & 211.0415011 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.963856 & 210.963856 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1814485 & 211.1814485 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.4254302 & 211.4254302 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1982034 & 211.1982034 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.7211238 & 211.7211238 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.7341662 & 211.7341662 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.390201 & 211.390201 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 212.0358711 & 212.0358711 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.7492073 & 210.7492073 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3884882 & 211.3884882 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.2484978 & 211.2484978 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.5479046 & 211.5479046 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.5775407 & 211.5775407 for d
-
-
-2D process grid: 1x1
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3749142 & 211.3749142 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3602887 & 211.3602887 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 210.8890477 & 210.8890477 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3218642 & 211.3218642 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.0415011 & 211.0415011 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.963856 & 210.963856 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1814485 & 211.1814485 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.4254302 & 211.4254302 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1982034 & 211.1982034 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.7211238 & 211.7211238 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.7341662 & 211.7341662 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.390201 & 211.390201 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 212.0358711 & 212.0358711 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.7492073 & 210.7492073 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3884882 & 211.3884882 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.2484978 & 211.2484978 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.5479046 & 211.5479046 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.5775407 & 211.5775407 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3749142 & 211.3749142 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3602887 & 211.3602887 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 210.8890477 & 210.8890477 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3218642 & 211.3218642 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.0415011 & 211.0415011 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.963856 & 210.963856 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1814485 & 211.1814485 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.4254302 & 211.4254302 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.1982034 & 211.1982034 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.7211238 & 211.7211238 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.7341662 & 211.7341662 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.390201 & 211.390201 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 212.0358711 & 212.0358711 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 210.7492073 & 210.7492073 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3884882 & 211.3884882 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.2484978 & 211.2484978 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.5479046 & 211.5479046 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.5775407 & 211.5775407 for d
-
-
-2D process grid: 3x3
-
- computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
- norms: 211.3749142 & 211.3749142 for d
-
- computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
- norms: 211.3602887 & 211.3602887 for d
-
-
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test addition of distributed ScaLAPACKMatrices: A = alpha A + beta B
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ FullMatrix<NumberType> full_B(sizes[0],sizes[1]);
+ create_random(full_A);
+ create_random(full_B);
+
+ // conditions for block sizes: mb_B=mb_A, nb_B=nb_A
+ const unsigned int mb_A=block_size_i, nb_A=block_size_j;
+ const unsigned int mb_B=mb_A, nb_B=nb_A;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+
+ const NumberType alpha = 1.2, beta = -0.7;
+
+ full_A *= alpha;
+ full_A.add(beta,full_B);
+
+ scalapack_A.add(scalapack_B,alpha,beta,false);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
+ pcout << " computing A = alpha A + beta B with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> blocks_i = {{16,32,64}};
+ const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<double>(s,b);
+}
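In this addition test the blocks of B are chosen to match those of A element-wise (mb_B = mb_A, nb_B = nb_A); the transposed variant further below swaps them (mb_B = nb_A, nb_B = mb_A). A minimal sketch of that condition (hypothetical helper, not used by the tests):

// Hypothetical helper: block-size condition for A = alpha*A + beta*op(B),
// with op(B) = B (transpose == false) or B^T (transpose == true).
inline bool
add_blocks_compatible(const unsigned int mb_A, const unsigned int nb_A,
                      const unsigned int mb_B, const unsigned int nb_B,
                      const bool transpose)
{
  return transpose ? ((mb_B == nb_A) && (nb_B == mb_A))
                   : ((mb_B == mb_A) && (nb_B == nb_A));
}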
--- /dev/null
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.279256 & 211.279256 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1717475 & 211.1717475 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.548357 & 211.548357 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7897812 & 211.7897812 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.279256 & 211.279256 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1717475 & 211.1717475 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.548357 & 211.548357 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7897812 & 211.7897812 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 210.8890477 & 210.8890477 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.279256 & 211.279256 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.0415011 & 211.0415011 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1717475 & 211.1717475 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1814485 & 211.1814485 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.548357 & 211.548357 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.1982034 & 211.1982034 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7897812 & 211.7897812 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B with A in R^(400x500) and B in R^(400x500)
+ norms: 211.7341662 & 211.7341662 for d
+
+
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test addition of distributed ScaLAPACKMatrices: A = alpha A + beta B^T
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ //create 2d process grid
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,proc_rows,proc_columns);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ const std::vector<unsigned int> sizes = {{400,500}};
+
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ FullMatrix<NumberType> full_B(sizes[1],sizes[0]);
+ create_random(full_A);
+ create_random(full_B);
+
+ // conditions for block sizes: mb_B=nb_A, nb_B=mb_A
+ const unsigned int mb_A=block_size_i, nb_A=block_size_j;
+ const unsigned int mb_B=nb_A, nb_B=mb_A;
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,mb_A,nb_A);
+ ScaLAPACKMatrix<NumberType> scalapack_B (full_B.m(),full_B.n(),grid,mb_B,nb_B);
+ scalapack_A = full_A;
+ scalapack_B = full_B;
+
+ const NumberType alpha = 1.2, beta = -0.7;
+
+ full_A *= alpha;
+ FullMatrix<NumberType> full_B_t(sizes[0],sizes[1]);
+ full_B_t.copy_transposed(full_B);
+ full_A.add(beta,full_B_t);
+
+ scalapack_A.add(scalapack_B,alpha,beta,true);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
+ pcout << " computing A = alpha A + beta B^T with"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ") and"
+ << " B in R^(" << scalapack_B.m() << "x" << scalapack_B.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> blocks_i = {{16,32,64}};
+ const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<double>(s,b);
+}
--- /dev/null
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.163862 & 211.163862 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.184002 & 211.184002 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5403408 & 211.5403408 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.278436 & 211.278436 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 1x1
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7476249 & 211.7476249 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.163862 & 211.163862 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.184002 & 211.184002 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5403408 & 211.5403408 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.278436 & 211.278436 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7476249 & 211.7476249 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.163862 & 211.163862 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.3218642 & 211.3218642 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.184002 & 211.184002 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 210.963856 & 210.963856 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.5403408 & 211.5403408 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.4254302 & 211.4254302 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.278436 & 211.278436 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7211238 & 211.7211238 for d
+
+
+2D process grid: 3x3
+
+ computing A = alpha A + beta B^T with A in R^(400x500) and B in R^(500x400)
+ norms: 211.7476249 & 211.7476249 for d
+
+
+++ /dev/null
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3654.285795 & 3654.285795 for d
-
- Column scaling for A in R^(400x500)
- norms: 4090.694598 & 4090.694598 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3654.738726 & 3654.738726 for d
-
- Column scaling for A in R^(400x500)
- norms: 4085.273405 & 4085.273405 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3660.42714 & 3660.42714 for d
-
- Column scaling for A in R^(400x500)
- norms: 4082.735765 & 4082.735765 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3654.388891 & 3654.388891 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.61645 & 4084.61645 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3655.261484 & 3655.261484 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.19574 & 4084.19574 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3655.547554 & 3655.547554 for d
-
- Column scaling for A in R^(400x500)
- norms: 4088.843423 & 4088.843423 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3654.096322 & 3654.096322 for d
-
- Column scaling for A in R^(400x500)
- norms: 4083.807486 & 4083.807486 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3662.0357 & 3662.0357 for d
-
- Column scaling for A in R^(400x500)
- norms: 4089.328538 & 4089.328538 for d
-
-
-2D process grid: 1x1
-
- Row scaling for A in R^(400x500)
- norms: 3663.288472 & 3663.288472 for d
-
- Column scaling for A in R^(400x500)
- norms: 4081.172517 & 4081.172517 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.285795 & 3654.285795 for d
-
- Column scaling for A in R^(400x500)
- norms: 4090.694598 & 4090.694598 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.738726 & 3654.738726 for d
-
- Column scaling for A in R^(400x500)
- norms: 4085.273405 & 4085.273405 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3660.42714 & 3660.42714 for d
-
- Column scaling for A in R^(400x500)
- norms: 4082.735765 & 4082.735765 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.388891 & 3654.388891 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.61645 & 4084.61645 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3655.261484 & 3655.261484 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.19574 & 4084.19574 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3655.547554 & 3655.547554 for d
-
- Column scaling for A in R^(400x500)
- norms: 4088.843423 & 4088.843423 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.096322 & 3654.096322 for d
-
- Column scaling for A in R^(400x500)
- norms: 4083.807486 & 4083.807486 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3662.0357 & 3662.0357 for d
-
- Column scaling for A in R^(400x500)
- norms: 4089.328538 & 4089.328538 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3663.288472 & 3663.288472 for d
-
- Column scaling for A in R^(400x500)
- norms: 4081.172517 & 4081.172517 for d
-
-
+++ /dev/null
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.285795 & 3654.285795 for d
-
- Column scaling for A in R^(400x500)
- norms: 4090.694598 & 4090.694598 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.738726 & 3654.738726 for d
-
- Column scaling for A in R^(400x500)
- norms: 4085.273405 & 4085.273405 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3660.42714 & 3660.42714 for d
-
- Column scaling for A in R^(400x500)
- norms: 4082.735765 & 4082.735765 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.388891 & 3654.388891 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.61645 & 4084.61645 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3655.261484 & 3655.261484 for d
-
- Column scaling for A in R^(400x500)
- norms: 4084.19574 & 4084.19574 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3655.547554 & 3655.547554 for d
-
- Column scaling for A in R^(400x500)
- norms: 4088.843423 & 4088.843423 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3654.096322 & 3654.096322 for d
-
- Column scaling for A in R^(400x500)
- norms: 4083.807486 & 4083.807486 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3662.0357 & 3662.0357 for d
-
- Column scaling for A in R^(400x500)
- norms: 4089.328538 & 4089.328538 for d
-
-
-2D process grid: 3x3
-
- Row scaling for A in R^(400x500)
- norms: 3663.288472 & 3663.288472 for d
-
- Column scaling for A in R^(400x500)
- norms: 4081.172517 & 4081.172517 for d
-
-
--- /dev/null
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 - 2018 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test scaling of rows of distributed ScaLAPACKMatrices
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+#include <deal.II/base/array_view.h>
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+#include <typeinfo>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+ MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+ const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+ const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
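+ // only the first MPI process writes to pcout, so the expected output is produced exactly once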
+ std::cout << std::setprecision(10);
+ ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+ const unsigned int proc_rows = std::floor(std::sqrt(n_mpi_processes));
+ const unsigned int proc_columns = std::floor(n_mpi_processes/proc_rows);
+ // create 2d process grid
+ const std::vector<unsigned int> sizes = {{400,500}};
+ std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,sizes[0],sizes[1],block_size_i,block_size_i);
+ pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
+
+ // test scaling of rows
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ create_random(full_A);
+
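+ // scale row i by sqrt(i+1) so that every row receives a distinct, nonzero factor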
+ std::vector<NumberType> scaling_factors(full_A.m());
+ for (unsigned int i=0; i<scaling_factors.size(); ++i)
+ scaling_factors[i] = std::sqrt(i+1);
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,block_size_i,block_size_j);
+ scalapack_A = full_A;
+ const ArrayView<NumberType> view_rows(scaling_factors);
+ scalapack_A.scale_rows(view_rows);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
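+ // apply the same row scaling to the serial reference matrix; the Frobenius norms printed below have to agree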
+ for (unsigned int i=0; i<full_A.m(); ++i)
+ for (unsigned int j=0; j<full_A.n(); ++j)
+ full_A(i,j) *= scaling_factors[i];
+
+ pcout << " Row scaling for"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
+ pcout << std::endl;
+}
+
+
+
+int main (int argc,char **argv)
+{
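+ // passing numbers::invalid_unsigned_int lets MPI_InitFinalize choose the number of threads automatically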
+ Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+ const std::vector<unsigned int> blocks_i = {{16,32,64}};
+ const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
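+ // run the test for all combinations of row and column block sizes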
+ for (const auto &s : blocks_i)
+ for (const auto &b : blocks_j)
+ test<double>(s,b);
+}
--- /dev/null
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.285795 & 3654.285795 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.063508 & 3660.063508 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.738726 & 3654.738726 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.280104 & 3654.280104 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.42714 & 3660.42714 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3651.8011 & 3651.8011 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.388891 & 3654.388891 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.950825 & 3655.950825 for d
+
+
+2D process grid: 1x1
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.261484 & 3655.261484 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.285795 & 3654.285795 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.063508 & 3660.063508 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.738726 & 3654.738726 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.280104 & 3654.280104 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.42714 & 3660.42714 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3651.8011 & 3651.8011 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.388891 & 3654.388891 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.950825 & 3655.950825 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.261484 & 3655.261484 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.285795 & 3654.285795 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.063508 & 3660.063508 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.738726 & 3654.738726 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.280104 & 3654.280104 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3660.42714 & 3660.42714 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3651.8011 & 3651.8011 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3654.388891 & 3654.388891 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.950825 & 3655.950825 for d
+
+
+2D process grid: 3x3
+
+ Row scaling for A in R^(400x500)
+ norms: 3655.261484 & 3655.261484 for d
+
+
#include "../tests.h"
#include "../lapack/create_matrix.h"
-// test scaling of rows and columns of distributed ScaLAPACKMatrices
+// test scaling of columns of distributed ScaLAPACKMatrices
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
std::shared_ptr<Utilities::MPI::ProcessGrid> grid = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,sizes[0],sizes[1],block_size_i,block_size_i);
pcout << "2D process grid: " << grid->get_process_grid_rows() << "x" << grid->get_process_grid_columns() << std::endl << std::endl;
- // test scaling of rows
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
- create_random(full_A);
-
- std::vector<NumberType> scaling_factors(full_A.m());
- for (unsigned int i=0; i<scaling_factors.size(); ++i)
- scaling_factors[i] = std::sqrt(i+1);
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,block_size_i,block_size_j);
- scalapack_A = full_A;
- const ArrayView<NumberType> view_rows(scaling_factors);
- scalapack_A.scale_rows(view_rows);
- FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
- scalapack_A.copy_to(tmp_full_A);
-
- for (unsigned int i=0; i<full_A.m(); ++i)
- for (unsigned int j=0; j<full_A.n(); ++j)
- full_A(i,j) *= scaling_factors[i];
-
- pcout << " Row scaling for"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
- << full_A.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
// test scaling of columns
- {
- FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
- create_random(full_A);
-
- std::vector<NumberType> scaling_factors(full_A.n());
- for (unsigned int i=0; i<scaling_factors.size(); ++i)
- scaling_factors[i] = std::sqrt(i+1);
-
- ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,block_size_i,block_size_j);
- scalapack_A = full_A;
- const ArrayView<NumberType> view_columns(scaling_factors);
- scalapack_A.scale_columns(view_columns);
- FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
- scalapack_A.copy_to(tmp_full_A);
-
- for (unsigned int i=0; i<full_A.m(); ++i)
- for (unsigned int j=0; j<full_A.n(); ++j)
- full_A(i,j) *= scaling_factors[j];
-
- pcout << " Column scaling for"
- << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ")" << std::endl;
- pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
- << full_A.frobenius_norm() << " for "
- << typeid(NumberType).name() << std::endl << std::endl;
- }
+ FullMatrix<NumberType> full_A(sizes[0],sizes[1]);
+ create_random(full_A);
+
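+ // scale column i by sqrt(i+1) so that every column receives a distinct, nonzero factor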
+ std::vector<NumberType> scaling_factors(full_A.n());
+ for (unsigned int i=0; i<scaling_factors.size(); ++i)
+ scaling_factors[i] = std::sqrt(i+1);
+
+ ScaLAPACKMatrix<NumberType> scalapack_A (full_A.m(),full_A.n(),grid,block_size_i,block_size_j);
+ scalapack_A = full_A;
+ const ArrayView<NumberType> view_columns(scaling_factors);
+ scalapack_A.scale_columns(view_columns);
+ FullMatrix<NumberType> tmp_full_A(scalapack_A.m(),scalapack_A.n());
+ scalapack_A.copy_to(tmp_full_A);
+
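+ // apply the same column scaling to the serial reference matrix; the Frobenius norms printed below have to agree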
+ for (unsigned int i=0; i<full_A.m(); ++i)
+ for (unsigned int j=0; j<full_A.n(); ++j)
+ full_A(i,j) *= scaling_factors[j];
+
+ pcout << " Column scaling for"
+ << " A in R^(" << scalapack_A.m() << "x" << scalapack_A.n() << ")" << std::endl;
+ pcout << " norms: " << tmp_full_A.frobenius_norm()<< " & "
+ << full_A.frobenius_norm() << " for "
+ << typeid(NumberType).name() << std::endl << std::endl;
pcout << std::endl;
}
--- /dev/null
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.763525 & 4081.763525 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4090.694598 & 4090.694598 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4083.255058 & 4083.255058 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4085.273405 & 4085.273405 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4086.46816 & 4086.46816 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4082.735765 & 4082.735765 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.892068 & 4081.892068 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.61645 & 4084.61645 for d
+
+
+2D process grid: 1x1
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.481512 & 4084.481512 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.763525 & 4081.763525 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4090.694598 & 4090.694598 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4083.255058 & 4083.255058 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4085.273405 & 4085.273405 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4086.46816 & 4086.46816 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4082.735765 & 4082.735765 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.892068 & 4081.892068 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.61645 & 4084.61645 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.481512 & 4084.481512 for d
+
+
--- /dev/null
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.763525 & 4081.763525 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4090.694598 & 4090.694598 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4083.255058 & 4083.255058 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4085.273405 & 4085.273405 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4086.46816 & 4086.46816 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4082.735765 & 4082.735765 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4081.892068 & 4081.892068 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.61645 & 4084.61645 for d
+
+
+2D process grid: 3x3
+
+ Column scaling for A in R^(400x500)
+ norms: 4084.481512 & 4084.481512 for d
+
+