https://gitweb.dealii.org/ - dealii.git/commitdiff
add ScaLAPACKMatrix::copy_to(ScaLAPACKMatrix &dest) 5704/head
author    Benjamin Brands <benjamin.brands@fau.de>
Wed, 10 Jan 2018 10:47:52 +0000 (11:47 +0100)
committer Benjamin Brands <benjamin.brands@fau.de>
Wed, 10 Jan 2018 10:47:52 +0000 (11:47 +0100)
include/deal.II/lac/scalapack.h
source/lac/scalapack.cc
tests/scalapack/scalapack_08.cc [new file with mode: 0644]
tests/scalapack/scalapack_08.mpirun=1.output [new file with mode: 0644]
tests/scalapack/scalapack_08.mpirun=10.output [new file with mode: 0644]
tests/scalapack/scalapack_08.mpirun=5.output [new file with mode: 0644]

index 2a1002e944d8ea81ac53689feb1eb961446b33a0..7076508778daf417b082050f889419d14758c459 100644 (file)
@@ -158,6 +158,15 @@ public:
    */
   void copy_to (FullMatrix<NumberType> &matrix) const;
 
+
+  /**
+   * Copy the contents of the distributed matrix into a differently distributed matrix @p dest.
+   * The function also works if the two matrices use different process grids
+   * or block-cyclic distributions.
+   */
+  void copy_to (ScaLAPACKMatrix<NumberType> &dest) const;
+
+
   /**
    * Compute the Cholesky factorization of the matrix using ScaLAPACK
    * function <code>pXpotrf</code>. The result of the factorization is stored in this object.
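For orientation, here is a minimal usage sketch of the new member function. It mirrors the new test added below; the matrix size, block size and grid layouts are illustrative only, and redistribute_example is a hypothetical helper, not part of the patch.

// a minimal sketch, assuming the deal.II ScaLAPACK wrappers declared above
#include <deal.II/lac/scalapack.h>

template <typename NumberType>
void redistribute_example(MPI_Comm mpi_communicator)
{
  const unsigned int size = 500, block = 32;

  // the source matrix lives on a single-process grid ...
  auto grid_single = std::make_shared<dealii::Utilities::MPI::ProcessGrid>(mpi_communicator, 1, 1);
  // ... the destination on a 2d process grid chosen for the given size and block size
  auto grid_2d = std::make_shared<dealii::Utilities::MPI::ProcessGrid>(mpi_communicator, size, size, block, block);

  dealii::ScaLAPACKMatrix<NumberType> source(size, size, grid_single, block, block);
  dealii::ScaLAPACKMatrix<NumberType> dest(size, size, grid_2d, block, block);

  // fill 'source', e.g. by assignment from a FullMatrix<NumberType>, then
  // redistribute it onto the 2d grid with the new member function:
  source.copy_to(dest);
}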
index 87f1d776f0f82e56c9f20999d30156034923d42b..2ceb0dfd3e99042816e9a1ccc23d2cf9d35e63ff 100644 (file)
@@ -205,6 +205,85 @@ ScaLAPACKMatrix<NumberType>::copy_to (FullMatrix<NumberType> &matrix) const
 
 
 
+template <typename NumberType>
+void
+ScaLAPACKMatrix<NumberType>::copy_to (ScaLAPACKMatrix<NumberType> &dest) const
+{
+  Assert (n_rows == dest.n_rows, ExcDimensionMismatch(n_rows, dest.n_rows));
+  Assert (n_columns == dest.n_columns, ExcDimensionMismatch(n_columns, dest.n_columns));
+
+  if (this->grid->mpi_process_is_active)
+    AssertThrow (this->descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
+  if (dest.grid->mpi_process_is_active)
+    AssertThrow (dest.descriptor[0]==1,ExcMessage("Copying of ScaLAPACK matrices only implemented for dense matrices"));
+
+  /*
+   * Inter-process communication is only necessary if the matrices use different
+   * process grids or block-cyclic distributions; if both share the same process
+   * grid and block sizes, a purely local copy suffices.
+   */
+  if ( (this->grid != dest.grid) || (row_block_size != dest.row_block_size) || (column_block_size != dest.column_block_size) )
+    {
+      /*
+       * Create an MPI communicator spanning the union of the source and destination communicators.
+       */
+      int ierr = 0;
+      MPI_Group group_source, group_dest, group_union;
+      ierr = MPI_Comm_group(this->grid->mpi_communicator, &group_source);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Comm_group(dest.grid->mpi_communicator, &group_dest);
+      AssertThrowMPI(ierr);
+      ierr = MPI_Group_union(group_source, group_dest, &group_union);
+      AssertThrowMPI(ierr);
+      MPI_Comm mpi_communicator_union;
+      // To create a communicator for the union group we need a parent communicator
+      // containing all desired processes --> use MPI_COMM_WORLD
+      ierr = MPI_Comm_create_group(MPI_COMM_WORLD, group_union, 5, &mpi_communicator_union);
+      AssertThrowMPI(ierr);
+
+      /*
+       * The routine pgemr2d requires a BLACS context covering at least the union of the process grids
+       * described by the BLACS contexts of the source and destination matrices.
+       */
+      int union_blacs_context = Csys2blacs_handle(mpi_communicator_union);
+      const char *order = "Col";
+      int union_n_process_rows = Utilities::MPI::n_mpi_processes(mpi_communicator_union);
+      int union_n_process_columns = 1;
+      Cblacs_gridinit(&union_blacs_context, order, union_n_process_rows, union_n_process_columns);
+
+      const NumberType *loc_vals_source = NULL;
+      NumberType *loc_vals_dest = NULL;
+
+      if (this->grid->mpi_process_is_active && (this->values.size()>0))
+        {
+          AssertThrow(this->values.size()>0,dealii::ExcMessage("source: process is active but local matrix empty"));
+          loc_vals_source = &this->values[0];
+        }
+      if (dest.grid->mpi_process_is_active && (dest.values.size()>0))
+        {
+          AssertThrow(dest.values.size()>0,dealii::ExcMessage("destination: process is active but local matrix empty"));
+          loc_vals_dest = &dest.values[0];
+        }
+      pgemr2d(&n_rows, &n_columns, loc_vals_source, &submatrix_row, &submatrix_column, descriptor,
+              loc_vals_dest, &dest.submatrix_row, &dest.submatrix_column, dest.descriptor,
+              &union_blacs_context);
+
+      Cblacs_gridexit(union_blacs_context);
+
+      if (mpi_communicator_union != MPI_COMM_NULL)
+        MPI_Comm_free(&mpi_communicator_union);
+      MPI_Group_free(&group_source);
+      MPI_Group_free(&group_dest);
+      MPI_Group_free(&group_union);
+    }
+  else
+    // same process grid and block sizes: an active process copies its local data directly
+    if (this->grid->mpi_process_is_active)
+      dest.values = this->values;
+}
+
+
+
 template <typename NumberType>
 void ScaLAPACKMatrix<NumberType>::compute_cholesky_factorization()
 {
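The centerpiece of the new function is the construction of a communicator over the union of the two process grids, as described in the comments above. The following standalone sketch shows that MPI pattern in isolation; make_union_communicator is a hypothetical helper, error checking is omitted, and the membership guard is a generic way to satisfy MPI_Comm_create_group's requirement that only members of the group call it.

#include <mpi.h>

// Build a communicator over the union of the process groups behind two
// communicators, e.g. the source and destination grid communicators.
MPI_Comm make_union_communicator(MPI_Comm comm_source, MPI_Comm comm_dest)
{
  MPI_Group group_source, group_dest, group_union;
  MPI_Comm_group(comm_source, &group_source);
  MPI_Comm_group(comm_dest, &group_dest);
  MPI_Group_union(group_source, group_dest, &group_union);

  // MPI_Comm_create_group is collective over the members of group_union only;
  // the parent communicator must contain all of them, which MPI_COMM_WORLD does.
  // The tag (5, as in the patch) separates concurrent construction calls.
  MPI_Comm comm_union = MPI_COMM_NULL;
  int rank_in_union;
  MPI_Group_rank(group_union, &rank_in_union);
  if (rank_in_union != MPI_UNDEFINED)
    MPI_Comm_create_group(MPI_COMM_WORLD, group_union, 5, &comm_union);

  MPI_Group_free(&group_source);
  MPI_Group_free(&group_dest);
  MPI_Group_free(&group_union);
  return comm_union; // MPI_COMM_NULL on processes outside the union
}

The resulting communicator is what copy_to() hands to Csys2blacs_handle()/Cblacs_gridinit() to obtain a single BLACS context covering both grids, which pgemr2d then uses to redistribute the matrix entries.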
diff --git a/tests/scalapack/scalapack_08.cc b/tests/scalapack/scalapack_08.cc
new file mode 100644 (file)
index 0000000..2dfb1b5
--- /dev/null
@@ -0,0 +1,110 @@
+// ---------------------------------------------------------------------
+//
+// Copyright (C) 2017 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+#include "../tests.h"
+#include "../lapack/create_matrix.h"
+
+// test copying of distributed ScaLAPACKMatrix objects using the ScaLAPACK routine pXgemr2d
+
+#include <deal.II/base/logstream.h>
+#include <deal.II/base/utilities.h>
+#include <deal.II/base/conditional_ostream.h>
+#include <deal.II/base/timer.h>
+#include <deal.II/base/multithread_info.h>
+
+
+#include <deal.II/lac/scalapack.h>
+
+#include <fstream>
+#include <iostream>
+
+
+template <typename NumberType>
+void test(const unsigned int block_size_i, const unsigned int block_size_j)
+{
+  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
+  const unsigned int n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator));
+  const unsigned int this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator));
+
+  ConditionalOStream pcout (std::cout, (this_mpi_process ==0));
+
+  const unsigned int size = 500;
+  //create FullMatrix and fill it
+  FullMatrix<NumberType> full(size);
+  unsigned int count=0;
+  for (unsigned int i=0; i<size; ++i)
+    for (unsigned int j=0; j<size; ++j, ++count)
+      full(i,j) = count;
+
+  //create 2d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_2d = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,size,size,block_size_i,block_size_i);
+  //create 1d process grid
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_1d = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,n_mpi_processes,1);
+  //create process grid containing one process
+  std::shared_ptr<Utilities::MPI::ProcessGrid> grid_single = std::make_shared<Utilities::MPI::ProcessGrid>(mpi_communicator,1,1);
+
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_2d (size, size, grid_2d, block_size_i, block_size_i);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_1d (size, size, grid_1d, block_size_j, block_size_j);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_single (size, size, grid_single, block_size_i, block_size_j);
+  ScaLAPACKMatrix<NumberType> scalapack_matrix_source (size, size, grid_single, block_size_j, block_size_i);
+
+  pcout << "2D grid matrix: dim=" << scalapack_matrix_2d.m() << "x" << scalapack_matrix_2d.n()
+        << ";  blocks=" << block_size_i << "x" << block_size_i
+        << ";  grid=" << grid_2d->get_process_grid_rows() << "x" << grid_2d->get_process_grid_columns() << std::endl;
+
+  pcout << "1D grid matrix: " << scalapack_matrix_1d.m() << "x" << scalapack_matrix_1d.n()
+        << ";  blocks=" << block_size_j << "x" << block_size_j
+        << ";  grid=" << grid_1d->get_process_grid_rows() << "x" << grid_1d->get_process_grid_columns() << std::endl;
+
+  pcout << "single process matrix: " << scalapack_matrix_single.m() << "x" << scalapack_matrix_single.n()
+        << ";  blocks=" << block_size_i << "x" << block_size_j
+        << ";  grid=" << grid_single->get_process_grid_rows() << "x" << grid_single->get_process_grid_columns() << std::endl << std::endl;
+
+  scalapack_matrix_source = full;
+
+  scalapack_matrix_source.copy_to(scalapack_matrix_2d);
+  scalapack_matrix_source.copy_to(scalapack_matrix_1d);
+  scalapack_matrix_source.copy_to(scalapack_matrix_single);
+
+  FullMatrix<NumberType> test_2d(size), test_1d(size), test_one(size);
+  scalapack_matrix_2d.copy_to(test_2d);
+  scalapack_matrix_1d.copy_to(test_1d);
+  scalapack_matrix_single.copy_to(test_one);
+  test_2d.add (-1,full);
+  test_1d.add (-1,full);
+  test_one.add (-1,full);
+
+  AssertThrow(test_2d.frobenius_norm() < 1e-12,ExcInternalError());
+  AssertThrow(test_1d.frobenius_norm() < 1e-12,ExcInternalError());
+  AssertThrow(test_one.frobenius_norm() < 1e-12,ExcInternalError());
+}
+
+
+
+int main (int argc,char **argv)
+{
+  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, numbers::invalid_unsigned_int);
+
+  const std::vector<unsigned int> blocks_i = {{16,32,64}};
+  const std::vector<unsigned int> blocks_j = {{16,32,64}};
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<float>(s,b);
+
+  for (const auto &s : blocks_i)
+    for (const auto &b : blocks_j)
+      test<double>(s,b);
+}
diff --git a/tests/scalapack/scalapack_08.mpirun=1.output b/tests/scalapack/scalapack_08.mpirun=1.output
new file mode 100644 (file)
index 0000000..5028735
--- /dev/null
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=16x16;  grid=1x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=32x32;  grid=1x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=1x1
+1D grid matrix: 500x500;  blocks=64x64;  grid=1x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
diff --git a/tests/scalapack/scalapack_08.mpirun=10.output b/tests/scalapack/scalapack_08.mpirun=10.output
new file mode 100644 (file)
index 0000000..1136518
--- /dev/null
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=16x16;  grid=10x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=32x32;  grid=10x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=3x3
+1D grid matrix: 500x500;  blocks=64x64;  grid=10x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
diff --git a/tests/scalapack/scalapack_08.mpirun=5.output b/tests/scalapack/scalapack_08.mpirun=5.output
new file mode 100644 (file)
index 0000000..9358ffb
--- /dev/null
@@ -0,0 +1,72 @@
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=16x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=16x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=16x16;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=16x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=32x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=32x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=32x32;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=32x64;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=16x16;  grid=5x1
+single process matrix: 500x500;  blocks=64x16;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=32x32;  grid=5x1
+single process matrix: 500x500;  blocks=64x32;  grid=1x1
+
+2D grid matrix: dim=500x500;  blocks=64x64;  grid=2x2
+1D grid matrix: 500x500;  blocks=64x64;  grid=5x1
+single process matrix: 500x500;  blocks=64x64;  grid=1x1
+
