#include <deal.II/base/config.h>
#include <deal.II/base/table.h>
+#include <deal.II/base/thread_management.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/smartpointer.h>
#include <deal.II/base/memory_consumption.h>
private:
- /**
- * Temporary vector for counting the
- * elements written into the
- * individual blocks when doing a
- * collective add or set.
- */
- std::vector<size_type> counter_within_block;
/**
- * Temporary vector for column
- * indices on each block when writing
- * local to global data on each
- * sparse matrix.
+     * A structure containing some scratch fields that the
+     * set() and add() functions use to pre-sort their input
+     * indices and values. Since one can reasonably expect
+     * to call set() and add() from multiple threads at once
+     * as long as the matrix indices that are touched are
+     * disjoint, these temporary data fields need to be
+     * guarded by a mutex; the structure therefore contains such
+     * a mutex as a member variable.
*/
- std::vector<std::vector<size_type> > column_indices;
+ struct TemporaryData
+ {
+ /**
+ * Temporary vector for counting the
+ * elements written into the
+ * individual blocks when doing a
+ * collective add or set.
+ */
+ std::vector<size_type> counter_within_block;
+
+ /**
+ * Temporary vector for column
+ * indices on each block when writing
+ * local to global data on each
+ * sparse matrix.
+ */
+ std::vector<std::vector<size_type> > column_indices;
+
+ /**
+ * Temporary vector for storing the
+ * local values (they need to be
+ * reordered when writing local to
+ * global).
+ */
+ std::vector<std::vector<double> > column_values;
+
+ /**
+     * A mutex variable used to guard access to the member
+     * variables of this structure.
+ */
+ Threads::Mutex mutex;
+
+ /**
+ * Copy operator. This is needed because the default copy
+ * operator of this class is deleted (since Threads::Mutex is
+ * not copyable) and hence the default copy operator of the
+ * enclosing class is also deleted.
+ *
+ * The implementation here simply does nothing -- TemporaryData
+ * objects are just scratch objects that are resized at the
+ * beginning of their use, so there is no point actually copying
+ * anything.
+ */
+    TemporaryData & operator = (const TemporaryData &)
+    {
+      // nothing needs to be copied; see the documentation above
+      return *this;
+    }
+ };
/**
- * Temporary vector for storing the
- * local values (they need to be
- * reordered when writing local to
- * global).
+   * A set of scratch arrays used by the add() and set()
+   * functions that take pointers to data, in order to
+   * pre-sort indices before use. Access from multiple threads
+   * is synchronized via the mutex variable that is part of the
+   * structure; see the sketch below for the intended
+   * concurrent usage pattern.
*/
- std::vector<std::vector<double> > column_values;
-
+ TemporaryData temporary_data;
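+
+  // As an illustration of the concurrent use this enables (a sketch
+  // only; set_even_columns() and set_odd_columns() are hypothetical
+  // user functions that write into disjoint sets of matrix entries,
+  // mirroring the tests added below):
+  //
+  //   BlockSparseMatrix<double> matrix (sparsity_pattern);
+  //
+  //   Threads::ThreadGroup<> tg;
+  //   tg += Threads::new_thread (&set_even_columns, matrix);
+  //   tg += Threads::new_thread (&set_odd_columns,  matrix);
+  //   tg.join_all ();
+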
/**
   * Make the iterator class a friend.
MemoryConsumption::memory_consumption(row_block_indices)+
MemoryConsumption::memory_consumption(column_block_indices)+
MemoryConsumption::memory_consumption(sub_objects)+
- MemoryConsumption::memory_consumption(counter_within_block)+
- MemoryConsumption::memory_consumption(column_indices)+
- MemoryConsumption::memory_consumption(column_values);
+ MemoryConsumption::memory_consumption(temporary_data.counter_within_block)+
+ MemoryConsumption::memory_consumption(temporary_data.column_indices)+
+ MemoryConsumption::memory_consumption(temporary_data.column_values)+
+         sizeof(temporary_data.mutex); // the mutex has no memory_consumption() member, so count its size directly
for (unsigned int r=0; r<n_block_rows(); ++r)
for (unsigned int c=0; c<n_block_cols(); ++c)
{
prepare_set_operation();
+ // lock access to the temporary data structure to
+ // allow multiple threads to call this function concurrently
+ Threads::Mutex::ScopedLock lock (temporary_data.mutex);
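+      // (the ScopedLock releases the mutex automatically when it goes
+      //  out of scope, i.e. when this function returns by any path)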
+
// Resize scratch arrays
- if (column_indices.size() < this->n_block_cols())
+ if (temporary_data.column_indices.size() < this->n_block_cols())
{
- column_indices.resize (this->n_block_cols());
- column_values.resize (this->n_block_cols());
- counter_within_block.resize (this->n_block_cols());
+ temporary_data.column_indices.resize (this->n_block_cols());
+ temporary_data.column_values.resize (this->n_block_cols());
+ temporary_data.counter_within_block.resize (this->n_block_cols());
}
      // Resize sub-arrays to n_cols. All of these arrays
      // will be of the same size, so it suffices to check
      // whether the size of one is large
      // enough before actually going
      // through all of them.
- if (column_indices[0].size() < n_cols)
+ if (temporary_data.column_indices[0].size() < n_cols)
{
for (unsigned int i=0; i<this->n_block_cols(); ++i)
{
- column_indices[i].resize(n_cols);
- column_values[i].resize(n_cols);
+ temporary_data.column_indices[i].resize(n_cols);
+ temporary_data.column_values[i].resize(n_cols);
}
}
// Reset the number of added elements
// in each block to zero.
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- counter_within_block[i] = 0;
+ temporary_data.counter_within_block[i] = 0;
      // Go through the column indices to
      // find out which portions of the
      // values should be set in which
      // block of the matrix.
const std::pair<unsigned int, size_type>
col_index = this->column_block_indices.global_to_local(col_indices[j]);
- const size_type local_index = counter_within_block[col_index.first]++;
+ const size_type local_index = temporary_data.counter_within_block[col_index.first]++;
- column_indices[col_index.first][local_index] = col_index.second;
- column_values[col_index.first][local_index] = value;
+ temporary_data.column_indices[col_index.first][local_index] = col_index.second;
+ temporary_data.column_values[col_index.first][local_index] = value;
}
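+      // For example (illustration only): with two column blocks of
+      // size 5 each and incoming global column indices {0, 7, 3, 9},
+      // global_to_local() maps these to the (block, local) pairs
+      // (0,0), (1,2), (0,3), (1,4), so the loop above leaves
+      // temporary_data.column_indices[0] = {0, 3},
+      // temporary_data.column_indices[1] = {2, 4} and
+      // temporary_data.counter_within_block = {2, 2}.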
#ifdef DEBUG
  // If in debug mode, do a check whether
  // the right length has been obtained.
size_type length = 0;
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += counter_within_block[i];
+ length += temporary_data.counter_within_block[i];
Assert (length <= n_cols, ExcInternalError());
#endif
row_index = this->row_block_indices.global_to_local (row);
for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
{
- if (counter_within_block[block_col] == 0)
+ if (temporary_data.counter_within_block[block_col] == 0)
continue;
block(row_index.first, block_col).set
(row_index.second,
- counter_within_block[block_col],
- &column_indices[block_col][0],
- &column_values[block_col][0],
+ temporary_data.counter_within_block[block_col],
+ &temporary_data.column_indices[block_col][0],
+ &temporary_data.column_values[block_col][0],
false);
}
}
return;
}
- // Resize scratch arrays
- if (column_indices.size() < this->n_block_cols())
+ // Lock scratch arrays, then resize them
+ Threads::Mutex::ScopedLock lock (temporary_data.mutex);
+
+ if (temporary_data.column_indices.size() < this->n_block_cols())
{
- column_indices.resize (this->n_block_cols());
- column_values.resize (this->n_block_cols());
- counter_within_block.resize (this->n_block_cols());
+ temporary_data.column_indices.resize (this->n_block_cols());
+ temporary_data.column_values.resize (this->n_block_cols());
+ temporary_data.counter_within_block.resize (this->n_block_cols());
}
  // Resize sub-arrays to n_cols. All of these arrays
  // will be of the same size, so it suffices to check
  // whether the size of one is large
  // enough before actually going
  // through all of them.
- if (column_indices[0].size() < n_cols)
+ if (temporary_data.column_indices[0].size() < n_cols)
{
for (unsigned int i=0; i<this->n_block_cols(); ++i)
{
- column_indices[i].resize(n_cols);
- column_values[i].resize(n_cols);
+ temporary_data.column_indices[i].resize(n_cols);
+ temporary_data.column_values[i].resize(n_cols);
}
}
// Reset the number of added elements
// in each block to zero.
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- counter_within_block[i] = 0;
+ temporary_data.counter_within_block[i] = 0;
  // Go through the column indices to
  // find out which portions of the
  // values should be added to which
  // block of the matrix.
const std::pair<unsigned int, size_type>
col_index = this->column_block_indices.global_to_local(col_indices[j]);
- const size_type local_index = counter_within_block[col_index.first]++;
+ const size_type local_index = temporary_data.counter_within_block[col_index.first]++;
- column_indices[col_index.first][local_index] = col_index.second;
- column_values[col_index.first][local_index] = value;
+ temporary_data.column_indices[col_index.first][local_index] = col_index.second;
+ temporary_data.column_values[col_index.first][local_index] = value;
}
#ifdef DEBUG
  // If in debug mode, do a check whether
  // the right length has been obtained.
size_type length = 0;
for (unsigned int i=0; i<this->n_block_cols(); ++i)
- length += counter_within_block[i];
+ length += temporary_data.counter_within_block[i];
Assert (length <= n_cols, ExcInternalError());
#endif
row_index = this->row_block_indices.global_to_local (row);
for (unsigned int block_col=0; block_col<n_block_cols(); ++block_col)
{
- if (counter_within_block[block_col] == 0)
+ if (temporary_data.counter_within_block[block_col] == 0)
continue;
block(row_index.first, block_col).add
(row_index.second,
- counter_within_block[block_col],
- &column_indices[block_col][0],
- &column_values[block_col][0],
+ temporary_data.counter_within_block[block_col],
+ &temporary_data.column_indices[block_col][0],
+ &temporary_data.column_values[block_col][0],
false,
col_indices_are_sorted);
}
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id$
+//
+// Copyright (C) 2000 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// the versions of BlockMatrixBase::set/add that take a pointer to a set of
+// values were not thread safe, even if they were writing into disjoint sets
+// of elements. this test verifies that the set() function is now indeed
+// thread safe under these conditions
+//
+// we test this by calling set() from a multitude of threads at once. without
+// the patch that fixed this, one can elicit a great deal of different memory
+// corruption errors and assertions from this test, depending on luck and
+// phase of the moon :-)
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/block_sparse_matrix.h>
+#include <deal.II/lac/block_vector.h>
+#include <fstream>
+#include <iomanip>
+#include <algorithm>
+
+
+
+void do_set (const bool even_or_odd,
+ BlockSparseMatrix<double> &bsm)
+{
+ BlockSparseMatrix<double>::size_type col_indices[5];
+ for (unsigned int i=0; i<5 ; ++i)
+ if (even_or_odd)
+ col_indices[i] = 2*i;
+ else
+ col_indices[i] = 2*i+1;
+
+ BlockSparseMatrix<double>::value_type values[5];
+ for (unsigned int i=0; i<5 ; ++i)
+ if (even_or_odd)
+ values[i] = 1;
+ else
+ values[i] = 2;
+
+ bsm.set (0, 5, col_indices, values, false);
+}
+
+
+void test ()
+{
+ std::ofstream logfile("block_matrices_02/output");
+ deallog << std::fixed;
+ deallog << std::setprecision(2);
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ BlockSparsityPattern bsp(2,2);
+ // set sizes
+ for (unsigned int i=0; i<2; ++i)
+ for (unsigned int j=0; j<2; ++j)
+ bsp.block(i,j).reinit ( 5, 5, 5);
+ bsp.collect_sizes ();
+
+ // make a full matrix
+ for (unsigned int row=0; row<10; ++row)
+ for (unsigned int i=0; i<10; ++i)
+ bsp.add (row, i);
+ bsp.compress ();
+
+ BlockSparseMatrix<double> bsm (bsp);
+
+ Threads::ThreadGroup<> tg;
+ for (unsigned int i=0; i<100; ++i)
+ {
+ tg += Threads::new_thread (&do_set, true, bsm);
+ tg += Threads::new_thread (&do_set, false, bsm);
+ }
+ tg.join_all ();
+
+ bsm.print_formatted (deallog.get_file_stream());
+}
+
+
+
+
+int main ()
+{
+ try
+ {
+ test ();
+ }
+ catch (std::exception &e)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << e.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ // abort
+ return 2;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ // abort
+ return 3;
+ };
+
+
+ return 0;
+}
--- /dev/null
+// ---------------------------------------------------------------------
+// $Id$
+//
+// Copyright (C) 2000 - 2013 by the deal.II authors
+//
+// This file is part of the deal.II library.
+//
+// The deal.II library is free software; you can use it, redistribute
+// it, and/or modify it under the terms of the GNU Lesser General
+// Public License as published by the Free Software Foundation; either
+// version 2.1 of the License, or (at your option) any later version.
+// The full text of the license can be found in the file LICENSE at
+// the top level of the deal.II distribution.
+//
+// ---------------------------------------------------------------------
+
+
+// the versions of BlockMatrixBase::set/add that take a pointer to a set of
+// values were not thread safe, even if they were writing into disjoint sets
+// of elements. this test verifies that the add() function is now indeed
+// thread safe under these conditions
+//
+// we test this by calling add() from a multitude of threads at once. without
+// the patch that fixed this, one can elicit a great deal of different memory
+// corruption errors and assertions from this test, depending on luck and
+// phase of the moon :-)
+
+#include "../tests.h"
+#include <deal.II/base/logstream.h>
+#include <deal.II/lac/block_sparsity_pattern.h>
+#include <deal.II/lac/block_sparse_matrix.h>
+#include <deal.II/lac/block_vector.h>
+#include <fstream>
+#include <iomanip>
+#include <algorithm>
+
+
+
+void do_add (const bool even_or_odd,
+ BlockSparseMatrix<double> &bsm)
+{
+ BlockSparseMatrix<double>::size_type col_indices[5];
+ for (unsigned int i=0; i<5 ; ++i)
+ if (even_or_odd)
+ col_indices[i] = 2*i;
+ else
+ col_indices[i] = 2*i+1;
+
+ BlockSparseMatrix<double>::value_type values[5];
+ for (unsigned int i=0; i<5 ; ++i)
+ if (even_or_odd)
+ values[i] = 1;
+ else
+ values[i] = 2;
+
+ bsm.add (0, 5, col_indices, values, false);
+}
+
+
+void test ()
+{
+ std::ofstream logfile("block_matrices_03/output");
+ deallog << std::fixed;
+ deallog << std::setprecision(2);
+ deallog.attach(logfile);
+ deallog.depth_console(0);
+ deallog.threshold_double(1.e-10);
+
+ BlockSparsityPattern bsp(2,2);
+ // set sizes
+ for (unsigned int i=0; i<2; ++i)
+ for (unsigned int j=0; j<2; ++j)
+ bsp.block(i,j).reinit ( 5, 5, 5);
+ bsp.collect_sizes ();
+
+ // make a full matrix
+ for (unsigned int row=0; row<10; ++row)
+ for (unsigned int i=0; i<10; ++i)
+ bsp.add (row, i);
+ bsp.compress ();
+
+ BlockSparseMatrix<double> bsm (bsp);
+
+ Threads::ThreadGroup<> tg;
+ for (unsigned int i=0; i<100; ++i)
+ {
+ tg += Threads::new_thread (&do_add, true, bsm);
+ tg += Threads::new_thread (&do_add, false, bsm);
+ }
+ tg.join_all ();
+
+ // divide whole matrix by 100 to get back to the effect of a single set
+ bsm /= 100;
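+  // (each touched entry has been added to exactly 100 times, once per
+  //  loop iteration by the thread responsible for it, so dividing by 100
+  //  recovers the values 1 and 2 that a single set() would have produced)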
+
+ bsm.print_formatted (deallog.get_file_stream());
+}
+
+
+
+
+int main ()
+{
+ try
+ {
+ test ();
+ }
+ catch (std::exception &e)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Exception on processing: " << e.what() << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ // abort
+ return 2;
+ }
+ catch (...)
+ {
+ std::cerr << std::endl << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ std::cerr << "Unknown exception!" << std::endl
+ << "Aborting!" << std::endl
+ << "----------------------------------------------------"
+ << std::endl;
+ // abort
+ return 3;
+ };
+
+
+ return 0;
+}