* <code>throw</code>, or by simply reaching the closing brace. In all of
* these cases, it is not necessary to remember to pop the prefix manually
 * using LogStream::pop(). In this respect, it works just like the better-known
- * std::unique_ptr and Threads::Mutex::ScopedLock classes.
+ * std::unique_ptr and std::lock_guard classes.
*/
class Prefix
{
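// The scope-bound cleanup described above is the standard RAII idiom, and it
// is also what makes the mechanical ScopedLock -> std::lock_guard substitution
// in the hunks below safe: both types release their resource on every path
// out of the enclosing scope. A minimal sketch (the function and mutex names
// are illustrative, not deal.II API):

#include <mutex>
#include <stdexcept>

std::mutex m;

void f(const bool fail)
{
  std::lock_guard<std::mutex> lock(m); // m locked on construction
  if (fail)
    throw std::runtime_error("oops");  // m unlocked during stack unwinding
}                                      // m unlocked on normal return, too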
// operation of this function;
// for this, acquire the lock
// until we quit this function
- Threads::Mutex::ScopedLock lock(
+ std::lock_guard<std::mutex> lock(
internal::FEToolsAddFENameHelper::fe_name_map_lock);
Assert(
{
// Make sure no other thread
// is just adding an element
- Threads::Mutex::ScopedLock lock(
+ std::lock_guard<std::mutex> lock(
internal::FEToolsAddFENameHelper::fe_name_map_lock);
AssertThrow(fe_name_map.find(name_part) != fe_name_map.end(),
FETools::ExcInvalidFEName(name));
// lock access to the temporary data structure to
// allow multiple threads to call this function concurrently
- Threads::Mutex::ScopedLock lock(temporary_data.mutex);
+ std::lock_guard<std::mutex> lock(temporary_data.mutex);
// Resize scratch arrays
if (temporary_data.column_indices.size() < this->n_block_cols())
}
// Lock scratch arrays, then resize them
- Threads::Mutex::ScopedLock lock(temporary_data.mutex);
+ std::lock_guard<std::mutex> lock(temporary_data.mutex);
if (temporary_data.column_indices.size() < this->n_block_cols())
{
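// The two temporary_data hunks above guard shared scratch arrays: the lock is
// taken before the resize so that two threads calling the same function
// concurrently cannot race on the reallocation. A reduced sketch of that
// shape (the Scratch type and field names are assumptions, not the actual
// deal.II classes):

#include <mutex>
#include <vector>

struct Scratch
{
  std::mutex          mutex;
  std::vector<double> column_values;
};

void vmult_block(Scratch &scratch, const unsigned int n_block_cols)
{
  std::lock_guard<std::mutex> lock(scratch.mutex);
  if (scratch.column_values.size() < n_block_cols)
    scratch.column_values.resize(n_block_cols); // safe under the lock
  // ... use scratch.column_values while the lock is still held ...
}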
#ifdef DEAL_II_WITH_MPI
// make this function thread safe
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
// compress_requests.size() == 0
// make this function thread safe
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
Assert(partitioner->n_import_indices() == 0 || import_data != nullptr,
ExcNotInitialized());
return;
// make this function thread safe
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
if (update_ghost_values_requests.size() > 0)
{
// make this function thread safe
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
partitioner->export_to_ghosted_array_finish(
ArrayView<Number>(data.values.get() + partitioner->local_size(),
VectorType & dst,
const VectorType &src) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
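// The four identical Chebyshev hunks above share one lazy-initialization
// pattern: the first vmult-style call computes the eigenvalue estimates under
// the lock, and every later call finds them ready. Since vmult() is const,
// both the mutex and the flag must be mutable. In outline (class and member
// names are illustrative):

#include <mutex>

class Preconditioner
{
public:
  void vmult(/* VectorType &dst, const VectorType &src */) const
  {
    std::lock_guard<std::mutex> lock(mutex);
    if (eigenvalues_are_initialized == false)
      {
        // expensive one-time setup, e.g. estimate_eigenvalues(src)
        eigenvalues_are_initialized = true;
      }
    // ... apply the preconditioner ...
  }

private:
  mutable std::mutex mutex;
  mutable bool       eigenvalues_are_initialized = false;
};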
// if in MT mode, block all other
// operations. if not in MT mode,
// this is a no-op
- Threads::Mutex::ScopedLock lock(this->lock);
+ std::lock_guard<std::mutex> lock(this->lock);
Vector<number>::operator=(v);
data_is_preloaded = false;
// if in MT mode, block all other
// operations. if not in MT mode,
// this is a no-op
- Threads::Mutex::ScopedLock lock(this->lock);
+ std::lock_guard<std::mutex> lock(this->lock);
// check that we have not called
// @p alert without the respective
// (there should be none, but who
// knows). if not in MT mode,
// this is a no-op
- Threads::Mutex::ScopedLock lock(this->lock);
+ std::lock_guard<std::mutex> lock(this->lock);
// this is too bad: someone
// requested the vector in advance,
{
AssertDimension(dst_view.size(), this->m());
AssertDimension(src_view.size(), this->n());
- Threads::Mutex::ScopedLock lock(this->mutex);
- const unsigned int n =
+ std::lock_guard<std::mutex> lock(this->mutex);
+ const unsigned int n =
Utilities::fixed_power<dim>(size > 0 ? size : eigenvalues[0].size());
tmp_array.resize_fast(n * 2);
constexpr int kernel_size = size > 0 ? size : 0;
{
AssertDimension(dst_view.size(), this->n());
AssertDimension(src_view.size(), this->m());
- Threads::Mutex::ScopedLock lock(this->mutex);
- const unsigned int n = size > 0 ? size : eigenvalues[0].size();
+ std::lock_guard<std::mutex> lock(this->mutex);
+ const unsigned int n = size > 0 ? size : eigenvalues[0].size();
tmp_array.resize_fast(Utilities::fixed_power<dim>(n));
constexpr int kernel_size = size > 0 ? size : 0;
internal::EvaluatorTensorProduct<internal::evaluate_general,
, current_alloc(0)
, log_statistics(log_statistics)
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
pool.initialize(initial_size);
}
inline VectorType *
GrowingVectorMemory<VectorType>::alloc()
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
++total_alloc;
++current_alloc;
inline void
GrowingVectorMemory<VectorType>::free(const VectorType *const v)
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (typename std::vector<entry_type>::iterator i = pool.data->begin();
i != pool.data->end();
inline void
GrowingVectorMemory<VectorType>::release_unused_memory()
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (pool.data != nullptr)
pool.data->clear();
inline std::size_t
GrowingVectorMemory<VectorType>::memory_consumption() const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
std::size_t result = sizeof(*this);
const typename std::vector<entry_type>::const_iterator end = pool.data->end();
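// All four GrowingVectorMemory functions above serialize on the same mutex,
// so the pool's free list and its statistics counters stay consistent no
// matter which threads allocate and free. A condensed stand-in (the Pool
// class below is illustrative, not the deal.II one):

#include <cstddef>
#include <memory>
#include <mutex>
#include <vector>

class Pool
{
public:
  std::shared_ptr<std::vector<double>> alloc()
  {
    std::lock_guard<std::mutex> lock(mutex);
    ++total_alloc;
    entries.push_back(std::make_shared<std::vector<double>>());
    return entries.back();
  }

  std::size_t memory_consumption() const
  {
    std::lock_guard<std::mutex> lock(mutex); // read-only queries lock, too
    return sizeof(*this) + entries.capacity() * sizeof(entries[0]);
  }

private:
  mutable std::mutex                                mutex;
  std::size_t                                       total_alloc = 0;
  std::vector<std::shared_ptr<std::vector<double>>> entries;
};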
// that are within the range of one lock at once
const unsigned int next_bucket =
(*it / bucket_size_threading + 1) * bucket_size_threading;
- Threads::Mutex::ScopedLock lock(
+ std::lock_guard<std::mutex> lock(
mutexes[*it / bucket_size_threading]);
for (; it != end_unique && *it < next_bucket; ++it)
{
{
const unsigned int next_bucket =
(*it / bucket_size_threading + 1) * bucket_size_threading;
- Threads::Mutex::ScopedLock lock(
+ std::lock_guard<std::mutex> lock(
mutexes[*it / bucket_size_threading]);
for (; it != end_unique && *it < next_bucket; ++it)
if (row_lengths[*it] > 0)
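// The two hunks above use lock striping rather than one global lock: each
// mutex covers a bucket of bucket_size_threading consecutive row indices, and
// a thread processes an entire run of indices inside one bucket per lock
// acquisition. Threads working on different buckets never contend. A reduced
// version (sizes and names are assumptions; as in the original, the index
// list must be sorted):

#include <mutex>
#include <vector>

constexpr unsigned int bucket_size = 256;

std::vector<std::mutex>   mutexes(64); // one mutex per bucket of rows
std::vector<unsigned int> row_lengths(64 * bucket_size, 0);

void count_entries(const std::vector<unsigned int> &sorted_rows)
{
  for (std::size_t i = 0; i < sorted_rows.size();)
    {
      const unsigned int bucket      = sorted_rows[i] / bucket_size;
      const unsigned int next_bucket = (bucket + 1) * bucket_size;
      std::lock_guard<std::mutex> lock(mutexes[bucket]);
      // handle every consecutive index that falls into this bucket
      for (; i < sorted_rows.size() && sorted_rows[i] < next_bucket; ++i)
        ++row_lengths[sorted_rows[i]];
    }
}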
// guard access to the aux_*
// variables in multithread mode
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int d = 0; d < dim + 1; ++d)
aux_gradients[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
- Threads::Mutex::ScopedLock lock(acc_mutex);
+ std::lock_guard<std::mutex> lock(acc_mutex);
const double x = p[0];
Assert(x >= interpolation_points.front() &&
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
- Threads::Mutex::ScopedLock lock(acc_mutex);
+ std::lock_guard<std::mutex> lock(acc_mutex);
const double x = p[0];
Assert(x >= interpolation_points.front() &&
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
- Threads::Mutex::ScopedLock lock(acc_mutex);
+ std::lock_guard<std::mutex> lock(acc_mutex);
const double x = p[0];
Assert(x >= interpolation_points.front() &&
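// The reason for the acc_mutex above: gsl_interp_accel caches the index of
// the last lookup and is written to by every evaluation call, so even the
// 'const' evaluation functions mutate shared state. Locking around each GSL
// call makes them safe to run concurrently. The wrapper shape (the class is
// hypothetical; gsl_spline_eval is the actual GSL function):

#include <gsl/gsl_spline.h>
#include <mutex>

class InterpolatedSpline
{
public:
  double value(const double x) const
  {
    std::lock_guard<std::mutex> lock(acc_mutex); // serialize acc updates
    return gsl_spline_eval(spline, x, acc);
  }

private:
  mutable std::mutex acc_mutex;
  gsl_spline        *spline = nullptr;
  gsl_interp_accel  *acc    = nullptr;
};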
double
mu_rand_seed(double seed)
{
- static Threads::Mutex rand_mutex;
- Threads::Mutex::ScopedLock lock(rand_mutex);
+ static Threads::Mutex rand_mutex;
+ std::lock_guard<std::mutex> lock(rand_mutex);
static boost::random::uniform_real_distribution<> uniform_distribution(0,
1);
mu_rand()
{
static Threads::Mutex rand_mutex;
- Threads::Mutex::ScopedLock lock(rand_mutex);
+ std::lock_guard<std::mutex> lock(rand_mutex);
static boost::random::uniform_real_distribution<> uniform_distribution(0,
1);
static boost::random::mt19937 rng(
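// mu_rand_seed() and mu_rand() both keep their generator state in
// function-local statics and guard it with a function-local static mutex, so
// all callers share a single generator and a single lock. The same shape with
// the standard library in place of boost.random (a stand-in for, not a copy
// of, the functions above):

#include <mutex>
#include <random>

double mu_rand_sketch()
{
  static std::mutex           rand_mutex;
  std::lock_guard<std::mutex> lock(rand_mutex);
  static std::mt19937         rng(42);
  static std::uniform_real_distribution<double> uniform(0.0, 1.0);
  return uniform(rng);
}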
// via a mutex, so that users can call 'const' functions from threads
// in parallel (and these 'const' functions can then call compress()
// which itself calls the current function)
- Threads::Mutex::ScopedLock lock(compress_mutex);
+ std::lock_guard<std::mutex> lock(compress_mutex);
// see if any of the contiguous ranges can be merged. do not use
// std::vector::erase in-place as it is quadratic in the number of
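// The comment above explains why compress_mutex exists: IndexSet's 'const'
// query functions may call compress() internally, so logically read-only
// calls still rewrite cached state. Declaring the mutex (and the cached data)
// mutable keeps those functions const and thread-safe. The pattern in
// miniature (the Set class is illustrative):

#include <cstddef>
#include <mutex>

class Set
{
public:
  std::size_t n_elements() const
  {
    compress(); // const on the outside, mutation under the hood
    return n;
  }

private:
  void compress() const
  {
    std::lock_guard<std::mutex> lock(compress_mutex);
    if (is_compressed)
      return;
    // ... merge contiguous ranges into canonical form ...
    is_compressed = true;
  }

  mutable std::mutex compress_mutex;
  mutable bool       is_compressed = false;
  std::size_t        n             = 0;
};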
if (query_streambuf.flushed())
{
- Threads::Mutex::ScopedLock lock(write_lock);
+ std::lock_guard<std::mutex> lock(write_lock);
// Print the line head in case of a previous newline:
if (at_newline)
const bool print_job_id,
const std::ios_base::fmtflags flags)
{
- Threads::Mutex::ScopedLock lock(log_lock);
+ std::lock_guard<std::mutex> lock(log_lock);
file = &o;
o.setf(flags);
if (print_job_id)
void
LogStream::detach()
{
- Threads::Mutex::ScopedLock lock(log_lock);
+ std::lock_guard<std::mutex> lock(log_lock);
file = nullptr;
}
unsigned int
LogStream::depth_console(const unsigned int n)
{
- Threads::Mutex::ScopedLock lock(log_lock);
- const unsigned int h = std_depth;
- std_depth = n;
+ std::lock_guard<std::mutex> lock(log_lock);
+ const unsigned int h = std_depth;
+ std_depth = n;
return h;
}
unsigned int
LogStream::depth_file(const unsigned int n)
{
- Threads::Mutex::ScopedLock lock(log_lock);
- const unsigned int h = file_depth;
- file_depth = n;
+ std::lock_guard<std::mutex> lock(log_lock);
+ const unsigned int h = file_depth;
+ file_depth = n;
return h;
}
bool
LogStream::log_thread_id(const bool flag)
{
- Threads::Mutex::ScopedLock lock(log_lock);
- const bool h = print_thread_id;
- print_thread_id = flag;
+ std::lock_guard<std::mutex> lock(log_lock);
+ const bool h = print_thread_id;
+ print_thread_id = flag;
return h;
}
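// depth_console(), depth_file() and log_thread_id() all follow the same
// swap-under-lock shape: lock, remember the old setting, install the new one,
// return the old. That lets callers do "set, use, restore" safely while other
// threads keep logging. Stripped to its essentials (free function and globals
// are illustrative):

#include <mutex>

std::mutex   log_lock;
unsigned int std_depth = 10000;

unsigned int set_depth(const unsigned int n)
{
  std::lock_guard<std::mutex> lock(log_lock);
  const unsigned int h = std_depth;
  std_depth            = n;
  return h; // e.g.: const auto old = set_depth(0); ...; set_depth(old);
}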
std::shared_ptr<tbb::affinity_partitioner>
TBBPartitioner::acquire_one_partitioner()
{
- dealii::Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (in_use)
return std::make_shared<tbb::affinity_partitioner>();
{
if (p.get() == my_partitioner.get())
{
- dealii::Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
in_use = false;
}
}
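// The TBBPartitioner hunks implement a tiny single-slot resource cache:
// acquire hands out the stored partitioner if it is free and a fresh one
// otherwise; release only resets the flag if the caller returns the cached
// object. Only the in_use flag needs the mutex. Reduced sketch (the tbb type
// is replaced by a generic placeholder):

#include <memory>
#include <mutex>

class OnePartitionerCache
{
public:
  std::shared_ptr<int> acquire()
  {
    std::lock_guard<std::mutex> lock(mutex);
    if (in_use)
      return std::make_shared<int>(); // busy: hand out a temporary
    in_use = true;
    return stored;
  }

  void release(const std::shared_ptr<int> &p)
  {
    if (p.get() == stored.get()) // cheap identity check without the lock
      {
        std::lock_guard<std::mutex> lock(mutex);
        in_use = false;
      }
  }

private:
  std::mutex           mutex;
  bool                 in_use = false;
  std::shared_ptr<int> stored = std::make_shared<int>();
};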
// of this function
// for this, acquire the lock
// until we quit this function
- Threads::Mutex::ScopedLock lock(coefficients_lock);
+ std::lock_guard<std::mutex> lock(coefficients_lock);
// The first 2 coefficients
// are hard-coded
// then get a pointer to the array
// of coefficients. do that in a MT
// safe way
- Threads::Mutex::ScopedLock lock(coefficients_lock);
+ std::lock_guard<std::mutex> lock(coefficients_lock);
return *recursive_coefficients[k];
}
// using a mutex to make sure they
// are not used by multiple threads
// at once
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
p_values.resize((values.size() == 0) ? 0 : n_sub);
p_grads.resize((grads.size() == 0) ? 0 : n_sub);
// are not used by multiple threads
// at once
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
p_values.resize((values.size() == 0) ? 0 : n_sub);
p_grads.resize((grads.size() == 0) ? 0 : n_sub);
// deal.II/create_mass_matrix_05)
// will start to produce random
// results in multithread mode
- static Threads::Mutex mutex;
- Threads::Mutex::ScopedLock lock(mutex);
+ static Threads::Mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
static std::vector<double> p_values;
static std::vector<Tensor<1, dim>> p_grads;
// using a mutex to make sure they are not used by multiple threads
// at once
{
- static Threads::Mutex mutex;
- Threads::Mutex::ScopedLock lock(mutex);
+ static Threads::Mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
static std::vector<Tensor<1, dim>> p_values;
static std::vector<Tensor<2, dim>> p_grads;
void
TimerOutput::enter_subsection(const std::string &section_name)
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
Assert(section_name.empty() == false, ExcMessage("Section string is empty."));
Assert(!active_sections.empty(),
ExcMessage("Cannot exit any section because none has been entered!"));
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (section_name != "")
{
void
TimerOutput::reset()
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
sections.clear();
active_sections.clear();
timer_all.restart();
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
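// Every prolongation/restriction hunk in this group is double-checked
// locking: the "is it computed yet?" test runs once without the lock (the
// fast path) and once more after acquiring it, because another thread may
// have built the matrix while this one waited. In outline (names are
// illustrative; the unlocked first check mirrors the code above):

#include <mutex>
#include <vector>

struct LazyMatrix
{
  const std::vector<double> &get()
  {
    if (data.size() == 0) // cheap check without the lock
      {
        std::lock_guard<std::mutex> lock(mutex);
        if (data.size() == 0)   // re-check: may have been filled meanwhile
          data.assign(16, 1.0); // stand-in for the actual computation
      }
    return data;
  }

  std::vector<double> data; // empty until first use
  std::mutex          mutex;
};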
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
- Threads::Mutex::ScopedLock lock(cache_mutex);
+ std::lock_guard<std::mutex> lock(cache_mutex);
if (cached_point != p || cached_values.size() == 0)
{
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
- Threads::Mutex::ScopedLock lock(cache_mutex);
+ std::lock_guard<std::mutex> lock(cache_mutex);
if (cached_point != p || cached_grads.size() == 0)
{
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
- Threads::Mutex::ScopedLock lock(cache_mutex);
+ std::lock_guard<std::mutex> lock(cache_mutex);
if (cached_point != p || cached_grad_grads.size() == 0)
{
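// The cache_mutex hunks memoize the last evaluation point: if the same point
// p comes in again the cached shape values are reused, otherwise they are
// recomputed, all under the lock so concurrent callers cannot see a
// half-updated cache. Schematically (names and the evaluation itself are
// placeholders):

#include <mutex>
#include <vector>

struct ShapeCache
{
  double value(const unsigned int i, const double p)
  {
    std::lock_guard<std::mutex> lock(cache_mutex);
    if (cached_point != p || cached_values.size() == 0)
      {
        cached_point = p;
        cached_values.assign(8, p); // stand-in for the real evaluation
      }
    return cached_values[i];
  }

  std::mutex          cache_mutex;
  double              cached_point = -1.;
  std::vector<double> cached_values;
};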
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
// check if updated while waiting for lock
if (this->restriction[refinement_case - 1][child].n() ==
// restriction matrix
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
- Threads::Mutex::ScopedLock lock(this->mutex);
+ std::lock_guard<std::mutex> lock(this->mutex);
if (this->prolongation[refinement_case - 1][child].n() ==
this->dofs_per_cell)
euler_dof_handler->get_fe().n_components()));
{
- Threads::Mutex::ScopedLock lock(fe_values_mutex);
+ std::lock_guard<std::mutex> lock(fe_values_mutex);
fe_values.reinit(dof_cell);
fe_values.get_function_values(*euler_vector, values);
}
// fill shift vector for each support point using an fe_values object. make
// sure that the fe_values variable isn't used simultaneously from different
// threads
- Threads::Mutex::ScopedLock lock(fe_values_mutex);
+ std::lock_guard<std::mutex> lock(fe_values_mutex);
fe_values.reinit(dof_cell);
if (mg_vector)
{
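// An FEValues object carries internal scratch state, so reinit() and the
// subsequent get_function_values() call must not run concurrently on the same
// object; fe_values_mutex serializes exactly that pair. Generic shape
// (Evaluator stands in for FEValues):

#include <mutex>

struct Evaluator
{
  void reinit(const int cell) { current_cell = cell; } // mutates state
  int  current_cell = -1;
};

std::mutex fe_values_mutex;
Evaluator  fe_values;

void process_cell(const int cell)
{
  std::lock_guard<std::mutex> lock(fe_values_mutex);
  fe_values.reinit(cell); // reinit and the reads form one critical section
  // ... read values for this cell ...
}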
}
case svd:
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
AssertDimension(v.size(), this->n());
AssertDimension(w.size(), this->m());
// Compute V^T v
}
case inverse_svd:
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
AssertDimension(w.size(), this->n());
AssertDimension(v.size(), this->m());
// Compute U^T v
}
case svd:
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
AssertDimension(w.size(), this->n());
AssertDimension(v.size(), this->m());
}
case inverse_svd:
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
AssertDimension(v.size(), this->n());
AssertDimension(w.size(), this->m());
// https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
// http://mathforum.org/kb/message.jspa?messageID=3546564
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
// First, get V*B into "work" array
work.resize(kk * nn);
// following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
number
LAPACKFullMatrix<number>::norm(const char type) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
Assert(state == LAPACKSupport::matrix ||
state == LAPACKSupport::inverse_matrix,
number
LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
Assert(state == cholesky, ExcState(state));
number rcond = 0.;
number
LAPACKFullMatrix<number>::reciprocal_condition_number() const
{
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
Assert(property == upper_triangular || property == lower_triangular,
ExcProperty(property));
number rcond = 0.;
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
const bool use_values = (std::isnan(eigenvalue_limits.first) ||
std::isnan(eigenvalue_limits.second)) ?
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
const bool use_values = (std::isnan(eigenvalue_limits.first) ||
std::isnan(eigenvalue_limits.second)) ?
ExcDimensionMismatch(grid->blacs_context,
VT->grid->blacs_context));
}
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
std::vector<NumberType> sv(std::min(n_rows, n_columns));
ExcMessage(
"Use identical block-cyclic distribution for matrices A and B"));
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
if (grid->mpi_process_is_active)
{
Assert(state == LAPACKSupport::cholesky,
ExcMessage(
"Matrix has to be in Cholesky state before calling this function."));
- Threads::Mutex::ScopedLock lock(mutex);
- NumberType rcond = 0.;
+ std::lock_guard<std::mutex> lock(mutex);
+ NumberType rcond = 0.;
if (grid->mpi_process_is_active)
{
Assert(state == LAPACKSupport::matrix ||
state == LAPACKSupport::inverse_matrix,
ExcMessage("norms can be called in matrix state only."));
- Threads::Mutex::ScopedLock lock(mutex);
- NumberType res = 0.;
+ std::lock_guard<std::mutex> lock(mutex);
+ NumberType res = 0.;
if (grid->mpi_process_is_active)
{
ExcMessage("norms can be called in matrix state only."));
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
- Threads::Mutex::ScopedLock lock(mutex);
- NumberType res = 0.;
+ std::lock_guard<std::mutex> lock(mutex);
+ NumberType res = 0.;
if (grid->mpi_process_is_active)
{
// accessed
static Threads::Mutex m;
{
- Threads::Mutex::ScopedLock l(m);
+ std::lock_guard<std::mutex> l(m);
++counter;
}
// accessed
static Threads::Mutex m;
{
- Threads::Mutex::ScopedLock l(m);
+ std::lock_guard<std::mutex> l(m);
++counter;
}
cell->get_dof_indices(local_dof_indices);
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
for (unsigned int j = 0; j < dofs_per_cell; ++j)
linear_system.matrix.add(local_dof_indices[i],
cell->get_dof_indices(local_dof_indices);
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
for (unsigned int j = 0; j < dofs_per_cell; ++j)
linear_system.matrix.add(local_dof_indices[i],
cell->get_dof_indices(local_dof_indices);
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
for (unsigned int j = 0; j < dofs_per_cell; ++j)
linear_system.matrix.add(local_dof_indices[i],
cell->get_dof_indices(local_dof_indices);
- Threads::Mutex::ScopedLock lock(mutex);
+ std::lock_guard<std::mutex> lock(mutex);
for (unsigned int i = 0; i < dofs_per_cell; ++i)
for (unsigned int j = 0; j < dofs_per_cell; ++j)
linear_system.matrix.add(local_dof_indices[i],
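// The four assembly hunks above scatter a cell's local matrix into the global
// one under a single mutex. The expensive work (building the local matrix)
// happens before the lock is taken, so threads only serialize on the cheap
// add() loop. Condensed (types and the flat local-matrix layout are
// illustrative):

#include <mutex>
#include <vector>

struct GlobalMatrix
{
  std::mutex mutex;
  void add(unsigned int, unsigned int, double) { /* scatter into storage */ }
};

void copy_local_to_global(GlobalMatrix                    &matrix,
                          const std::vector<unsigned int> &dof_indices,
                          const std::vector<double>       &cell_matrix)
{
  const unsigned int          n = dof_indices.size();
  std::lock_guard<std::mutex> lock(matrix.mutex);
  for (unsigned int i = 0; i < n; ++i)
    for (unsigned int j = 0; j < n; ++j)
      matrix.add(dof_indices[i], dof_indices[j], cell_matrix[i * n + j]);
}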
virtual void
end_sweep()
{
- static Threads::Mutex mutex;
- Threads::Mutex::ScopedLock lock(mutex);
+ static Threads::Mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
end_sweep_flags[time_step_number] = true;
}