* A lock that makes sure that this class gives reasonable results even when
* used with several threads.
*/
+#ifdef DEAL_II_WITH_THREADS
Threads::Mutex mutex;
+#endif
};
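Note on the pattern: every hunk in this patch applies the same guard. The Threads::Mutex member (or file-scope mutex) and each std::lock_guard taken on it are compiled only when DEAL_II_WITH_THREADS is defined, so a build without thread support contains neither the data member nor any locking calls. A minimal sketch of that shape, using plain std::mutex and made-up names (Counter, increment):

#include <mutex>

class Counter
{
public:
  unsigned int increment()
  {
#ifdef DEAL_II_WITH_THREADS
    // several threads may call increment() concurrently
    std::lock_guard<std::mutex> lock(mutex);
#endif
    return ++count;
  }

private:
  unsigned int count = 0;

#ifdef DEAL_II_WITH_THREADS
  // protects 'count'; only present in threaded builds
  std::mutex mutex;
#endif
};

The diff itself passes Threads::Mutex objects to std::lock_guard<std::mutex>, which presupposes that Threads::Mutex is, or derives from, std::mutex in threaded builds.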
// they're in an anonymous namespace) in order to make icc happy
// (which otherwise reports a multiply defined symbol when linking
// libraries for more than one space dimension together)
+#ifdef DEAL_II_WITH_THREADS
static Threads::Mutex fe_name_map_lock;
+#endif
// This is the map used by FETools::get_fe_by_name and
// FETools::add_fe_name. It is only accessed by functions in this
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"));
if (name_end < name.size())
name.erase(name_end);
- // first make sure that no other
- // thread intercepts the
- // operation of this function;
- // for this, acquire the lock
- // until we quit this function
+ // first make sure that no other
+ // thread intercepts the
+ // operation of this function;
+ // for this, acquire the lock
+ // until we quit this function
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(
internal::FEToolsAddFENameHelper::fe_name_map_lock);
+#endif
Assert(
internal::FEToolsAddFENameHelper::fe_name_map[dim][spacedim].find(name) ==
{
// Make sure no other thread
// is just adding an element
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(
internal::FEToolsAddFENameHelper::fe_name_map_lock);
+#endif
AssertThrow(fe_name_map.find(name_part) != fe_name_map.end(),
FETools::ExcInvalidFEName(name));
// lock access to the temporary data structure to
// allow multiple threads to call this function concurrently
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(temporary_data.mutex);
+# endif
// Resize scratch arrays
if (temporary_data.column_indices.size() < this->n_block_cols())
return;
}
- // Lock scratch arrays, then resize them
+ // Lock scratch arrays, then resize them
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(temporary_data.mutex);
+# endif
if (temporary_data.column_indices.size() < this->n_block_cols())
{
#ifdef DEAL_II_WITH_MPI
// make this function thread safe
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
// compress_requests.size() == 0
// make this function thread safe
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
Assert(partitioner->n_import_indices() == 0 || import_data != nullptr,
ExcNotInitialized());
partitioner->n_import_indices() == 0)
return;
- // make this function thread safe
+ // make this function thread safe
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
// allocate import_data in case it is not set up yet
if (import_data == nullptr && partitioner->n_import_indices() > 0)
if (update_ghost_values_requests.size() > 0)
{
// make this function thread safe
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
partitioner->export_to_ghosted_array_finish(
ArrayView<Number>(data.values.get() + partitioner->local_size(),
VectorType & dst,
const VectorType &src) const
{
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
VectorType & dst,
const VectorType &src) const
{
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
if (eigenvalues_are_initialized == false)
estimate_eigenvalues(src);
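The four preconditioner hunks above share a second ingredient besides the guard: the member functions are const but lazily compute the eigenvalue estimates on first use, so the mutex they lock has to be declared mutable, and later callers simply find eigenvalues_are_initialized already set. A sketch of that shape, with the vectors and the actual preconditioner action replaced by scalars (all names are illustrative except eigenvalues_are_initialized and estimate_eigenvalues, which appear in the diff):

#include <mutex>

class ChebyshevLikePreconditioner
{
public:
  void vmult(double &dst, const double &src) const
  {
#ifdef DEAL_II_WITH_THREADS
    // the member functions are const, hence the mutable mutex below
    std::lock_guard<std::mutex> lock(mutex);
#endif
    if (eigenvalues_are_initialized == false)
      estimate_eigenvalues(src);
    dst = scaling * src; // stand-in for the real preconditioner action
  }

private:
  void estimate_eigenvalues(const double &) const
  {
    scaling                     = 0.5; // placeholder for the actual estimate
    eigenvalues_are_initialized = true;
  }

  mutable bool   eigenvalues_are_initialized = false;
  mutable double scaling                     = 1.;
#ifdef DEAL_II_WITH_THREADS
  mutable std::mutex mutex;
#endif
};

With DEAL_II_WITH_THREADS undefined the lock disappears, but so does any concurrency, so the lazy initialization stays safe.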
if (filename != "")
kill_file();
- // if in MT mode, block all other
- // operations. if not in MT mode,
- // this is a no-op
+ // if in MT mode, block all other
+ // operations. if not in MT mode,
+ // this is a no-op
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->lock);
+#endif
Vector<number>::operator=(v);
data_is_preloaded = false;
// if in MT mode, block all other
// operations. if not in MT mode,
// this is a no-op
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->lock);
+#endif
// check that we have not called
// @p alert without the respective
// possibly existing @p alert
// calls. if not in MT mode, this
// is a no-op
+#ifdef DEAL_II_WITH_THREADS
lock.lock();
+#endif
// if data was already preloaded,
// then there is no more need to
// needed
data_is_preloaded = false;
- // release lock. the lock is
- // also released in the other
- // branch of the if-clause
+// release lock. the lock is
+// also released in the other
+// branch of the if-clause
+#ifdef DEAL_II_WITH_THREADS
lock.unlock();
+#endif
}
}
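The alert()/preload fragments above use explicit lock() and unlock() calls rather than a scoped guard, so both ends of the critical section must sit under the same conditional; guarding only one of them would leave a threaded build locking without unlocking (or the reverse). A reduced sketch with hypothetical names standing in for the member lock and the preload flag:

#include <mutex>

struct SwappableData
{
  bool data_is_preloaded = false;
#ifdef DEAL_II_WITH_THREADS
  std::mutex lock;
#endif

  void alert()
  {
#ifdef DEAL_II_WITH_THREADS
    lock.lock();
#endif
    if (data_is_preloaded == false)
      {
        // ... start or perform the reload here ...
        data_is_preloaded = true;
      }
#ifdef DEAL_II_WITH_THREADS
    // release the lock; the matching lock() above sits under the same macro
    lock.unlock();
#endif
  }
};

A std::unique_lock would pair the two calls automatically, but the patch keeps the existing explicit calls and only wraps them.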
// (there should be none, but who
// knows). if not in MT mode,
// this is a no-op
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->lock);
+#endif
// this is too bad: someone
// requested the vector in advance,
{
AssertDimension(dst_view.size(), this->m());
AssertDimension(src_view.size(), this->n());
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
- const unsigned int n =
+# endif
+ const unsigned int n =
Utilities::fixed_power<dim>(size > 0 ? size : eigenvalues[0].size());
tmp_array.resize_fast(n * 2);
constexpr int kernel_size = size > 0 ? size : 0;
{
AssertDimension(dst_view.size(), this->n());
AssertDimension(src_view.size(), this->m());
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
- const unsigned int n = size > 0 ? size : eigenvalues[0].size();
+# endif
+ const unsigned int n = size > 0 ? size : eigenvalues[0].size();
tmp_array.resize_fast(Utilities::fixed_power<dim>(n));
constexpr int kernel_size = size > 0 ? size : 0;
internal::EvaluatorTensorProduct<internal::evaluate_general,
, current_alloc(0)
, log_statistics(log_statistics)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
pool.initialize(initial_size);
}
inline VectorType *
GrowingVectorMemory<VectorType>::alloc()
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
++total_alloc;
++current_alloc;
inline void
GrowingVectorMemory<VectorType>::free(const VectorType *const v)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (typename std::vector<entry_type>::iterator i = pool.data->begin();
i != pool.data->end();
inline void
GrowingVectorMemory<VectorType>::release_unused_memory()
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
if (pool.data != nullptr)
pool.data->clear();
inline std::size_t
GrowingVectorMemory<VectorType>::memory_consumption() const
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
std::size_t result = sizeof(*this);
const typename std::vector<entry_type>::const_iterator end = pool.data->end();
static constexpr unsigned int bucket_size_threading = 256;
void
- compute_row_lengths(const unsigned int begin,
- const unsigned int end,
- const DoFInfo & dof_info,
+ compute_row_lengths(const unsigned int begin,
+ const unsigned int end,
+ const DoFInfo & dof_info,
+#ifdef DEAL_II_WITH_THREADS
std::vector<Threads::Mutex> &mutexes,
- std::vector<unsigned int> & row_lengths)
+#else
+ std::vector<Threads::Mutex> &,
+#endif
+ std::vector<unsigned int> &row_lengths)
{
std::vector<unsigned int> scratch;
const unsigned int n_components = dof_info.start_components.back();
// that are within the range of one lock at once
const unsigned int next_bucket =
(*it / bucket_size_threading + 1) * bucket_size_threading;
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(
mutexes[*it / bucket_size_threading]);
+#endif
+
for (; it != end_unique && *it < next_bucket; ++it)
{
AssertIndexRange(*it, row_lengths.size());
const unsigned int end,
const DoFInfo & dof_info,
const std::vector<unsigned int> &row_lengths,
- std::vector<Threads::Mutex> & mutexes,
- dealii::SparsityPattern & connectivity_dof)
+#ifdef DEAL_II_WITH_THREADS
+ std::vector<Threads::Mutex> &mutexes,
+#else
+ std::vector<Threads::Mutex> &,
+#endif
+ dealii::SparsityPattern &connectivity_dof)
{
std::vector<unsigned int> scratch;
const unsigned int n_components = dof_info.start_components.back();
{
const unsigned int next_bucket =
(*it / bucket_size_threading + 1) * bucket_size_threading;
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(
mutexes[*it / bucket_size_threading]);
+#endif
for (; it != end_unique && *it < next_bucket; ++it)
if (row_lengths[*it] > 0)
connectivity_dof.add(*it, block);
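Both helpers above (compute_row_lengths and the sparsity fill) shard their synchronization: indices are grouped into buckets of bucket_size_threading = 256 consecutive entries, each bucket owns its own mutex, and a thread therefore only contends with others touching the same bucket. Assuming a sorted index list, as the iteration in the diff implies, the idea reduces to the following sketch (count_entries and its arguments are invented):

#include <mutex>
#include <vector>

static constexpr unsigned int bucket_size_threading = 256;

void
count_entries(const std::vector<unsigned int> &sorted_indices,
              std::vector<std::mutex>         &mutexes,
              std::vector<unsigned int>       &row_lengths)
{
  auto it = sorted_indices.begin();
  while (it != sorted_indices.end())
    {
      // all indices below next_bucket share one mutex
      const unsigned int next_bucket =
        (*it / bucket_size_threading + 1) * bucket_size_threading;
      std::lock_guard<std::mutex> lock(mutexes[*it / bucket_size_threading]);
      for (; it != sorted_indices.end() && *it < next_bucket; ++it)
        ++row_lengths[*it];
    }
}

A caller would size the mutex vector as something like row_lengths.size() / bucket_size_threading + 1; with threads disabled, the #else branches in the diff leave that parameter unnamed and it is never touched.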
// guard access to the aux_*
// variables in multithread mode
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (unsigned int d = 0; d < dim + 1; ++d)
aux_gradients[d].resize(n_points);
// guard access to the aux_*
// variables in multithread mode
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
for (unsigned int d = 0; d < dim + 1; ++d)
aux_values[d].resize(n_points);
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(acc_mutex);
+# endif
const double x = p[0];
Assert(x >= interpolation_points.front() &&
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(acc_mutex);
+# endif
const double x = p[0];
Assert(x >= interpolation_points.front() &&
// GSL functions may modify gsl_interp_accel *acc object (last argument).
// This can only work in multithreaded applications if we lock the data
// structures via a mutex.
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(acc_mutex);
+# endif
const double x = p[0];
Assert(x >= interpolation_points.front() &&
double
mu_rand_seed(double seed)
{
+# ifdef DEAL_II_WITH_THREADS
static Threads::Mutex rand_mutex;
std::lock_guard<std::mutex> lock(rand_mutex);
+# endif
static boost::random::uniform_real_distribution<> uniform_distribution(0,
1);
double
mu_rand()
{
- static Threads::Mutex rand_mutex;
- std::lock_guard<std::mutex> lock(rand_mutex);
+# ifdef DEAL_II_WITH_THREADS
+ static Threads::Mutex rand_mutex;
+ std::lock_guard<std::mutex> lock(rand_mutex);
+# endif
static boost::random::uniform_real_distribution<> uniform_distribution(0,
1);
static boost::random::mt19937 rng(
// via a mutex, so that users can call 'const' functions from threads
// in parallel (and these 'const' functions can then call compress()
// which itself calls the current function)
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(compress_mutex);
+#endif
// see if any of the contiguous ranges can be merged. do not use
// std::vector::erase in-place as it is quadratic in the number of
namespace
{
+#ifdef DEAL_II_WITH_THREADS
Threads::Mutex log_lock;
Threads::Mutex write_lock;
+#endif
} // namespace
if (query_streambuf.flushed())
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(write_lock);
+#endif
// Print the line head in case of a previous newline:
if (at_newline)
const bool print_job_id,
const std::ios_base::fmtflags flags)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(log_lock);
+#endif
file = &o;
o.setf(flags);
if (print_job_id)
void
LogStream::detach()
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(log_lock);
+#endif
file = nullptr;
}
unsigned int
LogStream::depth_console(const unsigned int n)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(log_lock);
- const unsigned int h = std_depth;
- std_depth = n;
+#endif
+ const unsigned int h = std_depth;
+ std_depth = n;
return h;
}
unsigned int
LogStream::depth_file(const unsigned int n)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(log_lock);
- const unsigned int h = file_depth;
- file_depth = n;
+#endif
+ const unsigned int h = file_depth;
+ file_depth = n;
return h;
}
bool
LogStream::log_thread_id(const bool flag)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(log_lock);
- const bool h = print_thread_id;
- print_thread_id = flag;
+#endif
+ const bool h = print_thread_id;
+ print_thread_id = flag;
return h;
}
// more fine-grained solution
namespace
{
+#ifdef DEAL_II_WITH_THREADS
Threads::Mutex coefficients_lock;
-}
+#endif
+} // namespace
// of this function
// for this, acquire the lock
// until we quit this function
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(coefficients_lock);
+#endif
// The first 2 coefficients
// are hard-coded
}
else if (k == 2)
{
+#ifdef DEAL_II_WITH_THREADS
coefficients_lock.unlock();
+#endif
compute_coefficients(1);
+#ifdef DEAL_II_WITH_THREADS
coefficients_lock.lock();
+#endif
std::vector<double> c2(3);
// allow the called
// function to acquire it
// itself
+#ifdef DEAL_II_WITH_THREADS
coefficients_lock.unlock();
+#endif
compute_coefficients(k - 1);
+#ifdef DEAL_II_WITH_THREADS
coefficients_lock.lock();
+#endif
std::vector<double> ck(k + 1);
// then get a pointer to the array
// of coefficients. do that in a MT
// safe way
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(coefficients_lock);
+#endif
return *recursive_coefficients[k];
}
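The compute_coefficients hunks above release coefficients_lock around the recursive call and re-acquire it afterwards: the mutex is not recursive, so recursing while holding it would deadlock on the first nested acquisition. A condensed sketch of that control flow (the recurrence itself is a placeholder; only coefficients_lock, recursive_coefficients and compute_coefficients are names from the diff):

#include <memory>
#include <mutex>
#include <vector>

std::mutex                                        coefficients_lock;
std::vector<std::unique_ptr<std::vector<double>>> recursive_coefficients;

void
compute_coefficients(const unsigned int k)
{
  std::unique_lock<std::mutex> lock(coefficients_lock);

  if (recursive_coefficients.size() > k &&
      recursive_coefficients[k] != nullptr)
    return; // already computed

  if (k > 0)
    {
      // the nested call needs the same non-recursive mutex,
      // so give it up for the duration of the recursion
      lock.unlock();
      compute_coefficients(k - 1);
      lock.lock();
      if (recursive_coefficients.size() > k &&
          recursive_coefficients[k] != nullptr)
        return; // filled by a concurrent caller while we were unlocked
    }

  if (recursive_coefficients.size() <= k)
    recursive_coefficients.resize(k + 1);
  recursive_coefficients[k] =
    std::make_unique<std::vector<double>>(k + 1, 1.); // placeholder recurrence
}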
// using a mutex to make sure they
// are not used by multiple threads
// at once
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
p_values.resize((values.size() == 0) ? 0 : n_sub);
p_grads.resize((grads.size() == 0) ? 0 : n_sub);
// are not used by multiple threads
// at once
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
p_values.resize((values.size() == 0) ? 0 : n_sub);
p_grads.resize((grads.size() == 0) ? 0 : n_sub);
Assert(fourth_derivatives.size() == n_pols || fourth_derivatives.size() == 0,
ExcDimensionMismatch(fourth_derivatives.size(), n_pols));
- // have a few scratch
- // arrays. because we don't want to
- // re-allocate them every time this
- // function is called, we make them
- // static. however, in return we
- // have to ensure that the calls to
- // the use of these variables is
- // locked with a mutex. if the
- // mutex is removed, several tests
- // (notably
- // deal.II/create_mass_matrix_05)
- // will start to produce random
- // results in multithread mode
+// have a few scratch
+// arrays. because we don't want to
+// re-allocate them every time this
+// function is called, we make them
+// static. however, in return we
+// have to ensure that access to
+// these variables is guarded by a
+// mutex. if the mutex is removed,
+// several tests (notably
+// deal.II/create_mass_matrix_05)
+// will start to produce random
+// results in multithread mode
+#ifdef DEAL_II_WITH_THREADS
static Threads::Mutex mutex;
std::lock_guard<std::mutex> lock(mutex);
+#endif
static std::vector<double> p_values;
static std::vector<Tensor<1, dim>> p_grads;
// using a mutex to make sure they are not used by multiple threads
// at once
{
+#ifdef DEAL_II_WITH_THREADS
static Threads::Mutex mutex;
std::lock_guard<std::mutex> lock(mutex);
+#endif
static std::vector<Tensor<1, dim>> p_values;
static std::vector<Tensor<2, dim>> p_grads;
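The two polynomial-evaluation hunks above keep their scratch arrays static to avoid re-allocating them on every call, which turns them into state shared by all threads; that is why the function-local static mutex, itself compiled only in threaded builds, has to serialize access. Without it, two threads can resize each other's scratch space mid-use, which is the kind of intermittent failure the create_mass_matrix_05 comment refers to. A small sketch with an invented function:

#include <cstddef>
#include <mutex>
#include <vector>

double
sum_of_squares(const std::vector<double> &input)
{
#ifdef DEAL_II_WITH_THREADS
  // the static scratch array below is shared by every caller,
  // so access to it has to be serialized
  static std::mutex           mutex;
  std::lock_guard<std::mutex> lock(mutex);
#endif
  static std::vector<double> scratch; // reused across calls
  scratch.resize(input.size());

  double sum = 0.;
  for (std::size_t i = 0; i < input.size(); ++i)
    {
      scratch[i] = input[i] * input[i];
      sum += scratch[i];
    }
  return sum;
}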
void
TimerOutput::enter_subsection(const std::string &section_name)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
Assert(section_name.empty() == false, ExcMessage("Section string is empty."));
Assert(!active_sections.empty(),
ExcMessage("Cannot exit any section because none has been entered!"));
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
if (section_name != "")
{
void
TimerOutput::reset()
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
sections.clear();
active_sections.clear();
timer_all.restart();
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(cache_mutex);
+#endif
if (cached_point != p || cached_values.size() == 0)
{
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(cache_mutex);
+#endif
if (cached_point != p || cached_grads.size() == 0)
{
Assert(i < this->dofs_per_cell, ExcIndexRange(i, 0, this->dofs_per_cell));
Assert(component < dim, ExcIndexRange(component, 0, dim));
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(cache_mutex);
+#endif
if (cached_point != p || cached_grad_grads.size() == 0)
{
// initialization upon first request
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock
if (this->prolongation[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// if matrix got updated while waiting for the lock...
if (this->restriction[refinement_case - 1][child].n() ==
// initialization upon first request
if (this->restriction[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
// check if updated while waiting for lock
if (this->restriction[refinement_case - 1][child].n() ==
// restriction matrix
if (this->prolongation[refinement_case - 1][child].n() == 0)
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(this->mutex);
+#endif
if (this->prolongation[refinement_case - 1][child].n() ==
this->dofs_per_cell)
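The get_prolongation_matrix/get_restriction_matrix hunks follow a double-checked pattern: the matrix size is tested once without the lock, the lock is taken only on the slow path, and the size is tested again afterwards because another thread may have built the matrix while this one was waiting. Reduced to a skeleton (the matrix is just a flat vector here, and all names are illustrative):

#include <mutex>
#include <vector>

class FiniteElementLike
{
public:
  const std::vector<double> &get_prolongation_matrix() const
  {
    // cheap check without the lock: after first use this is the common path
    if (prolongation.empty())
      {
#ifdef DEAL_II_WITH_THREADS
        std::lock_guard<std::mutex> lock(mutex);
#endif
        // re-check: the matrix may have been built while waiting for the lock
        if (prolongation.empty())
          prolongation.assign(dofs_per_cell * dofs_per_cell, 0.); // placeholder
      }
    return prolongation;
  }

private:
  unsigned int                dofs_per_cell = 4;
  mutable std::vector<double> prolongation;
#ifdef DEAL_II_WITH_THREADS
  mutable std::mutex mutex;
#endif
};

The re-check under the lock is what prevents the matrix from being built twice; the unlocked first check simply mirrors the structure already present in the diff.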
euler_dof_handler->get_fe().n_components()));
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(fe_values_mutex);
+#endif
fe_values.reinit(dof_cell);
fe_values.get_function_values(*euler_vector, values);
}
// fill shift vector for each support point using an fe_values object. make
// sure that the fe_values variable isn't used simultaneously from different
// threads
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(fe_values_mutex);
+#endif
fe_values.reinit(dof_cell);
if (mg_vector)
{
}
case svd:
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
AssertDimension(v.size(), this->n());
AssertDimension(w.size(), this->m());
// Compute V^T v
}
case inverse_svd:
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
AssertDimension(w.size(), this->n());
AssertDimension(v.size(), this->m());
// Compute U^T v
}
case svd:
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
AssertDimension(w.size(), this->n());
AssertDimension(v.size(), this->m());
}
case inverse_svd:
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
AssertDimension(v.size(), this->n());
AssertDimension(w.size(), this->m());
// https://stackoverflow.com/questions/3548069/multiplying-three-matrices-in-blas-with-the-middle-one-being-diagonal
// http://mathforum.org/kb/message.jspa?messageID=3546564
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
// First, get V*B into "work" array
work.resize(kk * nn);
// following http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=2&t=768#p2577
number
LAPACKFullMatrix<number>::norm(const char type) const
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
Assert(state == LAPACKSupport::matrix ||
state == LAPACKSupport::inverse_matrix,
number
LAPACKFullMatrix<number>::reciprocal_condition_number(const number a_norm) const
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
Assert(state == cholesky, ExcState(state));
number rcond = 0.;
number
LAPACKFullMatrix<number>::reciprocal_condition_number() const
{
+#ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+#endif
Assert(property == upper_triangular || property == lower_triangular,
ExcProperty(property));
number rcond = 0.;
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
const bool use_values = (std::isnan(eigenvalue_limits.first) ||
std::isnan(eigenvalue_limits.second)) ?
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
const bool use_values = (std::isnan(eigenvalue_limits.first) ||
std::isnan(eigenvalue_limits.second)) ?
ExcDimensionMismatch(grid->blacs_context,
VT->grid->blacs_context));
}
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
std::vector<NumberType> sv(std::min(n_rows, n_columns));
ExcMessage(
"Use identical block-cyclic distribution for matrices A and B"));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
+# endif
if (grid->mpi_process_is_active)
{
Assert(state == LAPACKSupport::cholesky,
ExcMessage(
"Matrix has to be in Cholesky state before calling this function."));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
- NumberType rcond = 0.;
+# endif
+ NumberType rcond = 0.;
if (grid->mpi_process_is_active)
{
Assert(state == LAPACKSupport::matrix ||
state == LAPACKSupport::inverse_matrix,
ExcMessage("norms can be called in matrix state only."));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
- NumberType res = 0.;
+# endif
+ NumberType res = 0.;
if (grid->mpi_process_is_active)
{
ExcMessage("norms can be called in matrix state only."));
Assert(property == LAPACKSupport::symmetric,
ExcMessage("Matrix has to be symmetric for this operation."));
+# ifdef DEAL_II_WITH_THREADS
std::lock_guard<std::mutex> lock(mutex);
- NumberType res = 0.;
+# endif
+ NumberType res = 0.;
if (grid->mpi_process_is_active)
{