#include <deal.II/base/exceptions.h>
#include <deal.II/base/template_constraints.h>
#include <deal.II/base/synchronous_iterator.h>
+#include <deal.II/base/thread_management.h>
#include <deal.II/base/std_cxx11/tuple.h>
#include <deal.II/base/std_cxx11/bind.h>
#endif
}
+
+// --------------------- for loop affinity partitioner -----------------------
+
+ /**
+ * A class that wraps a TBB affinity partitioner in a thread-safe way. In
+ * Vector, we use a shared pointer to share an affinity partitioner
+ * between different vectors of the same size for improving data (and
+ * NUMA) locality. However, when an outer task does multiple vector
+ * operations, the shared pointer could lead to race conditions. This
+ * class therefore hands out the partitioner to only one caller at a
+ * time; all other callers receive a null pointer and need to create
+ * their own, private partitioner.
+ */
+ namespace internal
+ {
+ class TBBPartitioner
+ {
+ public:
+ /**
+ * Constructor.
+ */
+ TBBPartitioner()
+#ifdef DEAL_II_WITH_THREADS
+ :
+ in_use(false)
+#endif
+ {}
+
+#ifdef DEAL_II_WITH_THREADS
+ /**
+ * Return a pointer to the affinity partitioner if it is currently free.
+ * If it is already in use (i.e., acquire() was called without a
+ * subsequent release()), a null pointer is returned.
+ */
+ tbb::affinity_partitioner *acquire()
+ {
+ dealii::Threads::Mutex::ScopedLock lock(mutex);
+ if (in_use)
+ return NULL;
+
+ in_use = true;
+ return &partitioner;
+ }
+
+ /**
+ * After the partitioner obtained through acquire() has been used in a
+ * TBB loop, this call makes it available to other callers again.
+ */
+ void release()
+ {
+ dealii::Threads::Mutex::ScopedLock lock(mutex);
+ in_use = false;
+ }
+
+ private:
+ /**
+ * The stored partitioner.
+ */
+ tbb::affinity_partitioner partitioner;
+
+ /**
+ * A flag to indicate whether the partitioner has been acquired but not
+ * released yet, i.e., it is in use somewhere else.
+ */
+ bool in_use;
+
+ /**
+ * A mutex to guard access to the in_use flag.
+ */
+ dealii::Threads::Mutex mutex;
+#endif
+ };
+ }
}
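
For orientation, here is a minimal usage sketch of the acquire()/release() protocol (illustrative only, not part of the patch; run_loop, LoopBody, and the grain size 1024 are placeholders). It mirrors the pattern used in vectorized_transform() further down in this patch:

#include <cstddef>
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/partitioner.h>

// LoopBody is any functor providing
// operator()(const tbb::blocked_range<std::size_t> &) const
template <typename LoopBody>
void run_loop (LoopBody                                   &loop_body,
               const std::size_t                           size,
               dealii::parallel::internal::TBBPartitioner *stored_partitioner)
{
  // try to reuse the shared partitioner; if another thread currently
  // holds it, acquire() returns NULL and we fall back to a private one
  tbb::affinity_partitioner *p =
    (stored_partitioner != NULL) ? stored_partitioner->acquire() : NULL;
  tbb::affinity_partitioner private_partitioner;
  if (p == NULL)
    p = &private_partitioner;

  tbb::parallel_for (tbb::blocked_range<std::size_t> (0, size, 1024),
                     loop_body, *p);

  // make the shared partitioner available to other callers again
  if (stored_partitioner != NULL)
    stored_partitioner->release();
}
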
template <typename> class VectorView;
+namespace parallel
+{
+ namespace internal
+ {
+ class TBBPartitioner;
+ }
+}
*/
Number *val;
+ /**
+ * For parallel loops with TBB, this member variable stores the affinity
+ * information of the loops so that subsequent loops over this vector can
+ * reuse it.
+ */
+ std_cxx11::shared_ptr<parallel::internal::TBBPartitioner> thread_loop_partitioner;
+
/**
* Make all other vector types friends.
*/
-template <typename Number>
-inline
-void Vector<Number>::reinit (const size_type n,
- const bool omit_zeroing_entries)
-{
- if (n==0)
- {
- if (val) deallocate();
- val = 0;
- max_vec_size = vec_size = 0;
- return;
- };
-
- if (n>max_vec_size)
- {
- if (val) deallocate();
- max_vec_size = n;
- allocate();
- };
- vec_size = n;
- if (omit_zeroing_entries == false)
- *this = static_cast<Number>(0);
-}
-
-
-
-// declare function that is implemented in vector.templates.h
-namespace internal
-{
- namespace Vector
- {
- template <typename T, typename U>
- void copy_vector (const dealii::Vector<T> &src,
- dealii::Vector<U> &dst);
- }
-}
-
-
-
-template <typename Number>
-inline
-Vector<Number> &
-Vector<Number>::operator= (const Vector<Number> &v)
-{
- dealii::internal::Vector::copy_vector (v, *this);
- return *this;
-}
-
-
-
-#ifdef DEAL_II_WITH_CXX11
-template <typename Number>
-inline
-Vector<Number> &
-Vector<Number>::operator= (Vector<Number> &&v)
-{
- Subscriptor::operator=(std::move(v));
-
- if (val) deallocate();
-
- vec_size = v.vec_size;
- max_vec_size = v.max_vec_size;
- val = v.val;
-
- v.vec_size = 0;
- v.max_vec_size = 0;
- v.val = nullptr;
-
- return *this;
-}
-#endif
-
-
-
-template <typename Number>
-template <typename Number2>
-inline
-Vector<Number> &
-Vector<Number>::operator= (const Vector<Number2> &v)
-{
- internal::Vector::copy_vector (v, *this);
- return *this;
-}
-
-
-
template <typename Number>
inline
std::size_t Vector<Number>::size () const
template <typename Functor>
void vectorized_transform(Functor &functor,
- size_type vec_size)
+ size_type vec_size,
+ parallel::internal::TBBPartitioner *partitioner = NULL)
{
#ifdef DEAL_II_WITH_THREADS
// only go to the parallel function in case there is work for at least four
// chunks of minimum_parallel_grain_size and more than one thread is available
if (vec_size >= 4*internal::Vector::minimum_parallel_grain_size &&
MultithreadInfo::n_threads() > 1)
{
+ // in case the partitioner was used by another thread, create a
+ // private partitioner here for use in the loop (we could also use the
+ // auto partitioner of TBB but that increases code size considerably
+ // without major benefit)
+ tbb::affinity_partitioner *tbb_partitioner = (partitioner == NULL) ?
+ NULL : partitioner->acquire();
+ tbb::affinity_partitioner private_partitioner;
+ if (tbb_partitioner == NULL)
+ tbb_partitioner = &private_partitioner;
+
tbb::parallel_for (tbb::blocked_range<size_type> (0,
vec_size,
internal::Vector::minimum_parallel_grain_size),
functor,
- tbb::auto_partitioner());
+ *tbb_partitioner);
+ if (partitioner != NULL)
+ partitioner->release();
}
else if (vec_size > 0)
-#endif
functor(0,vec_size);
+#else
+ functor(0,vec_size);
+#endif
}
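
The two call sites above define the interface a functor must provide: the serial fallback invokes functor(0, vec_size), while the threaded path hands the functor to tbb::parallel_for, which calls operator()(blocked_range). A minimal sketch of a compatible functor follows (illustrative only; the name ExampleScale and the use of std::size_t for the size type are placeholders):

#include <cstddef>
#ifdef DEAL_II_WITH_THREADS
#  include <tbb/blocked_range.h>
#endif

template <typename Number>
struct ExampleScale
{
  typedef std::size_t size_type;   // placeholder for the vector's size type

  Number *val;
  Number  factor;

  // interface used by the serial fallback: functor(0, vec_size)
  void operator() (const size_type begin, const size_type end) const
  {
    for (size_type i=begin; i<end; ++i)
      val[i] *= factor;
  }

#ifdef DEAL_II_WITH_THREADS
  // interface required by tbb::parallel_for
  void operator() (const tbb::blocked_range<size_type> &range) const
  {
    operator()(range.begin(), range.end());
  }
#endif
};
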
}
}
};
-
- // function used in the header file
- namespace Vector
- {
- template <typename T, typename U>
- void copy_vector (const dealii::Vector<T> &src,
- dealii::Vector<U> &dst)
- {
- if (PointerComparison::equal(&src, &dst))
- return;
-
- const typename dealii::Vector<T>::size_type vec_size = src.size();
- const typename dealii::Vector<U>::size_type dst_size = dst.size();
- if (dst_size != vec_size)
- dst.reinit (src, true);
- dealii::internal::Vector_copy<U,T> copier;
- copier.dst = dst.begin();
- copier.src = src.begin();
- internal::vectorized_transform(copier,vec_size);
- }
- }
}
Subscriptor(std::move(v)),
vec_size(v.vec_size),
max_vec_size(v.max_vec_size),
- val(v.val)
+ val(v.val),
+ thread_loop_partitioner(std::move(v.thread_loop_partitioner))
{
v.vec_size = 0;
v.max_vec_size = 0;
#endif
+template <typename Number>
+inline
+Vector<Number> &
+Vector<Number>::operator= (const Vector<Number> &v)
+{
+ if (PointerComparison::equal(this, &v))
+ return *this;
+
+ if (vec_size != v.vec_size)
+ reinit (v, true);
+
+ dealii::internal::Vector_copy<Number,Number> copier;
+ copier.dst = val;
+ copier.src = v.val;
+ internal::vectorized_transform(copier,vec_size,thread_loop_partitioner.get());
+
+ return *this;
+}
+
+
+
+#ifdef DEAL_II_WITH_CXX11
+template <typename Number>
+inline
+Vector<Number> &
+Vector<Number>::operator= (Vector<Number> &&v)
+{
+ Subscriptor::operator=(std::move(v));
+
+ if (val) deallocate();
+
+ vec_size = v.vec_size;
+ max_vec_size = v.max_vec_size;
+ val = v.val;
+ thread_loop_partitioner = std::move(v.thread_loop_partitioner);
+
+ v.vec_size = 0;
+ v.max_vec_size = 0;
+ v.val = nullptr;
+
+ return *this;
+}
+#endif
+
+
+
+template <typename Number>
+template <typename Number2>
+inline
+Vector<Number> &
+Vector<Number>::operator= (const Vector<Number2> &v)
+{
+ if (vec_size != v.vec_size)
+ reinit (v, true);
+
+ dealii::internal::Vector_copy<Number,Number2> copier;
+ copier.dst = val;
+ copier.src = v.val;
+ internal::vectorized_transform(copier,vec_size,thread_loop_partitioner.get());
+
+ return *this;
+}
+
+
+
+template <typename Number>
+inline
+void Vector<Number>::reinit (const size_type n,
+ const bool omit_zeroing_entries)
+{
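+  // drop the current partitioner: any stored affinity information refers
+  // to loops over the previous size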
+ thread_loop_partitioner.reset();
+ if (n==0)
+ {
+ if (val) deallocate();
+ val = 0;
+ max_vec_size = vec_size = 0;
+ return;
+ };
+
+ if (n>max_vec_size)
+ {
+ if (val) deallocate();
+ max_vec_size = n;
+ allocate();
+ };
+ vec_size = n;
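+  // set up a fresh partitioner for parallel loops over the new size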
+ thread_loop_partitioner.reset(new parallel::internal::TBBPartitioner());
+ if (omit_zeroing_entries == false)
+ *this = static_cast<Number>(0);
+}
+
+
+
template <typename Number>
template <typename Number2>
void Vector<Number>::reinit (const Vector<Number2> &v,
const bool omit_zeroing_entries)
{
- reinit (v.size(), omit_zeroing_entries);
+ thread_loop_partitioner = v.thread_loop_partitioner;
+
+ if (v.vec_size==0)
+ {
+ if (val) deallocate();
+ val = 0;
+ max_vec_size = vec_size = 0;
+ return;
+ };
+
+ if (v.vec_size>max_vec_size)
+ {
+ if (val) deallocate();
+ max_vec_size = v.vec_size;
+ allocate();
+ };
+ vec_size = v.vec_size;
+ if (omit_zeroing_entries == false)
+ *this = static_cast<Number>(0);
}
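
Since reinit(v, ...) copies v's shared pointer at the top of this function, vectors sized from one another share a single TBBPartitioner, which is what lets repeated loops over same-sized vectors reuse the TBB affinity information. A brief, hypothetical usage illustration (not part of the patch):

Vector<double> u (1000000);                    // reinit(n) creates u's own partitioner
Vector<double> v;
v.reinit (u, /*omit_zeroing_entries=*/true);   // v now shares u's partitioner

u *= 2.;   // vectorized_transform() acquires the shared partitioner
v += u;    // a loop over v can reuse the same affinity information
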
setter.dst = val;
setter.value = s;
- internal::vectorized_transform(setter,vec_size);
+ internal::vectorized_transform(setter,vec_size,thread_loop_partitioner.get());
return *this;
}
vector_multiply.val = val;
vector_multiply.factor = factor;
- internal::vectorized_transform(vector_multiply,vec_size);
+ internal::vectorized_transform(vector_multiply,vec_size,thread_loop_partitioner.get());
return *this;
}
vector_add_av.val = val;
vector_add_av.v_val = v.val;
vector_add_av.factor = a;
- internal::vectorized_transform(vector_add_av,vec_size);
+ internal::vectorized_transform(vector_add_av,vec_size,thread_loop_partitioner.get());
}
vector_sadd_xav.v_val = v.val;
vector_sadd_xav.a = a;
vector_sadd_xav.x = x;
- internal::vectorized_transform(vector_sadd_xav,vec_size);
+ internal::vectorized_transform(vector_sadd_xav,vec_size,thread_loop_partitioner.get());
}
internal::Vectorization_subtract_v<Number> vector_subtract;
vector_subtract.val = val;
vector_subtract.v_val = v.val;
- internal::vectorized_transform(vector_subtract,vec_size);
+ internal::vectorized_transform(vector_subtract,vec_size,thread_loop_partitioner.get());
return *this;
}
internal::Vectorization_add_factor<Number> vector_add;
vector_add.val = val;
vector_add.factor = v;
- internal::vectorized_transform(vector_add,vec_size);
+ internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner.get());
}
internal::Vectorization_add_v<Number> vector_add;
vector_add.val = val;
vector_add.v_val = v.val;
- internal::vectorized_transform(vector_add,vec_size);
+ internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner.get());
}
vector_add.w_val = w.val;
vector_add.a = a;
vector_add.b = b;
- internal::vectorized_transform(vector_add,vec_size);
+ internal::vectorized_transform(vector_add,vec_size,thread_loop_partitioner.get());
}
vector_sadd.val = val;
vector_sadd.v_val = v.val;
vector_sadd.x = x;
- internal::vectorized_transform(vector_sadd,vec_size);
+ internal::vectorized_transform(vector_sadd,vec_size,thread_loop_partitioner.get());
}
vector_sadd.x = x;
vector_sadd.a = a;
vector_sadd.b = b;
- internal::vectorized_transform(vector_sadd,vec_size);
+ internal::vectorized_transform(vector_sadd,vec_size,thread_loop_partitioner.get());
}
internal::Vectorization_scale<Number> vector_scale;
vector_scale.val = val;
vector_scale.v_val = s.val;
- internal::vectorized_transform(vector_scale,vec_size);
+ internal::vectorized_transform(vector_scale,vec_size,thread_loop_partitioner.get());
}
vector_equ.val = val;
vector_equ.u_val = u.val;
vector_equ.a = a;
- internal::vectorized_transform(vector_equ,vec_size);
+ internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner.get());
}
vector_equ.v_val = v.val;
vector_equ.a = a;
vector_equ.b = b;
- internal::vectorized_transform(vector_equ,vec_size);
+ internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner.get());
}
vector_equ.a = a;
vector_equ.b = b;
vector_equ.c = c;
- internal::vectorized_transform(vector_equ,vec_size);
+ internal::vectorized_transform(vector_equ,vec_size,thread_loop_partitioner.get());
}
vector_ratio.val = val;
vector_ratio.a_val = a.val;
vector_ratio.b_val = b.val;
- internal::vectorized_transform(vector_ratio,vec_size);
+ internal::vectorized_transform(vector_ratio,vec_size,thread_loop_partitioner.get());
}