Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
|
Go to the documentation of this file.
17 #ifndef _TBB_scheduler_common_H
18 #define _TBB_scheduler_common_H
27 #if TBB_USE_ASSERT > 1
35 #define private public
46 #ifndef __TBB_SCHEDULER_MUTEX_TYPE
47 #define __TBB_SCHEDULER_MUTEX_TYPE tbb::spin_mutex
55 #if __TBB_TASK_GROUP_CONTEXT
56 #define __TBB_CONTEXT_ARG1(context) context
57 #define __TBB_CONTEXT_ARG(arg1, context) arg1, context
59 #define __TBB_CONTEXT_ARG1(context)
60 #define __TBB_CONTEXT_ARG(arg1, context) arg1
63 #if __TBB_TASK_ISOLATION
64 #define __TBB_ISOLATION_EXPR(isolation) isolation
65 #define __TBB_ISOLATION_ARG(arg1, isolation) arg1, isolation
67 #define __TBB_ISOLATION_EXPR(isolation)
68 #define __TBB_ISOLATION_ARG(arg1, isolation) arg1
74 #define TBB_TRACE(x) ((void)std::printf x)
76 #define TBB_TRACE(x) ((void)(0))
79 #if !__TBB_CPU_CTL_ENV_PRESENT
83 #if _MSC_VER && !defined(__INTEL_COMPILER)
87 #pragma warning (disable: 4100 4127 4312 4244 4267 4706)
91 namespace interface7 {
93 class task_arena_base;
98 using namespace interface7::internal;
101 template<
typename SchedulerTraits>
class custom_scheduler;
102 class generic_scheduler;
106 class observer_proxy;
107 class task_scheduler_observer_v3;
109 #if __TBB_TASK_PRIORITY
113 inline intptr_t normalize_priority (
priority_t p ) {
121 inline void assert_priority_valid ( intptr_t
p ) {
125 inline intptr_t& priority (
task& t ) {
126 return t.prefix().context->my_priority;
135 #if __TBB_TASK_GROUP_CONTEXT
146 extern uintptr_t the_context_state_propagation_epoch;
151 extern context_state_propagation_mutex_type the_context_state_propagation_mutex;
167 #if __TBB_PREVIEW_CRITICAL_TASKS
168 es_task_critical = 0x8,
// Overwrite `val` with the "venom" poison pattern, reinterpreting the bytes
// of `venom` as T via punned_cast so a later is_alive() check can detect use
// of a dead object. NOTE(review): an alternative no-op macro definition
// (((void)0)) exists under another configuration — presumably this poisoning
// version is active only in assertion-enabled builds; confirm the guard.
211 template <
typename T>
212 void poison_value ( T& val ) { val = * punned_cast<T*>(&venom); }
// Returns true if `v` does not hold the `venom` poison pattern, i.e. the
// containing object has not been poisoned by poison_value(). Debugging aid.
215 inline bool is_alive( uintptr_t v ) {
return v != venom; }
224 #if __TBB_RECYCLE_TO_ENQUEUE
225 __TBB_ASSERT( (
unsigned)
task->state()<=(unsigned)task::to_enqueue,
"corrupt task (invalid state)" );
235 #define poison_value(g) ((void)0)
245 #if __TBB_TASK_GROUP_CONTEXT
250 inline bool CancellationInfoPresent (
task& t ) {
251 return t.prefix().context->my_cancellation_requested != 0;
254 #if TBB_USE_CAPTURED_EXCEPTION
// Capture helper for the case where the in-flight exception already is a
// tbb_exception: transfer it out of `src` via its virtual move() so it can
// be rethrown later with throw_self(). The task_group_context argument is
// unused in this overload (kept for signature parity with the
// captured_exception overload).
255 inline tbb_exception* TbbCurrentException( task_group_context*, tbb_exception* src) {
return src->move(); }
256 inline tbb_exception* TbbCurrentException( task_group_context* c, captured_exception* src) {
258 runtime_warning(
"Exact exception propagation is requested by application but the linked library is built without support for it");
261 #define TbbRethrowException(TbbCapturedException) (TbbCapturedException)->throw_self()
// Capture the current exception according to the context's traits: when
// exact_exception propagation is requested, tbb_exception_ptr::allocate()
// with no arguments is used — presumably capturing the live C++ exception
// via std::current_exception(); otherwise the pre-captured exception object
// is copied. NOTE(review): the trailing ';' is part of the macro expansion,
// so call sites should not add their own — verify against callers.
265 #define TbbCurrentException(context, TbbCapturedException) \
266 context->my_version_and_traits & task_group_context::exact_exception \
267 ? tbb_exception_ptr::allocate() \
268 : tbb_exception_ptr::allocate( *(TbbCapturedException) );
269 #define TbbRethrowException(TbbCapturedException) \
271 if( governor::rethrow_exception_broken() ) fix_broken_rethrow(); \
272 (TbbCapturedException)->throw_self(); \
276 #define TbbRegisterCurrentException(context, TbbCapturedException) \
277 if ( context->cancel_group_execution() ) { \
279 context->my_exception = TbbCurrentException( context, TbbCapturedException ); \
282 #define TbbCatchAll(context) \
283 catch ( tbb_exception& exc ) { \
284 TbbRegisterCurrentException( context, &exc ); \
285 } catch ( std::exception& exc ) { \
286 TbbRegisterCurrentException( context, captured_exception::allocate(typeid(exc).name(), exc.what()) ); \
288 TbbRegisterCurrentException( context, captured_exception::allocate("...", "Unidentified exception") );\
298 #if defined(__TBB_time_stamp) && !__TBB_STEALING_PAUSE
312 }
while ( prev < finish );
314 #ifdef __TBB_STEALING_PAUSE
317 static const long PauseTime = 1500;
319 static const long PauseTime = 80;
345 #if __TBB_PREVIEW_RESUMABLE_TASKS
346 tbb::atomic<bool>* my_scheduler_is_recalled;
356 #if __TBB_PREVIEW_CRITICAL_TASKS
357 unsigned hint_for_critical;
372 statistics_counters *my_counters;
379 void fill_with_canary_pattern (
size_t first,
size_t last ) {
389 my_task_pool_size = byte_size /
sizeof(
task*);
393 fill_with_canary_pattern( 0, my_task_pool_size );
400 if( task_pool_ptr ) {
403 task_pool_ptr = NULL;
404 my_task_pool_size = 0;
409 #if !__TBB_CPU_CTL_ENV_PRESENT
435 __TBB_ASSERT( my_fenv_ptr,
"cpu_ctl_env is not initialized." );
437 return memcmp( (
void*)my_fenv_ptr, (
void*)ctl.
my_fenv_ptr,
sizeof(fenv_t) );
442 fegetenv( my_fenv_ptr );
445 __TBB_ASSERT( my_fenv_ptr,
"cpu_ctl_env is not initialized." );
446 fesetenv( my_fenv_ptr );
task **__TBB_atomic task_pool
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
void free_task_pool()
Deallocate task pool that was allocated by means of allocate_task_pool.
bool bounded_pause()
Pause a bounded number of times and return false once the pause count saturates.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
void reset_extra_state(task *t)
task_extra_state
Definitions for bits in task_prefix::extra_state.
void poison_pointer(T *__TBB_atomic &)
Tag for v3 tasks (i.e. tasks in TBB 2.1-2.2)
const cpu_ctl_env & set_env() const
Class that implements exponential backoff.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
Base class for user-defined tasks.
Set if the task has been stolen.
Work stealing task scheduler.
Task is known to be a small task and must not be cached.
task to be recycled as continuation
void fill_with_canary_pattern(size_t, size_t)
__TBB_atomic size_t head
Index of the first ready task in the deque.
static const int priority_stride_v4
cpu_ctl_env(const cpu_ctl_env &src)
Memory prefix to a task object.
cpu_ctl_env & operator=(const cpu_ctl_env &src)
Tag for v1 tasks (i.e. tasks in TBB 1.0 and 2.0)
Bitwise-OR of local_task and small_task.
bool ConcurrentWaitsEnabled(task &t)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Set if ref_count might be changed by another thread. Used for debugging.
void allocate_task_pool(size_t n)
#define __TBB_SCHEDULER_MUTEX_TYPE
const size_t NFS_MaxLineSize
Compile-time constant that is an upper bound on cache line/sector size.
auto first(Container &c) -> decltype(begin(c))
Disable caching for a small task.
unsigned hint_for_pop
Hint provided for operations with the container of starvation-resistant tasks.
void __TBB_Pause(int32_t)
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
const size_t task_alignment
Alignment for a task object.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Pads type T to fill out to a multiple of cache line size.
A template to select either 32-bit or 64-bit constant as compile time, depending on machine word size...
static const intptr_t num_priority_levels
Task is known to have been allocated by this scheduler.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
bool operator!=(const cpu_ctl_env &ctl) const
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
free_task_hint
Optimization hint to free_task that enables it omit unnecessary tests and code.
#define __TBB_STEALING_PAUSE
void assert_task_valid(const task *)
#define __TBB_time_stamp()
auto last(Container &c) -> decltype(begin(c))
__TBB_SCHEDULER_MUTEX_TYPE scheduler_mutex_type
Mutex type for global locks in the scheduler.
void const char const char int ITT_FORMAT __itt_group_sync p
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
task **__TBB_atomic task_pool_ptr
Task pool of the scheduler that owns this slot.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
Task is known to be a small task.
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.