extern generic_scheduler* (*AllocateSchedulerPtr)( market&, bool );

#if __TBB_TASK_GROUP_CONTEXT
context_state_propagation_mutex_type the_context_state_propagation_mutex;
uintptr_t the_context_state_propagation_epoch = 0;
#if __TBB_TASK_GROUP_CONTEXT
// ...
#if __TBB_TASK_PRIORITY
// ...

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress warning 4355 ("'this' : used in base member initializer list")
    // around the constructor below.
    #pragma warning(disable:4355)
#endif

// generic_scheduler constructor: initializer list and body (excerpt).
    // ...
    , my_co_context(m.worker_stack_size(), genuine ? NULL : this)
    // ...
    , my_small_task_count(1)
    // ...
    , my_cilk_state(cs_none)
    // ...
{
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_current_is_recalled = NULL;
    my_post_resume_action = PRA_NONE;
    my_post_resume_arg = NULL;
#endif
    // ...
#if __TBB_TASK_PRIORITY
    my_ref_top_priority = &m.my_global_top_priority;
    my_ref_reload_epoch = &m.my_global_reload_epoch;
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
    my_context_state_propagation_epoch = the_context_state_propagation_epoch;
    // The local context list is circular and initially empty.
    my_context_list_head.my_prev = &my_context_list_head;
    my_context_list_head.my_next = &my_context_list_head;
    ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
#endif
#if _MSC_VER && !defined(__INTEL_COMPILER)
    // ...
#endif // warning 4355 is back

#if TBB_USE_ASSERT > 1
// Debug-only validation of the local task pool: consumed slots must hold the poison
// value, live slots must hold valid tasks or task proxies.
    for ( size_t i = 0; i < H; ++i )
        __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
    for ( size_t i = H; i < T; ++i ) {
        // ...
        __TBB_ASSERT( /* ... */ tp[i]->prefix().extra_state == es_task_proxy,
                      "task in the deque has invalid state" );
        // ...
    }
    // ...
    __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
// init_stack_info (excerpt): on Windows the stack bounds come from the Thread
// Environment Block.
#if defined(_MSC_VER) && _MSC_VER<1400 && !_WIN64
    // Old MSVC without the NtCurrentTeb intrinsic: read the TEB pointer via fs:[0x18].
    __asm mov eax, fs:[0x18]
    // ...
#else
    NT_TIB *pteb = (NT_TIB*)NtCurrentTeb();
#endif
    __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
    __TBB_ASSERT( stack_size > 0, "stack_size not initialized?" );
// init_stack_info (excerpt): compute the stack bounds and size used by the stealing
// limiting heuristics. The address of a local variable serves as the reference point.
    void *stack_base = &stack_size;
#if __linux__ && !__bg__
    size_t np_stack_size = 0;
    void *stack_limit = NULL;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // A scheduler bound to a co-routine takes the bounds of the co-routine's stack.
    stack_limit = my_co_context.get_stack_limit();
    __TBB_ASSERT( (uintptr_t)stack_base > (uintptr_t)stack_limit, "stack size must be positive" );
    stack_size = size_t((char*)stack_base - (char*)stack_limit);
    // ...
#endif
    pthread_attr_t np_attr_stack;
    if ( !stack_limit && 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
        if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
            // ...
            pthread_attr_t attr_stack;
            if ( 0 == pthread_attr_init(&attr_stack) ) {
                if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {
                    if ( np_stack_size < stack_size ) {
                        // ...
                        rsb_base = stack_limit;
                        stack_size = np_stack_size/2;
                        // Use the upper half of the stack reported for the thread.
                        stack_limit = (char*)stack_limit + stack_size;
                        // ...
                    }
                    // ...
                }
                pthread_attr_destroy(&attr_stack);
            }
            // ...
            my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
            // ...
            stack_size = size_t((char*)stack_base - (char*)stack_limit);
        }
        pthread_attr_destroy(&np_attr_stack);
    }
#endif /* __linux__ */
    __TBB_ASSERT( stack_size > 0, "stack size must be positive" );
#if __TBB_TASK_GROUP_CONTEXT
void generic_scheduler::cleanup_local_context_list () {
    bool wait_for_concurrent_destroyers_to_leave = false;
    uintptr_t local_count_snapshot = my_context_state_propagation_epoch;
    my_local_ctx_list_update.store<relaxed>(1);
    // ...
    // Lock the list only if a concurrent update or state propagation may be in flight.
    if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )
        lock.acquire(my_context_list_mutex);
    // ...
    while ( node != &my_context_list_head ) {
        // ...
        wait_for_concurrent_destroyers_to_leave = true;
        // ...
    }
    my_local_ctx_list_update.store<release>(0);
    // ...
    if ( wait_for_concurrent_destroyers_to_leave )
        // ...
}
// Scheduler cleanup (excerpt): release the dummy task, drain hoarded non-local tasks,
// and report task-node statistics to the market.
#if __TBB_PREVIEW_CRITICAL_TASKS
    // ...
#endif
#if __TBB_TASK_GROUP_CONTEXT
    cleanup_local_context_list();
#endif
    free_task<small_local_task>( *my_dummy_task );
#if __TBB_HOARD_NONLOCAL_TASKS
    while( task* t = my_nonlocal_free_list ) {
        // ...
        my_nonlocal_free_list = p.next;   // p refers to t's task_prefix
        // ...
    }
#endif
    // ...
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( my_task_node_count );
#endif
// allocate_task (excerpt): reuse a task from one of the free lists when possible,
// otherwise fall back to the allocator; optionally count task nodes.
#if __TBB_HOARD_NONLOCAL_TASKS
    if( (t = my_nonlocal_free_list) ) {
        // ...
    }
#endif
    // ...
    __TBB_ASSERT( t, "another thread emptied the my_return_list" );
    // ...
#if __TBB_COUNT_TASK_NODES
    ++my_task_node_count;
#endif
#if __TBB_PREFETCHING
    // ...
#endif
#if __TBB_HOARD_NONLOCAL_TASKS
    // ...
#endif
#if __TBB_COUNT_TASK_NODES
    ++my_task_node_count;
#endif
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif

// free_nonlocal_small_task (excerpt): return the task to its owning scheduler by
// pushing it onto that scheduler's my_return_list with a compare-and-swap.
    task* old = s.my_return_list;
    // ...
    if( as_atomic(s.my_return_list).compare_and_swap(&t, old ) == old ) {
        // ...
    }
#if __TBB_PREFETCHING
    // ...
#endif
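The compare-and-swap above pushes a freed small task onto the owning scheduler's my_return_list so the owner can recycle it later; the real code also checks a "plugged" sentinel (see plugged_return_list in the reference list) before pushing. A minimal sketch of just the lock-free push, with stand-in types rather than TBB's task and scheduler classes.

#include <atomic>

struct task_stub {
    task_stub* next = nullptr;                      // intrusive link, like "next" in task_prefix
};

struct scheduler_stub {
    std::atomic<task_stub*> return_list{nullptr};   // stand-in for my_return_list
};

// Push t onto the owner's return list; retried until the CAS succeeds.
inline void return_task(scheduler_stub& owner, task_stub& t) {
    task_stub* old_head = owner.return_list.load(std::memory_order_relaxed);
    do {
        t.next = old_head;                          // link in front of the current head
    } while (!owner.return_list.compare_exchange_weak(
                 old_head, &t,
                 std::memory_order_release, std::memory_order_relaxed));
}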
// prepare_task_pool (excerpt): make room for num_tasks more entries, either by
// compacting the live range [H, T) or by reallocating a larger array.
    if ( T + num_tasks <= my_arena_slot->my_task_pool_size )
        // ...
    // ...
    for ( size_t i = H; i < T; ++i )
        // ...
    if ( new_size < 2 * my_arena_slot->my_task_pool_size )
        // ...
    for ( size_t i = H; i < T; ++i )
        // ...
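A minimal sketch of what prepare_task_pool() is doing above, under the assumption that the pool is a growable array with live entries in [head, tail): if the request does not fit, either compact the live range to the front or reallocate with at least doubled capacity. simple_pool is an illustrative type, not TBB's arena_slot.

#include <algorithm>
#include <cstddef>
#include <vector>

struct task;

struct simple_pool {
    std::vector<task*> slots;
    std::size_t head = 0, tail = 0;                   // live tasks occupy [head, tail)

    // Ensure there is room for num_tasks more entries after the tail.
    void prepare(std::size_t num_tasks) {
        if (tail + num_tasks <= slots.size())
            return;                                   // already fits
        std::size_t live = tail - head;
        if (live + num_tasks <= slots.size()) {
            // Compact: move the live range to the front of the same array.
            std::copy(slots.begin() + head, slots.begin() + tail, slots.begin());
        } else {
            // Grow: at least double the capacity, then relocate the live range.
            std::size_t new_size = std::max(2 * slots.size(), live + num_tasks);
            std::vector<task*> bigger(new_size, nullptr);
            std::copy(slots.begin() + head, slots.begin() + tail, bigger.begin());
            slots.swap(bigger);
        }
        head = 0;
        tail = live;
    }
};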
// acquire_task_pool (excerpt): spin until the local task pool is locked; the ITT
// sync_prepare notification is emitted only on the first failed attempt.
    bool sync_prepare_done = false;
    // ...
    else if( !sync_prepare_done ) {
        // ...
        sync_prepare_done = true;
    }
    // ...
// lock_task_pool (excerpt): inside a backoff loop, try to lock the victim's task pool,
// optionally aborting the steal attempt under heavy contention.
    task** victim_task_pool;
    bool sync_prepare_done = false;
    // ...
    victim_task_pool = victim_arena_slot->task_pool;
    // ...
    if( sync_prepare_done )
        // ...
    // ...
    else if( !sync_prepare_done ) {
        // ...
        sync_prepare_done = true;
    }
#if __TBB_STEALING_ABORT_ON_CONTENTION
    if( !backoff.bounded_pause() ) {
        // ...
    }
#endif
    // ...
    __TBB_ASSERT( /* ... */, "not really locked victim's task pool?" );
    return victim_task_pool;
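lock_task_pool() above treats the victim's task_pool pointer itself as the lock word and retries with backoff (see the atomic_backoff class in the reference list). A minimal sketch of that idiom using std::atomic and a made-up sentinel value; none of these names are TBB's.

#include <atomic>
#include <cstdint>
#include <thread>

struct task;

// Sentinel stored into the slot while its pool is locked (illustrative value).
static task** const locked_sentinel = reinterpret_cast<task**>(~std::uintptr_t(0));

struct slot_stub {
    std::atomic<task**> task_pool{nullptr};
};

// Lock the victim's pool by swapping in the sentinel; returns the previous pool
// pointer, which the caller must pass back to unlock_pool().
inline task** lock_pool(slot_stub& victim) {
    int pauses = 1;
    for (;;) {
        task** pool = victim.task_pool.load(std::memory_order_acquire);
        if (pool != locked_sentinel &&
            victim.task_pool.compare_exchange_strong(pool, locked_sentinel,
                                                     std::memory_order_acquire))
            return pool;                        // acquired; remember what to restore
        for (int i = 0; i < pauses; ++i)        // crude exponential backoff
            std::this_thread::yield();
        if (pauses < 64) pauses *= 2;
    }
}

inline void unlock_pool(slot_stub& victim, task** pool) {
    victim.task_pool.store(pool, std::memory_order_release);   // restore the pointer
}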
void generic_scheduler::unlock_task_pool( arena_slot* victim_arena_slot,
                                          task** victim_task_pool ) const {
    __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
    // ...
}
// prepare_for_spawning (excerpt): sanity checks on the parent's reference count before
// a task is put into the ready pool.
    __TBB_ASSERT( ref_count >= 0, "attempt to spawn task whose parent has a ref_count<0" );
    __TBB_ASSERT( ref_count != 0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
    // ...
    __TBB_ASSERT( /* ... */, "backwards compatibility to TBB 2.0 tasks is broken" );
#if __TBB_TASK_ISOLATION
    // ...
#endif
#if __TBB_TASK_PRIORITY
    // ...
#endif
607 #if __TBB_TASK_ISOLATION 619 #if __TBB_TASK_PRIORITY 631 #if __TBB_PREVIEW_CRITICAL_TASKS 632 bool generic_scheduler::handled_as_critical(
task& t ) {
635 #if __TBB_TASK_ISOLATION 641 my_arena->my_critical_task_stream.push(
665 #if __TBB_PREVIEW_CRITICAL_TASKS 666 if( !handled_as_critical( *first ) )
691 for(
task* t = first; ; t = t_next ) {
697 #if __TBB_PREVIEW_CRITICAL_TASKS 698 if( !handled_as_critical( *t ) )
704 if(
size_t num_tasks = tasks.
size() ) {
726 #if __TBB_TASK_GROUP_CONTEXT 728 "all the root tasks in list must share the same context");
#if __TBB_TASK_PRIORITY
// RAII helper: raises a flag for the duration of a scope.
class auto_indicator : no_copy {
    volatile bool& my_indicator;
public:
    auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true; }
    ~auto_indicator () { my_indicator = false; }
};

// Taking a task from the tail of the pool, skipping tasks of the wrong isolation.
#if __TBB_TASK_ISOLATION
    bool tasks_omitted = false;
    while ( !t && T > H0 ) {
        t = get_task( --T, isolation, tasks_omitted );
        if ( !tasks_omitted ) {
            // ...
        }
    }
    if ( t && tasks_omitted ) {
        // ...
    }
#endif /* __TBB_TASK_ISOLATION */

// Reshuffling the task pool (excerpt): offload every task whose priority is below the
// current reference priority.
#if __TBB_TASK_ISOLATION
    // ...
#endif
    __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" );
    // ...
    auto_indicator indicator( my_pool_reshuffling_pending );
    // ...
    for ( size_t src = H0; src < T0; ++src ) {
        // ...
        intptr_t p = priority( *t );
        if ( p < *my_ref_top_priority ) {
            offload_task( *t, p );
            // ...
        }
        // ...
    }
// reload_tasks (excerpt): walk the offloaded-task list and move every task whose
// priority has reached the current top priority back to the pool.
#if __TBB_TASK_ISOLATION
    // ...
#endif
    task **link = &offloaded_tasks;
    while ( task *t = *link ) {
        // ...
        if ( priority(*t) >= top_priority ) {
            // Unlink the task and put it back.
            task* next = *next_ptr;
            // ...
        }
        // ...
    }
    if ( link == &offloaded_tasks ) {
        offloaded_tasks = NULL;
        // ...
        offloaded_task_list_link = NULL;
        // ...
    }
    // ...
    offloaded_task_list_link = link;
    // ...
    size_t num_tasks = tasks.size();
    // ...
    if ( t ) --num_tasks;

// Deciding whether offloaded tasks have to be reloaded: compare the reload epoch
// published by the market with the locally cached one.
    uintptr_t reload_epoch = *my_ref_reload_epoch;
    __TBB_ASSERT( /* ... */ || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2,
                  "Reload epoch counter overflow?" );
    if ( my_local_reload_epoch == reload_epoch )
        // ...
    intptr_t top_priority = effective_reference_priority();
    // ...
    task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link,
                            __TBB_ISOLATION_ARG( top_priority, isolation ) );
    // ...
    my_local_reload_epoch = reload_epoch;
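The offload/reload fragments above park low-priority tasks on an intrusive singly linked list (next_offloaded in the reference list) and later splice back everything whose priority has caught up with the reference priority. A minimal sketch of just that list manipulation; pending_task, offload_list and take_back are illustrative names, not TBB's.

#include <cstdint>

struct pending_task {
    intptr_t      priority;
    pending_task* next_offloaded = nullptr;
};

struct offload_list {
    pending_task* head = nullptr;

    void offload(pending_task& t) {            // park a task whose priority is too low
        t.next_offloaded = head;
        head = &t;
    }

    // Hand back, via take_back(), every task with priority >= top_priority;
    // lower-priority tasks stay parked.
    template <typename TakeBack>
    void reload(intptr_t top_priority, TakeBack take_back) {
        pending_task** link = &head;
        while (pending_task* t = *link) {
            if (t->priority >= top_priority) {
                *link = t->next_offloaded;     // unlink and return to the pool
                take_back(*t);
            } else {
                link = &t->next_offloaded;     // keep it offloaded
            }
        }
    }
};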
// get_task for a particular deque slot (excerpt): honor task isolation, resolve task
// proxies, and record whether any task had to be left in place.
#if __TBB_TASK_ISOLATION
    // ...
#endif
    __TBB_ASSERT( !is_poisoned( result ), "The poisoned task is going to be processed" );
#if __TBB_TASK_ISOLATION
    if ( !omit && !is_proxy( *result ) )
        // ...
    // ...
    tasks_omitted = true;
    // ...
#endif
    if ( !result || !is_proxy( *result ) )
        // ...
#if __TBB_TASK_ISOLATION
    if ( !tasks_omitted )
        // ...
#endif
    // Free the consumed task proxy.
    free_task<small_task>( tp );
#if __TBB_TASK_ISOLATION
    if ( tasks_omitted )
        // ...
#endif
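The isolation checks above let a thread take a task only if the task's isolation tag matches its own (or the thread is not isolated); anything else is skipped and tasks_omitted records that the slot must be revisited. A minimal sketch of the check, reusing the isolation_tag/no_isolation names from the reference list but with a stand-in task type and helper.

#include <cstdint>

typedef intptr_t isolation_tag;
const isolation_tag no_isolation = 0;

struct task_stub { isolation_tag isolation; };

// Return t if it may be taken under caller_isolation, otherwise record the omission.
inline task_stub* try_take(task_stub* t, isolation_tag caller_isolation, bool& tasks_omitted) {
    if (caller_isolation != no_isolation && t->isolation != caller_isolation) {
        tasks_omitted = true;    // leave the task for a thread with the right isolation
        return nullptr;
    }
    return t;
}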
// get_task from the local pool (excerpt): the owner takes from the tail and, when it
// meets the head, arbitrates with thieves; the loop repeats until a task is found or
// the pool is exhausted.
    size_t H0 = (size_t)-1, T = T0;
    task* result = NULL;
    bool task_pool_empty = false;
    // ...
        if ( (intptr_t)H0 > (intptr_t)T ) {
            // A thief claimed the slot we wanted; the pool is empty now.
            __TBB_ASSERT( /* ... */ && H0 == T + 1, "victim/thief arbitration algorithm failure" );
            // ...
            task_pool_empty = true;
            // ...
        } else if ( H0 == T ) {
            // The task being taken is the last one in the pool.
            task_pool_empty = true;
            // ...
        }
        // ...
#if __TBB_TASK_ISOLATION
        result = get_task( T, isolation, tasks_omitted );
        // ...
        } else if ( !tasks_omitted ) {
            // ...
        }
#endif
        // ...
    } while ( !result && !task_pool_empty );

#if __TBB_TASK_ISOLATION
    if ( tasks_omitted ) {
        if ( task_pool_empty ) {
            // ...
        }
        // ...
    }
#endif
    // ...
    free_task<no_cache_small_task>(tp);
// steal_task_from (excerpt): a thief scans from the head of the victim's deque for a
// task of suitable isolation; if tasks were skipped, the stolen task's slot is nulled
// out instead of advancing the head.
    task* result = NULL;
    // ...
    bool tasks_omitted = false;
    // ...
        result = victim_pool[H-1];
        // ...
        tasks_omitted = true;
        // ...
    } else if ( !tasks_omitted ) {
        // ...
    }
    // ...
    } while ( !result );
    // ...
    ITT_NOTIFY( sync_acquired, (void*)((uintptr_t)&victim_slot + sizeof( uintptr_t )) );
    // ...
    if ( tasks_omitted ) {
        // ...
        victim_pool[H-1] = NULL;
        // ...
    }
#if __TBB_PREFETCHING
    // ...
#endif
    if ( tasks_omitted )
        // ...
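get_task() and steal_task_from() above are the two ends of the work-stealing deque: the owner pops from the tail, thieves take from the head, and when the indices collide one side backs out. The sketch below shows that arbitration in a much simplified form, resolving collisions with a per-slot mutex rather than TBB's lock-free task-pool locking; deque_stub and all of its members are illustrative names.

#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

struct task;

struct deque_stub {
    std::vector<task*> pool{1024, nullptr};
    std::atomic<std::size_t> head{0};   // thieves advance this index
    std::atomic<std::size_t> tail{0};   // the owner advances this index
    std::mutex arbitration;             // slow path when head and tail collide

    bool push_owner(task* t) {          // only the owner appends at the tail
        std::size_t tl = tail.load(std::memory_order_relaxed);
        if (tl == pool.size()) return false;              // sketch: no growth
        pool[tl] = t;
        tail.store(tl + 1, std::memory_order_release);
        return true;
    }

    task* pop_owner() {                 // analogue of the owner's get_task()
        std::size_t t = tail.load(std::memory_order_relaxed);
        if (t <= head.load(std::memory_order_acquire))
            return nullptr;             // pool looks empty
        tail.store(--t, std::memory_order_seq_cst);       // tentatively claim pool[t]
        std::size_t h = head.load(std::memory_order_seq_cst);
        if (h <= t)
            return pool[t];             // no collision with a thief
        std::lock_guard<std::mutex> lock(arbitration);    // collided: arbitrate
        h = head.load(std::memory_order_relaxed);
        if (h <= t)
            return pool[t];             // the thief backed off after all
        tail.store(h, std::memory_order_relaxed);         // lost: leave the deque empty
        return nullptr;
    }

    task* steal() {                     // analogue of steal_task_from()
        std::lock_guard<std::mutex> lock(arbitration);
        std::size_t h = head.load(std::memory_order_relaxed);
        head.store(h + 1, std::memory_order_seq_cst);     // claim pool[h] first...
        if (h + 1 > tail.load(std::memory_order_seq_cst)) {
            head.store(h, std::memory_order_relaxed);     // ...and give it back on collision
            return nullptr;
        }
        return pool[h];
    }
};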
#if __TBB_PREVIEW_CRITICAL_TASKS
// Getting a critical task (excerpt): poll the arena's critical task stream.
    if( my_arena->my_critical_task_stream.empty(0) )
        // ...
    task* critical_task = NULL;
    // ...
#if __TBB_TASK_ISOLATION
    critical_task = my_arena->my_critical_task_stream.pop_specific( 0, start_lane, isolation );
    // ...
#endif
    return critical_task;
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */

    // ...
    free_task<no_cache_small_task>(*tp);
// publish_task_pool (excerpt): sanity checks before the local pool becomes visible to thieves.
    __TBB_ASSERT( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
    // ...
    __TBB_ASSERT( /* ... */, "entering arena without tasks to share" );

// create_worker (excerpt):
    __TBB_ASSERT( !genuine || index, "workers should have index > 0" );
// create_master (excerpt): set up the default context, register the new master in the
// market's list, and notify observers.
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#if __TBB_FP_CONTEXT
    s->default_context()->capture_fp_settings();
#endif
    // ...
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    s->my_market->my_masters.push_front( *s );
#endif
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    a->my_default_ctx = s->default_context();
#endif
    // ...
    s->my_market->register_master( s->master_exec_resource );
    // ...
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !a || a->my_observers.empty(), "Just created arena cannot have any observers associated with it" );
#endif
#if __TBB_SCHEDULER_OBSERVER
    the_global_observer_list.notify_entry_observers( s->my_last_global_observer, false );
#endif

// cleanup_worker (excerpt):
#if __TBB_SCHEDULER_OBSERVER
    the_global_observer_list.notify_exit_observers( s.my_last_global_observer, true );
#endif

// cleanup_master (excerpt): notify observers, unregister from the market, destroy the
// default context, and release the market reference.
#if __TBB_ARENA_OBSERVER
    a->my_observers.notify_exit_observers( my_last_local_observer, false );
#endif
#if __TBB_SCHEDULER_OBSERVER
    the_global_observer_list.notify_exit_observers( my_last_global_observer, false );
#endif
    // ...
    m->unregister_master( master_exec_resource );
    // ...
#if __TBB_STATISTICS
    // ...
#endif
#if __TBB_TASK_GROUP_CONTEXT
    default_context()->~task_group_context();
    // ...
    context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
    // ...
#endif
    // ...
    return m->release( a != NULL, blocking_terminate );

Symbols referenced in the listing above:
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
static const kind_type dying
generic_scheduler * allocate_scheduler(market &m, bool genuine)
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
#define __TBB_FetchAndDecrementWrelease(P)
static bool is_shared(intptr_t tat)
True if the proxy is stored both in its sender's pool and in the destination mailbox.
void cleanup_scheduler()
Cleans up this scheduler (the scheduler might be destroyed).
void assert_task_valid(const task *)
Base class for user-defined tasks.
void __TBB_store_relaxed(volatile T &location, V value)
Represents acquisition of a mutex.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
bool recipient_is_idle()
True if thread that owns this mailbox is looking for work.
task is in ready pool, or is going to be put there, or was just taken off.
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
#define ITT_SYNC_CREATE(obj, type, name)
static const intptr_t num_priority_levels
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
bool is_worker() const
True if running on a worker thread, false otherwise.
mail_outbox * outbox
Mailbox to which this was mailed.
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
generic_scheduler *(* AllocateSchedulerPtr)(market &, bool)
Pointer to the scheduler factory function.
void pause()
Pause for a while.
#define __TBB_control_consistency_helper()
unsigned char state
A task::state_type, stored as a byte for compactness.
__TBB_atomic size_t head
Index of the first ready task in the deque.
void __TBB_store_with_release(volatile T &location, V value)
intptr_t reference_count
A reference count.
Class that implements exponential backoff.
bool is_critical(task &t)
const isolation_tag no_isolation
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
void publish_task_pool()
Used by workers to enter the task pool.
void set_ref_count(int count)
Set reference count.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
static const kind_type detached
tbb::task * parent
The task whose reference count includes me.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
void destroy()
Destroy and deallocate this scheduler object.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
void fill_with_canary_pattern(size_t, size_t)
task * my_free_list
Free list of small tasks that can be reused.
task object is on free list, or is going to be put there, or was just taken off.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
Vector that grows without reallocations, and stores items in the reverse order.
task **__TBB_atomic task_pool
void acquire(spin_mutex &m)
Acquire lock.
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
#define __TBB_ISOLATION_ARG(arg1, isolation)
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
isolation_tag isolation
The tag used for task isolation.
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
task_proxy * pop(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get next piece of mail, or NULL if mailbox is empty.
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
intptr_t isolation_tag
A tag for task isolation.
static bool is_proxy(const task &t)
True if t is a task_proxy.
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
bool is_quiescent_local_task_pool_reset() const
context_list_node_t * my_next
void spawn(task &first, task *&next) __TBB_override
For internal use only.
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
bool is_quiescent_local_task_pool_empty() const
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
Set if the task has been stolen.
Work stealing task scheduler.
Set if ref_count might be changed by another thread. Used for debugging.
void copy_memory(T *dst) const
Copies the contents of the vector into the dst array.
static const kind_type binding_required
auto first(Container &c) -> decltype(begin(c))
#define GATHER_STATISTIC(x)
Memory prefix to a task object.
intptr_t my_priority
Priority level of the task group (in normalized representation)
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
void push(task_proxy *t)
Push task_proxy onto the mailbox queue of another thread.
void local_spawn(task *first, task *&next)
virtual ~scheduler()=0
Pure virtual destructor.
atomic< T > & as_atomic(T &t)
state_type state() const
Current execution state.
task_group_context * context()
This method is deprecated and will be removed in the future.
static bool is_version_3_task(task &t)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
task **__TBB_atomic task_pool_ptr
Task pool of the scheduler that owns this slot.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
bool is_local_task_pool_quiescent() const
scheduler_properties my_properties
Smart holder for the empty task class with automatic destruction.
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
#define __TBB_ISOLATION_EXPR(isolation)
void on_thread_leaving()
Notification that worker or master leaves its arena.
#define __TBB_cl_evict(p)
market * my_market
The market I am in.
A scheduler with a customized evaluation loop.
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
Used to form groups of tasks.
#define ITT_NOTIFY(name, obj)
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
static const intptr_t mailbox_bit
void leave_task_pool()
Leave the task pool.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
task * next_offloaded
Pointer to the next offloaded lower priority task.
static const intptr_t location_mask
Base class for types that should not be copied or assigned.
static generic_scheduler * create_worker(market &m, size_t index, bool geniune)
Initialize a scheduler for a worker thread.
tbb::task * next
"next" field for list of task
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
task * my_dummy_task
Fake root task created by slave threads.
bool is_task_pool_published() const
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
bool type
Indicates that a scheduler acts as a master or a worker.
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
static const size_t min_task_pool_size
void deallocate_task(task &t)
Return task object to the memory allocator.
T __TBB_load_relaxed(const volatile T &location)
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
task object is freshly allocated or recycled.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
generic_scheduler(market &, bool)
#define __TBB_cl_prefetch(p)
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
void assert_task_pool_valid() const
void Scheduler_OneTimeInitialization(bool itt_present)
Defined in scheduler.cpp.
void attach_arena(arena *, size_t index, bool is_master)
void allocate_task_pool(size_t n)
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
task * extract_task()
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary. ...
void poison_pointer(T *__TBB_atomic &)
void acquire_task_pool() const
Locks the local task pool.
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
bool outermost
Indicates that a scheduler is on the outermost level.
static const unsigned ref_external
Reference increment values for externals and workers.
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
static const intptr_t pool_bit
unsigned short affinity_id
An id as used for specifying affinity.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
#define __TBB_CONTEXT_ARG(arg1, context)
#define __TBB_PREVIEW_RESUMABLE_TASKS
unsigned short get()
Get a random number.
void atomic_fence()
Sequentially consistent full memory fence.
virtual void local_wait_for_all(task &parent, task *child)=0
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
scheduler * owner
Obsolete. The scheduler that owns the task.
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
void local_spawn_root_and_wait(task *first, task *&next)
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
void release_task_pool() const
Unlocks the local task pool.
void push_back(const T &val)