#if __TBB_SURVIVE_THREAD_SWITCH
#if _WIN32
#define CILKLIB_NAME "cilkrts20.dll"
#else
#define CILKLIB_NAME "libcilkrts.so"
#endif

static atomic<do_once_state> cilkrts_load_state;

bool initialize_cilk_interop() {
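    // Presumably performs a one-time dynamic_link of CILKLIB_NAME (note the
    // do-once state above); failing to find the Cilk runtime is expected to be
    // non-fatal, since the interop is optional.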
        handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
        runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
        runtime_warning( "failed to destroy task scheduler TLS: %s", strerror(status) );
    rml::tbb_server* server = NULL;
    if( status != ::rml::factory::st_success ) {
        runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
    __TBB_ASSERT( (uintptr_t(s) & 1) == 0, "Bad pointer to the scheduler" );
    return uintptr_t(s) | uintptr_t( (s && (s->my_arena || s->is_worker())) ? 1 : 0 );
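    // Illustration (hypothetical addresses): a scheduler at 0x7f00 that already
    // owns an arena is stored as 0x7f01; masking with ~uintptr_t(1) recovers the
    // pointer, and bit 0 reports whether full initialization has happened.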
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler ) {
        if( (*watch_stack_handler)( &s->my_cilk_unwatch_thunk, o ) ) {
            // Registration with the Cilk runtime failed; clear the thunk so no
            // unwatch callback is attempted later.
            s->my_cilk_unwatch_thunk.routine = NULL;
        }
        else
            s->my_cilk_state = generic_scheduler::cs_running;
#if __TBB_SURVIVE_THREAD_SWITCH

#if __TBB_SURVIVE_THREAD_SWITCH
    s->my_auto_initialized = true;
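    // A "weakly" initialized scheduler is created lazily without an arena and is
    // flagged auto-initialized; the assertions below check exactly that state
    // before an arena gets attached.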
    __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
    __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
    __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
    __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
    s->my_arena_slot->my_scheduler = s;
#if __TBB_TASK_GROUP_CONTEXT
    s->my_arena->my_default_ctx = s->default_context();
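    // At this point the formerly weak scheduler owns an arena: the master thread
    // sits in slot 0 and the arena's default context is taken from the scheduler.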
    // The reference count is bumped only for explicit (non-auto) initializations.
    if ( !auto_init ) s->my_ref_count += 1;
    __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
    __TBB_ASSERT( s, "Somehow a local scheduler creation for a master thread failed" );
    s->my_auto_initialized = auto_init;
    if( 0 == --(s->my_ref_count) ) {
        ok = s->cleanup_master( blocking );
    if( s && s->my_auto_initialized ) {
        if( !--(s->my_ref_count) ) {
            s->cleanup_master( /*blocking=*/false );
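    // This appears to be the implicit (auto) termination path: only an
    // auto-initialized scheduler is torn down here, only when its last reference
    // goes away, and the cleanup is non-blocking.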
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler )
#if __TBB_SURVIVE_THREAD_SWITCH
#if _WIN32 || _WIN64
    uintptr_t thread_id = GetCurrentThreadId();
#else
    uintptr_t thread_id = uintptr_t(pthread_self());
#endif
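    // What follows is evidently the Cilk stack-op callback handling: the
    // assertions drive each scheduler through a small state machine
    // (cs_running / cs_limbo / cs_freed) as stacks are adopted, orphaned, and released.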
    __TBB_ASSERT( ( !current && s->my_cilk_state == generic_scheduler::cs_limbo ) ||
                  ( current == s && s->my_cilk_state == generic_scheduler::cs_running ),
                  "invalid adoption" );
    // A second adoption by the same thread is harmless but worth a warning.
    if( current == s )
        runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
    s->my_cilk_state = generic_scheduler::cs_running;
    __TBB_ASSERT( current == s && s->my_cilk_state == generic_scheduler::cs_running,
                  "invalid orphaning" );
    s->my_cilk_state = generic_scheduler::cs_limbo;
    __TBB_ASSERT( ( !current && s->my_cilk_state == generic_scheduler::cs_limbo ) ||
                  ( current == s && s->my_cilk_state == generic_scheduler::cs_running ),
                  "invalid release" );
    s->my_cilk_state = generic_scheduler::cs_freed;
    s->my_cilk_unwatch_thunk.routine = NULL;
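    // Lifecycle implied by the assertions above:
    //   adopt:   cs_limbo  -> cs_running  (re-adoption while running is merely warned about)
    //   orphan:  cs_running -> cs_limbo
    //   release: cs_limbo or cs_running -> cs_freed, after which the unwatch thunk is cleared.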
#if __TBB_NUMA_SUPPORT

#if __TBB_WEAK_SYMBOLS_PRESENT
#pragma weak initialize_numa_topology
#pragma weak subscribe_arena
#pragma weak unsubscribe_arena
void initialize_numa_topology(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list );
#if _WIN32 || _WIN64 || __linux__
static void (*initialize_numa_topology_handler)(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list ) = NULL;

static binding_observer* (*subscribe_arena_handler)( tbb::task_arena* ta, int numa_id, int num_slots ) = NULL;

static void (*unsubscribe_arena_handler)( binding_observer* binding_observer_ptr ) = NULL;
#if _WIN32 || _WIN64 || __linux__

#define DEBUG_SUFFIX "_debug"

#if _WIN32 || _WIN64
#define TBBBIND_NAME "tbbbind" DEBUG_SUFFIX ".dll"
#elif __linux__
#define TBBBIND_NAME "libtbbbind" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
#endif
namespace numa_topology {

int  numa_nodes_count = 0;
int* numa_indexes = NULL;
int* default_concurrency_list = NULL;
static tbb::atomic<do_once_state> numa_topology_init_state;
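// Cached NUMA topology: the node count, the list of node indexes, and the default
// concurrency per node. Filled in once, either from the dynamically loaded tbbbind
// library or, failing that, with the single-node stubs set up below.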
void initialization_impl() {
#if _WIN32 || _WIN64 || __linux__
    bool load_tbbbind = true;
#if _WIN32 && !_WIN64
    // A 32-bit process affinity mask covers at most 32 logical CPUs, so tbbbind
    // is only loaded when the machine fits in a single mask.
    SYSTEM_INFO si;
    GetNativeSystemInfo(&si);
    load_tbbbind = si.dwNumberOfProcessors <= 32;
#endif
    if( load_tbbbind && dynamic_link(TBBBIND_NAME, TbbBindLinkTable, 3) ) {
        int number_of_groups = 1;
#if _WIN32 || _WIN64
        number_of_groups = NumberOfProcessorGroups();
#endif
        initialize_numa_topology_handler(
            number_of_groups, numa_nodes_count, numa_indexes, default_concurrency_list);

        if( numa_nodes_count == 1 && numa_indexes[0] >= 0 ) {
            __TBB_ASSERT( default_concurrency_list[numa_indexes[0]] == (int)governor::default_num_threads(),
                "default_concurrency() should be equal to governor::default_num_threads() on single "
                "NUMA node systems." );
    static int dummy_index = -1;
    static int dummy_concurrency = (int)governor::default_num_threads();

    numa_nodes_count = 1;
    numa_indexes = &dummy_index;
    default_concurrency_list = &dummy_concurrency;

    subscribe_arena_handler = dummy_subscribe_arena;
    unsubscribe_arena_handler = dummy_unsubscribe_arena;
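    // Fallback: without tbbbind the topology degenerates to one pseudo-node with
    // index -1 and the default concurrency, and the arena subscription hooks
    // become no-op stubs.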
unsigned nodes_count() {
    return numa_nodes_count;
void fill( int* indexes_array ) {
    for( int i = 0; i < numa_nodes_count; i++ ) {
        indexes_array[i] = numa_indexes[i];
int default_concurrency( int node_id ) {
    return default_concurrency_list[node_id];
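/* Usage sketch (hypothetical caller; print() stands in for real consumption and
   <vector> is assumed to be available):

       int n = (int)numa_topology::nodes_count();
       std::vector<int> ids(n);
       numa_topology::fill(ids.data());            // one NUMA node id per entry
       for (int id : ids)
           print(id, numa_topology::default_concurrency(id));
*/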
binding_observer* construct_binding_observer( tbb::task_arena* ta,
    int numa_id, int num_slots ) {
    // A binding observer is only created for a real NUMA node id on a multi-node system.
    return ( numa_id >= 0 && numa_topology::nodes_count() > 1 ) ?
        subscribe_arena_handler(ta, numa_id, num_slots) : NULL;
void destroy_binding_observer( binding_observer* observer ) {
    __TBB_ASSERT( observer != NULL, "Trying to access observer via NULL pointer" );
    unsubscribe_arena_handler(observer);
    // Presumably the one-argument initialize overload: forwards with stack size 0,
    // i.e. "use the default".
    initialize( number_of_threads, 0 );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    // Exception-propagation mode bits are packed into the high bits of
    // thread_stack_size (see propagation_mode_mask).
    uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
#endif
    if( number_of_threads != deferred ) {
        __TBB_ASSERT_RELEASE( number_of_threads == automatic || number_of_threads > 0,
            "number_of_threads for task_scheduler_init must be automatic or positive" );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
        if( s->master_outermost_level() ) {
            uintptr_t &vt = s->default_context()->my_version_and_traits;
            uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
            vt = new_mode & propagation_mode_exact    ? vt | task_group_context::exact_exception
               : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception
               : vt;
            // The previous propagation mode is stashed in the low bit of my_scheduler
            // so that termination can restore it (see internal_terminate below).
            my_scheduler = static_cast<scheduler*>( (generic_scheduler*)((uintptr_t)s | prev_mode) );
        __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
    my_scheduler = (scheduler*)( (uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact );
    __TBB_ASSERT_RELEASE( s,
        "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()" );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    if( s->master_outermost_level() ) {
        uintptr_t &vt = s->default_context()->my_version_and_traits;
    internal_terminate( /*blocking=*/false );
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    bool ok = internal_terminate( /*blocking=*/true );
#if TBB_USE_EXCEPTIONS
    if( throwing && !ok )
        throw_exception( eid_blocking_thread_join_impossible );
#endif
    return ok;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
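
/* Usage sketch (hypothetical application code; to our understanding the public
   preview API blocking_terminate() funnels into internal_blocking_terminate):

       #define TBB_PREVIEW_BLOCKING_TERMINATE 1
       #include <tbb/tbb.h>

       int main() {
           tbb::task_scheduler_init init( 4 );
           // ... parallel work ...
           // Non-throwing form: returns false if the workers cannot be joined.
           bool joined = init.blocking_terminate( std::nothrow );
           return joined ? 0 : 1;
       }
*/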