#if __TBB_SURVIVE_THREAD_SWITCH
#if _WIN32
#define CILKLIB_NAME "cilkrts20.dll"
#else
#define CILKLIB_NAME "libcilkrts.so"
#endif

static atomic<do_once_state> cilkrts_load_state;

bool initialize_cilk_interop() {
    // ...
        handle_perror(status, "TBB failed to initialize task scheduler TLS\n");

        runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );

        runtime_warning( "failed to destroy task scheduler TLS: %s", strerror(status) );
    rml::tbb_server* server = NULL;
    if( status != ::rml::factory::st_success ) {
        runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
    __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
    return uintptr_t(s) | uintptr_t( (s && (s->my_arena || s->is_worker())) ? 1 : 0 );
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler ) {
        if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
            // Registration with the Cilk runtime failed; leave the thunk empty.
            s->my_cilk_unwatch_thunk.routine = NULL;
        }
        // ...
        s->my_cilk_state = generic_scheduler::cs_running;
    }
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

    s->my_auto_initialized = true;
            __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
            __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
            __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
            // ...
            __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
            s->my_arena_slot->my_scheduler = s;
#if __TBB_TASK_GROUP_CONTEXT
            s->my_arena->my_default_ctx = s->default_context();
#endif
        // ...
        if ( !auto_init ) s->my_ref_count += 1;
        __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
    __TBB_ASSERT( s, "Somehow a local scheduler creation for a master thread failed" );
    // ...
    s->my_auto_initialized = auto_init;

    if( 0 == --(s->my_ref_count) ) {
        ok = s->cleanup_master( blocking );
    if( s && s->my_auto_initialized ) {
        if( !--(s->my_ref_count) ) {
            // Last reference held by this thread: tear the scheduler down (non-blocking).
            s->cleanup_master( false );
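Both termination paths above hinge on the same reference count: cleanup_master() runs only once the count drops to zero. A hedged usage sketch of what that buys at the API level (nested task_scheduler_init objects in one master thread share a single scheduler):

    #include "tbb/task_scheduler_init.h"

    void outer() {
        tbb::task_scheduler_init outer_init;      // scheduler created, ref count 1
        {
            tbb::task_scheduler_init inner_init;  // same scheduler reused, ref count 2
        }                                         // back to 1, scheduler kept alive
    }                                             // ref count 0: cleanup_master() runs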
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler )
        // ...

#if __TBB_SURVIVE_THREAD_SWITCH
#if _WIN32 || _WIN64
    uintptr_t thread_id = GetCurrentThreadId();
#else
    uintptr_t thread_id = uintptr_t(pthread_self());
#endif
    // ...
            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
            if( current==s )
                runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
            s->my_cilk_state = generic_scheduler::cs_running;

            __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
            s->my_cilk_state = generic_scheduler::cs_limbo;

            __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                          current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
            s->my_cilk_state = generic_scheduler::cs_freed;
            s->my_cilk_unwatch_thunk.routine = NULL;
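The three blocks above are the adopt / orphan / release arms of the Cilk stack-op callback. Below is a minimal sketch of the dispatch they sit in; the wrapper name apply_stack_op is made up, and the CILK_TBB_STACK_* constants are assumed to match cilk-tbb-interop.h. The real handler also maintains the TLS slot, which is not reproduced here.

    static void apply_stack_op( __cilk_tbb_stack_op op, generic_scheduler* s ) {
        switch( op ) {
        case CILK_TBB_STACK_ADOPT:    // limbo -> running: a (possibly different) thread picks the stack up
            s->my_cilk_state = generic_scheduler::cs_running;
            break;
        case CILK_TBB_STACK_ORPHAN:   // running -> limbo: the owning thread detaches from the stack
            s->my_cilk_state = generic_scheduler::cs_limbo;
            break;
        case CILK_TBB_STACK_RELEASE:  // limbo or running -> freed: the stack goes away for good
            s->my_cilk_state = generic_scheduler::cs_freed;
            s->my_cilk_unwatch_thunk.routine = NULL;
            break;
        }
    }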
#if __TBB_NUMA_SUPPORT
#if __TBB_WEAK_SYMBOLS_PRESENT
#pragma weak initialize_numa_topology
#pragma weak allocate_binding_handler
#pragma weak deallocate_binding_handler
#pragma weak bind_to_node
#pragma weak restore_affinity
#endif /* __TBB_WEAK_SYMBOLS_PRESENT */

void initialize_numa_topology(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list );

void bind_to_node( binding_handler* handler_ptr, int slot_num, int numa_id );

#if _WIN32 || _WIN64 || __linux__
static void (*initialize_numa_topology_ptr)(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list ) = NULL;
static binding_handler* (*allocate_binding_handler_ptr)( int slot_num ) = NULL;
static void (*deallocate_binding_handler_ptr)( binding_handler* handler_ptr ) = NULL;
static void (*bind_to_node_ptr)( binding_handler* handler_ptr, int slot_num, int numa_id ) = NULL;
static void (*restore_affinity_ptr)( binding_handler* handler_ptr, int slot_num ) = NULL;
#if _WIN32 || _WIN64 || __linux__
static const unsigned LinkTableSize = 5;

#define DEBUG_SUFFIX "_debug"
#if _WIN32 || _WIN64
#define TBBBIND_NAME "tbbbind" DEBUG_SUFFIX ".dll"
#else
#define TBBBIND_NAME "libtbbbind" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
#endif

// No-op fallbacks installed when the tbbbind library cannot be loaded.
static binding_handler* dummy_allocate_binding_handler ( int ) { return NULL; }
static void dummy_deallocate_binding_handler ( binding_handler* ) { }
static void dummy_bind_to_node ( binding_handler*, int, int ) { }
static void dummy_restore_affinity ( binding_handler*, int ) { }
namespace numa_topology {

int  numa_nodes_count = 0;
int* numa_indexes = NULL;
int* default_concurrency_list = NULL;
static tbb::atomic<do_once_state> numa_topology_init_state;

void initialization_impl() {
#if _WIN32 || _WIN64 || __linux__
    bool load_tbbbind = true;
#if _WIN32 && !_WIN64
    // A 32-bit process cannot address more than 32 logical processors in its affinity mask.
    SYSTEM_INFO si;
    GetNativeSystemInfo(&si);
    load_tbbbind = si.dwNumberOfProcessors <= 32;
#endif
    if (load_tbbbind && dynamic_link(TBBBIND_NAME, TbbBindLinkTable, LinkTableSize)) {
        int number_of_groups = 1;
        number_of_groups = NumberOfProcessorGroups();
        initialize_numa_topology_ptr(
            number_of_groups, numa_nodes_count, numa_indexes, default_concurrency_list);
        if (numa_nodes_count==1 && numa_indexes[0] >= 0) {
            __TBB_ASSERT( /* ... */,
                "default_concurrency() should be equal to governor::default_num_threads() on single "
                "NUMA node systems.");
        }
    }
#endif /* _WIN32 || _WIN64 || __linux__ */

    // Fallback when tbbbind is unavailable: expose a single dummy NUMA node
    // and install the no-op binding handlers.
    static int dummy_index = -1;
    // ...
    numa_nodes_count = 1;
    numa_indexes = &dummy_index;
    default_concurrency_list = &dummy_concurrency;

    allocate_binding_handler_ptr = dummy_allocate_binding_handler;
    deallocate_binding_handler_ptr = dummy_deallocate_binding_handler;
    bind_to_node_ptr = dummy_bind_to_node;
    restore_affinity_ptr = dummy_restore_affinity;
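The point of routing everything through function pointers is that callers never have to check whether tbbbind was actually loaded. A hedged sketch of what a call site in this file looks like either way; the wrapper name and the slot/node numbers are made up for illustration:

    static void bind_slot_example() {
        binding_handler* h = allocate_binding_handler_ptr( /*slot_num*/ 4 );  // NULL when the dummies are installed
        bind_to_node_ptr( h, /*slot_num*/ 4, /*numa_id*/ 0 );                 // real binding, or a no-op
        restore_affinity_ptr( h, /*slot_num*/ 4 );
        deallocate_binding_handler_ptr( h );
    }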
unsigned nodes_count() {
    // ...
    return numa_nodes_count;
}

void fill( int* indexes_array ) {
    // ...
    for ( int i = 0; i < numa_nodes_count; i++ ) {
        indexes_array[i] = numa_indexes[i];
    }
}

int default_concurrency( int node_id ) {
    // ...
        return default_concurrency_list[node_id];
    // ...
}

binding_handler* construct_binding_handler(int slot_num) {
    __TBB_ASSERT(allocate_binding_handler_ptr, "tbbbind loading was not performed");
    return allocate_binding_handler_ptr(slot_num);
}

void destroy_binding_handler(binding_handler* handler_ptr) {
    __TBB_ASSERT(deallocate_binding_handler_ptr, "tbbbind loading was not performed");
    deallocate_binding_handler_ptr(handler_ptr);
}

void bind_thread_to_node(binding_handler* handler_ptr, int slot_num, int numa_id) {
    __TBB_ASSERT(bind_to_node_ptr, "tbbbind loading was not performed");
    bind_to_node_ptr(handler_ptr, slot_num, numa_id);
}

void restore_affinity_mask(binding_handler* handler_ptr, int slot_num) {
    __TBB_ASSERT(restore_affinity_ptr, "tbbbind loading was not performed");
    restore_affinity_ptr(handler_ptr, slot_num);
}
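A hedged sketch of how a caller might drive the interface above; the enclosing tbb::internal namespace, the function name numa_default_concurrencies, and the use of std::vector are assumptions made for the example:

    #include <vector>

    std::vector<int> numa_default_concurrencies() {
        std::vector<int> indexes( tbb::internal::numa_topology::nodes_count() );
        tbb::internal::numa_topology::fill( indexes.data() );

        std::vector<int> result;
        for ( int idx : indexes )
            result.push_back( tbb::internal::numa_topology::default_concurrency( idx ) );
        return result;
    }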
    // The one-argument overload delegates to the full form with no explicit stack size.
    initialize( number_of_threads, 0 );

#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
#endif
    if( number_of_threads!=deferred ) {
        __TBB_ASSERT_RELEASE( /* ... */,
                    "number_of_threads for task_scheduler_init must be automatic or positive" );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
        if ( s->master_outermost_level() ) {
            uintptr_t &vt = s->default_context()->my_version_and_traits;
            // ...
                    : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
            // ...
            my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
        }
#endif
    } else {
        __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
    }
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
    my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
#endif
    __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()" );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    if ( s->master_outermost_level() ) {
        uintptr_t &vt = s->default_context()->my_version_and_traits;
        // ...
    }
#endif

    internal_terminate(false);
#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    bool ok = internal_terminate( true );
#if TBB_USE_EXCEPTIONS
    if( throwing && !ok )
        // ...
#endif
    return ok;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE

__cilk_tbb_pfn_stack_op routine
 
static generic_scheduler * tls_scheduler_of(uintptr_t v)
Converts TLS value to the scheduler pointer.
 
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
 
Used to form groups of tasks.
 
Work stealing task scheduler.
 
bool gcc_rethrow_exception_broken()
 
static void one_time_init()
 
bool cpu_has_speculation()
Check for transaction support.
 
Association between a handler name and location of pointer to it.
 
static generic_scheduler * init_scheduler(int num_threads, stack_size_type stack_size, bool auto_init)
Processes scheduler initialization request (possibly nested) in a master thread.
 
#define __TBB_ASSERT_RELEASE(predicate, message)
 
static basic_tls< uintptr_t > theTLS
TLS for scheduler instances associated with individual threads.
 
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
 
void __TBB_EXPORTED_FUNC handle_perror(int error_code, const char *aux_info)
Throws std::runtime_error with what() returning error_code description prefixed with aux_info.
 
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
 
static unsigned default_num_threads()
 
const int DYNAMIC_LINK_GLOBAL
 
static rml::tbb_factory theRMLServerFactory
 
static void acquire_resources()
Create key for thread-local storage and initialize RML.
 
std::size_t stack_size_type
 
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
 
static void print_version_info()
 
void __TBB_EXPORTED_METHOD terminate()
Inverse of method initialize.
 
static uintptr_t tls_value_of(generic_scheduler *s)
Computes the value to store in TLS for the given scheduler.
 
void bind_to_node(binding_handler *handler_ptr, int slot_num, int numa_id)
 
void dynamic_unlink_all()
 
void atomic_do_once(const F &initializer, atomic< do_once_state > &state)
One-time initialization function.
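This is the helper behind cilkrts_load_state and numa_topology_init_state in the listing above. A minimal sketch of the pattern, assuming it is compiled inside this source tree (tbb::internal headers); my_init_state, do_expensive_setup and ensure_ready are made-up names:

    static tbb::atomic<tbb::internal::do_once_state> my_init_state;

    static void do_expensive_setup() { /* runs exactly once, even under contention */ }

    void ensure_ready() {
        tbb::internal::atomic_do_once( do_expensive_setup, my_init_state );
    }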
 
void initialize_numa_topology(size_t groups_num, int &nodes_count, int *&indexes_list, int *&concurrency_list)
 
static void auto_terminate(void *scheduler)
The internal routine to undo automatic initialization.
 
static bool is_rethrow_broken
 
static void initialize_rml_factory()
 
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
 
static bool is_speculation_enabled
 
static void release_resources()
Destroy the thread-local storage key and deinitialize RML.
 
void deallocate_binding_handler(binding_handler *handler_ptr)
 
static generic_scheduler * init_scheduler_weak()
Automatically initializes a scheduler for a master thread with default settings and no arena.
 
tbb_server * make_private_server(tbb_client &client)
Factory method called from task.cpp to create a private_server.
 
void PrintRMLVersionInfo(void *arg, const char *server_info)
A callback routine to print RML version information on stderr.
 
binding_handler * allocate_binding_handler(int slot_num)
 
__cilk_tbb_pfn_unwatch_stacks routine
 
void __TBB_EXPORTED_METHOD initialize(int number_of_threads=automatic)
Ensure that scheduler exists for this thread.
 
static rml::tbb_server * create_rml_server(rml::tbb_client &)
 
static generic_scheduler * local_scheduler_if_initialized()
 
void destroy_process_mask()
 
OPEN_INTERNAL_NAMESPACE bool dynamic_link(const char *, const dynamic_link_descriptor *, size_t, dynamic_link_handle *handle, int)
 
void DoOneTimeInitializations()
Performs thread-safe lazy one-time general TBB initialization.
 
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
 
static bool terminate_scheduler(generic_scheduler *s, bool blocking)
Processes scheduler termination request (possibly nested) in a master thread.
 
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
 
void restore_affinity(binding_handler *handler_ptr, int slot_num)
 
#define DLD(s, h)
The helper to construct dynamic_link_descriptor structure.
 
void throw_exception(exception_id eid)
Versionless convenience wrapper for throw_exception_v4()
 
CILK_EXPORT __cilk_tbb_retcode __cilkrts_watch_stack(struct __cilk_tbb_unwatch_thunk *u, struct __cilk_tbb_stack_op_thunk o)
 
static bool initialization_done()
 
static int __TBB_EXPORTED_FUNC default_num_threads()
Returns the number of threads TBB scheduler would create if initialized by default.
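A hedged usage sketch of this query through its public wrapper on task_scheduler_init; the function name configure_scheduler is made up:

    #include "tbb/task_scheduler_init.h"

    void configure_scheduler() {
        int n = tbb::task_scheduler_init::default_num_threads();  // what "automatic" would give
        tbb::task_scheduler_init init( n > 1 ? n - 1 : 1 );       // e.g. leave one hardware thread free
        // ... run parallel work ...
    }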
 
bool internal_terminate(bool blocking)
 
static const int automatic
Typedef for number of threads that is automatic.
 
void __TBB_EXPORTED_FUNC runtime_warning(const char *format,...)
Report a runtime warning.
 
static bool UsePrivateRML
 
void PrintExtraVersionInfo(const char *category, const char *format,...)
Prints arbitrary extra TBB version information on stderr.
 
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.