35 extern generic_scheduler* (*AllocateSchedulerPtr)( market&, bool );
    41 #if __TBB_TASK_GROUP_CONTEXT
    42 context_state_propagation_mutex_type the_context_state_propagation_mutex;
    44 uintptr_t the_context_state_propagation_epoch = 0;
    55 #if __TBB_TASK_GROUP_CONTEXT
    59 #if __TBB_TASK_PRIORITY
    78 #if _MSC_VER && !defined(__INTEL_COMPILER)
    81     #pragma warning(disable:4355)
    89     , my_co_context(m.worker_stack_size(), genuine ? NULL : this)
    91     , my_small_task_count(1)   
    93     , my_cilk_state(cs_none)
   100 #if __TBB_PREVIEW_CRITICAL_TASKS
   103 #if __TBB_PREVIEW_RESUMABLE_TASKS
   105     my_current_is_recalled = NULL;
   106     my_post_resume_action = PRA_NONE;
   107     my_post_resume_arg = NULL;
   113 #if __TBB_TASK_PRIORITY
   114     my_ref_top_priority = &m.my_global_top_priority;
   115     my_ref_reload_epoch = &m.my_global_reload_epoch;
   117 #if __TBB_TASK_GROUP_CONTEXT
   119     my_context_state_propagation_epoch = the_context_state_propagation_epoch;
   120     my_context_list_head.my_prev = &my_context_list_head;
   121     my_context_list_head.my_next = &my_context_list_head;
   122     ITT_SYNC_CREATE(&my_context_list_mutex, SyncType_Scheduler, SyncObj_ContextsList);
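
The two assignments at lines 120-121 set up an intrusive circular list whose sentinel (my_context_list_head) points at itself while the list is empty. A minimal illustrative sketch of that pattern (not TBB's code; the real context_list_node_t lives inside each task_group_context):

    struct list_node_t {
        list_node_t* my_prev;
        list_node_t* my_next;
    };

    inline void list_init( list_node_t& head ) {
        head.my_prev = &head;   // empty list: the sentinel is linked to itself
        head.my_next = &head;
    }

    inline void list_push_front( list_node_t& head, list_node_t& node ) {
        node.my_prev = &head;
        node.my_next = head.my_next;
        head.my_next->my_prev = &node;
        head.my_next = &node;
    }

    inline void list_remove( list_node_t& node ) {
        node.my_prev->my_next = node.my_next;
        node.my_next->my_prev = node.my_prev;
    }

With a self-linked sentinel, insertion and removal need no NULL checks, and emptiness is simply head.my_next == &head.
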
   128 #if _MSC_VER && !defined(__INTEL_COMPILER)
   130 #endif // warning 4355 is back
   132 #if TBB_USE_ASSERT > 1
   143     for ( size_t i = 0; i < H; ++i )
   144         __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
   145     for ( size_t i = H; i < T; ++i ) {
   149                 tp[i]->prefix().extra_state == es_task_proxy, "task in the deque has invalid state" );
   153         __TBB_ASSERT( tp[i] == poisoned_ptr, "Task pool corrupted" );
   164 #if defined(_MSC_VER)&&_MSC_VER<1400 && !_WIN64
   166     __asm mov eax, fs:[0x18]
   169     NT_TIB  *pteb = (NT_TIB*)NtCurrentTeb();
   171     __TBB_ASSERT( &pteb < pteb->StackBase && &pteb > pteb->StackLimit, "invalid stack info in TEB" );
   172     __TBB_ASSERT( stack_size > 0, "stack_size not initialized?" );
   190     void    *stack_base = &stack_size;
   191 #if __linux__ && !__bg__
   195     size_t  np_stack_size = 0;
   197     void    *stack_limit = NULL;
   199 #if __TBB_PREVIEW_RESUMABLE_TASKS
   201         stack_limit = my_co_context.get_stack_limit();
   202         __TBB_ASSERT( (uintptr_t)stack_base > (uintptr_t)stack_limit, "stack size must be positive" );
   204         stack_size = size_t((char*)stack_base - (char*)stack_limit);
   208     pthread_attr_t  np_attr_stack;
   209     if( !stack_limit && 0 == pthread_getattr_np(pthread_self(), &np_attr_stack) ) {
   210         if ( 0 == pthread_attr_getstack(&np_attr_stack, &stack_limit, &np_stack_size) ) {
   212             pthread_attr_t  attr_stack;
   213             if ( 0 == pthread_attr_init(&attr_stack) ) {
   214                 if ( 0 == pthread_attr_getstacksize(&attr_stack, &stack_size) ) {
   215                     if ( np_stack_size < stack_size ) {
   218                         rsb_base = stack_limit;
   219                         stack_size = np_stack_size/2;
   221                         stack_limit = (char*)stack_limit + stack_size;
   227                 pthread_attr_destroy(&attr_stack);
   230             my_rsb_stealing_threshold = (uintptr_t)((char*)rsb_base + stack_size/2);
   235             stack_size = size_t((char*)stack_base - (char*)stack_limit);
   237         pthread_attr_destroy(&np_attr_stack);
   240     __TBB_ASSERT( stack_size > 0, "stack size must be positive" );
   245 #if __TBB_TASK_GROUP_CONTEXT
   251 void generic_scheduler::cleanup_local_context_list () {
   253     bool wait_for_concurrent_destroyers_to_leave = false;
   254     uintptr_t local_count_snapshot = my_context_state_propagation_epoch;
   255     my_local_ctx_list_update.store<relaxed>(1);
   263         if ( my_nonlocal_ctx_list_update.load<relaxed>() || local_count_snapshot != the_context_state_propagation_epoch )
   264             lock.acquire(my_context_list_mutex);
   268         while ( node != &my_context_list_head ) {
   275                 wait_for_concurrent_destroyers_to_leave = true;
   278     my_local_ctx_list_update.store<release>(0);
   280     if ( wait_for_concurrent_destroyers_to_leave )
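
A minimal sketch of the fast-path idea used by cleanup_local_context_list above (not TBB's code; plain C++11 atomics assumed): a thread announces a local update with a relaxed store and takes the mutex only when a concurrent non-local update or a state-propagation epoch change could race with it.

    #include <atomic>
    #include <cstdint>
    #include <mutex>

    std::atomic<int>            local_update( 0 ), nonlocal_update( 0 );
    std::atomic<std::uintptr_t> propagation_epoch( 0 );
    std::mutex                  context_list_mutex;

    void cleanup_context_list_sketch() {
        std::uintptr_t snapshot = propagation_epoch.load( std::memory_order_relaxed );
        local_update.store( 1, std::memory_order_relaxed );
        {
            std::unique_lock<std::mutex> lock( context_list_mutex, std::defer_lock );
            if ( nonlocal_update.load( std::memory_order_relaxed )
                 || snapshot != propagation_epoch.load( std::memory_order_relaxed ) )
                lock.lock();             // slow path: another thread may touch the list
            // ... walk the local context list and unlink its nodes here ...
        }
        local_update.store( 0, std::memory_order_release );
    }
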
   296 #if __TBB_TASK_PRIORITY
   299 #if __TBB_PREVIEW_CRITICAL_TASKS
   302 #if __TBB_TASK_GROUP_CONTEXT
   303     cleanup_local_context_list();
   307 #if __TBB_HOARD_NONLOCAL_TASKS
   308     while( task* t = my_nonlocal_free_list ) {
   310         my_nonlocal_free_list = p.next;
   327 #if __TBB_COUNT_TASK_NODES
   328     my_market->update_task_node_count( my_task_node_count );
   342 #if __TBB_HOARD_NONLOCAL_TASKS
   343         if( (t = my_nonlocal_free_list) ) {
   356             __TBB_ASSERT( t, "another thread emptied the my_return_list" );
   362 #if __TBB_COUNT_TASK_NODES
   363             ++my_task_node_count;
   369 #if __TBB_PREFETCHING
   372 #if __TBB_HOARD_NONLOCAL_TASKS
   388 #if __TBB_COUNT_TASK_NODES
   389         ++my_task_node_count;
   394 #if __TBB_TASK_GROUP_CONTEXT
   417         task* old = s.my_return_list;
   423         if( as_atomic(s.my_return_list).compare_and_swap(&t, old )==old ) {
   424 #if __TBB_PREFETCHING
   441     if ( T + num_tasks <= my_arena_slot->my_task_pool_size )
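
The compare_and_swap at line 423 prepends a freed small task to the owning scheduler's my_return_list. An illustrative C++11 rendering of that lock-free push (a sketch, not TBB's free_nonlocal_small_task, which additionally has to handle a plugged list):

    #include <atomic>

    struct small_task {
        small_task* next;
    };

    void return_to_owner( std::atomic<small_task*>& return_list, small_task* t ) {
        small_task* old_head = return_list.load( std::memory_order_relaxed );
        do {
            t->next = old_head;                       // link in front of the current head
        } while ( !return_list.compare_exchange_weak( old_head, t,
                                                      std::memory_order_release,
                                                      std::memory_order_relaxed ) );
    }
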
   459     for ( size_t i = H; i < T; ++i )
   468         if ( new_size < 2 * my_arena_slot->my_task_pool_size )
   474     for ( size_t i = H; i < T; ++i )
   496     bool sync_prepare_done = false;
   512         else if( !sync_prepare_done ) {
   515             sync_prepare_done = true;
   538     task** victim_task_pool;
   539     bool sync_prepare_done = false;
   541         victim_task_pool = victim_arena_slot->task_pool;
   547             if( sync_prepare_done )
   558         else if( !sync_prepare_done ) {
   561             sync_prepare_done = true;
   565 #if __TBB_STEALING_ABORT_ON_CONTENTION
   566         if(!backoff.bounded_pause()) {
   582                   "not really locked victim's task pool?" );
   583     return victim_task_pool;
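
A simplified sketch of the locking idea behind lock_task_pool above (illustrative only; LockedPool and the yield-based backoff are stand-ins for TBB's internal sentinel and backoff class): the lock is taken by swapping a sentinel into the slot's pool pointer, and contention is handled by backing off and retrying.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <thread>

    struct task;
    static task** const LockedPool = reinterpret_cast<task**>( std::intptr_t(-1) );

    task** lock_pool_sketch( std::atomic<task**>& pool ) {
        int pause_count = 1;
        for (;;) {
            task** snapshot = pool.load( std::memory_order_acquire );
            if ( snapshot == NULL )
                return NULL;                       // empty pool: nothing to lock
            if ( snapshot != LockedPool
                 && pool.compare_exchange_strong( snapshot, LockedPool ) )
                return snapshot;                   // the pool is ours until it is restored
            for ( int i = 0; i < pause_count; ++i )
                std::this_thread::yield();         // back off before retrying
            if ( pause_count < 16 )
                pause_count *= 2;
        }
    }
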
   587                                                 task** victim_task_pool ) const {
   588     __TBB_ASSERT( victim_arena_slot, "empty victim arena slot pointer" );
   601         __TBB_ASSERT( ref_count>=0, "attempt to spawn task whose parent has a ref_count<0" );
   602         __TBB_ASSERT( ref_count!=0, "attempt to spawn task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
   608                   "backwards compatibility to TBB 2.0 tasks is broken" );
   609 #if __TBB_TASK_ISOLATION
   621 #if __TBB_TASK_PRIORITY
   630         free_task<small_task>(proxy);
   635 #if __TBB_PREVIEW_CRITICAL_TASKS
   636 bool generic_scheduler::handled_as_critical( task& t ) {
   639 #if __TBB_TASK_ISOLATION
   645     my_arena->my_critical_task_stream.push(
   660     if ( &first->prefix().next == &next ) {
   669 #if __TBB_PREVIEW_CRITICAL_TASKS
   670         if( !handled_as_critical( *first ) )
   701 #if __TBB_PREVIEW_CRITICAL_TASKS
   702             if( !handled_as_critical( *t ) )
   708         if( size_t num_tasks = tasks.size() ) {
   730 #if __TBB_TASK_GROUP_CONTEXT
   732                     "all the root tasks in list must share the same context");
   753     s->my_arena->enqueue_task(t, (intptr_t)prio, s->my_random );
   756 #if __TBB_TASK_PRIORITY
   757 class auto_indicator : no_copy {
   758     volatile bool& my_indicator;
   760     auto_indicator ( volatile bool& indicator ) : my_indicator(indicator) { my_indicator = true; }
   761     ~auto_indicator () { my_indicator = false; }
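
A short usage sketch for the RAII guard above (illustrative only; the class is repeated without its no_copy base so the snippet stands alone): the indicator is raised for exactly the scope of the reshuffle and cleared on every exit path, including exceptions.

    class auto_indicator {
        volatile bool& my_indicator;
    public:
        auto_indicator( volatile bool& indicator ) : my_indicator( indicator ) { my_indicator = true; }
        ~auto_indicator() { my_indicator = false; }
    };

    volatile bool my_pool_reshuffling_pending = false;

    void reshuffle_sketch() {
        auto_indicator indicator( my_pool_reshuffling_pending );   // the flag goes up here
        // ... walk the pool and offload lower-priority tasks ...
    }                                                              // and drops on scope exit
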
   769 #if __TBB_TASK_ISOLATION
   771     bool tasks_omitted = false;
   772     while ( !t && T>H0 ) {
   773         t = get_task( --T, isolation, tasks_omitted );
   774         if ( !tasks_omitted ) {
   780     if ( t && tasks_omitted ) {
   810 #if __TBB_TASK_ISOLATION
   825     __TBB_ASSERT( my_offloaded_tasks, "At least one task is expected to be already offloaded" );
   832     auto_indicator indicator( my_pool_reshuffling_pending );
   842     for ( size_t src = H0; src<T0; ++src ) {
   846                 intptr_t p = priority( *t );
   847                 if ( p<*my_ref_top_priority ) {
   848                     offload_task( *t, p );
   864 #if __TBB_TASK_ISOLATION
   875     task **link = &offloaded_tasks;
   876     while ( task *t = *link ) {
   877         task** next_ptr = &t->prefix().next_offloaded;
   879         if ( priority(*t) >= top_priority ) {
   880             tasks.push_back( t );
   883             task* next = *next_ptr;
   884             t->prefix().owner = this;
   892     if ( link == &offloaded_tasks ) {
   893         offloaded_tasks = NULL;
   895         offloaded_task_list_link = NULL;
   902         offloaded_task_list_link = link;
   905     size_t num_tasks = tasks.size();
   918     if ( t ) --num_tasks;
   926     uintptr_t reload_epoch = *my_ref_reload_epoch;
   929                   || my_local_reload_epoch - reload_epoch > uintptr_t(-1)/2,
   930                   "Reload epoch counter overflow?" );
   931     if ( my_local_reload_epoch == reload_epoch )
   934     intptr_t top_priority = effective_reference_priority();
   936     task *t = reload_tasks( my_offloaded_tasks, my_offloaded_task_list_tail_link, __TBB_ISOLATION_ARG( top_priority, isolation ) );
   953     my_local_reload_epoch = reload_epoch;
   958 #if __TBB_TASK_ISOLATION
   965         || is_local_task_pool_quiescent(), "Is it safe to get a task at position T?" );
   967     task* result = my_arena_slot->task_pool_ptr[T];
   968     __TBB_ASSERT( !is_poisoned( result ), "The poisoned task is going to be processed" );
   969 #if __TBB_TASK_ISOLATION
   974     if ( !omit && !is_proxy( *result ) )
   977         tasks_omitted = true;
   982     if ( !result || !is_proxy( *result ) )
   986     task_proxy& tp = static_cast<task_proxy&>(*result);
   990         __TBB_ASSERT( is_version_3_task( *t ), "backwards compatibility with TBB 2.0 broken" );
   991         my_innermost_running_task = t; 
   992 #if __TBB_TASK_ISOLATION
   994         if ( !tasks_omitted )
   998             t->note_affinity( my_affinity_id );
  1004     free_task<small_task>( tp );
  1005 #if __TBB_TASK_ISOLATION
  1006     if ( tasks_omitted )
  1007         my_arena_slot->task_pool_ptr[T] = NULL;
  1017     size_t H0 = (size_t)-1, T = T0;
  1018     task* result = NULL;
  1019     bool task_pool_empty = false;
  1028             if ( (intptr_t)H0 > (intptr_t)T ) {
  1032                     && H0 == T + 1, "victim/thief arbitration algorithm failure" );
  1035                 task_pool_empty = true;
  1037             } else if ( H0 == T ) {
  1040                 task_pool_empty = true;
  1049 #if __TBB_TASK_ISOLATION
  1050         result = get_task( T, isolation, tasks_omitted );
  1054         } else if ( !tasks_omitted ) {
  1062     } while ( !result && !task_pool_empty );
  1064 #if __TBB_TASK_ISOLATION
  1065     if ( tasks_omitted ) {
  1066         if ( task_pool_empty ) {
  1129             free_task<no_cache_small_task>(tp);
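
A simplified sketch of the isolation filtering performed by get_task above (not TBB's code; task_stub stands in for the real task): the pool is scanned from the tail for a task whose isolation tag matches, mismatching tasks are left in place, and a hole is left behind once anything has been skipped.

    #include <cstddef>
    #include <cstdint>

    typedef std::intptr_t isolation_tag;
    const isolation_tag no_isolation = 0;

    struct task_stub { isolation_tag isolation; };

    task_stub* get_task_sketch( task_stub** pool, std::size_t head, std::size_t& tail,
                                isolation_tag isolation, bool& tasks_omitted ) {
        for ( std::size_t t = tail; t > head; --t ) {
            task_stub* candidate = pool[t - 1];
            if ( isolation == no_isolation || candidate->isolation == isolation ) {
                if ( !tasks_omitted )
                    tail = t - 1;          // nothing was skipped: the pool simply shrinks
                else
                    pool[t - 1] = NULL;    // leave a hole; skipped tasks stay in place
                return candidate;
            }
            tasks_omitted = true;          // remember that a task was passed over
        }
        return NULL;
    }
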
  1148     task* result = NULL;
  1151     bool tasks_omitted = false;
  1163         result = victim_pool[H-1];
  1171                 task_proxy& tp = *static_cast<task_proxy*>(result);
  1179             tasks_omitted = true;
  1180         } else if ( !tasks_omitted ) {
  1186     } while ( !result );
  1190     ITT_NOTIFY( sync_acquired, (void*)((uintptr_t)&victim_slot+sizeof( uintptr_t )) );
  1192     if ( tasks_omitted ) {
  1194         victim_pool[H-1] = NULL;
  1199 #if __TBB_PREFETCHING
  1203     if ( tasks_omitted )
  1209 #if __TBB_PREVIEW_CRITICAL_TASKS
  1217     if( my_arena->my_critical_task_stream.empty(0) )
  1219     task* critical_task = NULL;
  1222 #if __TBB_TASK_ISOLATION
  1224         critical_task = my_arena->my_critical_task_stream.pop_specific( 0, start_lane, isolation );
  1230     return critical_task;
  1243         free_task<no_cache_small_task>(*tp);
  1250     __TBB_ASSERT ( my_arena_index < my_arena->my_num_slots, "arena slot index is out-of-bound" );
  1254                    "entering arena without tasks to share" );
  1275     __TBB_ASSERT(!genuine || index, "workers should have index > 0");
  1276     s->my_arena_index = index; 
  1277     s->my_dummy_task->prefix().ref_count = 2;
  1281         s->init_stack_info();
  1292     task& t = *s->my_dummy_task;
  1295 #if __TBB_TASK_GROUP_CONTEXT
  1298 #if __TBB_FP_CONTEXT
  1299     s->default_context()->capture_fp_settings();
  1302     s->init_stack_info();
  1303     context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
  1304     s->my_market->my_masters.push_front( *s );
  1309         s->attach_arena( a, 0, true );
  1310         s->my_arena_slot->my_scheduler = s;
  1311 #if __TBB_TASK_GROUP_CONTEXT
  1312         a->my_default_ctx = s->default_context();
  1315     __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
  1319     s->my_market->register_master( s->master_exec_resource );
  1322 #if __TBB_ARENA_OBSERVER
  1323     __TBB_ASSERT( !a || a->my_observers.empty(), "Just created arena cannot have any observers associated with it" );
  1325 #if __TBB_SCHEDULER_OBSERVER
  1326     the_global_observer_list.notify_entry_observers( s->my_last_global_observer, false );
  1333     __TBB_ASSERT( !s.my_arena_slot, "cleaning up attached worker" );
  1334 #if __TBB_SCHEDULER_OBSERVER
  1336         the_global_observer_list.notify_exit_observers( s.my_last_global_observer, true );
  1338     s.cleanup_scheduler();
  1364 #if __TBB_ARENA_OBSERVER
  1366         a->my_observers.notify_exit_observers( my_last_local_observer, false );
  1368 #if __TBB_SCHEDULER_OBSERVER
  1369     the_global_observer_list.notify_exit_observers( my_last_global_observer, false );
  1372     m->unregister_master( master_exec_resource );
  1376 #if __TBB_STATISTICS
  1381 #if __TBB_TASK_GROUP_CONTEXT
  1383         default_context()->~task_group_context();
  1386     context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
  1396     return m->release( a != NULL, blocking_terminate );
 void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
 
#define GATHER_STATISTIC(x)
 
void spawn(task &first, task *&next) __TBB_override
For internal use only.
 
static void sign_off(generic_scheduler *s)
Unregister TBB scheduler instance from thread-local storage.
 
Used to form groups of tasks.
 
Work stealing task scheduler.
 
const isolation_tag no_isolation
 
static const kind_type detached
 
isolation_tag isolation
The tag used for task isolation.
 
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
 
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
 
atomic< unsigned > my_limit
The maximal number of currently busy slots.
 
task * my_dummy_task
Fake root task created by slave threads.
 
generic_scheduler *(* AllocateSchedulerPtr)(market &, bool)
Pointer to the scheduler factory function.
 
static const intptr_t num_priority_levels
 
task **__TBB_atomic task_pool
 
bool is_task_pool_published() const
 
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
 
#define __TBB_ISOLATION_EXPR(isolation)
 
void deallocate_task(task &t)
Return task object to the memory allocator.
 
bool push(task_proxy *t)
Push task_proxy onto the mailbox queue of another thread.
 
scheduler * owner
Obsolete. The scheduler that owns the task.
 
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
 
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
 
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
 
Smart holder for the empty task class with automatic destruction.
 
Base class for user-defined tasks.
 
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
 
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
 
state_type state() const
Current execution state.
 
unsigned char state
A task::state_type, stored as a byte for compactness.
 
task_proxy * pop(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get next piece of mail, or NULL if mailbox is empty.
 
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
 
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
 
mail_outbox * outbox
Mailbox to which this was mailed.
 
static generic_scheduler * create_worker(market &m, size_t index, bool geniune)
Initialize a scheduler for a worker thread.
 
void allocate_task_pool(size_t n)
 
static void sign_on(generic_scheduler *s)
Register TBB scheduler instance in thread-local storage.
 
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
 
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
 
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
 
void push_back(const T &val)
 
#define __TBB_CONTEXT_ARG(arg1, context)
 
#define __TBB_PREVIEW_RESUMABLE_TASKS
 
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
 
void pause()
Pause for a while.
 
unsigned short get()
Get a random number.
 
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
 
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
 
market * my_market
The market I am in.
 
static const kind_type dying
 
#define __TBB_cl_prefetch(p)
 
void assert_task_valid(const task *)
 
bool is_quiescent_local_task_pool_empty() const
 
__TBB_atomic size_t head
Index of the first ready task in the deque.
 
void __TBB_store_relaxed(volatile T &location, V value)
 
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
 
static bool is_version_3_task(task &t)
 
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
 
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
 
static const intptr_t pool_bit
 
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
 
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
 
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
 
void advertise_new_work()
If necessary, raise a flag that there is new job in arena.
 
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
 
void __TBB_store_with_release(volatile T &location, V value)
 
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
 
void destroy()
Destroy and deallocate this scheduler object.
 
bool recipient_is_idle()
True if thread that owns this mailbox is looking for work.
 
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
 
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
 
static const kind_type binding_required
 
void local_spawn(task *first, task *&next)
 
task is in ready pool, or is going to be put there, or was just taken off.
 
A scheduler with a customized evaluation loop.
 
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
 
void publish_task_pool()
Used by workers to enter the task pool.
 
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
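
A hedged sketch of the publication idea behind commit_spawned_tasks (illustrative only, written with C++11 atomics rather than TBB's __TBB_store_with_release): the task pointers are written first and the tail index is released afterwards, so a thief that observes the new tail also observes the tasks.

    #include <atomic>
    #include <cstddef>

    struct task;

    void commit_spawned_tasks_sketch( task** pool, std::atomic<std::size_t>& tail,
                                      task* const* new_tasks, std::size_t count ) {
        std::size_t t = tail.load( std::memory_order_relaxed );
        for ( std::size_t i = 0; i < count; ++i )
            pool[t + i] = new_tasks[i];                       // fill the slots first
        tail.store( t + count, std::memory_order_release );   // then publish them to thieves
    }
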
 
uintptr_t my_stealing_threshold
Position in the call stack marking the maximum stack usage at which stealing is still allowed.
 
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
 
task_group_context * context()
This method is deprecated and will be removed in the future.
 
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
 
bool is_local_task_pool_quiescent() const
 
intptr_t isolation_tag
A tag for task isolation.
 
void release_task_pool() const
Unlocks the local task pool.
 
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
 
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
 
void local_spawn_root_and_wait(task *first, task *&next)
 
Vector that grows without reallocations, and stores items in the reverse order.
 
#define __TBB_control_consistency_helper()
 
bool is_quiescent_local_task_pool_reset() const
 
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
 
static const intptr_t mailbox_bit
 
unsigned short affinity_id
An id as used for specifying affinity.
 
bool is_worker() const
True if running on a worker thread, false otherwise.
 
Represents acquisition of a mutex.
 
void acquire_task_pool() const
Locks the local task pool.
 
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
 
#define __TBB_ISOLATION_ARG(arg1, isolation)
 
static const intptr_t location_mask
 
Base class for types that should not be copied or assigned.
 
task * my_free_list
Free list of small tasks that can be reused.
 
void on_thread_leaving()
Notification that worker or master leaves its arena.
 
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
 
task **__TBB_atomic task_pool_ptr
Task pool of the scheduler that owns this slot.
 
void set_ref_count(int count)
Set reference count.
 
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
 
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
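
An illustrative sketch of the small-object recycling this describes (not TBB's allocator; quick_size_threshold and the globals are stand-ins, and the real free list is per-thread): blocks no larger than the threshold are recycled through a singly linked free list instead of returning to the heap.

    #include <cstddef>
    #include <cstdlib>

    const std::size_t quick_size_threshold = 256;   // stand-in for quick_task_size

    struct free_node { free_node* next; };
    static free_node* free_list = NULL;             // per-thread in the real scheme

    void* allocate_small( std::size_t bytes ) {
        if ( bytes <= quick_size_threshold && free_list ) {
            free_node* n = free_list;
            free_list = n->next;                    // pop a recycled block
            return n;
        }
        return std::malloc( bytes < sizeof(free_node) ? sizeof(free_node) : bytes );
    }

    void deallocate_small( void* p, std::size_t bytes ) {
        if ( bytes <= quick_size_threshold ) {
            free_node* n = static_cast<free_node*>( p );
            n->next = free_list;                    // push back for reuse
            free_list = n;
        } else {
            std::free( p );
        }
    }
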
 
task * extract_task()
Returns a pointer to the encapsulated task or NULL, and frees proxy if necessary.
 
void cleanup_scheduler()
Cleans up this scheduler (the scheduler might be destroyed).
 
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
 
Class that implements exponential backoff.
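
An illustrative sketch of such a backoff helper (not TBB's class; std::this_thread::yield stands in for the machine pause instruction): the pause length doubles up to a bound, and bounded_pause reports when that bound is reached so the caller can stop spinning.

    #include <thread>

    class backoff_sketch {
        int my_count;
        static const int threshold = 16;
    public:
        backoff_sketch() : my_count( 1 ) {}
        void pause() {
            for ( int i = 0; i < my_count; ++i )
                std::this_thread::yield();
            if ( my_count < threshold ) my_count *= 2;
        }
        bool bounded_pause() {          // returns false once the backoff limit is hit
            pause();
            return my_count < threshold;
        }
        void reset() { my_count = 1; }
    };
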
 
void atomic_fence()
Sequentially consistent full memory fence.
 
auto first(Container &c) -> decltype(begin(c))
 
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
 
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
 
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
 
#define __TBB_FetchAndDecrementWrelease(P)
 
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
 
bool outermost
Indicates that a scheduler is on outermost level.
 
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
 
size_t worker_stack_size() const
Returns the requested stack size of worker threads.
 
virtual void local_wait_for_all(task &parent, task *child)=0
 
tbb::task * parent
The task whose reference count includes me.
 
#define ITT_SYNC_CREATE(obj, type, name)
 
context_list_node_t * my_next
 
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
 
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
 
#define __TBB_cl_evict(p)
 
tbb::task * next
"next" field for list of task
 
size_t my_task_pool_size
Capacity of the primary task pool (number of elements - pointers to task).
 
#define ITT_NOTIFY(name, obj)
 
T __TBB_load_relaxed(const volatile T &location)
 
atomic< T > & as_atomic(T &t)
 
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
 
void poison_pointer(T *__TBB_atomic &)
 
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
 
void copy_memory(T *dst) const
Copies the contents of the vector into the dst array.
 
void assert_task_pool_valid() const
 
void fill_with_canary_pattern(size_t, size_t)
 
static const size_t min_task_pool_size
 
void Scheduler_OneTimeInitialization(bool itt_present)
Defined in scheduler.cpp.
 
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
 
task object is on free list, or is going to be put there, or was just taken off.
 
Set if the task has been stolen.
 
void leave_task_pool()
Leave the task pool.
 
Set if ref_count might be changed by another thread. Used for debugging.
 
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
 
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
 
intptr_t reference_count
A reference count.
 
static bool is_proxy(const task &t)
True if t is a task_proxy.
 
bool is_critical(task &t)
 
task object is freshly allocated or recycled.
 
scheduler_properties my_properties
 
Memory prefix to a task object.
 
static const unsigned ref_external
Reference increment values for externals and workers.
 
generic_scheduler * allocate_scheduler(market &m, bool genuine)
 
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
 
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
 
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
 
virtual void __TBB_EXPORTED_METHOD note_affinity(affinity_id id)
Invoked by scheduler to notify task that it ran on unexpected thread.
 
static bool is_shared(intptr_t tat)
True if the proxy is stored both in its sender's pool and in the destination mailbox.
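
An illustrative sketch of the tag-bit scheme this and the pool_bit/mailbox_bit/location_mask entries describe (a sketch of the idea, not TBB's task_proxy layout): the low bits of the combined task-and-tag word record which locations still reference the proxy.

    #include <cstdint>

    const std::intptr_t pool_bit      = 1;   // proxy is still in the sender's task pool
    const std::intptr_t mailbox_bit   = 2;   // proxy is still in the recipient's mailbox
    const std::intptr_t location_mask = pool_bit | mailbox_bit;

    inline bool is_shared_sketch( std::intptr_t task_and_tag ) {
        // Shared means both locations still reference the proxy.
        return ( task_and_tag & location_mask ) == location_mask;
    }

    inline void* task_pointer_sketch( std::intptr_t task_and_tag ) {
        // Stripping the tag bits recovers the encapsulated task pointer.
        return reinterpret_cast<void*>( task_and_tag & ~location_mask );
    }
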
 
virtual ~scheduler()=0
Pure virtual destructor.
 
generic_scheduler(market &, bool)