#ifndef _TBB_custom_scheduler_H
#define _TBB_custom_scheduler_H

// ... (includes and scheduler traits elided)
#if __TBB_x86_32||__TBB_x86_64
// ...
#endif

//! A scheduler with a customized evaluation loop.
template<typename SchedulerTraits>
class custom_scheduler : private generic_scheduler {
    // ...

    //! Decrements ref_count of a predecessor. Elided lines are marked "// ...".
    void tally_completion_of_predecessor( task& s, __TBB_ISOLATION_ARG( task*& bypass_slot, isolation_tag isolation ) ) {
        task_prefix& p = s.prefix();
        if( SchedulerTraits::itt_possible )
            ITT_NOTIFY(sync_releasing, &p.ref_count);
        if( SchedulerTraits::has_slow_atomic && p.ref_count==1 )
            p.ref_count = 0;
        else {
            reference_count old_ref_count = __TBB_FetchAndDecrementWrelease(&p.ref_count);
#if __TBB_PREVIEW_RESUMABLE_TASKS
            if (old_ref_count == internal::abandon_flag + 2) {
                // The waiting scheduler abandoned this wait; wake it back up.
                // ...
                tbb::task::resume(p.abandoned_scheduler);
                return;
            }
#endif
            if (old_ref_count > 1) {
                // More references exist; the predecessor is not ready yet.
                return;
            }
        }
        // Ordering on p.ref_count (superfluous if SchedulerTraits::has_slow_atomic).
        __TBB_control_consistency_helper();
        __TBB_ASSERT(p.ref_count==0, "completion of task caused predecessor's reference count to underflow");
        if( SchedulerTraits::itt_possible )
            ITT_NOTIFY(sync_acquired, &p.ref_count);
#if __TBB_TASK_ISOLATION
        // ...
        p.isolation = isolation;
#endif
#if __TBB_RECYCLE_TO_ENQUEUE
        if (p.state==task::to_enqueue) {
            // ... (push the predecessor to the starvation-resistant queue)
        } else
#endif
        if( bypass_slot==NULL )
            bypass_slot = &s;
#if __TBB_PREVIEW_CRITICAL_TASKS
        // ... (a critical predecessor displaces the current bypass candidate)
#endif
        // ...
    }

    // ...

    static generic_scheduler* allocate_scheduler( market& m, bool genuine ) {
        // ...
        s->assert_task_pool_valid();
        // ...
    }
    // ...
}; // class custom_scheduler
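// ---------------------------------------------------------------------------
// Illustrative sketch, not part of custom_scheduler.h: the user-level pattern
// serviced by tally_completion_of_predecessor() above. A parent sets ref_count
// to the number of children plus one for the wait itself; every completing
// child atomically decrements it, and the decrement that exhausts the count
// releases the waiting parent. Uses the classic (pre-oneTBB) tbb::task API.
// ---------------------------------------------------------------------------
#include <tbb/task.h>
#include <cstdio>

struct ChildTask : tbb::task {
    tbb::task* execute() override {
        std::printf( "child done\n" );
        return NULL; // completion decrements the parent's ref_count
    }
};

struct RootTask : tbb::task {
    tbb::task* execute() override {
        set_ref_count( 3 ); // two children + one for the wait
        spawn( *new( allocate_child() ) ChildTask );
        // Blocks until ref_count drops to 1, i.e. both children have completed.
        spawn_and_wait_for_all( *new( allocate_child() ) ChildTask );
        return NULL;
    }
};

int main() {
    tbb::task::spawn_root_and_wait( *new( tbb::task::allocate_root() ) RootTask );
    return 0;
}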
//! Try getting a task from the mailbox or stealing from another scheduler.
template<typename SchedulerTraits>
task* custom_scheduler<SchedulerTraits>::receive_or_steal_task(
        __TBB_ISOLATION_ARG( __TBB_atomic reference_count& completion_ref_count, isolation_tag isolation ) )
{
    task* t = NULL;
    bool outermost_worker_level = worker_outermost_level();
    bool outermost_dispatch_level = outermost_worker_level || master_outermost_level();
    bool can_steal_here = can_steal();
    bool outermost_current_worker_level = outermost_worker_level;
#if __TBB_PREVIEW_RESUMABLE_TASKS
    outermost_current_worker_level &= my_properties.genuine;
#endif
    my_inbox.set_is_idle( true );
#if __TBB_HOARD_NONLOCAL_TASKS
    // ...
#endif
#if __TBB_TASK_PRIORITY
    if ( outermost_dispatch_level ) {
        if ( intptr_t skipped_priority = my_arena->my_skipped_fifo_priority ) {
            // Some priority levels of FIFO tasks were skipped; try to restore them.
            if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority
                 && skipped_priority > my_arena->my_top_priority )
            {
                my_market->update_arena_priority( *my_arena, skipped_priority );
            }
        }
    }
#endif
    // The number of slots potentially used in the arena. Updated once in a while.
    size_t n = my_arena->my_limit-1;
    int yield_count = 0;
    // The state "failure_count==-1" is used only when itt_possible is true,
    // and denotes that a sync_prepare has not yet been issued.
    for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {
        // ...
        if( completion_ref_count == 1 ) {
            // All of the parent's children have completed; stop waiting.
            if( SchedulerTraits::itt_possible ) {
                if( failure_count!=-1 ) {
                    ITT_NOTIFY(sync_prepare, &completion_ref_count);
                    // ...
                }
                ITT_NOTIFY(sync_acquired, &completion_ref_count);
            }
            __TBB_control_consistency_helper(); // on ref_count
            break; // exit the stealing loop and return
        }
        // Check if the resource manager requires our arena to relinquish some threads.
        if ( outermost_current_worker_level ) {
            if ( my_arena->my_num_workers_allotted < my_arena->num_workers_active() ) {
                if ( SchedulerTraits::itt_possible && failure_count != -1 )
                    ITT_NOTIFY(sync_cancel, this);
                return NULL;
            }
        }
#if __TBB_PREVIEW_RESUMABLE_TASKS
        else if ( *my_arena_slot->my_scheduler_is_recalled ) {
            // Return control to the recalled original scheduler.
            if ( my_inbox.is_idle_state(true) )
                my_inbox.set_is_idle(false);
            return NULL;
        }
#endif
#if __TBB_TASK_PRIORITY
        const int p = int(my_arena->my_top_priority);
#else
        static const int p = 0;
#endif
        // Check for tasks mailed to this thread via the task-to-thread affinity mechanism.
        if ( n && !my_inbox.empty() ) {
            t = get_mailbox_task( __TBB_ISOLATION_EXPR( isolation ) );
#if __TBB_TASK_ISOLATION
            if ( isolation != no_isolation && !t && !my_inbox.empty()
                     && my_inbox.is_idle_state( true ) ) {
                // The mailbox holds proxy tasks whose execution is blocked by the
                // current isolation; let them be stolen from the owner's task pool.
                my_inbox.set_is_idle( false );
            }
#endif
        }
        // Check the starvation-resistant stream of enqueued tasks.
        else if ( /* ... */
                 !my_arena->my_task_stream.empty(p) && (
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
                     // ... (CPF lane-selector variant elided)
#else
                     t = my_arena->my_task_stream.pop( p, my_arena_slot->hint_for_pop )
#endif
                     ) ) {
            ITT_NOTIFY(sync_acquired, &my_arena->my_task_stream);
        }
#if __TBB_TASK_PRIORITY
        // Check if earlier offloaded lower-priority tasks have been reloaded.
        else if ( /* ... */ ) {
            __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
        }
#endif
        // ... (otherwise try stealing from a randomly chosen victim)
#if __TBB_PREVIEW_CRITICAL_TASKS
        else if ( /* ... */ ) {
            // ...
            ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
        }
#endif // __TBB_PREVIEW_CRITICAL_TASKS
        else
            goto fail;
        // A task was obtained from one of the sources above.
        // ...
#if __TBB_ARENA_OBSERVER
        my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );
#endif
#if __TBB_SCHEDULER_OBSERVER
        the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );
#endif
        if ( SchedulerTraits::itt_possible && failure_count != -1 ) {
            // ...
        }
        break; // exit the stealing loop and return
fail:
        // Back-off path: the steal attempt failed.
        if( SchedulerTraits::itt_possible && failure_count==-1 ) {
            // ... (notify that the thread has started spinning)
        }
        // Pause, even if we are going to yield, because the yield might return immediately.
        // ...
        const int failure_threshold = 2*int(n+1);
        if( failure_count>=failure_threshold ) {
            // ...
            failure_count = failure_threshold;
            // ... (yield the thread)
#if __TBB_TASK_PRIORITY
            // Check for tasks abandoned by other workers.
            if ( my_arena->my_orphaned_tasks ) {
                // The epoch must be advanced before seizing the list pointer.
                ++my_arena->my_abandonment_epoch;
                task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );
                if ( orphans ) {
                    task** link = NULL;
                    my_local_reload_epoch--;
                    t = reload_tasks( orphans, link, __TBB_ISOLATION_ARG( effective_reference_priority(), isolation ) );
                    if ( orphans ) {
                        *link = my_offloaded_tasks;
                        if ( !my_offloaded_tasks )
                            my_offloaded_task_list_tail_link = link;
                        my_offloaded_tasks = orphans;
                    }
                    __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );
                    if ( t ) {
                        if( SchedulerTraits::itt_possible )
                            ITT_NOTIFY(sync_cancel, this);
                        __TBB_ASSERT( !is_proxy(*t), "The proxy task cannot be offloaded" );
                        break; // exit the stealing loop and return
                    }
                }
            }
#endif
            // Platform-dependent (original #if/#else elided):
            //     const int yield_threshold = 10;
            const int yield_threshold = 100;
            if( yield_count++ >= yield_threshold ) {
                // When a worker thread has nothing to do, return it to RML.
#if __TBB_TASK_PRIORITY
                if( outermost_current_worker_level || my_arena->my_top_priority > my_arena->my_bottom_priority ) {
                    if ( my_arena->is_out_of_work() && outermost_current_worker_level ) {
#else
                    if ( outermost_current_worker_level && my_arena->is_out_of_work() ) {
#endif
                        if( SchedulerTraits::itt_possible )
                            ITT_NOTIFY(sync_cancel, this);
                        return NULL;
                    }
#if __TBB_TASK_PRIORITY
                }
                if ( my_offloaded_tasks ) {
                    // ...
                    my_local_reload_epoch--;
                    // Break a possible deadlock caused by a higher-priority dispatch
                    // loop occupying the arena slot.
                    if ( !outermost_worker_level && *my_ref_top_priority > my_arena->my_top_priority ) {
                        // ...
                        my_ref_top_priority = &my_arena->my_top_priority;
                        // Only outermost workers are expected to use the global reload epoch.
                        __TBB_ASSERT(my_ref_reload_epoch == &my_arena->my_reload_epoch, NULL);
                    }
                }
#endif
            }
            // Re-read the number of available slots, which may have changed.
            n = my_arena->my_limit-1;
        }
    } // end of the stealing loop
    if ( my_inbox.is_idle_state( true ) )
        my_inbox.set_is_idle( false );
    // ...
    return t;
}
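// ---------------------------------------------------------------------------
// Illustrative sketch, not part of custom_scheduler.h: the shape of the
// back-off used by receive_or_steal_task() above, with the same clamped
// failure_threshold = 2*(n+1) and a yield budget. The names try_steal and
// steal_with_backoff are hypothetical; the real code pauses with
// prolonged_pause() and yields with __TBB_Yield().
// ---------------------------------------------------------------------------
#include <cstddef>
#include <thread>

template<typename TryStealFn>
void* steal_with_backoff( TryStealFn try_steal, std::size_t n_victims ) {
    const int failure_threshold = 2 * static_cast<int>( n_victims + 1 );
    const int yield_threshold = 100; // cf. the 10/100 platform split above
    int yield_count = 0;
    for ( int failure_count = 0; ; ++failure_count ) {
        if ( void* t = try_steal() )
            return t; // success: leave the loop immediately
        if ( failure_count >= failure_threshold ) {
            failure_count = failure_threshold; // clamp, as in the real loop
            std::this_thread::yield();         // stand-in for __TBB_Yield()
            if ( yield_count++ >= yield_threshold )
                return nullptr; // give up: let the caller block or terminate
        }
    }
}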
//! Implements the bypass loop of the dispatch loop (local_wait_for_all).
/** Returns false to leave the dispatch loop. Elided lines are marked "// ...". */
template<typename SchedulerTraits>
bool custom_scheduler<SchedulerTraits>::process_bypass_loop(
        context_guard_helper<SchedulerTraits::itt_possible>& context_guard,
        __TBB_ISOLATION_ARG( task* t, isolation_tag isolation ) )
{
    while ( t ) {
        // ...
#if __TBB_TASK_ISOLATION
        __TBB_ASSERT_EX( isolation == no_isolation || isolation == t->prefix().isolation,
            "A task from another isolated region is going to be executed" );
#endif
        // ...
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT
        // ...
#endif
        assert_task_pool_valid();
#if __TBB_PREVIEW_CRITICAL_TASKS
        // If a critical task is available, it preempts the bypassed task:
        // ...
                __TBB_ASSERT( internal::is_critical(*critical_task),
                              "Received task must be critical one" );
                ITT_NOTIFY(sync_acquired, &my_arena->my_critical_task_stream);
                // ...
                my_innermost_running_task = t; // required during spawn to propagate isolation
                // ... (spawn t and switch to the critical task)
#endif
#if __TBB_TASK_PRIORITY
        intptr_t p = priority(*t);
        if ( p != *my_ref_top_priority /* ... */ ) {
            assert_priority_valid(p);
            if ( p != my_arena->my_top_priority ) {
                my_market->update_arena_priority( *my_arena, p );
            }
            if ( p < effective_reference_priority() ) {
                if ( !my_offloaded_tasks ) {
                    // ...
                    *my_offloaded_task_list_tail_link = NULL;
                }
                offload_task( *t, p );
                if ( is_task_pool_published() ) {
                    // ...
                }
                // ...
            }
        }
#endif /* __TBB_TASK_PRIORITY */
        // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
        // ...
#endif
        task* t_next = NULL;
        my_innermost_running_task = t;
        // ...
#if __TBB_TASK_GROUP_CONTEXT
        // ... (skip execution if t's context has been cancelled)
#endif
        {
            GATHER_STATISTIC( my_counters.avg_arena_concurrency += my_arena->num_workers_active() );
            GATHER_STATISTIC( my_counters.avg_assigned_workers += my_arena->my_num_workers_allotted );
#if __TBB_TASK_PRIORITY
            // ...
            GATHER_STATISTIC( my_counters.avg_market_prio += my_market->my_global_top_priority );
#endif
            ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller);
            t_next = t->execute();
            ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller);
            if (t_next) {
                __TBB_ASSERT( t_next->state()==task::allocated,
                              "if task::execute() returns task, it must be marked as allocated" );
                // ...
                affinity_id next_affinity = t_next->prefix().affinity;
                if (next_affinity != 0 && next_affinity != my_affinity_id)
                    /* ... */;
            }
        }
        assert_task_pool_valid();
        switch( t->state() ) {
            case task::executing:
                // ... (destroy t and notify its successor)
                free_task<no_hint>( *t );
                // ...
                assert_task_pool_valid();
                break;
            case task::recycle: // set by recycle_as_safe_continuation()
                // ...
                __TBB_fallthrough;
#if __TBB_RECYCLE_TO_ENQUEUE
            case task::to_enqueue: // set by recycle_to_enqueue()
#endif
                __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
                reset_extra_state(t);
                // ...
                assert_task_pool_valid();
                break;
            case task::reexecute: // set by recycle_to_reexecute()
                __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" );
                __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
                reset_extra_state(t);
                local_spawn( t, t->prefix().next );
                assert_task_pool_valid();
                break;
            // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
            case task::to_resume:
                // ...
                free_task<no_hint>(*t);
                __TBB_ASSERT(!my_properties.genuine && my_properties.outermost,
                    "Only a coroutine on outermost level can be left.");
                // ... (leave the dispatch loop)
#endif
            // ...
                __TBB_ASSERT( false,
                    "task is in READY state upon return from method execute()" );
            // ...
        }
        t = t_next;
    } // end of the bypass loop
    // ...
}
//! Scheduler loop that dispatches tasks.
/** If child is non-NULL, it is dispatched first. Then, until "parent" has a
    reference count of 1, other tasks are dispatched or stolen.
    Elided lines are marked "// ...". */
template<typename SchedulerTraits>
void custom_scheduler<SchedulerTraits>::local_wait_for_all( task& parent, task* child ) {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
    assert_task_pool_valid();
    // ...
    if( SchedulerTraits::itt_possible )
        /* ... */;
    task* t = child;
    context_guard_helper<SchedulerTraits::itt_possible> context_guard;
    // Remember the dispatch state so it can be restored on exit from this
    // (possibly nested) dispatch loop.
    task* old_innermost_running_task = my_innermost_running_task;
    scheduler_properties old_properties = my_properties;
    // ...
    bool cleanup = !is_worker() && &parent==my_dummy_task;
    // ...
    __TBB_ASSERT(my_properties.outermost || my_innermost_running_task!=my_dummy_task,
        "The outermost property should be set out of a dispatch loop");
    my_properties.outermost &= my_innermost_running_task==my_dummy_task;
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_properties.has_taken_critical_task |= is_critical(*my_innermost_running_task);
#endif
#if __TBB_TASK_PRIORITY
    // ...
    volatile intptr_t *old_ref_top_priority = my_ref_top_priority;
    // ...
    volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;
    if ( !outermost_level() ) {
        // Nested dispatch loops track the priority of the parent's context.
        // ...
        my_ref_top_priority = &parent.prefix().context->my_priority;
        my_ref_reload_epoch = &my_arena->my_reload_epoch;
        if (my_ref_reload_epoch != old_ref_reload_epoch)
            my_local_reload_epoch = *my_ref_reload_epoch - 1;
    }
#endif
#if __TBB_TASK_ISOLATION
    isolation_tag isolation = my_innermost_running_task->prefix().isolation;
#endif
    // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // ...
    tbb::atomic<bool> recall_flag;
    // ...
    if (outermost_level() && my_wait_task == NULL && my_properties.genuine) {
        // Register this scheduler as the recall target for its arena slot.
        __TBB_ASSERT(my_arena_slot->my_scheduler == this, NULL);
        __TBB_ASSERT(my_arena_slot->my_scheduler_is_recalled == NULL, NULL);
        my_arena_slot->my_scheduler_is_recalled = &recall_flag;
        my_current_is_recalled = &recall_flag;
    }
    __TBB_ASSERT(my_arena_slot->my_scheduler_is_recalled != NULL, NULL);
    task* old_wait_task = my_wait_task;
    // ...
#endif
#if TBB_USE_EXCEPTIONS
    // ...
    try {
#endif
        // Outer loop: receives tasks from the environment (mailbox, FIFO
        // streams, stealing); inner iterations drain the local task pool and
        // the chain of bypassed tasks.
        for(;;) {
            // ...
            if ( !process_bypass_loop( context_guard, __TBB_ISOLATION_ARG(t, isolation) ) ) {
#if __TBB_PREVIEW_RESUMABLE_TASKS
                my_innermost_running_task = old_innermost_running_task;
                my_properties = old_properties;
                my_wait_task = old_wait_task;
#endif
                return;
            }
            // Check the "normal" exit condition: the parent's work is done.
            if ( parent.prefix().ref_count == 1 ) {
                // ... (leave the dispatch loop)
            }
#if __TBB_PREVIEW_RESUMABLE_TASKS
            if ( &recall_flag != my_arena_slot->my_scheduler_is_recalled ) {
                __TBB_ASSERT( my_arena_slot->my_scheduler_is_recalled != NULL, "A broken recall flag" );
                if ( *my_arena_slot->my_scheduler_is_recalled ) {
                    if ( !resume_original_scheduler() ) {
                        // The current coroutine is left behind; unwind this frame.
                        __TBB_ASSERT( !my_properties.genuine && my_properties.outermost,
                            "Only a coroutine on outermost level can be left." );
                        my_innermost_running_task = old_innermost_running_task;
                        my_properties = old_properties;
                        my_wait_task = old_wait_task;
                        return;
                    }
                }
            }
#endif
            // Retrieve a task from the local task pool.
            __TBB_ASSERT( is_task_pool_published() || is_quiescent_local_task_pool_reset(), NULL );
            // ...
            assert_task_pool_valid();
            // ... (if the pool is empty, fall back to receive_or_steal_task)
        }
        // Exit paths below are reached when the dispatch loop ends.
#if __TBB_HOARD_NONLOCAL_TASKS
        // Free the hoarded nonlocal small tasks.
        for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {
            t = my_nonlocal_free_list->prefix().next;
            free_nonlocal_small_task( *my_nonlocal_free_list );
        }
#endif
        if ( cleanup ) {
            __TBB_ASSERT( !is_task_pool_published() && is_quiescent_local_task_pool_reset(), NULL );
            // ...
            my_innermost_running_task = old_innermost_running_task;
            my_properties = old_properties;
#if __TBB_TASK_PRIORITY
            my_ref_top_priority = old_ref_top_priority;
            if(my_ref_reload_epoch != old_ref_reload_epoch)
                my_local_reload_epoch = *old_ref_reload_epoch-1;
            my_ref_reload_epoch = old_ref_reload_epoch;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
            if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
                // ...
                tbb::task::suspend(recall_functor(&recall_flag));
                if (my_inbox.is_idle_state(true))
                    my_inbox.set_is_idle(false);
            }
            __TBB_ASSERT(&recall_flag == my_arena_slot->my_scheduler_is_recalled, NULL);
            __TBB_ASSERT(!(my_wait_task->prefix().ref_count & internal::abandon_flag), NULL);
            my_wait_task = old_wait_task;
#endif
            return;
        }
        // ...
#if __TBB_PREVIEW_RESUMABLE_TASKS
        if ( *my_arena_slot->my_scheduler_is_recalled )
            /* ... */;
        if ( &recall_flag == my_arena_slot->my_scheduler_is_recalled || old_wait_task != NULL )
            /* ... */;
        // Suspend this coroutine until the thread is recalled.
        tbb::task::suspend( recall_functor(&recall_flag) );
        if ( my_inbox.is_idle_state(true) )
            my_inbox.set_is_idle(false);
        // ...
#endif
#if TBB_USE_EXCEPTIONS
    } // end of try-block
    // TbbCatchAll expands to the catch clauses for the try-block above.
    TbbCatchAll( my_innermost_running_task->prefix().context );
    t = my_innermost_running_task;
    // ...
#endif
    // If t was recycled while the exception unwound, it must be processed again.
    if ( /* ... */
#if __TBB_RECYCLE_TO_ENQUEUE
        ||  t->state() == task::to_enqueue
#endif
        ) {
        // ...
        if( SchedulerTraits::itt_possible )
            /* ... */;
        // ... (wait for t's ref_count to drop)
            if( SchedulerTraits::itt_possible )
                ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
        // ...
    }
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_wait_task = old_wait_task;
    if (my_wait_task == NULL) {
        __TBB_ASSERT(outermost_level(), "my_wait_task could be NULL only on outermost level");
        if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
            // ...
            tbb::task::suspend(recall_functor(&recall_flag));
            if (my_inbox.is_idle_state(true))
                my_inbox.set_is_idle(false);
        }
        __TBB_ASSERT(my_arena_slot->my_scheduler == this, NULL);
        my_arena_slot->my_scheduler_is_recalled = NULL;
        my_current_is_recalled = NULL;
    }
#endif
    // Restore the dispatch state saved on entry.
    my_innermost_running_task = old_innermost_running_task;
    my_properties = old_properties;
#if __TBB_TASK_PRIORITY
    my_ref_top_priority = old_ref_top_priority;
    if(my_ref_reload_epoch != old_ref_reload_epoch)
        my_local_reload_epoch = *old_ref_reload_epoch-1;
    my_ref_reload_epoch = old_ref_reload_epoch;
#endif
    if ( !ConcurrentWaitsEnabled(parent) ) {
        if ( parent.prefix().ref_count != 1) {
            // This is a worker that was revoked by the market.
            __TBB_ASSERT( worker_outermost_level(),
                "Worker thread exits nested dispatch loop prematurely" );
            return;
        }
        parent.prefix().ref_count = 0;
    }
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    task_group_context* parent_ctx = parent.prefix().context;
    if ( parent_ctx->my_cancellation_requested ) {
        task_group_context::exception_container_type* pe = parent_ctx->my_exception;
        if ( master_outermost_level() && parent_ctx == default_context() ) {
            // The whole task tree has been collapsed, so the cancellation data
            // can be cleared.
            parent_ctx->my_cancellation_requested = 0;
            // ...
        }
        if ( pe ) {
            // ...
            context_guard.restore_default();
            TbbRethrowException( pe );
        }
    }
    __TBB_ASSERT(!is_worker() || !CancellationInfoPresent(*my_dummy_task),
        "Worker's dummy task context modified");
    __TBB_ASSERT(!master_outermost_level() || !CancellationInfoPresent(*my_dummy_task),
        "Unexpected exception or cancellation data in the master's dummy task");
#endif /* __TBB_TASK_GROUP_CONTEXT */
    assert_task_pool_valid();
}
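// ---------------------------------------------------------------------------
// Illustrative sketch, not part of custom_scheduler.h: the cancellation path
// at the tail of local_wait_for_all() above. Cancelling a task_group_context
// (explicitly here; implicitly when an exception escapes a task) makes the
// dispatch loop skip remaining work; a captured exception would be re-raised
// at the waiting thread via TbbRethrowException. Public API only.
// ---------------------------------------------------------------------------
#include <tbb/parallel_for.h>
#include <tbb/task.h> // task_group_context
#include <cstdio>

int main() {
    tbb::task_group_context ctx;
    tbb::parallel_for( 0, 1000,
        [&]( int i ) {
            if ( i == 42 )
                ctx.cancel_group_execution(); // sets my_cancellation_requested
        },
        ctx );
    std::printf( "cancelled: %d\n", int( ctx.is_group_execution_cancelled() ) );
    return 0;
}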
Symbols referenced in this listing:

#define GATHER_STATISTIC(x)

task_group_context
Used to form groups of tasks.
 
generic_scheduler
Work stealing task scheduler.
 
const isolation_tag no_isolation
 
isolation_tag isolation
The tag used for task isolation.
 
task * next_offloaded
Pointer to the next offloaded lower priority task.
 
static const intptr_t num_priority_levels
 
#define __TBB_ISOLATION_EXPR(isolation)
 
scheduler * owner
Obsolete. The scheduler that owns the task.
 
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
 
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
 
task
Base class for user-defined tasks.
 
virtual task * execute()=0
Should be overridden by derived classes.
 
state_type state() const
Current execution state.
 
unsigned char state
A task::state_type, stored as a byte for compactness.
 
#define ITT_STACK(precond, name, obj)
 
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
 
#define __TBB_fallthrough
 
virtual ~task()
Destructor.
 
void assert_task_valid(const task *)
 
bool ConcurrentWaitsEnabled(task &t)
 
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
 
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
 
task::recycle
Task to be recycled as continuation.
 
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
 
void local_spawn(task *first, task *&next)
 
task::ready
Task is in ready pool, or is going to be put there, or was just taken off.
 
custom_scheduler
A scheduler with a customized evaluation loop.
 
custom_scheduler(market &m, bool genuine)
 
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
 
void enqueue_task(task &, intptr_t, FastRandom &)
Enqueue a task into the starvation-resistant queue (see the sketch at the end of this list).
 
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
 
intptr_t isolation_tag
A tag for task isolation (see the sketch at the end of this list).
 
static const bool has_slow_atomic
 
task::executing
Task is running, and will be destroyed after method execute() completes.
 
#define __TBB_control_consistency_helper()
 
void local_wait_for_all(task &parent, task *child) __TBB_override
Scheduler loop that dispatches tasks.
 
unsigned short affinity_id
An id as used for specifying affinity.
 
bool is_enqueued_task() const
True if the task was enqueued.
 
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
 
#define __TBB_ISOLATION_ARG(arg1, isolation)
 
bool process_bypass_loop(context_guard_helper< SchedulerTraits::itt_possible > &context_guard, __TBB_ISOLATION_ARG(task *t, isolation_tag isolation))
Implements the bypass loop of the dispatch loop (local_wait_for_all).
 
void reset_extra_state(task *t)
 
static const bool itt_possible
 
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
 
static const bool itt_possible
 
custom_scheduler< SchedulerTraits > scheduler_type
 
#define __TBB_FetchAndDecrementWrelease(P)
 
tbb_exception_ptr
Exception container that preserves the exact copy of the original exception.
 
#define ITT_SYNC_CREATE(obj, type, name)
 
Traits classes for scheduler.
 
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
 
tbb::task * next
"next" field for list of task
 
void wait_for_all(task &parent, task *child) __TBB_override
Entry point from client code to the scheduler loop that dispatches tasks.
 
#define ITT_NOTIFY(name, obj)
 
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
 
void poison_pointer(T *__TBB_atomic &)
 
task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation)) __TBB_override
Try getting a task from the mailbox or stealing from another scheduler.
 
void tally_completion_of_predecessor(task &s, __TBB_ISOLATION_ARG(task *&bypass_slot, isolation_tag isolation))
Decrements ref_count of a predecessor.
 
es_ref_count_active
Set if ref_count might be changed by another thread. Used for debugging.
 
intptr_t reference_count
A reference count.
 
bool is_critical(task &t)
 
static const bool has_slow_atomic
 
task::allocated
Task object is freshly allocated or recycled.
 
task_prefix
Memory prefix to a task object.
 
scheduler_properties
Bit-field representing properties of a scheduler.
 
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
 
static generic_scheduler * allocate_scheduler(market &m, bool genuine)
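
Two minimal sketches for the entries above (illustrative, public API only, not part of custom_scheduler.h).

Enqueued tasks go through the starvation-resistant queue drained by the my_task_stream checks in receive_or_steal_task. Classic (pre-oneTBB) tbb::task API:

#include <tbb/task.h>

struct BackgroundTask : tbb::task {
    tbb::task* execute() override {
        // ... long-running background work ...
        return NULL;
    }
};

void submit_background_work() {
    // Unlike spawn(), enqueue() guarantees eventual execution even if
    // no thread ever blocks in wait_for_all().
    tbb::task::enqueue( *new( tbb::task::allocate_root() ) BackgroundTask );
}

Task isolation gives nested work a distinct isolation_tag, so a thread blocked inside the isolated region will not steal unrelated outer-level tasks (the isolation checks seen in receive_or_steal_task):

#include <tbb/parallel_for.h>
#include <tbb/task_arena.h> // this_task_arena::isolate

void nested_work() {
    tbb::this_task_arena::isolate( [] {
        tbb::parallel_for( 0, 100, []( int ) { /* ... */ } );
    } );
}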