#if __TBB_NUMA_SUPPORT
class numa_binding_observer : public task_scheduler_observer {
    int my_numa_node_id;
    binding_handler* binding_handler_ptr;
public:
    numa_binding_observer( task_arena* ta, int numa_id, int num_slots )
        : task_scheduler_observer(*ta)
        , my_numa_node_id(numa_id)
        , binding_handler_ptr(tbb::internal::construct_binding_handler(num_slots))
    {}

    void on_scheduler_entry( bool ) __TBB_override {
        tbb::internal::bind_thread_to_node( /* ... */ );
    }

    // ...

    ~numa_binding_observer(){
        tbb::internal::destroy_binding_handler(binding_handler_ptr);
    }
};
numa_binding_observer* construct_binding_observer( task_arena* ta, int numa_id, int num_slots ) {
    numa_binding_observer* binding_observer = NULL;
    if (numa_id >= 0 && numa_topology::nodes_count() > 1) {
        binding_observer = new numa_binding_observer(ta, numa_id, num_slots);
        __TBB_ASSERT(binding_observer, "Failure during NUMA binding observer allocation and construction");
        binding_observer->observe(true);
    }
    return binding_observer;
}
void destroy_binding_observer( numa_binding_observer* binding_observer ) {
    __TBB_ASSERT(binding_observer, "Trying to deallocate NULL pointer");
    binding_observer->observe(false);
    delete binding_observer;
}
#endif /* __TBB_NUMA_SUPPORT */
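The pair above follows TBB's observer idiom: construct, call observe(true) to start receiving entry/exit callbacks, and call observe(false) before destruction. A minimal standalone sketch of the same idiom using only the public task_scheduler_observer API, assuming a TBB version where the arena-bound observer constructor is available (a preview feature in older TBB, official in oneTBB); pin_current_thread() is a hypothetical placeholder for the platform affinity calls that the real binding_handler encapsulates:

#include <tbb/task_scheduler_observer.h>
#include <tbb/task_arena.h>

// Sketch only: stands in for pthread_setaffinity_np / SetThreadGroupAffinity etc.
static void pin_current_thread() { /* platform-specific affinity call */ }

class pinning_observer : public tbb::task_scheduler_observer {
public:
    explicit pinning_observer( tbb::task_arena& a )
        : tbb::task_scheduler_observer(a) {
        observe(true);               // start callbacks, as construct_binding_observer does
    }
    void on_scheduler_entry( bool /*is_worker*/ ) override {
        pin_current_thread();        // every thread joining the arena gets pinned
    }
    void on_scheduler_exit( bool /*is_worker*/ ) override {
        /* restore the previous affinity here */
    }
    ~pinning_observer() { observe(false); }  // mirrors destroy_binding_observer
};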
void generic_scheduler::attach_arena( arena* a, size_t index, bool is_master ) {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    // ...
#endif
#if __TBB_TASK_PRIORITY
    if ( is_master ) {
        my_ref_top_priority = &a->my_top_priority;
        my_ref_reload_epoch = &a->my_reload_epoch;
    }
    my_local_reload_epoch = *my_ref_reload_epoch;
#endif
}
static bool occupy_slot( generic_scheduler*& slot, generic_scheduler& s ) {
    return !slot && as_atomic( slot ).compare_and_swap( &s, NULL ) == NULL;
}
size_t arena::occupy_free_slot_in_range( generic_scheduler& s, size_t lower, size_t upper ) {
    if ( lower >= upper ) return out_of_arena;
    // Start the search from the slot this scheduler occupied last time.
    size_t index = s.my_arena_index;
    if ( index < lower || index >= upper ) index = s.my_random.get() % (upper - lower) + lower;
    __TBB_ASSERT( index >= lower && index < upper, NULL );
    // Scan for a free slot, wrapping around within [lower, upper).
    for ( size_t i = index; i < upper; ++i )
        if ( occupy_slot( my_slots[i].my_scheduler, s ) ) return i;
    for ( size_t i = lower; i < index; ++i )
        if ( occupy_slot( my_slots[i].my_scheduler, s ) ) return i;
    return out_of_arena;
}
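occupy_slot claims a slot with a single compare-and-swap: the slot is free only while its scheduler pointer is NULL, and the CAS both checks and publishes ownership in one atomic step (a test-and-test-and-set). A standalone sketch of the same claim idiom with std::atomic; the names are illustrative, not TBB's:

#include <atomic>

struct Slot { std::atomic<void*> owner{nullptr}; };

// Returns true if this thread atomically claimed the slot.
// The initial relaxed load is the cheap "test" before the "test-and-set".
bool try_claim(Slot& slot, void* me) {
    if (slot.owner.load(std::memory_order_relaxed) != nullptr)
        return false;                       // fast path: visibly occupied
    void* expected = nullptr;
    return slot.owner.compare_exchange_strong(
        expected, me, std::memory_order_acquire, std::memory_order_relaxed);
}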
template <bool as_worker>
size_t arena::occupy_free_slot( generic_scheduler& s ) {
    // Masters first try the reserved slots, then all threads compete
    // for the non-reserved ones.
    // ...
}

void arena::process( generic_scheduler& s ) {
    // ...
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
    // ...
    size_t index = occupy_free_slot</*as_worker*/true>( s );
    if ( index == out_of_arena )
        goto quit;
    __TBB_ASSERT( index >= my_num_reserved_slots, "Workers cannot occupy reserved slots" );
    s.attach_arena( this, index, /*is_master*/false );

#if !__TBB_FP_CONTEXT
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    __TBB_ASSERT( !s.my_last_local_observer,
                  "There cannot be notified local observers when entering arena" );
    my_observers.notify_entry_observers( s.my_last_local_observer, /*worker=*/true );
#endif
    // Process tasks left in the slot by its previous owner.
    // ...
    s.local_wait_for_all( *s.my_dummy_task, NULL );
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
    // ...
    __TBB_ASSERT( s.my_arena_slot->task_pool == EmptyTaskPool,
                  "Worker cannot leave arena while its task pool is not reset" );
    // Dispatch loop: pick up enqueued tasks and process them.
    // ...
    s.my_innermost_running_task = s.my_dummy_task;
    s.local_wait_for_all( *s.my_dummy_task, t );
    // ...
#if __TBB_ARENA_OBSERVER
    my_observers.notify_exit_observers( s.my_last_local_observer, /*worker=*/true );
    s.my_last_local_observer = NULL;
#endif
#if __TBB_TASK_PRIORITY
    if ( s.my_offloaded_tasks )
        orphan_offloaded_tasks( s );
#endif
#if __TBB_STATISTICS
    ++s.my_counters.arena_roundtrips;
    *my_slots[index].my_counters += s.my_counters;
    s.my_counters.reset();
#endif
    // ...
quit:
    __TBB_ASSERT( s.my_innermost_running_task == s.my_dummy_task, NULL );
    // ...
    on_thread_leaving<ref_worker>();
}
arena::arena ( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    __TBB_ASSERT( !my_guard, "improperly allocated arena?" );
    // ...
#if __TBB_TASK_PRIORITY
    __TBB_ASSERT( !my_reload_epoch && !my_orphaned_tasks && !my_skipped_fifo_priority,
                  "New arena object is not zeroed" );
#endif
    // ...
#if __TBB_TASK_PRIORITY
    my_bottom_priority = my_top_priority = normalized_normal_priority;
#endif
#if __TBB_ARENA_OBSERVER
    my_observers.my_arena = this;
#endif
#if __TBB_PREVIEW_RESUMABLE_TASKS
    my_co_cache.init(4 * num_slots);
#endif
    // Construct the slots; each one gets cache-aligned statistics counters.
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        // ...
#if __TBB_STATISTICS
        my_slots[i].my_counters = new ( NFS_Allocate(1, sizeof(statistics_counters), NULL) ) statistics_counters;
#endif
    }
#if __TBB_PREVIEW_CRITICAL_TASKS
    my_critical_task_stream.initialize(my_num_slots);
    ITT_SYNC_CREATE(&my_critical_task_stream, SyncType_Scheduler, SyncObj_CriticalTaskStream);
#endif
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    my_local_concurrency_mode = false;
    my_global_concurrency_mode = false;
#endif
#if !__TBB_FP_CONTEXT
    // ...
#endif
}

arena& arena::allocate_arena( market& m, unsigned num_slots, unsigned num_reserved_slots ) {
    // ...
    unsigned char* storage = (unsigned char*)NFS_Allocate( 1, n, NULL );
    // Zero the storage so that all slots start out empty.
    memset( storage, 0, n );
    // ...
}
void arena::free_arena () {
    // ...
    intptr_t drained = 0;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
        // ...
    }
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // The cache of coroutine schedulers must be cleaned up before the arena goes away.
    my_co_cache.cleanup();
#endif
#if __TBB_PREVIEW_CRITICAL_TASKS
    __TBB_ASSERT( my_critical_task_stream.drain()==0, "Not all critical tasks were executed");
#endif
#if __TBB_COUNT_TASK_NODES
    my_market->update_task_node_count( -drained );
#endif
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( my_default_ctx, "Master thread never entered the arena?" );
    my_default_ctx->~task_group_context();
    // ...
#endif
#if __TBB_ARENA_OBSERVER
    if ( !my_observers.empty() )
        my_observers.clear();
#endif
    // ...
}
#if __TBB_STATISTICS
void arena::dump_arena_statistics () {
    statistics_counters total;
    for ( unsigned i = 0; i < my_num_slots; ++i ) {
#if __TBB_STATISTICS_EARLY_DUMP
        generic_scheduler* s = my_slots[i].my_scheduler;
        if ( s )
            *my_slots[i].my_counters += s->my_counters;
#endif
        if ( i != 0 ) {
            total += *my_slots[i].my_counters;
            dump_statistics( *my_slots[i].my_counters, i );
        }
    }
    dump_statistics( *my_slots[0].my_counters, 0 );
#if __TBB_STATISTICS_STDOUT
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "----------------------------------------------\n" );
#endif
    dump_statistics( total, workers_counters_total );
    total += *my_slots[0].my_counters;
    dump_statistics( total, arena_counters_total );
#if !__TBB_STATISTICS_TOTALS_ONLY
    printf( "==============================================\n" );
#endif
#endif /* __TBB_STATISTICS_STDOUT */
}
#endif /* __TBB_STATISTICS */
#if __TBB_TASK_PRIORITY
inline bool arena::may_have_tasks ( generic_scheduler* s, bool& tasks_present, bool& dequeuing_possible ) {
    if ( !s || s->my_arena != this )
        return false;
    dequeuing_possible |= s->worker_outermost_level();
    if ( s->my_pool_reshuffling_pending ) {
        // This primary task pool is nonempty and may contain tasks at the current
        // priority level. Its owner is winnowing lower priority tasks at the moment.
        tasks_present = true;
        return true;
    }
    if ( s->my_offloaded_tasks ) {
        tasks_present = true;
        if ( s->my_local_reload_epoch < *s->my_ref_reload_epoch ) {
            // This scheduler's offload area may contain tasks at the current
            // priority level, and it has not caught up with the priority change yet.
            return true;
        }
    }
    return false;
}
void arena::orphan_offloaded_tasks(generic_scheduler& s) {
    __TBB_ASSERT( s.my_offloaded_tasks, NULL );
    ++my_abandonment_epoch;
    __TBB_ASSERT( s.my_offloaded_task_list_tail_link && !*s.my_offloaded_task_list_tail_link, NULL );
    task* orphans;
    do {
        orphans = const_cast<task*>(my_orphaned_tasks);
        // Link this scheduler's offloaded task list in front of the shared orphan list.
        *s.my_offloaded_task_list_tail_link = orphans;
    } while ( as_atomic(my_orphaned_tasks).compare_and_swap(s.my_offloaded_tasks, orphans) != orphans );
    s.my_offloaded_tasks = NULL;
#if TBB_USE_ASSERT
    s.my_offloaded_task_list_tail_link = NULL;
#endif
}
#endif /* __TBB_TASK_PRIORITY */
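The do/while above is the classic lock-free list splice: a privately owned chain of tasks is linked in front of the shared orphan list, and the CAS retries whenever the head changed between the read and the swap. A standalone sketch of the same idiom with std::atomic; the Node type is a simplified stand-in for TBB's task list:

#include <atomic>

struct Node { Node* next; };

// Splice a privately owned chain [first..last] onto a shared LIFO list head.
// 'last->next' plays the role of *my_offloaded_task_list_tail_link above.
void publish_chain(std::atomic<Node*>& head, Node* first, Node* last) {
    Node* old_head = head.load(std::memory_order_relaxed);
    do {
        last->next = old_head;  // point our tail at the current shared head
    } while (!head.compare_exchange_weak(old_head, first,
                                         std::memory_order_release,
                                         std::memory_order_relaxed));
}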
void arena::restore_priority_if_need() {
    // If enqueued tasks are found, restore the arena priority and task presence status.
    if ( has_enqueued_tasks() ) {
        advertise_new_work<work_enqueued>();
#if __TBB_TASK_PRIORITY
        // ... for each priority level p that holds enqueued tasks:
        if ( p < my_bottom_priority || p > my_top_priority )
            my_market->update_arena_priority( *this, p );
#endif
    }
}
bool arena::is_out_of_work() {
    // ...
#if __TBB_TASK_PRIORITY
    intptr_t top_priority = my_top_priority;
    uintptr_t reload_epoch = my_reload_epoch;
    // ...
#endif
    // Scan the task pools of all n slots.
    for ( k = 0; k < n; ++k ) {
        // ...
    }
    bool work_absent = k == n;
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool no_critical_tasks = my_critical_task_stream.empty(0);
    work_absent &= no_critical_tasks;
#endif
#if __TBB_TASK_PRIORITY
    // tasks_present indicates the presence of tasks at any priority level,
    // while work_absent refers only to the current priority.
    bool tasks_present = !work_absent || my_orphaned_tasks;
    bool dequeuing_possible = false;
    if ( work_absent ) {
        // Check whether recent priority changes brought tasks to the current level.
        uintptr_t abandonment_epoch = my_abandonment_epoch;
        // The master's scheduler is checked under the lock to avoid a race with its destruction.
        the_context_state_propagation_mutex.lock();
        work_absent = !may_have_tasks( my_slots[0].my_scheduler, tasks_present, dequeuing_possible );
        the_context_state_propagation_mutex.unlock();
        for ( k = 1; work_absent && k < n; ++k ) {
            // ...
            work_absent = !may_have_tasks( my_slots[k].my_scheduler, tasks_present, dequeuing_possible );
        }
        work_absent = work_absent
                      // ...
                      && abandonment_epoch == my_abandonment_epoch;
    }
#endif /* __TBB_TASK_PRIORITY */
    // Test and test-and-set on my_pool_state.
    if ( my_pool_state == SNAPSHOT_FULL ) {
        bool no_fifo_tasks = my_task_stream.empty(top_priority);
#if __TBB_TASK_PRIORITY
        work_absent = work_absent && (!dequeuing_possible || no_fifo_tasks)
                      && top_priority == my_top_priority && reload_epoch == my_reload_epoch;
#else
        work_absent = work_absent && no_fifo_tasks;
#endif
        if ( work_absent ) {
#if __TBB_TASK_PRIORITY
            if ( top_priority > my_bottom_priority ) {
                if ( my_market->lower_arena_priority(*this, top_priority - 1, reload_epoch)
                     /* ... */ ) {
                    atomic_update( my_skipped_fifo_priority, top_priority, std::less<intptr_t>() );
                }
            }
            else if ( !tasks_present && !my_orphaned_tasks && no_fifo_tasks ) {
                // ...
            }
#endif /* __TBB_TASK_PRIORITY */
        }
    }
    // ...
}
#if __TBB_COUNT_TASK_NODES
intptr_t arena::workers_task_node_count() {
    intptr_t result = 0;
    for ( unsigned i = 1; i < my_num_slots; ++i )
        if ( generic_scheduler* s = my_slots[i].my_scheduler )
            result += s->my_task_node_count;
    return result;
}
#endif
void arena::enqueue_task( task& t, intptr_t prio, FastRandom &random )
{
#if __TBB_RECYCLE_TO_ENQUEUE
    // ...
#endif
    if ( reference_count ref_count = t.prefix().parent ? t.prefix().parent->prefix().ref_count : 0 ) {
        __TBB_ASSERT( ref_count!=0,
            "attempt to enqueue task whose parent has a ref_count==0 (forgot to set_ref_count?)" );
        __TBB_ASSERT( ref_count>0, "attempt to enqueue task whose parent has a ref_count<0" );
    }
    // ...
#if __TBB_PREVIEW_CRITICAL_TASKS
    bool is_critical = internal::is_critical( t );
    if ( is_critical ) {
        // ...
        generic_scheduler* s = governor::local_scheduler_if_initialized();
        if ( s && s->my_arena_slot ) {
            // ...
#if __TBB_TASK_ISOLATION
            // ...
#endif
            unsigned& lane = s->my_arena_slot->hint_for_critical;
            // ... push the task into my_critical_task_stream via this lane hint
        }
        // ...
        advertise_new_work<work_spawned>();
        return;
    }
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
#if __TBB_TASK_PRIORITY
    intptr_t p = prio ? normalize_priority(priority_t(prio)) : normalized_normal_priority;
    assert_priority_valid(p);
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
    my_task_stream.push( &t, p, random );
    advertise_new_work<work_enqueued>();
    if ( p != my_top_priority )
        my_market->update_arena_priority( *this, p );
#else /* !__TBB_TASK_PRIORITY */
    __TBB_ASSERT_EX( prio == 0, "the library is not configured to respect the task priority" );
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
    // ...
#endif
    my_task_stream.push( &t, 0, random );
    advertise_new_work<work_enqueued>();
#endif /* !__TBB_TASK_PRIORITY */
}
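enqueue_task is the internal endpoint behind the public task_arena::enqueue. A minimal usage sketch of that public API, using nothing but documented calls:

#include <tbb/task_arena.h>
#include <cstdio>

int main() {
    tbb::task_arena arena(4);   // arena with a concurrency limit of 4
    // enqueue() submits fire-and-forget work into the arena's FIFO stream;
    // unlike execute(), the caller does not wait for completion.
    arena.enqueue([]{ std::printf("runs inside the arena\n"); });
    // execute() joins the arena and runs the functor synchronously.
    arena.execute([]{ std::printf("caller participates here\n"); });
    // NB: a real program should synchronize with enqueued work before exiting.
    return 0;
}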
nested_arena_context::nested_arena_context( generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same )
{
#if __TBB_PREVIEW_RESUMABLE_TASKS
    // ...
#endif
    s->nested_arena_entry(a, slot_index);
}

void generic_scheduler::nested_arena_entry( arena* a, size_t slot_index ) {
    // ...
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
    my_offloaded_tasks = NULL;
#endif
    // ...
#if __TBB_ARENA_OBSERVER
    my_last_local_observer = 0;
    my_arena->my_observers.notify_entry_observers( my_last_local_observer, /*worker=*/false );
#endif
}

void generic_scheduler::nested_arena_exit() {
#if __TBB_ARENA_OBSERVER
    my_arena->my_observers.notify_exit_observers( my_last_local_observer, /*worker=*/false );
#endif
#if __TBB_TASK_PRIORITY
    if ( my_offloaded_tasks )
        my_arena->orphan_offloaded_tasks( *this );
#endif
    // ...
}
#if __TBB_PREVIEW_RESUMABLE_TASKS
class resume_task : public task {
    generic_scheduler& my_target;
public:
    resume_task( generic_scheduler& target ) : my_target(target) { /* ... */ }
    task* execute() __TBB_override {
        generic_scheduler* s = governor::local_scheduler_if_initialized();
        __TBB_ASSERT( s, NULL );
        if ( s->prepare_resume(my_target) ) {
            s->resume(my_target);
        } else {
            // Mark the task so that the target is resumed later.
            prefix().state = task::to_resume;
        }
        return NULL;
    }
};

static generic_scheduler& create_coroutine( generic_scheduler& curr ) {
    // Reuse a cached coroutine scheduler if one is available.
    generic_scheduler* co_sched = curr.my_arena->my_co_cache.pop();
    if ( !co_sched ) {
        // ...
    }
    // Prepare the scheduler to work in the same arena.
    co_sched->my_arena = curr.my_arena;
    // ...
    co_sched->my_dummy_task->prefix().context = co_sched->my_arena->my_default_ctx;
    // ...
    return *co_sched;
}

void internal_suspend( void* suspend_callback, void* user_callback ) {
    generic_scheduler& s = *governor::local_scheduler();
    __TBB_ASSERT( s.my_arena_slot->my_scheduler_is_recalled != NULL, NULL );
    bool is_recalled = *s.my_arena_slot->my_scheduler_is_recalled;
    generic_scheduler& target = is_recalled ? *s.my_arena_slot->my_scheduler : create_coroutine(s);

    generic_scheduler::callback_t callback = {
        (generic_scheduler::suspend_callback_t)suspend_callback, user_callback, &s };
    target.set_post_resume_action(generic_scheduler::PRA_CALLBACK, &callback);
    // ...
}

void internal_resume( task::suspend_point tag ) {
    generic_scheduler& s = *static_cast<generic_scheduler*>(tag);
    task* t = new( &s.allocate_task( sizeof(resume_task),
        __TBB_CONTEXT_ARG(NULL, s.my_dummy_task->context()) ) ) resume_task(s);
    // ...
    arena& a = *s.my_arena;
    // ... enqueue t so that a thread in the arena picks it up and resumes the target
}

task::suspend_point internal_current_suspend_point() {
    // ...
}
#endif /* __TBB_PREVIEW_RESUMABLE_TASKS */
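internal_suspend and internal_resume back the preview tbb::task::suspend API (__TBB_PREVIEW_RESUMABLE_TASKS). A usage sketch of that preview API, assuming the preview macro is enabled in the build; a detached thread stands in for an asynchronous completion source:

#define TBB_PREVIEW_RESUMABLE_TASKS 1
#include <tbb/task.h>
#include <thread>

void async_step() {
    // suspend() parks the current task-dispatch loop in a coroutine and
    // hands out a tag; the worker thread stays available to the arena.
    tbb::task::suspend([](tbb::task::suspend_point tag) {
        std::thread([tag]{
            /* ... asynchronous work ... */
            tbb::task::resume(tag);   // re-schedules the suspended dispatch loop
        }).detach();
    });
    // Execution continues here after resume(tag).
}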
namespace interface7 {
namespace internal {

void task_arena_base::internal_initialize() {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    new_arena->my_default_ctx = new ( NFS_Allocate(1, sizeof(task_group_context), NULL) )
        task_group_context( /* ... */ );
#if __TBB_FP_CONTEXT
    new_arena->my_default_ctx->capture_fp_settings();
#endif
#endif
    // ...
    // On failure to publish the new arena, drop the market reference.
    m.release( /*is_public=*/true, /*blocking_terminate=*/false );
    // ...
#if __TBB_NUMA_SUPPORT
    my_arena->my_numa_binding_observer = tbb::internal::construct_binding_observer( /* ... */ );
#endif
}

void task_arena_base::internal_terminate() {
    // ...
#if __TBB_NUMA_SUPPORT
    if ( my_arena->my_numa_binding_observer != NULL ) {
        tbb::internal::destroy_binding_observer(my_arena->my_numa_binding_observer);
        my_arena->my_numa_binding_observer = NULL;
    }
#endif
    // ...
}

void task_arena_base::internal_attach() {
    // ...
    generic_scheduler* s = governor::local_scheduler_if_initialized();
    if ( s && s->my_arena ) {
        // Attach to the arena the calling thread is already in.
        // ...
    }
}
void task_arena_base::internal_enqueue( task& t, intptr_t prio ) const {
    // ...
#if __TBB_TASK_GROUP_CONTEXT
    __TBB_ASSERT( !my_arena->my_default_ctx->is_group_execution_cancelled(),
        "The task will not be executed because default task_group_context of task_arena is cancelled. Has previously enqueued task thrown an exception?" );
#endif
    // ...
}
class delegated_task : public task {
    internal::delegate_base & my_delegate;
    concurrent_monitor & my_monitor;
    task * my_root;
    task* execute() __TBB_override {
        generic_scheduler& s = *(generic_scheduler*)prefix().owner;
        __TBB_ASSERT( s.outermost_level(), "expected to be enqueued and received on the outermost level" );
        struct outermost_context : internal::no_copy {
            delegated_task * t;
            generic_scheduler & s;
            task * orig_dummy;
            task_group_context * orig_ctx;
            scheduler_properties orig_props;
            outermost_context( delegated_task *_t, generic_scheduler &_s )
                : t(_t), s(_s), orig_dummy(s.my_dummy_task), orig_props(s.my_properties) {
#if __TBB_TASK_GROUP_CONTEXT
                orig_ctx = t->prefix().context;
                t->prefix().context = s.my_arena->my_default_ctx;
#endif
                // ... mimic the outermost master level
            }
            ~outermost_context() {
#if __TBB_TASK_GROUP_CONTEXT
                // Restore the context so that a potential exception can be registered.
                t->prefix().context = orig_ctx;
#endif
                s.my_properties = orig_props;
                s.my_dummy_task = orig_dummy;
            }
        } scope(this, s);
        my_delegate();
        return NULL;
    }
    ~delegated_task() {
        // A potential exception has already been registered; the notification must come last.
        task_prefix& prefix = my_root->prefix();
#if __TBB_PREVIEW_RESUMABLE_TASKS
        reference_count old_ref_count = __TBB_FetchAndStoreW(&prefix.ref_count, 1);
        if (old_ref_count == internal::abandon_flag + 2) {
            // The wait was abandoned; wake up the owner by resuming its scheduler.
            tbb::task::resume(prefix.abandoned_scheduler);
        }
#else
        // ...
#endif
        my_monitor.notify(*this);
    }
public:
    delegated_task( internal::delegate_base & d, concurrent_monitor & s, task * t )
        : my_delegate(d), my_monitor(s), my_root(t) {}
    // Predicate for concurrent_monitor::notify: match only waiters on this delegate.
    bool operator()(uintptr_t ctx) const { return (void*)ctx == (void*)&my_delegate; }
};
void task_arena_base::internal_execute( internal::delegate_base& d ) const {
    // ...
    bool same_arena = s->my_arena == my_arena;
    size_t index1 = s->my_arena_index;
    if ( !same_arena ) {
        index1 = my_arena->occupy_free_slot</*as_worker*/false>(*s);
        if ( index1 == arena::out_of_arena ) {
#if __TBB_USE_OPTIONAL_RTTI
            // If the delegate wraps a flow-graph functor, route it through
            // enqueue instead of occupying a slot.
            internal::delegated_function< graph_funct, void >* deleg_funct =
                dynamic_cast< internal::delegated_function< graph_funct, void >* >(&d);
            if ( deleg_funct ) {
                // ... enqueue a function_task built from
                //     internal::forward< graph_funct >(deleg_funct->my_func), priority 0
                return;
            }
#endif /* __TBB_USE_OPTIONAL_RTTI */
            concurrent_monitor::thread_context waiter;
#if __TBB_TASK_GROUP_CONTEXT
            // ... an isolated exec_context; FP settings copied under __TBB_FP_CONTEXT
#endif
            root.prefix().ref_count = 2;
            // ... enqueue delegated_task(d, my_exit_monitors, &root), then wait
            s->local_wait_for_all(root, NULL);
#if TBB_USE_EXCEPTIONS
            // Process a possible exception.
            if ( task_group_context::exception_container_type* pe = exec_context.my_exception )
                TbbRethrowException(pe);
#endif
            return;
        }
    }
    context_guard_helper</*report_tasks=*/false> context_guard;
    // ...
#if TBB_USE_EXCEPTIONS
    try {
#endif
        d();
#if TBB_USE_EXCEPTIONS
    } catch ( ... ) {
        context_guard.restore_default();
        // ... unless exact exception propagation is enabled, wrap the exception:
        exception_container.register_pending_exception();
        TbbRethrowException(exception_container.my_exception);
    }
#endif
    // ...
}
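This delegation machinery is what the public task_arena::execute rides on: the functor runs inside the target arena, its result comes back to the caller, and exceptions propagate out. A small usage sketch of the public API, assuming a TBB version where execute() returns the functor's result:

#include <tbb/task_arena.h>
#include <stdexcept>
#include <cstdio>

int main() {
    tbb::task_arena arena(2);
    // execute() returns the functor's result to the caller.
    int answer = arena.execute([]{ return 6 * 7; });
    std::printf("answer = %d\n", answer);
    // Exceptions thrown inside execute() propagate back out.
    try {
        arena.execute([]{ throw std::runtime_error("propagates to the caller"); });
    } catch (const std::runtime_error& e) {
        std::printf("caught: %s\n", e.what());
    }
    return 0;
}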
class wait_task : public task {
    binary_semaphore & my_signal;
    task* execute() __TBB_override {
        generic_scheduler* s = governor::local_scheduler_if_initialized();
        __TBB_ASSERT( s->outermost_level(), "The enqueued task can be processed only on outermost level" );
        if ( s->is_worker() ) {
            __TBB_ASSERT( s->my_innermost_running_task == this, NULL );
            // Mimic a worker on the outermost level to run the remaining tasks.
            s->my_innermost_running_task = s->my_dummy_task;
            s->local_wait_for_all( *s->my_dummy_task, NULL );
            s->my_innermost_running_task = this;
        } else s->my_arena->is_out_of_work();
        my_signal.V();
        return NULL;
    }
public:
    wait_task ( binary_semaphore & sema ) : my_signal(sema) {}
};

void task_arena_base::internal_wait() const {
    // ...
    __TBB_ASSERT( s->my_arena != my_arena || s->my_arena_index == 0,
        "task_arena::wait_until_empty() is not supported within a worker context" );
    if ( s->my_arena == my_arena ) {
        // The master cannot leave its arena, so it waits out the emptiness in place.
        if ( !s->my_arena_index )
            while ( my_arena->num_workers_active() )
                s->wait_until_empty();
    } else for (;;) {
        // ...
        s->wait_until_empty();
        // ...
        binary_semaphore waiter;
        // ... enqueue a wait_task(waiter) and block on waiter.P()
    }
}
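wait_task just bounces a binary semaphore once it has run on the outermost level. A standalone sketch of the same enqueue-and-wait handshake, using std::promise in place of TBB's internal binary_semaphore:

#include <tbb/task_arena.h>
#include <future>

// Enqueue fire-and-forget work, then block the caller until the arena has
// actually run it -- the role wait_task's semaphore plays above.
void enqueue_and_wait(tbb::task_arena& arena) {
    std::promise<void> done;
    std::future<void> signaled = done.get_future();
    arena.enqueue([&done]{
        /* ... the actual work ... */
        done.set_value();          // the V() side of the handshake
    });
    signaled.wait();               // the P() side
}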
int task_arena_base::internal_current_slot() {
    generic_scheduler* s = governor::local_scheduler_if_initialized();
    return s ? int(s->my_arena_index) : -1;
}
#if __TBB_TASK_ISOLATION
class isolation_guard : tbb::internal::no_copy {
    isolation_tag &guarded;
    isolation_tag previous_value;
public:
    isolation_guard( isolation_tag &isolation ) : guarded( isolation ), previous_value( isolation ) {}
    ~isolation_guard() {
        guarded = previous_value;
    }
};

void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation ) {
    generic_scheduler* s = governor::local_scheduler_weak();
    __TBB_ASSERT( s, "this_task_arena::isolate() needs an initialized scheduler" );
    // The current isolation is stored in the innermost running task so that it can be queried.
    isolation_tag& current_isolation = s->my_innermost_running_task->prefix().isolation;
    // Temporarily change the isolation tag of the currently running task;
    // the guard restores it on scope exit.
    isolation_guard guard( current_isolation );
    current_isolation = isolation ? isolation : reinterpret_cast<isolation_tag>(&d);
    d();
}
#endif /* __TBB_TASK_ISOLATION */
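isolate_within_arena is the backend of the public this_task_arena::isolate. Its point: while the delegate runs, the thread only picks up tasks with a matching isolation tag, so an unrelated outer task cannot run on this stack mid-wait. A usage sketch of the public API:

#include <tbb/parallel_for.h>
#include <tbb/task_arena.h>
#include <vector>
#include <cstddef>

void nested_without_interference(std::vector<double>& data) {
    tbb::parallel_for(std::size_t(0), data.size(), [&](std::size_t i) {
        // Inside isolate(), any nested wait can only pick up tasks from this
        // isolated region -- never an unrelated iteration of the outer loop,
        // which would otherwise be allowed to run on this thread's stack.
        tbb::this_task_arena::isolate([&]{
            tbb::parallel_for(0, 100, [&](int j) { /* ... work on data[i] ... */ (void)j; });
        });
    });
}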
int task_arena_base::internal_max_concurrency( const task_arena *ta ) {
    arena* a = NULL;
    // ...
    if ( a ) { // the arena is initialized; take the parameters from it
        __TBB_ASSERT( !ta || ta->my_max_concurrency==1, NULL );
        return a->my_num_reserved_slots + a->my_max_num_workers;
    }
    // ...
}

The remaining lines are the page's generated cross-reference for symbols used above.
void attach_mailbox(affinity_id id)
#define GATHER_STATISTIC(x)
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
void restore_priority_if_need()
If enqueued tasks found, restore arena priority and task presence status.
Used to form groups of tasks.
Work stealing task scheduler.
const isolation_tag no_isolation
A fast random number generator.
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
static int allocation_size(unsigned num_slots)
void set_is_idle(bool value)
Indicate whether thread that reads this mailbox is idle.
unsigned my_master_slots
Reserved master slots.
isolation_tag isolation
The tag used for task isolation.
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and estimate of available tasks amount.
atomic< unsigned > my_limit
The maximal number of currently busy slots.
task * my_dummy_task
Fake root task created by slave threads.
unsigned num_workers_active() const
The number of workers active in the arena.
intptr_t drain()
Drain the mailbox.
static const intptr_t num_priority_levels
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
static void one_time_init()
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
Base class for user-defined tasks.
generic_scheduler * my_scheduler
Scheduler of the thread attached to the slot.
generic_scheduler & my_scheduler
uintptr_t my_arenas_aba_epoch
ABA prevention marker to assign to newly created arenas.
bool is_idle_state(bool value) const
Indicate whether thread that reads this mailbox is idle.
state_type state() const
Current execution state.
unsigned char state
A task::state_type, stored as a byte for compactness.
uintptr_t my_aba_epoch
ABA prevention marker.
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
bool is_recall_requested() const
Check if the recall is requested by the market.
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
void spin_wait_while_eq(const volatile T &location, U value)
Spin WHILE the value of the variable is equal to a given value.
static generic_scheduler * create_worker(market &m, size_t index, bool genuine)
Initialize a scheduler for a worker thread.
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
T1 atomic_update(tbb::atomic< T1 > &dst, T2 newValue, Pred compare)
Atomically replaces value of dst with newValue if they satisfy condition of compare predicate.
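atomic_update is a small CAS loop: it keeps retrying until either the predicate no longer holds or the swap succeeds. A standalone equivalent with std::atomic:

#include <atomic>

// Replace 'dst' with 'new_value' only while compare(dst, new_value) holds,
// i.e. monotonically advance dst under the given ordering predicate.
template <typename T, typename Pred>
T atomic_update(std::atomic<T>& dst, T new_value, Pred compare) {
    T old_value = dst.load(std::memory_order_relaxed);
    while (compare(old_value, new_value)) {
        if (dst.compare_exchange_weak(old_value, new_value))
            break;  // on failure, old_value is refreshed and the loop re-checks
    }
    return old_value;
}

// e.g. atomic_update(my_limit, index + 1, std::less<unsigned>()) grows
// my_limit to cover a newly occupied slot, as in occupy_free_slot above.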
static unsigned default_num_threads()
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
#define __TBB_CONTEXT_ARG(arg1, context)
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available,...
A functor that spawns a task.
void prepare_wait(thread_context &thr, uintptr_t ctx=0)
prepare wait by inserting 'thr' into the wait queue
market * my_market
The market I am in.
unsigned my_num_slots
The number of slots in the arena.
void notify_one()
Notify one thread about the event.
void adjust_demand(arena &, int delta)
Request that arena's need in workers should be adjusted.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
void make_critical(task &t)
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
static market & global_market(bool is_public, unsigned max_num_workers=0, size_t stack_size=0)
Factory method creating new market object.
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
void __TBB_store_with_release(volatile T &location, V value)
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
task::ready
task is in ready pool, or is going to be put there, or was just taken off.
market * my_market
The market that owns this arena.
intptr_t my_version_and_traits
Special settings.
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
void enqueue_task(task &, intptr_t, FastRandom &)
enqueue a task into starvation-resistance queue
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
void free_task_pool()
Deallocate task pool that was allocated by means of allocate_task_pool.
int my_max_concurrency
Concurrency level for deferred initialization.
void nested_arena_entry(arena *, size_t)
static const int priority_critical
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
unsigned hint_for_pop
Hint provided for operations with the container of starvation-resistant tasks.
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
atomic< unsigned > my_references
Reference counter for the arena.
intptr_t isolation_tag
A tag for task isolation.
internal::arena * my_arena
NULL if not currently initialized.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
void __TBB_EXPORTED_METHOD internal_initialize()
task_group_context * my_context
default context of the arena
task::executing
task is running, and will be destroyed after method execute() completes.
void __TBB_EXPORTED_METHOD internal_wait() const
static const pool_state_t SNAPSHOT_FULL
At least one task has been offered for stealing since the last snapshot started.
void mimic_outermost_level(arena *a, bool type)
bool is_out_of_work()
Check if there is job anywhere in arena.
unsigned short affinity_id
An id as used for specifying affinity.
bool type
Indicates that a scheduler acts as a master or a worker.
internal::tbb_exception_ptr exception_container_type
intptr_t drain()
Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
bool is_worker() const
True if running on a worker thread, false otherwise.
#define __TBB_ASSERT_EX(predicate, comment)
"Extended" version is useful to suppress warnings if a variable is only used with an assert
Class representing where mail is put.
static generic_scheduler * local_scheduler_if_initialized()
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
#define __TBB_ISOLATION_ARG(arg1, isolation)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Base class for types that should not be copied or assigned.
void on_thread_leaving()
Notification that worker or master leaves its arena.
static generic_scheduler * local_scheduler_weak()
void cancel_wait(thread_context &thr)
Cancel the wait. Removes the thread from the wait queue if not removed yet.
static const int automatic
Typedef for number of threads that is automatic.
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
bool release(bool is_public, bool blocking_terminate)
Decrements market's refcount and destroys it in the end.
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)
static const size_t out_of_arena
void create_coroutine(coroutine_type &c, size_t stack_size, void *arg)
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
scheduler_state my_orig_state
void free_arena()
Completes arena shutdown, destructs and deallocates it.
static int __TBB_EXPORTED_FUNC internal_current_slot()
bool outermost
Indicates that a scheduler is on outermost level.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
virtual void local_wait_for_all(task &parent, task *child)=0
void __TBB_EXPORTED_METHOD internal_enqueue(task &, intptr_t) const
#define ITT_SYNC_CREATE(obj, type, name)
void initialize(unsigned n_lanes)
bool empty(int level)
Checks existence of a task.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
#define ITT_NOTIFY(name, obj)
T __TBB_load_relaxed(const volatile T &location)
int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
static bool occupy_slot(generic_scheduler *&slot, generic_scheduler &s)
atomic< T > & as_atomic(T &t)
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
void __TBB_EXPORTED_METHOD internal_execute(delegate_base &) const
void attach_arena(arena *, size_t index, bool is_master)
void __TBB_EXPORTED_METHOD internal_attach()
task_group_context * my_orig_ctx
T __TBB_load_with_acquire(const volatile T &location)
size_t __TBB_EXPORTED_FUNC NFS_GetLineSize()
Cache/sector line size.
nested_arena_context(generic_scheduler *s, arena *a, size_t slot_index, bool type, bool same)
es_ref_count_active
Set if ref_count might be changed by another thread. Used for debugging.
void construct()
Construct *this as a mailbox from zeroed memory.
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
intptr_t reference_count
A reference count.
bool is_critical(task &t)
bool commit_wait(thread_context &thr)
Commit wait if event count has not changed; otherwise, cancel wait.
task::allocated
task object is freshly allocated or recycled.
scheduler_properties my_properties
static unsigned num_arena_slots(unsigned num_slots)
static const unsigned ref_external
Reference increment values for externals and workers.
void __TBB_EXPORTED_METHOD internal_terminate()
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
#define __TBB_CONTEXT_ARG1(context)
static arena * create_arena(int num_slots, int num_reserved_slots, size_t stack_size)
Creates an arena object.