#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
#include "../rml/include/rml_tbb.h"
#if __TBB_PREVIEW_RESUMABLE_TASKS

class task_group_context;
class allocate_root_with_context_proxy;

#if __TBB_NUMA_SUPPORT
class numa_binding_observer;

#if __TBB_PREVIEW_RESUMABLE_TASKS
class arena_co_cache {
    //! Cached generic_scheduler instances that own the idle co-routines.
    generic_scheduler** my_co_scheduler_cache;
    //! Current head position in the circular cache.
    unsigned my_head;
    //! Highest valid index, i.e. capacity - 1.
    unsigned my_max_index;

    unsigned next_index() {
        return ( my_head == my_max_index ) ? 0 : my_head + 1;
    }

    unsigned prev_index() {
        return ( my_head == 0 ) ? my_max_index : my_head - 1;
    }

    bool internal_empty() {
        return my_co_scheduler_cache[prev_index()] == NULL;
    }

    void internal_scheduler_cleanup(generic_scheduler* to_cleanup) {
        to_cleanup->my_arena_slot = NULL;
        // ... remaining scheduler teardown elided in this excerpt
    }

    void init(unsigned cache_capacity) {
        size_t alloc_size = cache_capacity * sizeof(generic_scheduler*);
        my_co_scheduler_cache = (generic_scheduler**)NFS_Allocate(1, alloc_size, NULL);
        memset( my_co_scheduler_cache, 0, alloc_size );
        my_head = 0;
        my_max_index = cache_capacity - 1;
    }

    void cleanup() {
        while (generic_scheduler* to_cleanup = pop()) {
            internal_scheduler_cleanup(to_cleanup);
        }
        NFS_Free(my_co_scheduler_cache);
    }

    //! Cache a scheduler, cleaning up the entry it displaces if the slot is taken.
    void push(generic_scheduler* s) {
        generic_scheduler* to_cleanup = NULL;
        {
            // (access to the cache is serialized in the full source)
            if (my_co_scheduler_cache[my_head] != NULL) {
                to_cleanup = my_co_scheduler_cache[my_head];
            }
            my_co_scheduler_cache[my_head] = s;
            my_head = next_index();
        }
        if (to_cleanup) {
            internal_scheduler_cleanup(to_cleanup);
        }
    }

    //! Retrieve a cached scheduler, or NULL if the cache is empty.
    generic_scheduler* pop() {
        if (internal_empty())
            return NULL;
        my_head = prev_index();
        generic_scheduler* to_return = my_co_scheduler_cache[my_head];
        my_co_scheduler_cache[my_head] = NULL;
        return to_return;
    }
};
#endif // __TBB_PREVIEW_RESUMABLE_TASKS

#if __TBB_TASK_PRIORITY
    volatile intptr_t my_top_priority;
#if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
#if __TBB_PREVIEW_CRITICAL_TASKS

#if __TBB_ARENA_OBSERVER
    observer_list my_observers;

#if __TBB_NUMA_SUPPORT
    numa_binding_observer* my_numa_binding_observer;

#if __TBB_TASK_PRIORITY
    intptr_t my_bottom_priority;
    uintptr_t my_reload_epoch;
    task* my_orphaned_tasks;
    tbb::atomic<uintptr_t> my_abandonment_epoch;
    tbb::atomic<intptr_t> my_skipped_fifo_priority;

#if !__TBB_FP_CONTEXT
#if __TBB_TASK_GROUP_CONTEXT
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
    bool my_local_concurrency_mode;
    bool my_global_concurrency_mode;

#if __TBB_PREVIEW_RESUMABLE_TASKS
    arena_co_cache my_co_cache;
    arena( market&, unsigned max_num_workers, unsigned num_reserved_slots );

        return max(2u, num_slots);

        __TBB_ASSERT( 0 < id, "affinity id must be positive integer" );

    template<unsigned ref_param>

    void dump_arena_statistics();

#if __TBB_TASK_PRIORITY
    inline bool may_have_tasks( generic_scheduler*, bool& tasks_present, bool& dequeuing_possible );

#if __TBB_COUNT_TASK_NODES
    intptr_t workers_task_node_count();

    template <bool as_worker>

    template<unsigned ref_param>

#if __TBB_STATISTICS_EARLY_DUMP
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
            my_market->enable_mandatory_concurrency(this);
            my_local_concurrency_mode = true;

    else if( work_type == wakeup ) {

#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
                if( my_local_concurrency_mode ) {
                    my_local_concurrency_mode = false;
                if ( as_atomic(my_global_concurrency_mode) == true )
                    my_market->mandatory_concurrency_disable( this );
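
The last group of fragments belongs to arena::advertise_new_work(), which is templated on new_work_type (both are listed below). A simplified consolidation of what the visible lines do; only the statements shown above are taken from the header, while the enclosing branches and their guard conditions are assumptions:

    // Sketch, not the verbatim header: branch placement is assumed.
    template<arena::new_work_type work_type>
    void arena::advertise_new_work() {
        if( work_type == work_enqueued ) {
    #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
            // Enqueued work must make progress even when no workers are allotted,
            // so the arena asks the market for mandatory concurrency.
            my_market->enable_mandatory_concurrency(this);
            my_local_concurrency_mode = true;
    #endif
        }
        else if( work_type == wakeup ) {
            // Wake up workers sleeping on an empty pool snapshot.
        }
        // Further down (the exact branch is not visible in this excerpt) the flags
        // are cleared once mandatory concurrency is no longer needed:
    #if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
        if( my_local_concurrency_mode )
            my_local_concurrency_mode = false;
        if( as_atomic(my_global_concurrency_mode) == true )
            my_market->mandatory_concurrency_disable( this );
    #endif
    }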
 #define GATHER_STATISTIC(x)
 
task_stream< num_priority_levels > my_task_stream
Task pool for the tasks scheduled via task::enqueue() method.
 
argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor)
A function to compute arg modulo divisor where divisor is a power of 2.
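
For a power-of-two divisor the modulo reduces to a bit mask; a minimal equivalent of what this helper presumably does (the body is an assumption, the signature mirrors the one listed above):

    // Power-of-two modulo: divisor - 1 is a mask of the divisor's low bits.
    template<typename argument_integer_type, typename divisor_integer_type>
    argument_integer_type modulo_power_of_two(argument_integer_type arg, divisor_integer_type divisor) {
        return arg & (divisor - 1);   // valid only when divisor is a power of 2
    }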
 
void restore_priority_if_need()
If enqueued tasks are found, restore the arena priority and task presence status.
 
Used to form groups of tasks.
 
Work stealing task scheduler.
 
A fast random number generator.
 
arena(market &, unsigned max_num_workers, unsigned num_reserved_slots)
Constructor.
 
static int allocation_size(unsigned num_slots)
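
The stray fragment "return max(2u, num_slots);" in the source excerpt above is the body of num_arena_slots(); allocation_size() then sizes one contiguous block holding the mailboxes, the arena header and the slot array. A plausible shape of the two helpers, reconstructed from the members listed on this page (the exact layout expression is an assumption):

    // Sketch of the arena sizing helpers; layout details are assumed.
    static unsigned num_arena_slots( unsigned num_slots ) {
        return max(2u, num_slots);    // an arena never has fewer than two slots
    }
    static int allocation_size( unsigned num_slots ) {
        return sizeof(base_type) + num_slots * (sizeof(mail_outbox) + sizeof(arena_slot));
    }
    // mailbox(id) can then index backwards from the arena header, which is why the
    // excerpt asserts "affinity id must be positive integer".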
 
new_work_type
Types of work advertised by advertise_new_work()
 
tbb::atomic< uintptr_t > my_pool_state
Current task pool state and an estimate of the amount of available tasks.
 
atomic< unsigned > my_limit
The maximal number of currently busy slots.
 
unsigned num_workers_active() const
The number of workers active in the arena.
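
my_references (listed further down) appears to pack both kinds of references into a single counter: the low ref_external_bits hold master/external references and the bits above them count workers. Under that assumption the accessor is a plain shift:

    // Assumed packing of my_references: low ref_external_bits = external refs,
    // remaining high bits = active workers.
    unsigned num_workers_active() const {
        return my_references >> ref_external_bits;
    }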
 
static const intptr_t num_priority_levels
 
unsigned my_num_reserved_slots
The number of reserved slots (can be occupied only by masters).
 
void *__TBB_EXPORTED_FUNC NFS_Allocate(size_t n_element, size_t element_size, void *hint)
Allocate memory on cache/sector line boundary.
 
Base class for user-defined tasks.
 
uintptr_t my_aba_epoch
ABA prevention marker.
 
bool is_recall_requested() const
Check if the recall is requested by the market.
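
A recall is presumably signalled when the market has allotted fewer workers than are currently active in the arena; a minimal sketch under that assumption:

    // Sketch: recall is requested once the allotment drops below the active count.
    bool is_recall_requested() const {
        return num_workers_active() > my_num_workers_allotted;
    }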
 
static void assume_scheduler(generic_scheduler *s)
Temporarily set TLS slot to the given scheduler.
 
int my_num_workers_requested
The number of workers that are currently requested from the resource manager.
 
A lock that occupies a single byte.
 
The container for "fairness-oriented" aka "enqueued" tasks.
 
unsigned my_max_num_workers
The number of workers requested by the master thread owning the arena.
 
static const pool_state_t SNAPSHOT_EMPTY
No tasks to steal since last snapshot was taken.
 
size_t occupy_free_slot(generic_scheduler &s)
Tries to occupy a slot in the arena. On success, returns the slot index; if no slot is available,...
 
unsigned my_num_slots
The number of slots in the arena.
 
void adjust_demand(arena &, int delta)
Request that the arena's demand for workers be adjusted.
 
static generic_scheduler * local_scheduler()
Obtain the thread-local instance of the TBB scheduler.
 
void advertise_new_work()
If necessary, raise a flag that there is new work in the arena.
 
void process(generic_scheduler &)
Registers the worker with the arena and enters TBB scheduler dispatch loop.
 
static const unsigned ref_worker
 
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
 
market * my_market
The market that owns this arena.
 
cpu_ctl_env my_cpu_ctl_env
FPU control settings of arena's master thread captured at the moment of arena instantiation.
 
void enqueue_task(task &, intptr_t, FastRandom &)
Enqueue a task into the starvation-resistant queue.
 
concurrent_monitor my_exit_monitors
Waiting object for master threads that cannot join the arena.
 
bool has_enqueued_tasks()
Check for the presence of enqueued tasks at all priority levels.
 
atomic< unsigned > my_references
Reference counter for the arena.
 
static bool is_busy_or_empty(pool_state_t s)
No tasks to steal or snapshot is being taken.
 
Pads type T to fill out to a multiple of cache line size.
 
static const pool_state_t SNAPSHOT_FULL
At least one task has been offered for stealing since the last snapshot started.
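
The two snapshot constants bracket the range of my_pool_state values, and is_busy_or_empty() (listed above) folds "empty" and "snapshot in progress" into one test. A plausible encoding consistent with those briefs; the concrete values are an assumption:

    // Assumed encoding: 0 = empty snapshot, all-ones = full,
    // anything in between = a snapshot is currently being taken.
    typedef uintptr_t pool_state_t;
    static const pool_state_t SNAPSHOT_EMPTY = 0;
    static const pool_state_t SNAPSHOT_FULL  = pool_state_t(-1);
    static bool is_busy_or_empty( pool_state_t s ) { return s < SNAPSHOT_FULL; }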
 
The structure of an arena, except the array of slots.
 
bool is_out_of_work()
Check if there is work anywhere in the arena.
 
padded< arena_base > base_type
 
unsigned short affinity_id
An id as used for specifying affinity.
 
unsigned my_num_workers_soft_limit
Current application-imposed limit on the number of workers (see set_active_num_workers())
 
Represents acquisition of a mutex.
 
Class representing where mail is put.
 
void try_destroy_arena(arena *, uintptr_t aba_epoch)
Removes the arena from the market's list.
 
static generic_scheduler * local_scheduler_if_initialized()
 
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
 
void on_thread_leaving()
Notification that worker or master leaves its arena.
 
static arena & allocate_arena(market &, unsigned num_slots, unsigned num_reserved_slots)
Allocate an instance of arena.
 
static const size_t out_of_arena
 
void atomic_fence()
Sequentially consistent full memory fence.
 
void free_arena()
Completes arena shutdown, destructs and deallocates it.
 
T max(const T &val1, const T &val2)
Utility template function returning greater of the two values.
 
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
 
size_t occupy_free_slot_in_range(generic_scheduler &s, size_t lower, size_t upper)
Tries to occupy a slot in the specified range.
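
The usual technique for this kind of probing is a compare-and-swap claim on each candidate slot, starting from a preferred index and wrapping within the range. A self-contained illustration of that pattern; the names and types here are illustrative, not TBB's:

    #include <atomic>
    #include <cstddef>

    static const std::size_t out_of_range = ~std::size_t(0);

    // Try to claim one slot in [lower, upper): start at 'preferred', wrap around,
    // and claim a slot by CAS-ing its owner pointer from nullptr to 'owner'.
    std::size_t claim_slot_in_range(std::atomic<void*>* slots, void* owner,
                                    std::size_t preferred,
                                    std::size_t lower, std::size_t upper) {
        if (lower >= upper) return out_of_range;
        std::size_t index = (preferred >= lower && preferred < upper) ? preferred : lower;
        for (std::size_t i = 0; i < upper - lower; ++i) {
            void* expected = nullptr;
            if (slots[index].compare_exchange_strong(expected, owner))
                return index;                    // slot claimed
            if (++index == upper) index = lower; // wrap within the range
        }
        return out_of_range;                     // every slot already occupied
    }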
 
atomic< T > & as_atomic(T &t)
 
static const unsigned ref_external_bits
The number of least significant bits for external references.
 
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
 
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
 
static unsigned num_arena_slots(unsigned num_slots)
 
static const unsigned ref_external
Reference increment values for externals and workers.