17 #ifndef _TBB_task_stream_extended_H 18 #define _TBB_task_stream_extended_H 28 #if _TBB_task_stream_H 29 #error Either task_stream.h or this file can be included at the same time. 33 #error This code bears a preview status until it proves its usefulness/performance suitability. 51 template<
typename T,
typename mutex_t >
52 struct queue_and_mutex {
80 return (val & (
one<<pos)) != 0;
84 #if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 92 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
100 #if __INTEL_COMPILER == 1110 || __INTEL_COMPILER == 1500 113 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
121 __TBB_ASSERT( ((out_of-1) & out_of) == 0,
"number of lanes is not power of two." );
135 template<task_stream_accessor_type accessor>
140 task* result = queue.front();
152 result = queue.back();
154 }
while( !result && !queue.empty() );
160 template<
int Levels, task_stream_accessor_type accessor>
169 for(
int level = 0; level < Levels; level++) {
176 const unsigned max_lanes =
sizeof(
population_t) * CHAR_BIT;
178 N = n_lanes>=max_lanes ? max_lanes : n_lanes>2 ? 1<<(
__TBB_Log2(n_lanes-1)+1) : 2;
179 __TBB_ASSERT(
N==max_lanes ||
N>=n_lanes && ((
N-1)&
N)==0,
"number of lanes miscalculated");
181 for(
int level = 0; level < Levels; level++) {
188 for(
int level = 0; level < Levels; level++)
194 __TBB_ASSERT( 0 <= level && level < Levels,
"Incorrect lane level specified." );
196 if(
lock.try_acquire(
lanes[level][lane_idx].my_mutex ) ) {
197 lanes[level][lane_idx].my_queue.push_back( source );
205 template<
typename lane_selector_t>
206 void push(
task* source,
int level,
const lane_selector_t& next_lane ) {
207 bool succeed =
false;
210 lane = next_lane(
N );
212 }
while( ! (succeed =
try_push( source, level, lane )) );
217 __TBB_ASSERT( 0 <= level && level < Levels,
"Incorrect lane level specified." );
233 template<
typename lane_selector_t>
234 task*
pop(
int level,
const lane_selector_t& next_lane ) {
238 lane = next_lane(
N );
240 }
while( !
empty( level ) && !(popped =
try_pop( level, lane )) );
249 typename lane_t::queue_base_t::iterator curr = queue.end();
252 task* result = *--curr;
254 if( queue.end() - curr == 1 )
261 }
while( curr != queue.begin() );
269 unsigned idx = last_used_lane & (
N-1);
283 }
while( !
empty(level) && idx != last_used_lane );
284 last_used_lane = idx;
298 for(
int level = 0; level < Levels; level++)
299 for(
unsigned i=0; i<
N; ++i) {
302 for(
typename lane_t::queue_base_t::iterator it=lane.
my_queue.begin();
303 it!=lane.
my_queue.end(); ++it, ++result)
307 tbb::task::destroy(*t);
task_stream_accessor< accessor >::lane_t lane_t
A fast random number generator.
isolation_tag isolation
The tag used for task isolation.
#define __TBB_ISOLATION_EXPR(isolation)
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
unsigned operator()(unsigned out_of) const
Base class for user-defined tasks.
bool try_push(task *source, int level, unsigned lane_idx)
Returns true on a successful push, false otherwise.
task * try_pop(int level, unsigned lane_idx)
Returns a pointer to the task on a successful pop, NULL otherwise.
task * pop(int level, const lane_selector_t &next_lane)
task * get_item(lane_t::queue_base_t &queue)
The container for "fairness-oriented" aka "enqueued" tasks.
task * look_specific(__TBB_ISOLATION_ARG(task_stream_base::lane_t::queue_base_t &queue, isolation_tag isolation))
Base class for types that should not be assigned.
void set_one_bit(population_t &dest, int pos)
task * get_item(lane_t::queue_base_t &queue)
random_lane_selector(FastRandom &random)
unsigned short get()
Get a random number.
task_stream_accessor_type
void __TBB_AtomicOR(volatile void *operand, uintptr_t addend)
preceding_lane_selector(unsigned &previous)
void __TBB_AtomicAND(volatile void *operand, uintptr_t addend)
intptr_t isolation_tag
A tag for task isolation.
Essentially, this is just a pair of a queue and a mutex to protect the queue.
bool is_bit_set(population_t val, int pos)
Pads type T to fill out to a multiple of cache line size.
padded< lane_t > * lanes[Levels]
intptr_t drain()
Destroys all remaining tasks in every lane. Returns the number of destroyed tasks.
Represents acquisition of a mutex.
unsigned operator()(unsigned out_of) const
lane_selector_base(unsigned &previous)
#define __TBB_ISOLATION_ARG(arg1, isolation)
Base class for types that should not be copied or assigned.
void clear_one_bit(population_t &dest, int pos)
intptr_t __TBB_Log2(uintptr_t x)
unsigned operator()(unsigned out_of) const
population_t population[Levels]
void initialize(unsigned n_lanes)
bool empty(int level)
Checks for the existence of a task.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
queue_and_mutex< task *, spin_mutex > lane_t
void push(task *source, int level, const lane_selector_t &next_lane)
Push a task into a lane. Lane selection is performed by the passed functor.
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void * lock
std::deque< T, tbb_allocator< T > > queue_base_t
task * pop_specific(int level, __TBB_ISOLATION_ARG(unsigned &last_used_lane, isolation_tag isolation))
Tries to find and pop a related task.
subsequent_lane_selector(unsigned &previous)