Home ⌂Doc Index ◂Up ▴
Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
partitioner.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_partitioner_H
18 #define __TBB_partitioner_H
19 
20 #define __TBB_partitioner_H_include_area
22 
23 #ifndef __TBB_INITIAL_CHUNKS
24 // initial task divisions per thread
25 #define __TBB_INITIAL_CHUNKS 2
26 #endif
27 #ifndef __TBB_RANGE_POOL_CAPACITY
28 // maximum number of elements in range pool
29 #define __TBB_RANGE_POOL_CAPACITY 8
30 #endif
31 #ifndef __TBB_INIT_DEPTH
32 // initial value for depth of range pool
33 #define __TBB_INIT_DEPTH 5
34 #endif
35 #ifndef __TBB_DEMAND_DEPTH_ADD
36 // when imbalance is found range splits this value times more
37 #define __TBB_DEMAND_DEPTH_ADD 1
38 #endif
39 #ifndef __TBB_STATIC_THRESHOLD
40 // necessary number of clocks for the work to be distributed among all tasks
41 #define __TBB_STATIC_THRESHOLD 40000
42 #endif
43 #if __TBB_DEFINE_MIC
44 #define __TBB_NONUNIFORM_TASK_CREATION 1
45 #ifdef __TBB_time_stamp
46 #define __TBB_USE_MACHINE_TIME_STAMPS 1
47 #define __TBB_task_duration() __TBB_STATIC_THRESHOLD
48 #endif // __TBB_time_stamp
49 #endif // __TBB_DEFINE_MIC
50 
51 #include "task.h"
52 #include "task_arena.h"
53 #include "aligned_space.h"
54 #include "atomic.h"
56 
57 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
58  // Workaround for overzealous compiler warnings
59  #pragma warning (push)
60  #pragma warning (disable: 4244)
61 #endif
62 
63 namespace tbb {
64 
65 class auto_partitioner;
66 class simple_partitioner;
67 class static_partitioner;
68 class affinity_partitioner;
69 
70 namespace interface9 {
71  namespace internal {
72  class affinity_partition_type;
73  }
74 }
75 
76 namespace internal { //< @cond INTERNAL
78 
84 
// Fragment of affinity_partitioner_base_v3 — the run-time entry point that
// owns the affinity array shared by affinity_partitioner instances.
// NOTE(review): this listing omits the class head and the my_array member
// declaration (original lines ~80-86) — confirm against the full header.
87  size_t my_size;
93 
 // Resizes the affinity array; exported so the TBB library (task.cpp) owns it.
94  void __TBB_EXPORTED_METHOD resize( unsigned factor );
95 };
96 
// Backward-compatibility helper providing no-op affinity methods for
// partition objects without affinity support.
// NOTE(review): the class head (original line 98) is omitted from this listing.
99 public:
 // No affinity to set for non-affinity partitions.
100  void set_affinity( task & ) {}
 // Never delays spawning in the compatibility path.
103  bool decide_whether_to_delay() {return false;}
 // Spawns immediately; the bool hint is ignored.
104  void spawn_or_delay( bool, task& b ) {
105  task::spawn(b);
106  }
107 };
108 
109 template<typename Range, typename Body, typename Partitioner> class start_scan;
110 
111 } //< namespace internal @endcond
112 
113 namespace serial {
114 namespace interface9 {
115 template<typename Range, typename Body, typename Partitioner> class start_for;
116 }
117 }
118 
119 namespace interface9 {
121 namespace internal {
122 using namespace tbb::internal;
123 template<typename Range, typename Body, typename Partitioner> class start_for;
124 template<typename Range, typename Body, typename Partitioner> class start_reduce;
125 template<typename Range, typename Body, typename Partitioner> class start_deterministic_reduce;
126 
// Join task node that contains a shared flag used as stealing feedback:
// a stolen child marks the flag on its parent so sibling tasks can detect
// imbalance and adjust splitting depth (see dynamic_grainsize_mode).
128 class flag_task: public task {
129 public:
 // Set to true when any child of this task was stolen.
130  tbb::atomic<bool> my_child_stolen;
131  flag_task() { my_child_stolen = false; }
 // Executes nothing; the task exists only as a flag holder / join point.
132  task* execute() __TBB_override { return NULL; }
 // Records on t's parent (cast to flag_task) that a child was stolen.
133  static void mark_task_stolen(task &t) {
134  tbb::atomic<bool> &flag = static_cast<flag_task*>(t.parent())->my_child_stolen;
135 #if TBB_USE_THREADING_TOOLS
136  // Threading tools respect lock prefix but report false-positive data-race via plain store
137  flag.fetch_and_store<release>(true);
138 #else
139  flag = true;
140 #endif //TBB_USE_THREADING_TOOLS
141  }
 // True if a sibling of t (a child of the same flag_task parent) was stolen.
142  static bool is_peer_stolen(task &t) {
143  return static_cast<flag_task*>(t.parent())->my_child_stolen;
144  }
145 };
146 
150 typedef unsigned char depth_t;
151 
153 template <typename T, depth_t MaxCapacity>
// Range pool: stores up to MaxCapacity ranges of type T in a circular buffer,
// together with each range's relative depth. The buffer is used LIFO at
// my_head (back = deepest range) and FIFO at my_tail (front = shallowest).
// NOTE(review): this listing omits the class head and the my_head/my_tail/
// my_size member declarations (original lines 154-157) — confirm against
// the full header.
158  depth_t my_depth[MaxCapacity]; // relative depths of stored ranges
159  tbb::aligned_space<T, MaxCapacity> my_pool;
160 
161 public:
 // Initializes the pool with a single range at depth 0, copy-constructed in place.
163  range_vector(const T& elem) : my_head(0), my_tail(0), my_size(1) {
164  my_depth[0] = 0;
165  new( static_cast<void *>(my_pool.begin()) ) T(elem);//TODO: std::move?
166  }
 // Destructor body (signature omitted in this listing): destroys remaining ranges.
168  while( !empty() ) pop_back();
169  }
170  bool empty() const { return my_size == 0; }
171  depth_t size() const { return my_size; }
 // Repeatedly splits the back (deepest) range until the pool is full or the
 // range is no longer divisible at max_depth. Each split copies the back
 // range forward, then 'inverse'-splits the original slot in place.
174  void split_to_fill(depth_t max_depth) {
175  while( my_size < MaxCapacity && is_divisible(max_depth) ) {
176  depth_t prev = my_head;
177  my_head = (my_head + 1) % MaxCapacity;
178  new(my_pool.begin()+my_head) T(my_pool.begin()[prev]); // copy TODO: std::move?
179  my_pool.begin()[prev].~T(); // instead of assignment
180  new(my_pool.begin()+prev) T(my_pool.begin()[my_head], split()); // do 'inverse' split
 // Both halves are one level deeper than their parent.
181  my_depth[my_head] = ++my_depth[prev];
182  my_size++;
183  }
184  }
 // Destroys the deepest (most recently produced) range and retreats my_head.
185  void pop_back() {
186  __TBB_ASSERT(my_size > 0, "range_vector::pop_back() with empty size");
187  my_pool.begin()[my_head].~T();
188  my_size--;
189  my_head = (my_head + MaxCapacity - 1) % MaxCapacity;
190  }
 // Destroys the shallowest (oldest) range and advances my_tail.
191  void pop_front() {
192  __TBB_ASSERT(my_size > 0, "range_vector::pop_front() with empty size");
193  my_pool.begin()[my_tail].~T();
194  my_size--;
195  my_tail = (my_tail + 1) % MaxCapacity;
196  }
197  T& back() {
198  __TBB_ASSERT(my_size > 0, "range_vector::back() with empty size");
199  return my_pool.begin()[my_head];
200  }
201  T& front() {
202  __TBB_ASSERT(my_size > 0, "range_vector::front() with empty size");
203  return my_pool.begin()[my_tail];
204  }
 // Depth of the front range (signature line omitted in this listing).
207  __TBB_ASSERT(my_size > 0, "range_vector::front_depth() with empty size");
208  return my_depth[my_tail];
209  }
 // Depth of the back range (signature line omitted in this listing).
211  __TBB_ASSERT(my_size > 0, "range_vector::back_depth() with empty size");
212  return my_depth[my_head];
213  }
 // True if the back range is above max_depth and still divisible.
214  bool is_divisible(depth_t max_depth) {
215  return back_depth() < max_depth && back().is_divisible();
216  }
217 };
218 
220 template <typename Partition>
// CRTP base for all partition objects: provides default (no-op) decision
// makers and the common execute() driver. Derived Partition types override
// the decision methods to change splitting behavior.
// NOTE(review): the struct head (original line 221) is omitted from this
// listing — confirm against the full header.
222  typedef split split_type;
223  // decision makers
224  void set_affinity( task & ) {}
226  bool check_being_stolen(task &) { return false; } // part of old should_execute_range()
227  bool check_for_demand(task &) { return false; }
228  bool is_divisible() { return true; } // part of old should_execute_range()
229  depth_t max_depth() { return 0; }
 // Default split object; proportional partitions override this.
231  template <typename Range> split_type get_split() { return split(); }
232  Partition& self() { return *static_cast<Partition*>(this); } // CRTP helper
233 
 // Default work balancing: run the body directly on the whole range.
234  template<typename StartType, typename Range>
235  void work_balance(StartType &start, Range &range) {
236  start.run_body( range ); // simple partitioner goes always here
237  }
238 
 // Common driver: offer split-off subranges while both the range and the
 // partition agree to divide, then hand the remainder to work_balance().
239  template<typename StartType, typename Range>
240  void execute(StartType &start, Range &range) {
241  // The algorithm in a few words ([]-denotes calls to decision methods of partitioner):
242  // [If this task is stolen, adjust depth and divisions if necessary, set flag].
243  // If range is divisible {
244  // Spread the work while [initial divisions left];
245  // Create trap task [if necessary];
246  // }
247  // If not divisible or [max depth is reached], execute, else do the range pool part
248  if ( range.is_divisible() ) {
249  if ( self().is_divisible() ) {
250  do { // split until is divisible
251  typename Partition::split_type split_obj = self().template get_split<Range>();
252  start.offer_work( split_obj );
253  } while ( range.is_divisible() && self().is_divisible() );
254  }
255  }
256  self().work_balance(start, range);
257  }
258 };
259 
// Default splitting strategy: my_divisor tracks how many more binary splits
// this partition may perform; each split halves it.
// NOTE(review): the default constructor (original line 270) is omitted from
// this listing — confirm against the full header.
261 template <typename Partition>
262 struct adaptive_mode : partition_type_base<Partition> {
263  typedef Partition my_partition;
264  size_t my_divisor;
265  // For affinity_partitioner, my_divisor indicates the number of affinity array indices the task reserves.
266  // A task which has only one index must produce the right split without reserved index in order to avoid
267  // it to be overwritten in note_affinity() of the created (right) task.
268  // I.e. a task created deeper than the affinity array can remember must not save its affinity (LIFO order)
269  static const unsigned factor = 1;
 // Splitting constructor: the new (right) partition takes half of src's divisor.
271  adaptive_mode(adaptive_mode &src, split) : my_divisor(do_split(src, split())) {}
 // Halves src's divisor and returns the new value (shared by both halves).
273  size_t do_split(adaptive_mode &src, split) {
274  return src.my_divisor /= 2u;
275  }
276 };
277 
279 
282 // TODO: check if this helper can be a nested class of proportional_mode.
// Helper that builds a proportional_split for a given Range type.
// Primary template: Range does not support proportional splitting, so use a
// neutral 1:1 proportion.
// NOTE(review): the primary template's struct head (original line 284) is
// omitted from this listing — confirm against the full header.
283 template <typename Range, typename = void>
285  static proportional_split get_split(size_t) { return proportional_split(1,1); }
286 };
 // Specialization for ranges that declare is_splittable_in_proportion:
 // split n "slots" into left/right parts (uneven 2:1 on MIC-like targets).
287 template <typename Range>
288 struct proportion_helper<Range, typename enable_if<Range::is_splittable_in_proportion, void>::type> {
289  static proportional_split get_split(size_t n) {
290 #if __TBB_NONUNIFORM_TASK_CREATION
291  size_t right = (n + 2) / 3;
292 #else
293  size_t right = n / 2;
294 #endif
295  size_t left = n - right;
296  return proportional_split(left, right);
297  }
298 };
299 
// Proportional splitting strategy for partition objects: divides my_divisor
// according to a proportional_split instead of strict halving.
// NOTE(review): this listing omits the binary-split constructor (original
// line 307) and the get_split() signature (original line 331) — confirm
// against the full header.
301 template <typename Partition>
302 struct proportional_mode : adaptive_mode<Partition> {
303  typedef Partition my_partition;
304  using partition_type_base<Partition>::self; // CRTP helper to get access to derived classes
305 
306  proportional_mode() : adaptive_mode<Partition>() {}
 // Proportional-splitting constructor: takes split_obj's share of src's divisor.
308  proportional_mode(proportional_mode &src, const proportional_split& split_obj) { self().my_divisor = do_split(src, split_obj); }
 // Computes the right-hand portion of src.my_divisor, rounds it to a multiple
 // of the partition's factor, removes it from src, and returns it.
309  size_t do_split(proportional_mode &src, const proportional_split& split_obj) {
310 #if __TBB_ENABLE_RANGE_FEEDBACK
311  size_t portion = size_t(float(src.my_divisor) * float(split_obj.right())
312  / float(split_obj.left() + split_obj.right()) + 0.5f);
313 #else
314  size_t portion = split_obj.right() * my_partition::factor;
315 #endif
 // Round to the nearest multiple of factor (factor is a power of two).
316  portion = (portion + my_partition::factor/2) & (0ul - my_partition::factor);
317 #if __TBB_ENABLE_RANGE_FEEDBACK
318 
 // Keep both sides non-empty when feedback rounding degenerates.
319  if (!portion)
320  portion = my_partition::factor;
321  else if (portion == src.my_divisor)
322  portion = src.my_divisor - my_partition::factor;
323 #endif
324  src.my_divisor -= portion;
325  return portion;
326  }
327  bool is_divisible() { // part of old should_execute_range()
328  return self().my_divisor > my_partition::factor;
329  }
 // get_split() body (signature omitted in this listing): builds a proportion
 // for the number of threads expected to handle this subrange.
330  template <typename Range>
332  // Create a proportion for the number of threads expected to handle "this" subrange
333  return proportion_helper<Range>::get_split( self().my_divisor / my_partition::factor );
334  }
335 };
336 
337 static size_t get_initial_partition_head() {
338  int current_index = tbb::this_task_arena::current_thread_index();
339  if (current_index == tbb::task_arena::not_initialized)
340  current_index = 0;
341  return size_t(current_index);
342 }
343 
// Provides linear indexing of the partitioner's sequence: each partition
// reserves a contiguous run of my_divisor slots starting at my_head, modulo
// my_max_affinity.
// NOTE(review): this listing omits the struct head, the my_max_affinity
// member, and the first-constructor signatures (original lines 346-352) —
// confirm against the full header.
345 template <typename Partition>
347  size_t my_head;
351  my_max_affinity(self().my_divisor) {}
 // Splitting constructors: the right half starts where the left half's
 // reserved slot run ends (wrapping at my_max_affinity).
353  , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
354  linear_affinity_mode(linear_affinity_mode &src, const proportional_split& split_obj) : proportional_mode<Partition>(src, split_obj)
355  , my_head((src.my_head + src.my_divisor) % src.my_max_affinity), my_max_affinity(src.my_max_affinity) {}
 // Pins the task to slot my_head (+1 because affinity_id 0 means "no affinity").
356  void set_affinity( task &t ) {
357  if( self().my_divisor )
358  t.set_affinity( affinity_id(my_head) + 1 );
359  }
360 };
361 
// Mixin that adapts task grain size dynamically: it maintains a depth budget
// (my_max_depth), a range pool, and a delay state machine, increasing depth
// when stealing indicates imbalance and decreasing it when tasks look too
// small.
// NOTE(review): this listing omits several lines — the enum value between
// 'begin' and 'pass' (original line 371, 'run'), the constructor signatures
// (original lines 376, 382-383, 389-390), the my_max_depth member, and the
// range_pool declaration in work_balance (original line 426). Confirm
// against the full header.
363 template<class Mode>
364 struct dynamic_grainsize_mode : Mode {
365  using Mode::self;
366 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
 // Deadline timestamp used to decide when enough work has been distributed.
367  tbb::internal::machine_tsc_t my_dst_tsc;
368 #endif
 // Delay state machine for check_for_demand(); 'run' state exists only with
 // machine time stamps enabled (its enumerator line is omitted here).
369  enum {
370  begin = 0,
372  pass
373  } my_delay;
375  static const unsigned range_pool_size = __TBB_RANGE_POOL_CAPACITY;
377 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
378  , my_dst_tsc(0)
379 #endif
380  , my_delay(begin)
381  , my_max_depth(__TBB_INIT_DEPTH) {}
 // Splitting constructor body (signature omitted): inherits p's depth budget.
383  : Mode(p, split())
384 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
385  , my_dst_tsc(0)
386 #endif
387  , my_delay(pass)
388  , my_max_depth(p.my_max_depth) {}
 // Proportional-splitting constructor body (signature omitted).
390  : Mode(p, split_obj)
391 #ifdef __TBB_USE_MACHINE_TIME_STAMPS
392  , my_dst_tsc(0)
393 #endif
394  , my_delay(begin)
395  , my_max_depth(p.my_max_depth) {}
 // If this task is below the top P tasks of the binary tree and was stolen
 // while its sibling still runs, grant extra depth so it splits more.
396  bool check_being_stolen(task &t) { // part of old should_execute_range()
397  if( !(self().my_divisor / Mode::my_partition::factor) ) { // if not from the top P tasks of binary tree
398  self().my_divisor = 1; // TODO: replace by on-stack flag (partition_state's member)?
399  if( t.is_stolen_task() && t.parent()->ref_count() >= 2 ) { // runs concurrently with the left task
400 #if __TBB_USE_OPTIONAL_RTTI
401  // RTTI is available, check whether the cast is valid
402  __TBB_ASSERT(dynamic_cast<flag_task*>(t.parent()), 0);
403  // correctness of the cast relies on avoiding the root task for which:
404  // - initial value of my_divisor != 0 (protected by separate assertion)
405  // - is_stolen_task() always returns false for the root task.
406 #endif
 // (A mark_task_stolen call on the omitted original line 407 is expected here.)
408  if( !my_max_depth ) my_max_depth++;
409  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
410  return true;
411  }
412  }
413  return false;
414  }
415  depth_t max_depth() { return my_max_depth; }
 // Rebase the depth budget after consuming 'base' levels in the range pool.
416  void align_depth(depth_t base) {
417  __TBB_ASSERT(base <= my_max_depth, 0);
418  my_max_depth -= base;
419  }
 // Balancing loop: keep a pool of split ranges; on demand, donate the
 // shallowest range to a new task, otherwise run the deepest one locally.
 // NOTE(review): the range_pool declaration (original line 426) is omitted.
420  template<typename StartType, typename Range>
421  void work_balance(StartType &start, Range &range) {
422  if( !range.is_divisible() || !self().max_depth() ) {
423  start.run_body( range ); // simple partitioner goes always here
424  }
425  else { // do range pool
427  do {
428  range_pool.split_to_fill(self().max_depth()); // fill range pool
429  if( self().check_for_demand( start ) ) {
430  if( range_pool.size() > 1 ) {
431  start.offer_work( range_pool.front(), range_pool.front_depth() );
432  range_pool.pop_front();
433  continue;
434  }
435  if( range_pool.is_divisible(self().max_depth()) ) // was not enough depth to fork a task
436  continue; // note: next split_to_fill() should split range at least once
437  }
438  start.run_body( range_pool.back() );
439  range_pool.pop_back();
440  } while( !range_pool.empty() && !start.is_cancelled() );
441  }
442  }
 // Decides whether to donate work: affinity slots first, then a balancing
 // task, then reaction to peer stealing; the begin/run states optionally
 // delay this decision by a time-stamp deadline.
443  bool check_for_demand( task &t ) {
444  if( pass == my_delay ) {
445  if( self().my_divisor > 1 ) // produce affinitized tasks while they have slot in array
446  return true; // do not do my_max_depth++ here, but be sure range_pool is splittable once more
447  else if( self().my_divisor && my_max_depth ) { // make balancing task
448  self().my_divisor = 0; // once for each task; depth will be decreased in align_depth()
449  return true;
450  }
451  else if( flag_task::is_peer_stolen(t) ) {
452  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
453  return true;
454  }
455  } else if( begin == my_delay ) {
456 #ifndef __TBB_USE_MACHINE_TIME_STAMPS
457  my_delay = pass;
458 #else
459  my_dst_tsc = __TBB_time_stamp() + __TBB_task_duration();
460  my_delay = run;
461  } else if( run == my_delay ) {
462  if( __TBB_time_stamp() < my_dst_tsc ) {
463  __TBB_ASSERT(my_max_depth > 0, NULL);
464  my_max_depth--; // increase granularity since tasks seem having too small work
465  return false;
466  }
467  my_delay = pass;
468  return true;
469 #endif // __TBB_USE_MACHINE_TIME_STAMPS
470  }
471  return false;
472  }
473 };
474 
// Partition object backing tbb::auto_partitioner: adaptive binary splitting
// with dynamic grain-size control.
// NOTE(review): this listing omits the constructor signatures (original
// lines 477-478, 481-482) and the check_for_demand signature (original line
// 492) — confirm against the full header.
475 class auto_partition_type: public dynamic_grainsize_mode<adaptive_mode<auto_partition_type> > {
476 public:
 // Constructor body (signature omitted): start with extra chunks per thread.
479  my_divisor *= __TBB_INITIAL_CHUNKS;
480  }
 // Splits while divisor allows; the last allowed split spends one depth level.
483  bool is_divisible() { // part of old should_execute_range()
484  if( my_divisor > 1 ) return true;
485  if( my_divisor && my_max_depth ) { // can split the task. TODO: on-stack flag instead
486  // keep same fragmentation while splitting for the local task pool
487  my_max_depth--;
488  my_divisor = 0; // decrease max_depth once per task
489  return true;
490  } else return false;
491  }
 // check_for_demand body (signature omitted): react only to peer stealing.
493  if( flag_task::is_peer_stolen(t) ) {
494  my_max_depth += __TBB_DEMAND_DEPTH_ADD;
495  return true;
496  } else return false;
497  }
498 };
499 
// Partition object backing tbb::simple_partitioner: split the range down to
// indivisible pieces, no adaptivity.
// NOTE(review): the constructor lines (original 502-504) are omitted from
// this listing — confirm against the full header.
500 class simple_partition_type: public partition_type_base<simple_partition_type> {
501 public:
 // Simplified driver: offer one task per split until the range is indivisible,
 // then run the remainder in place.
505  template<typename StartType, typename Range>
506  void execute(StartType &start, Range &range) {
507  split_type split_obj = split(); // start.offer_work accepts split_type as reference
508  while( range.is_divisible() )
509  start.offer_work( split_obj );
510  start.run_body( range );
511  }
512 };
513 
// Partition object backing tbb::static_partitioner: linear affinity indexing
// with a fixed distribution.
// NOTE(review): all constructors (original lines 516-522) are omitted from
// this listing — confirm against the full header.
514 class static_partition_type : public linear_affinity_mode<static_partition_type> {
515 public:
523 };
524 
// Partition object backing tbb::affinity_partitioner: remembers, per slot of
// a shared affinity array, which worker ran each subrange, and replays those
// affinities on subsequent executions.
// NOTE(review): this listing omits the my_array member (original line 527),
// the constructor signatures (original lines 530-532, 539-540, 542-543), and
// the note_affinity signature (original line 554) — confirm against the full
// header.
525 class affinity_partition_type : public dynamic_grainsize_mode<linear_affinity_mode<affinity_partition_type> > {
526  static const unsigned factor_power = 4; // TODO: get a unified formula based on number of computing units
528 public:
529  static const unsigned factor = 1 << factor_power; // number of slots in affinity array per task
 // Constructor body (signature omitted): sizes the shared affinity array and
 // derives the depth budget from factor_power.
533  __TBB_ASSERT( (factor&(factor-1))==0, "factor must be power of two" );
534  ap.resize(factor);
535  my_array = ap.my_array;
536  my_max_depth = factor_power + 1;
537  __TBB_ASSERT( my_max_depth < __TBB_RANGE_POOL_CAPACITY, 0 );
538  }
 // Splitting-constructor tails (signatures omitted): share the same array.
541  , my_array(p.my_array) {}
544  , my_array(p.my_array) {}
 // Replays a remembered affinity when available, else derives one linearly.
545  void set_affinity( task &t ) {
546  if( my_divisor ) {
547  if( !my_array[my_head] )
548  // TODO: consider new ideas with my_array for both affinity and static partitioner's, then code reuse
549  t.set_affinity( affinity_id(my_head / factor + 1) );
550  else
551  t.set_affinity( my_array[my_head] );
552  }
553  }
 // note_affinity body (signature omitted): records where this subrange ran.
555  if( my_divisor )
556  my_array[my_head] = id;
557  }
558 };
559 
// Backward-compatible partition for auto and affinity partition objects
// (old parallel_scan interface): chunk-counting heuristic.
// NOTE(review): this listing omits the class head (original lines 560-561)
// and the constructor/splitting-constructor signatures (original lines
// 570-574) — confirm against the full header.
562  size_t num_chunks;
563  static const size_t VICTIM_CHUNKS = 4;
564 public:
 // Execute when only one chunk remains; being stolen bumps the chunk count
 // so the victim keeps splitting.
565  bool should_execute_range(const task &t) {
566  if( num_chunks<VICTIM_CHUNKS && t.is_stolen_task() )
567  num_chunks = VICTIM_CHUNKS;
568  return num_chunks==1;
569  }
 // Splitting-constructor body (signature omitted): halves pt's chunks,
 // rounding up, and both halves share the new count.
575  num_chunks = pt.num_chunks = (pt.num_chunks+1u) / 2u;
576  }
577 };
578 
579 } // namespace interfaceX::internal
581 } // namespace interfaceX
582 
584 
// tbb::simple_partitioner — public facade; the algorithm start classes are
// friends and pick the internal partition types via the nested typedefs.
// NOTE(review): this listing omits the class head (original line 586), the
// default constructor (588), the old partition_type constructors (596,
// 599-600), and the task_partition_type typedef (603) — confirm against the
// full header.
587 public:
589 private:
590  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
591  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
592  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
593  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_deterministic_reduce;
594  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
595  // backward compatibility
597  public:
 // Old-interface stub: simple partitioner never defers execution this way.
598  bool should_execute_range(const task& ) {return false;}
601  };
602  // new implementation just extends existing interface
604 
605  // TODO: consider to make split_type public
606  typedef interface9::internal::simple_partition_type::split_type split_type;
607 };
608 
610 
// tbb::auto_partitioner — public facade for adaptive partitioning.
// NOTE(review): this listing omits the class head (original line 613), the
// default constructor (615), and the partition_type/task_partition_type
// typedefs (623, 625) — confirm against the full header.
614 public:
616 
617 private:
618  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
619  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
620  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
621  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
622  // backward compatibility
624  // new implementation just extends existing interface
626 
627  // TODO: consider to make split_type public
628  typedef interface9::internal::auto_partition_type::split_type split_type;
629 };
630 
// tbb::static_partitioner — public facade for fixed (static) distribution.
// NOTE(review): this listing omits the class head (original line 632), the
// default constructor (634), the typedefs (642, 644), and the split_type
// typedef (647) — confirm against the full header.
633 public:
635 private:
636  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
637  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
638  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
639  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_deterministic_reduce;
640  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
641  // backward compatibility
643  // new implementation just extends existing interface
645 
646  // TODO: consider to make split_type public
648 };
649 
// tbb::affinity_partitioner — public facade carrying the affinity array
// state across invocations of the same loop.
// NOTE(review): this listing omits the class head (original line 651), the
// default constructor (653), the typedefs (661, 663), and the split_type
// typedef (666) — confirm against the full header.
652 public:
654 
655 private:
656  template<typename Range, typename Body, typename Partitioner> friend class serial::interface9::start_for;
657  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_for;
658  template<typename Range, typename Body, typename Partitioner> friend class interface9::internal::start_reduce;
659  template<typename Range, typename Body, typename Partitioner> friend class internal::start_scan;
660  // backward compatibility - for parallel_scan only
662  // new implementation just extends existing interface
664 
665  // TODO: consider to make split_type public
667 };
668 
669 } // namespace tbb
670 
671 #if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
672  #pragma warning (pop)
673 #endif // warning 4244 is back
674 #undef __TBB_INITIAL_CHUNKS
675 #undef __TBB_RANGE_POOL_CAPACITY
676 #undef __TBB_INIT_DEPTH
677 
679 #undef __TBB_partitioner_H_include_area
680 
681 #endif /* __TBB_partitioner_H */
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
#define __TBB_INIT_DEPTH
Definition: partitioner.h:33
Join task node that contains shared flag for stealing feedback.
Definition: partitioner.h:128
void work_balance(StartType &start, Range &range)
Definition: partitioner.h:235
void execute(StartType &start, Range &range)
simplified algorithm
Definition: partitioner.h:506
uint64_t machine_tsc_t
auto_partition_type(const auto_partitioner &)
Definition: partitioner.h:477
simple_partition_type(const simple_partitioner &)
Definition: partitioner.h:502
affinity_partition_type(tbb::internal::affinity_partitioner_base_v3 &ap)
Definition: partitioner.h:531
old_auto_partition_type(const affinity_partitioner &)
Definition: partitioner.h:572
static void mark_task_stolen(task &t)
Definition: partitioner.h:133
static size_t get_initial_partition_head()
Definition: partitioner.h:337
static_partition_type(static_partition_type &p, split)
Definition: partitioner.h:519
void work_balance(StartType &start, Range &range)
Definition: partitioner.h:421
tbb::aligned_space< T, MaxCapacity > my_pool
Definition: partitioner.h:159
Task type used to split the work of parallel_reduce.
Base class for user-defined tasks.
Definition: task.h:615
size_t __TBB_EXPORTED_FUNC get_initial_auto_partitioner_divisor()
Definition: task.cpp:149
linear_affinity_mode(linear_affinity_mode &src, const proportional_split &split_obj)
Definition: partitioner.h:354
~affinity_partitioner_base_v3()
Deallocates my_array.
Definition: partitioner.h:91
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp begin
size_t my_size
Number of elements in my_array.
Definition: partitioner.h:87
Task type used to split the work of parallel_deterministic_reduce.
#define __TBB_INITIAL_CHUNKS
Definition: partitioner.h:25
Provides default linear indexing of partitioner's sequence.
Definition: partitioner.h:346
size_t right() const
Definition: tbb_stddef.h:431
void note_affinity(task::affinity_id)
Definition: partitioner.h:101
adaptive_mode(adaptive_mode &src, split)
Definition: partitioner.h:271
interface9::internal::affinity_partition_type task_partition_type
Definition: partitioner.h:663
#define __TBB_time_stamp()
auto_partition_type(auto_partition_type &src, split)
Definition: partitioner.h:481
partition_type(const partition_type &, split)
Definition: partitioner.h:600
range_vector(const T &elem)
initialize via first range in pool
Definition: partitioner.h:163
dynamic_grainsize_mode(dynamic_grainsize_mode &p, split)
Definition: partitioner.h:382
Type enables transmission of splitting proportion from partitioners to range objects.
Definition: tbb_stddef.h:426
affinity_id * my_array
Array that remembers affinities of tree positions to affinity_id.
Definition: partitioner.h:85
interface9::internal::old_auto_partition_type partition_type
Definition: partitioner.h:661
void __TBB_EXPORTED_METHOD resize(unsigned factor)
Resize my_array.
Definition: task.cpp:157
static proportional_split get_split(size_t)
Definition: partitioner.h:285
proportional_mode(proportional_mode &src, const proportional_split &split_obj)
Definition: partitioner.h:308
interface9::internal::simple_partition_type::split_type split_type
Definition: partitioner.h:606
A helper class to create a proportional_split object for a given type of Range.
Definition: partitioner.h:284
A static partitioner.
Definition: partitioner.h:632
Release.
Definition: atomic.h:59
Range pool stores ranges of type T in a circular buffer with MaxCapacity.
Definition: partitioner.h:154
static_partition_type(const static_partitioner &)
Definition: partitioner.h:517
proportional_mode(proportional_mode &src, split)
Definition: partitioner.h:307
size_t do_split(adaptive_mode &src, split)
Definition: partitioner.h:273
static_partition_type(static_partition_type &p, const proportional_split &split_obj)
Definition: partitioner.h:521
void set_affinity(affinity_id id)
Set affinity for this task.
Definition: task.h:943
size_t do_split(proportional_mode &src, const proportional_split &split_obj)
Definition: partitioner.h:309
Provides proportional splitting strategy for partition objects.
Definition: partitioner.h:302
Provides default splitting strategy for partition objects.
Definition: partitioner.h:262
void const char const char int ITT_FORMAT __itt_group_sync p
#define __TBB_EXPORTED_FUNC
interface9::internal::old_auto_partition_type partition_type
Definition: partitioner.h:623
depth_t front_depth()
similarly to front(), returns depth of the first range in the pool
Definition: partitioner.h:206
partition_type(const simple_partitioner &)
Definition: partitioner.h:599
Initial task to split the work.
void spawn_or_delay(bool, task &b)
Definition: partitioner.h:104
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: partitioner.h:132
A simple partitioner.
Definition: partitioner.h:586
interface9::internal::simple_partition_type task_partition_type
Definition: partitioner.h:603
static bool is_peer_stolen(task &t)
Definition: partitioner.h:142
affinity_partition_type(affinity_partition_type &p, const proportional_split &split_obj)
Definition: partitioner.h:542
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:139
Dummy type that distinguishes splitting constructor from copy constructor.
Definition: tbb_stddef.h:416
interface9::internal::static_partition_type::split_type split_type
Definition: partitioner.h:647
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
linear_affinity_mode(linear_affinity_mode &src, split)
Definition: partitioner.h:352
interface9::internal::old_auto_partition_type partition_type
Definition: partitioner.h:642
bool is_divisible(depth_t max_depth)
Definition: partitioner.h:214
int ref_count() const
The internal reference count.
Definition: task.h:915
tbb::atomic< bool > my_child_stolen
Definition: partitioner.h:130
Enables one or the other code branches.
An auto partitioner.
Definition: partitioner.h:613
#define __TBB_override
Definition: tbb_stddef.h:240
simple_partition_type(const simple_partition_type &, split)
Definition: partitioner.h:503
Backward-compatible partition for auto and affinity partition objects.
Definition: partitioner.h:561
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
Definition: task.h:885
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
Provides backward-compatible methods for partition objects without affinity.
Definition: partitioner.h:98
void execute(StartType &start, Range &range)
Definition: partitioner.h:240
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
dynamic_grainsize_mode(dynamic_grainsize_mode &p, const proportional_split &split_obj)
Definition: partitioner.h:389
Task type used in parallel_for.
size_t left() const
Definition: tbb_stddef.h:430
An affinity partitioner.
Definition: partitioner.h:651
int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
Definition: task_arena.h:484
void split_to_fill(depth_t max_depth)
Definition: partitioner.h:174
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:865
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
The graph class.
interface9::internal::auto_partition_type::split_type split_type
Definition: partitioner.h:628
#define __TBB_DEMAND_DEPTH_ADD
Definition: partitioner.h:37
old_auto_partition_type(old_auto_partition_type &pt, split)
Definition: partitioner.h:574
affinity_partition_type(affinity_partition_type &p, split)
Definition: partitioner.h:539
Defines entry point for affinity partitioner into TBB run-time library.
Definition: partitioner.h:80
interface9::internal::auto_partition_type task_partition_type
Definition: partitioner.h:625
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:65
#define __TBB_RANGE_POOL_CAPACITY
Definition: partitioner.h:29
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:940
interface9::internal::static_partition_type task_partition_type
Definition: partitioner.h:644
interface9::internal::affinity_partition_type::split_type split_type
Definition: partitioner.h:666

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.