Home ⌂Doc Index ◂Up ▴
Intel(R) Threading Building Blocks Doxygen Documentation  version 4.2.3
task_arena.h
Go to the documentation of this file.
1 /*
2  Copyright (c) 2005-2020 Intel Corporation
3 
4  Licensed under the Apache License, Version 2.0 (the "License");
5  you may not use this file except in compliance with the License.
6  You may obtain a copy of the License at
7 
8  http://www.apache.org/licenses/LICENSE-2.0
9 
10  Unless required by applicable law or agreed to in writing, software
11  distributed under the License is distributed on an "AS IS" BASIS,
12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  See the License for the specific language governing permissions and
14  limitations under the License.
15 */
16 
17 #ifndef __TBB_task_arena_H
18 #define __TBB_task_arena_H
19 
20 #define __TBB_task_arena_H_include_area
22 
23 #include "task.h"
24 #include "tbb_exception.h"
26 #if __TBB_NUMA_SUPPORT
27 #include "info.h"
28 #endif /*__TBB_NUMA_SUPPORT*/
29 #if TBB_USE_THREADING_TOOLS
30 #include "atomic.h" // for as_atomic
31 #endif
32 #include "aligned_space.h"
33 
34 namespace tbb {
35 
36 namespace this_task_arena {
37  int max_concurrency();
38 } // namespace this_task_arena
39 
41 namespace internal {
43 
44  class arena;
46 } // namespace internal
48 
49 namespace interface7 {
50 class task_arena;
51 
53 namespace internal {
54 using namespace tbb::internal; //e.g. function_task from task.h
55 
57 public:
58  virtual void operator()() const = 0;
59  virtual ~delegate_base() {}
60 };
61 
62 // If decltype is available, the helper detects the return type of functor of specified type,
63 // otherwise it defines the void type.
64 template <typename F>
66 #if __TBB_CPP11_DECLTYPE_PRESENT && !__TBB_CPP11_DECLTYPE_OF_FUNCTION_RETURN_TYPE_BROKEN
67  typedef decltype(declval<F>()()) type;
68 #else
69  typedef void type;
70 #endif
71 };
72 
73 template<typename F, typename R>
75  F &my_func;
76  tbb::aligned_space<R> my_return_storage;
77  // The function should be called only once.
78  void operator()() const __TBB_override {
79  new (my_return_storage.begin()) R(my_func());
80  }
81 public:
82  delegated_function(F& f) : my_func(f) {}
83  // The function can be called only after operator() and only once.
84  R consume_result() const {
85  return tbb::internal::move(*(my_return_storage.begin()));
86  }
88  my_return_storage.begin()->~R();
89  }
90 };
91 
92 template<typename F>
94  F &my_func;
95  void operator()() const __TBB_override {
96  my_func();
97  }
98 public:
99  delegated_function(F& f) : my_func(f) {}
100  void consume_result() const {}
101 
102  friend class task_arena_base;
103 };
104 
106 #if __TBB_NUMA_SUPPORT
107 public:
108  // TODO: consider version approach to resolve backward compatibility potential issues.
109  struct constraints {
110  constraints(numa_node_id id = automatic, int maximal_concurrency = automatic)
111  : numa_id(id)
112  , max_concurrency(maximal_concurrency)
113  {}
114  numa_node_id numa_id;
115  int max_concurrency;
116  };
117 #endif /*__TBB_NUMA_SUPPORT*/
118 protected:
121 
122 #if __TBB_TASK_GROUP_CONTEXT
123  task_group_context *my_context;
125 #endif
126 
129 
131  unsigned my_master_slots;
132 
135 
137 
138 #if __TBB_NUMA_SUPPORT
139  numa_node_id my_numa_id;
141 
142  // Do not access my_numa_id without the following runtime check.
143  // Although my_numa_id is accessible here, it does not exist in the task_arena_base layout on the user
144  // side if the TBB_PREVIEW_NUMA_SUPPORT macro is not defined by the user. To be sure that
145  // my_numa_id exists in task_arena_base layout we check the traits.
146  // TODO: Consider increasing interface version for task_arena_base instead of this runtime check.
147  numa_node_id numa_id() {
148  return (my_version_and_traits & numa_support_flag) == numa_support_flag ? my_numa_id : automatic;
149  }
150 #endif
151 
152  enum {
153  default_flags = 0
154 #if __TBB_TASK_GROUP_CONTEXT
156  , exact_exception_flag = task_group_context::exact_exception // used to specify flag for context directly
157 #endif
158 #if __TBB_NUMA_SUPPORT
159  , numa_support_flag = 1
160 #endif
161  };
162 
163  task_arena_base(int max_concurrency, unsigned reserved_for_masters)
164  : my_arena(0)
166  , my_context(0)
167 #endif
168  , my_max_concurrency(max_concurrency)
169  , my_master_slots(reserved_for_masters)
170 #if __TBB_NUMA_SUPPORT
171  , my_version_and_traits(default_flags | numa_support_flag)
172 #else
173  , my_version_and_traits(default_flags)
174 #endif
175  , my_initialized(false)
176 #if __TBB_NUMA_SUPPORT
177  , my_numa_id(automatic)
178 #endif
179  {}
180 
#if __TBB_NUMA_SUPPORT
//! Records arena settings taken from placement constraints (NUMA node id and
//! concurrency limit) plus the reserved master slots; initialization deferred.
//! NOTE(review): the extraction of this listing dropped the
//! `#if __TBB_TASK_GROUP_CONTEXT` guard before `, my_context(0)` (its
//! matching `#endif` was left orphaned) — restored here.
task_arena_base(const constraints& constraints_, unsigned reserved_for_masters)
    : my_arena(0)
#if __TBB_TASK_GROUP_CONTEXT
    , my_context(0)
#endif
    , my_max_concurrency(constraints_.max_concurrency)
    , my_master_slots(reserved_for_masters)
    , my_version_and_traits(default_flags | numa_support_flag)
    , my_initialized(false)
    , my_numa_id(constraints_.numa_id)
{}
#endif /*__TBB_NUMA_SUPPORT*/
194 
195  void __TBB_EXPORTED_METHOD internal_initialize();
196  void __TBB_EXPORTED_METHOD internal_terminate();
197  void __TBB_EXPORTED_METHOD internal_attach();
198  void __TBB_EXPORTED_METHOD internal_enqueue( task&, intptr_t ) const;
199  void __TBB_EXPORTED_METHOD internal_execute( delegate_base& ) const;
200  void __TBB_EXPORTED_METHOD internal_wait() const;
201  static int __TBB_EXPORTED_FUNC internal_current_slot();
202  static int __TBB_EXPORTED_FUNC internal_max_concurrency( const task_arena * );
203 public:
205  static const int automatic = -1;
206  static const int not_initialized = -2;
207 
208 };
209 
210 #if __TBB_TASK_ISOLATION
211 void __TBB_EXPORTED_FUNC isolate_within_arena( delegate_base& d, intptr_t isolation = 0 );
212 
213 template<typename R, typename F>
214 R isolate_impl(F& f) {
217  return d.consume_result();
218 }
219 #endif /* __TBB_TASK_ISOLATION */
220 } // namespace internal
222 
230  friend void task::enqueue(task&, task_arena&
232  , priority_t
233 #endif
234  );
237  __TBB_ASSERT( my_arena, "task_arena initialization is incomplete" );
238 #if __TBB_TASK_GROUP_CONTEXT
239  __TBB_ASSERT( my_context, "task_arena initialization is incomplete" );
240 #endif
241 #if TBB_USE_THREADING_TOOLS
242  // Actual synchronization happens in internal_initialize & internal_attach.
243  // The race on setting my_initialized is benign, but should be hidden from Intel(R) Inspector
244  internal::as_atomic(my_initialized).fetch_and_store<release>(true);
245 #else
246  my_initialized = true;
247 #endif
248  }
249 
250  template<typename F>
253  , priority_t p = priority_t(0)
254 #endif
255  ) {
256 #if !__TBB_TASK_PRIORITY
257  intptr_t p = 0;
258 #endif
259  initialize();
260 #if __TBB_TASK_GROUP_CONTEXT
261  internal_enqueue(*new(task::allocate_root(*my_context)) internal::function_task< typename internal::strip<F>::type >(internal::forward<F>(f)), p);
262 #else
263  internal_enqueue(*new(task::allocate_root()) internal::function_task< typename internal::strip<F>::type >(internal::forward<F>(f)), p);
264 #endif /* __TBB_TASK_GROUP_CONTEXT */
265  }
266 
267  template<typename R, typename F>
268  R execute_impl(F& f) {
269  initialize();
271  internal_execute(d);
272  return d.consume_result();
273  }
274 
275 public:
277 
282  task_arena(int max_concurrency_ = automatic, unsigned reserved_for_masters = 1)
283  : task_arena_base(max_concurrency_, reserved_for_masters)
284  {}
285 
286 #if __TBB_NUMA_SUPPORT
287  task_arena(const constraints& constraints_, unsigned reserved_for_masters = 1)
289  : task_arena_base(constraints_, reserved_for_masters)
290  {}
291 
293  task_arena(const task_arena &s) // copy settings but not the reference or instance
294  : task_arena_base(constraints(s.my_numa_id, s.my_max_concurrency), s.my_master_slots)
295  {}
296 #else
297  task_arena(const task_arena &s) // copy settings but not the reference or instance
299  : task_arena_base(s.my_max_concurrency, s.my_master_slots)
300  {}
301 #endif /*__TBB_NUMA_SUPPORT*/
302 
304  struct attach {};
305 
307  explicit task_arena( attach )
308  : task_arena_base(automatic, 1) // use default settings if attach fails
309  {
310  internal_attach();
311  if( my_arena ) my_initialized = true;
312  }
313 
315  inline void initialize() {
316  if( !my_initialized ) {
317  internal_initialize();
318  mark_initialized();
319  }
320  }
321 
323  inline void initialize(int max_concurrency_, unsigned reserved_for_masters = 1) {
324  // TODO: decide if this call must be thread-safe
325  __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
326  if( !my_initialized ) {
327  my_max_concurrency = max_concurrency_;
328  my_master_slots = reserved_for_masters;
329  initialize();
330  }
331  }
332 
#if __TBB_NUMA_SUPPORT
//! Overrides NUMA placement and concurrency settings and forces initialization
//! of the internal representation. Asserts when the arena is already initialized.
inline void initialize(constraints constraints_, unsigned reserved_for_masters = 1) {
    // TODO: decide if this call must be thread-safe
    __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
    if ( my_initialized )
        return;
    my_numa_id = constraints_.numa_id;
    my_max_concurrency = constraints_.max_concurrency;
    my_master_slots = reserved_for_masters;
    initialize();
}
#endif /*__TBB_NUMA_SUPPORT*/
345 
347  inline void initialize(attach) {
348  // TODO: decide if this call must be thread-safe
349  __TBB_ASSERT(!my_arena, "Impossible to modify settings of an already initialized task_arena");
350  if( !my_initialized ) {
351  internal_attach();
352  if ( !my_arena ) internal_initialize();
353  mark_initialized();
354  }
355  }
356 
359  inline void terminate() {
360  if( my_initialized ) {
361  internal_terminate();
362  my_initialized = false;
363  }
364  }
365 
369  terminate();
370  }
371 
374  bool is_active() const { return my_initialized; }
375 
378 
#if __TBB_CPP11_RVALUE_REF_PRESENT
//! Enqueues a functor into the arena for starvation-resistant asynchronous
//! execution (rvalue-reference overload: the functor may be moved in).
template<typename F>
void enqueue( F&& f ) {
    enqueue_impl(std::forward<F>(f));
}
#else
//! Enqueues a functor into the arena for starvation-resistant asynchronous
//! execution (pre-C++11 fallback: the functor is copied).
template<typename F>
void enqueue( const F& f ) {
    enqueue_impl(f);
}
#endif
390 
391 #if __TBB_TASK_PRIORITY
392  template<typename F>
395 #if __TBB_CPP11_RVALUE_REF_PRESENT
397 #if __TBB_PREVIEW_CRITICAL_TASKS
399  || p == internal::priority_critical, "Invalid priority level value");
400 #else
401  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
402 #endif
403  enqueue_impl(std::forward<F>(f), p);
404  }
405 #else
406  __TBB_DEPRECATED void enqueue( const F& f, priority_t p ) {
407 #if __TBB_PREVIEW_CRITICAL_TASKS
409  || p == internal::priority_critical, "Invalid priority level value");
410 #else
411  __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
412 #endif
413  enqueue_impl(f,p);
414  }
415 #endif
416 #endif// __TBB_TASK_PRIORITY
417 
422  template<typename F>
425  }
426 
431  template<typename F>
434  }
435 
436 #if __TBB_EXTRA_DEBUG
437  void debug_wait_until_empty() {
441  initialize();
442  internal_wait();
443  }
444 #endif //__TBB_EXTRA_DEBUG
445 
448  inline static int current_thread_index() {
449  return internal_current_slot();
450  }
451 
453  inline int max_concurrency() const {
454  // Handle special cases inside the library
455  return (my_max_concurrency>1) ? my_max_concurrency : internal_max_concurrency(this);
456  }
457 };
458 
459 namespace this_task_arena {
460 #if __TBB_TASK_ISOLATION
461  template<typename F>
466  }
467 
470  template<typename F>
473  }
474 #endif /* __TBB_TASK_ISOLATION */
475 } // namespace this_task_arena
476 } // namespace interfaceX
477 
478 using interface7::task_arena;
479 
480 namespace this_task_arena {
481  using namespace interface7::this_task_arena;
482 
484  inline int current_thread_index() {
486  return idx == -1 ? tbb::task_arena::not_initialized : idx;
487  }
488 
490  inline int max_concurrency() {
492  }
493 } // namespace this_task_arena
494 
496 #if __TBB_TASK_PRIORITY
498 #else
499 void task::enqueue( task& t, task_arena& arena ) {
500  intptr_t p = 0;
501 #endif
502  arena.initialize();
504  arena.internal_enqueue(t, p);
505 }
506 } // namespace tbb
507 
509 #undef __TBB_task_arena_H_include_area
510 
511 #endif /* __TBB_task_arena_H */
Used to form groups of tasks.
Definition: task.h:358
unsigned my_master_slots
Reserved master slots.
Definition: task_arena.h:131
#define __TBB_TASK_GROUP_CONTEXT
Definition: tbb_config.h:541
void enqueue_impl(__TBB_FORWARDING_REF(F) f, priority_t p=priority_t(0))
Definition: task_arena.h:251
Base class for user-defined tasks.
Definition: task.h:615
void initialize()
Forces allocation of the resources for the task_arena as specified in constructor arguments.
Definition: task_arena.h:315
tbb::aligned_space< R > my_return_storage
Definition: task_arena.h:76
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322
void operator()() const __TBB_override
Definition: task_arena.h:78
task_arena(int max_concurrency_=automatic, unsigned reserved_for_masters=1)
Creates task_arena with certain concurrency limits.
Definition: task_arena.h:282
#define __TBB_DEPRECATED
Definition: tbb_config.h:636
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:836
Tag class used to indicate the "attaching" constructor.
Definition: task_arena.h:304
priority_t
Definition: task.h:317
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:663
internal::return_type_or_void< F >::type isolate(const F &f)
Definition: task_arena.h:471
Release.
Definition: atomic.h:59
intptr_t my_version_and_traits
Special settings.
Definition: task_arena.h:134
int my_max_concurrency
Concurrency level for deferred initialization.
Definition: task_arena.h:128
static const int priority_critical
Definition: task.h:313
void initialize(attach)
Attaches this instance to the current arena of the thread.
Definition: task_arena.h:347
int max_concurrency() const
Returns the maximal number of threads that can work inside the arena.
Definition: task_arena.h:453
void initialize(int max_concurrency_, unsigned reserved_for_masters=1)
Overrides concurrency level and forces initialization of internal representation.
Definition: task_arena.h:323
__TBB_DEPRECATED void enqueue(F &&f, priority_t p)
Definition: task_arena.h:396
int max_concurrency()
Returns the maximal number of threads that can work inside the arena.
Definition: task_arena.h:490
internal::arena * my_arena
NULL if not currently initialized.
Definition: task_arena.h:120
void const char const char int ITT_FORMAT __itt_group_sync p
#define __TBB_EXPORTED_FUNC
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d
task_arena(attach)
Creates an instance of task_arena attached to the current arena of the thread.
Definition: task_arena.h:307
#define __TBB_TASK_PRIORITY
Definition: tbb_config.h:571
task_arena_base(int max_concurrency, unsigned reserved_for_masters)
Definition: task_arena.h:163
#define __TBB_override
Definition: tbb_stddef.h:240
void __TBB_EXPORTED_FUNC isolate_within_arena(delegate_base &d, intptr_t isolation=0)
void const char const char int ITT_FORMAT __itt_group_sync s
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
#define __TBB_FORWARDING_REF(A)
Definition: tbb_stddef.h:517
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
static int __TBB_EXPORTED_FUNC internal_max_concurrency(const task_arena *)
int current_thread_index()
Returns the index, aka slot number, of the calling thread in its current arena.
Definition: task_arena.h:484
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type type
internal::return_type_or_void< F >::type execute(const F &f)
Definition: task_arena.h:432
The graph class.
internal::return_type_or_void< F >::type execute(F &f)
Definition: task_arena.h:423
static int current_thread_index()
Definition: task_arena.h:448
Identifiers declared inside namespace internal should never be used directly by client code.
Definition: atomic.h:65
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.